Line data Source code
1 : use crate::pageserver_client::PageserverClient;
2 : use crate::persistence::Persistence;
3 : use crate::service;
4 : use pageserver_api::controller_api::PlacementPolicy;
5 : use pageserver_api::models::{
6 : LocationConfig, LocationConfigMode, LocationConfigSecondary, TenantConfig,
7 : };
8 : use pageserver_api::shard::{ShardIdentity, TenantShardId};
9 : use pageserver_client::mgmt_api;
10 : use reqwest::StatusCode;
11 : use std::collections::HashMap;
12 : use std::sync::Arc;
13 : use std::time::{Duration, Instant};
14 : use tokio_util::sync::CancellationToken;
15 : use utils::backoff::exponential_backoff;
16 : use utils::failpoint_support;
17 : use utils::generation::Generation;
18 : use utils::id::{NodeId, TimelineId};
19 : use utils::lsn::Lsn;
20 : use utils::pausable_failpoint;
21 : use utils::sync::gate::GateGuard;
22 :
23 : use crate::compute_hook::{ComputeHook, NotifyError};
24 : use crate::node::Node;
25 : use crate::tenant_shard::{IntentState, ObservedState, ObservedStateLocation};
26 :
27 : const DEFAULT_HEATMAP_PERIOD: &str = "60s";
28 :
29 : /// Object with the lifetime of the background reconcile task that is created
30 : /// for tenants whose intent and observed states differ.
31 : pub(super) struct Reconciler {
32 : /// See [`crate::tenant_shard::TenantShard`] for the meanings of these fields: they are a snapshot
33 : /// of a tenant's state from when we spawned a reconcile task.
34 : pub(super) tenant_shard_id: TenantShardId,
35 : pub(crate) shard: ShardIdentity,
36 : pub(crate) placement_policy: PlacementPolicy,
37 : pub(crate) generation: Option<Generation>,
38 : pub(crate) intent: TargetState,
39 :
40 : /// Nodes not referenced by [`Self::intent`], from which we should try
41 : /// to detach this tenant shard.
42 : pub(crate) detach: Vec<Node>,
43 :
44 : /// Configuration specific to this reconciler
45 : pub(crate) reconciler_config: ReconcilerConfig,
46 :
47 : pub(crate) config: TenantConfig,
48 : pub(crate) observed: ObservedState,
49 :
50 : pub(crate) service_config: service::Config,
51 :
52 : /// A hook to notify the running postgres instances when we change the location
53 : /// of a tenant. Use this via [`Self::compute_notify`] to update our failure flag
54 : /// and guarantee eventual retries.
55 : pub(crate) compute_hook: Arc<ComputeHook>,
56 :
57 : /// To avoid stalling if the cloud control plane is unavailable, we may proceed
58 : /// past failures in [`ComputeHook::notify`], but we _must_ remember that we failed
59 : /// so that we can set [`crate::tenant_shard::TenantShard::pending_compute_notification`] to ensure a later retry.
60 : pub(crate) compute_notify_failure: bool,
61 :
62 : /// The Reconciler is responsible for keeping alive the semaphore units that limit how many
63 : /// reconcilers may run concurrently.
64 : pub(crate) _resource_units: ReconcileUnits,
65 :
66 : /// A means to abort background reconciliation: it is essential to
67 : /// cancel this token when something changes in the original TenantShard that
68 : /// will make this reconciliation impossible or unnecessary, for
69 : /// example when a pageserver node goes offline, or the PlacementPolicy for
70 : /// the tenant is changed.
71 : pub(crate) cancel: CancellationToken,
72 :
73 : /// Reconcilers are registered with a Gate so that during a graceful shutdown we
74 : /// can wait for all the reconcilers to respond to their cancellation tokens.
75 : pub(crate) _gate_guard: GateGuard,
76 :
77 : /// Access to persistent storage for updating generation numbers
78 : pub(crate) persistence: Arc<Persistence>,
79 : }
80 :
81 : pub(crate) struct ReconcilerConfigBuilder {
82 : config: ReconcilerConfig,
83 : }
84 :
85 : impl ReconcilerConfigBuilder {
86 0 : pub(crate) fn new() -> Self {
87 0 : Self {
88 0 : config: ReconcilerConfig::default(),
89 0 : }
90 0 : }
91 :
92 0 : pub(crate) fn secondary_warmup_timeout(self, value: Duration) -> Self {
93 0 : Self {
94 0 : config: ReconcilerConfig {
95 0 : secondary_warmup_timeout: Some(value),
96 0 : ..self.config
97 0 : },
98 0 : }
99 0 : }
100 :
101 0 : pub(crate) fn secondary_download_request_timeout(self, value: Duration) -> Self {
102 0 : Self {
103 0 : config: ReconcilerConfig {
104 0 : secondary_download_request_timeout: Some(value),
105 0 : ..self.config
106 0 : },
107 0 : }
108 0 : }
109 :
110 0 : pub(crate) fn build(self) -> ReconcilerConfig {
111 0 : self.config
112 0 : }
113 : }
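// Illustrative usage of the builder (a hypothetical call site; actual callers live elsewhere
// in the storage controller and may pass different values):
//
//     let config = ReconcilerConfigBuilder::new()
//         .secondary_warmup_timeout(Duration::from_secs(120))
//         .secondary_download_request_timeout(Duration::from_secs(10))
//         .build();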
114 :
115 : #[derive(Default, Debug, Copy, Clone)]
116 : pub(crate) struct ReconcilerConfig {
117 : // During a live migration, give up on warming up the secondary
118 : // after this timeout.
119 : secondary_warmup_timeout: Option<Duration>,
120 :
121 : // During live migrations this is the amount of time for which
122 : // the pageserver will hold our long-poll request.
123 : secondary_download_request_timeout: Option<Duration>,
124 : }
125 :
126 : impl ReconcilerConfig {
127 0 : pub(crate) fn get_secondary_warmup_timeout(&self) -> Duration {
128 : const SECONDARY_WARMUP_TIMEOUT_DEFAULT: Duration = Duration::from_secs(300);
129 0 : self.secondary_warmup_timeout
130 0 : .unwrap_or(SECONDARY_WARMUP_TIMEOUT_DEFAULT)
131 0 : }
132 :
133 0 : pub(crate) fn get_secondary_download_request_timeout(&self) -> Duration {
134 : const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT_DEFAULT: Duration = Duration::from_secs(20);
135 0 : self.secondary_download_request_timeout
136 0 : .unwrap_or(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT_DEFAULT)
137 0 : }
138 : }
139 :
140 : /// RAII resource units granted to a Reconciler, which it should keep alive until it finishes doing I/O
141 : pub(crate) struct ReconcileUnits {
142 : _sem_units: tokio::sync::OwnedSemaphorePermit,
143 : }
144 :
145 : impl ReconcileUnits {
146 0 : pub(crate) fn new(sem_units: tokio::sync::OwnedSemaphorePermit) -> Self {
147 0 : Self {
148 0 : _sem_units: sem_units,
149 0 : }
150 0 : }
151 : }
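// Sketch of how a caller might grant these units, assuming some `Arc<tokio::sync::Semaphore>`
// (`reconcile_semaphore` is an illustrative name, not necessarily the field the service uses):
//
//     let permit = reconcile_semaphore.clone().acquire_owned().await?;
//     let units = ReconcileUnits::new(permit);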
152 :
153 : /// This is a snapshot of [`crate::tenant_shard::IntentState`], but it does not do any
154 : /// reference counting for Scheduler. The IntentState is what the scheduler works with,
155 : /// and the TargetState is just the instruction for a particular Reconciler run.
156 : #[derive(Debug)]
157 : pub(crate) struct TargetState {
158 : pub(crate) attached: Option<Node>,
159 : pub(crate) secondary: Vec<Node>,
160 : }
161 :
162 : impl TargetState {
163 0 : pub(crate) fn from_intent(nodes: &HashMap<NodeId, Node>, intent: &IntentState) -> Self {
164 0 : Self {
165 0 : attached: intent.get_attached().map(|n| {
166 0 : nodes
167 0 : .get(&n)
168 0 : .expect("Intent attached referenced non-existent node")
169 0 : .clone()
170 0 : }),
171 0 : secondary: intent
172 0 : .get_secondary()
173 0 : .iter()
174 0 : .map(|n| {
175 0 : nodes
176 0 : .get(n)
177 0 : .expect("Intent secondary referenced non-existent node")
178 0 : .clone()
179 0 : })
180 0 : .collect(),
181 0 : }
182 0 : }
183 : }
184 :
185 0 : #[derive(thiserror::Error, Debug)]
186 : pub(crate) enum ReconcileError {
187 : #[error(transparent)]
188 : Remote(#[from] mgmt_api::Error),
189 : #[error(transparent)]
190 : Notify(#[from] NotifyError),
191 : #[error("Cancelled")]
192 : Cancel,
193 : #[error(transparent)]
194 : Other(#[from] anyhow::Error),
195 : }
196 :
197 : impl Reconciler {
198 0 : async fn location_config(
199 0 : &mut self,
200 0 : node: &Node,
201 0 : config: LocationConfig,
202 0 : flush_ms: Option<Duration>,
203 0 : lazy: bool,
204 0 : ) -> Result<(), ReconcileError> {
205 0 : if !node.is_available() && config.mode == LocationConfigMode::Detached {
206 : // Attempts to detach from offline nodes may be imitated without doing I/O: a node which is offline
207 : // will get fully reconciled wrt the shard's intent state when it is reactivated, irrespective of
208 : // what we put into `observed`, in [`crate::service::Service::node_activate_reconcile`]
209 0 : tracing::info!("Node {node} is unavailable during detach: proceeding anyway, it will be detached on next activation");
210 0 : self.observed.locations.remove(&node.get_id());
211 0 : return Ok(());
212 0 : }
213 0 :
214 0 : self.observed
215 0 : .locations
216 0 : .insert(node.get_id(), ObservedStateLocation { conf: None });
217 0 :
218 0 : // TODO: amend locations that use long-polling: they will hit this timeout.
219 0 : let timeout = Duration::from_secs(25);
220 0 :
221 0 : tracing::info!("location_config({node}) calling: {:?}", config);
222 0 : let tenant_shard_id = self.tenant_shard_id;
223 0 : let config_ref = &config;
224 0 : match node
225 0 : .with_client_retries(
226 0 : |client| async move {
227 0 : let config = config_ref.clone();
228 0 : client
229 0 : .location_config(tenant_shard_id, config.clone(), flush_ms, lazy)
230 0 : .await
231 0 : },
232 0 : &self.service_config.jwt_token,
233 0 : 1,
234 0 : 3,
235 0 : timeout,
236 0 : &self.cancel,
237 0 : )
238 0 : .await
239 : {
240 0 : Some(Ok(_)) => {}
241 0 : Some(Err(e)) => return Err(e.into()),
242 0 : None => return Err(ReconcileError::Cancel),
243 : };
244 0 : tracing::info!("location_config({node}) complete: {:?}", config);
245 :
246 0 : match config.mode {
247 0 : LocationConfigMode::Detached => {
248 0 : self.observed.locations.remove(&node.get_id());
249 0 : }
250 0 : _ => {
251 0 : self.observed
252 0 : .locations
253 0 : .insert(node.get_id(), ObservedStateLocation { conf: Some(config) });
254 0 : }
255 : }
256 :
257 0 : Ok(())
258 0 : }
259 :
260 0 : fn get_node(&self, node_id: &NodeId) -> Option<&Node> {
261 0 : if let Some(node) = self.intent.attached.as_ref() {
262 0 : if node.get_id() == *node_id {
263 0 : return Some(node);
264 0 : }
265 0 : }
266 :
267 0 : if let Some(node) = self
268 0 : .intent
269 0 : .secondary
270 0 : .iter()
271 0 : .find(|n| n.get_id() == *node_id)
272 : {
273 0 : return Some(node);
274 0 : }
275 :
276 0 : if let Some(node) = self.detach.iter().find(|n| n.get_id() == *node_id) {
277 0 : return Some(node);
278 0 : }
279 0 :
280 0 : None
281 0 : }
282 :
283 0 : async fn maybe_live_migrate(&mut self) -> Result<(), ReconcileError> {
284 0 : let destination = if let Some(node) = &self.intent.attached {
285 0 : match self.observed.locations.get(&node.get_id()) {
286 0 : Some(conf) => {
287 : // We will do a live migration only if the intended destination is not
288 : // currently in an attached state.
289 0 : match &conf.conf {
290 0 : Some(conf) if conf.mode == LocationConfigMode::Secondary => {
291 0 : // Fall through to do a live migration
292 0 : node
293 : }
294 : None | Some(_) => {
295 : // Attached or uncertain: don't do a live migration, proceed
296 : // with a general-case reconciliation
297 0 : tracing::info!("maybe_live_migrate: destination is None or attached");
298 0 : return Ok(());
299 : }
300 : }
301 : }
302 : None => {
303 : // Our destination is not attached: maybe live migrate if some other
304 : // node is currently attached. Fall through.
305 0 : node
306 : }
307 : }
308 : } else {
309 : // No intent to be attached
310 0 : tracing::info!("maybe_live_migrate: no attached intent");
311 0 : return Ok(());
312 : };
313 :
314 0 : let mut origin = None;
315 0 : for (node_id, state) in &self.observed.locations {
316 0 : if let Some(observed_conf) = &state.conf {
317 0 : if observed_conf.mode == LocationConfigMode::AttachedSingle {
318 : // We will only attempt live migration if the origin is not offline: this
319 : // avoids trying to do it while reconciling after responding to an HA failover.
320 0 : if let Some(node) = self.get_node(node_id) {
321 0 : if node.is_available() {
322 0 : origin = Some(node.clone());
323 0 : break;
324 0 : }
325 0 : }
326 0 : }
327 0 : }
328 : }
329 :
330 0 : let Some(origin) = origin else {
331 0 : tracing::info!("maybe_live_migrate: no origin found");
332 0 : return Ok(());
333 : };
334 :
335 : // We have an origin and a destination: proceed to do the live migration
336 0 : tracing::info!("Live migrating {}->{}", origin, destination);
337 0 : self.live_migrate(origin, destination.clone()).await?;
338 :
339 0 : Ok(())
340 0 : }
341 :
342 0 : async fn get_lsns(
343 0 : &self,
344 0 : tenant_shard_id: TenantShardId,
345 0 : node: &Node,
346 0 : ) -> anyhow::Result<HashMap<TimelineId, Lsn>> {
347 0 : let client = PageserverClient::new(
348 0 : node.get_id(),
349 0 : node.base_url(),
350 0 : self.service_config.jwt_token.as_deref(),
351 0 : );
352 :
353 0 : let timelines = client.timeline_list(&tenant_shard_id).await?;
354 0 : Ok(timelines
355 0 : .into_iter()
356 0 : .map(|t| (t.timeline_id, t.last_record_lsn))
357 0 : .collect())
358 0 : }
359 :
360 0 : async fn secondary_download(
361 0 : &self,
362 0 : tenant_shard_id: TenantShardId,
363 0 : node: &Node,
364 0 : ) -> Result<(), ReconcileError> {
365 0 : // This is not the timeout for a request, but the total amount of time we're willing to wait
366 0 : // for a secondary location to get up to date before we give up and proceed with the migration anyway.
367 0 : let total_download_timeout = self.reconciler_config.get_secondary_warmup_timeout();
368 0 :
369 0 : // This is the long-polling interval for the secondary download requests we send to the destination pageserver
370 0 : // during a migration.
371 0 : let request_download_timeout = self
372 0 : .reconciler_config
373 0 : .get_secondary_download_request_timeout();
374 0 :
375 0 : let started_at = Instant::now();
376 :
377 : loop {
378 0 : let (status, progress) = match node
379 0 : .with_client_retries(
380 0 : |client| async move {
381 0 : client
382 0 : .tenant_secondary_download(
383 0 : tenant_shard_id,
384 0 : Some(request_download_timeout),
385 0 : )
386 0 : .await
387 0 : },
388 0 : &self.service_config.jwt_token,
389 0 : 1,
390 0 : 3,
391 0 : request_download_timeout * 2,
392 0 : &self.cancel,
393 0 : )
394 0 : .await
395 : {
396 0 : None => Err(ReconcileError::Cancel),
397 0 : Some(Ok(v)) => Ok(v),
398 0 : Some(Err(e)) => {
399 0 : // Give up, but proceed: it's unfortunate if we couldn't freshen the destination before
400 0 : // attaching, but we should not let an issue with a secondary location stop us proceeding
401 0 : // with a live migration.
402 0 : tracing::warn!("Failed to prepare by downloading layers on node {node}: {e}");
403 0 : return Ok(());
404 : }
405 0 : }?;
406 :
407 0 : if status == StatusCode::OK {
408 0 : tracing::info!(
409 0 : "Downloads to {} complete: {}/{} layers, {}/{} bytes",
410 : node,
411 : progress.layers_downloaded,
412 : progress.layers_total,
413 : progress.bytes_downloaded,
414 : progress.bytes_total
415 : );
416 0 : return Ok(());
417 0 : } else if status == StatusCode::ACCEPTED {
418 0 : let total_runtime = started_at.elapsed();
419 0 : if total_runtime > total_download_timeout {
420 0 : tracing::warn!("Timed out after {}ms downloading layers to {node}. Progress so far: {}/{} layers, {}/{} bytes",
421 0 : total_runtime.as_millis(),
422 : progress.layers_downloaded,
423 : progress.layers_total,
424 : progress.bytes_downloaded,
425 : progress.bytes_total
426 : );
427 : // Give up, but proceed: an incompletely warmed destination doesn't prevent migration working,
428 : // it just makes the I/O performance for users less good.
429 0 : return Ok(());
430 0 : }
431 0 :
432 0 : // Log and proceed around the loop to retry. We don't sleep between requests, because our HTTP call
433 0 : // to the pageserver is a long-poll.
434 0 : tracing::info!(
435 0 : "Downloads to {} not yet complete: {}/{} layers, {}/{} bytes",
436 : node,
437 : progress.layers_downloaded,
438 : progress.layers_total,
439 : progress.bytes_downloaded,
440 : progress.bytes_total
441 : );
442 0 : }
443 : }
444 0 : }
445 :
446 0 : async fn await_lsn(
447 0 : &self,
448 0 : tenant_shard_id: TenantShardId,
449 0 : node: &Node,
450 0 : baseline: HashMap<TimelineId, Lsn>,
451 0 : ) -> anyhow::Result<()> {
452 : loop {
453 0 : let latest = match self.get_lsns(tenant_shard_id, node).await {
454 0 : Ok(l) => l,
455 0 : Err(e) => {
456 0 : tracing::info!("🕑 Can't get LSNs on node {node} yet, waiting ({e})",);
457 0 : tokio::time::sleep(Duration::from_millis(500)).await;
458 0 : continue;
459 : }
460 : };
461 :
462 0 : let mut any_behind: bool = false;
463 0 : for (timeline_id, baseline_lsn) in &baseline {
464 0 : match latest.get(timeline_id) {
465 0 : Some(latest_lsn) => {
466 0 : tracing::info!("🕑 LSN origin {baseline_lsn} vs destination {latest_lsn}");
467 0 : if latest_lsn < baseline_lsn {
468 0 : any_behind = true;
469 0 : }
470 : }
471 0 : None => {
472 0 : // Expected timeline isn't yet visible on migration destination.
473 0 : // (IRL we would have to account for timeline deletion, but this
474 0 : // is just a test helper)
475 0 : any_behind = true;
476 0 : }
477 : }
478 : }
479 :
480 0 : if !any_behind {
481 0 : tracing::info!("✅ LSN caught up. Proceeding...");
482 0 : break;
483 0 : } else {
484 0 : tokio::time::sleep(Duration::from_millis(500)).await;
485 0 : }
486 : }
487 :
488 0 : Ok(())
489 0 : }
490 :
491 0 : pub async fn live_migrate(
492 0 : &mut self,
493 0 : origin_ps: Node,
494 0 : dest_ps: Node,
495 0 : ) -> Result<(), ReconcileError> {
496 0 : // `maybe_live_migrate` is responsible for the sanity of the inputs
497 0 : assert!(origin_ps.get_id() != dest_ps.get_id());
498 :
499 0 : fn build_location_config(
500 0 : shard: &ShardIdentity,
501 0 : config: &TenantConfig,
502 0 : mode: LocationConfigMode,
503 0 : generation: Option<Generation>,
504 0 : secondary_conf: Option<LocationConfigSecondary>,
505 0 : ) -> LocationConfig {
506 0 : LocationConfig {
507 0 : mode,
508 0 : generation: generation.map(|g| g.into().unwrap()),
509 0 : secondary_conf,
510 0 : tenant_conf: config.clone(),
511 0 : shard_number: shard.number.0,
512 0 : shard_count: shard.count.literal(),
513 0 : shard_stripe_size: shard.stripe_size.0,
514 0 : }
515 0 : }
516 :
517 0 : tracing::info!("🔁 Switching origin node {origin_ps} to stale mode",);
518 :
519 : // FIXME: it is incorrect to use self.generation here, we should use the generation
520 : // from the ObservedState of the origin pageserver (it might be older than self.generation)
521 0 : let stale_conf = build_location_config(
522 0 : &self.shard,
523 0 : &self.config,
524 0 : LocationConfigMode::AttachedStale,
525 0 : self.generation,
526 0 : None,
527 0 : );
528 0 : self.location_config(&origin_ps, stale_conf, Some(Duration::from_secs(10)), false)
529 0 : .await?;
530 :
531 0 : let baseline_lsns = Some(self.get_lsns(self.tenant_shard_id, &origin_ps).await?);
532 :
533 : // If we are migrating to a destination that has a secondary location, warm it up first
534 0 : if let Some(destination_conf) = self.observed.locations.get(&dest_ps.get_id()) {
535 0 : if let Some(destination_conf) = &destination_conf.conf {
536 0 : if destination_conf.mode == LocationConfigMode::Secondary {
537 0 : tracing::info!("🔁 Downloading latest layers to destination node {dest_ps}",);
538 0 : self.secondary_download(self.tenant_shard_id, &dest_ps)
539 0 : .await?;
540 0 : }
541 0 : }
542 0 : }
543 :
544 : // Increment generation before attaching to new pageserver
545 : self.generation = Some(
546 0 : self.persistence
547 0 : .increment_generation(self.tenant_shard_id, dest_ps.get_id())
548 0 : .await?,
549 : );
550 :
551 0 : let dest_conf = build_location_config(
552 0 : &self.shard,
553 0 : &self.config,
554 0 : LocationConfigMode::AttachedMulti,
555 0 : self.generation,
556 0 : None,
557 0 : );
558 0 :
559 0 : tracing::info!("🔁 Attaching to pageserver {dest_ps}");
560 0 : self.location_config(&dest_ps, dest_conf, None, false)
561 0 : .await?;
562 :
563 0 : if let Some(baseline) = baseline_lsns {
564 0 : tracing::info!("🕑 Waiting for LSN to catch up...");
565 0 : self.await_lsn(self.tenant_shard_id, &dest_ps, baseline)
566 0 : .await?;
567 0 : }
568 :
569 0 : tracing::info!("🔁 Notifying compute to use pageserver {dest_ps}");
570 :
571 : // During a live migration it is unhelpful to proceed if we couldn't notify compute: if we detach
572 : // the origin without notifying compute, we will render the tenant unavailable.
573 0 : let mut notify_attempts = 0;
574 0 : while let Err(e) = self.compute_notify().await {
575 0 : match e {
576 0 : NotifyError::Fatal(_) => return Err(ReconcileError::Notify(e)),
577 0 : NotifyError::ShuttingDown => return Err(ReconcileError::Cancel),
578 : _ => {
579 0 : tracing::warn!(
580 0 : "Live migration blocked by compute notification error, retrying: {e}"
581 : );
582 : }
583 : }
584 :
585 0 : exponential_backoff(
586 0 : notify_attempts,
587 0 : // Generous waits: control plane operations which might be blocking us usually complete on the order
588 0 : // of hundreds to thousands of milliseconds, so no point busy polling.
589 0 : 1.0,
590 0 : 10.0,
591 0 : &self.cancel,
592 0 : )
593 0 : .await;
594 0 : notify_attempts += 1;
595 : }
596 :
597 0 : pausable_failpoint!("reconciler-live-migrate-post-notify");
598 :
599 : // Downgrade the origin to secondary. If the tenant's policy is PlacementPolicy::Attached(0), then
600 : // this location will be deleted in the general case reconciliation that runs after this.
601 0 : let origin_secondary_conf = build_location_config(
602 0 : &self.shard,
603 0 : &self.config,
604 0 : LocationConfigMode::Secondary,
605 0 : None,
606 0 : Some(LocationConfigSecondary { warm: true }),
607 0 : );
608 0 : self.location_config(&origin_ps, origin_secondary_conf.clone(), None, false)
609 0 : .await?;
610 : // TODO: we should also be setting the ObservedState on earlier API calls, in case we fail
611 : // partway through. In fact, all location conf API calls should be in a wrapper that sets
612 : // the observed state to None, then runs, then sets it to what we wrote.
613 0 : self.observed.locations.insert(
614 0 : origin_ps.get_id(),
615 0 : ObservedStateLocation {
616 0 : conf: Some(origin_secondary_conf),
617 0 : },
618 0 : );
619 0 :
620 0 : tracing::info!("🔁 Switching to AttachedSingle mode on node {dest_ps}",);
621 0 : let dest_final_conf = build_location_config(
622 0 : &self.shard,
623 0 : &self.config,
624 0 : LocationConfigMode::AttachedSingle,
625 0 : self.generation,
626 0 : None,
627 0 : );
628 0 : self.location_config(&dest_ps, dest_final_conf.clone(), None, false)
629 0 : .await?;
630 0 : self.observed.locations.insert(
631 0 : dest_ps.get_id(),
632 0 : ObservedStateLocation {
633 0 : conf: Some(dest_final_conf),
634 0 : },
635 0 : );
636 0 :
637 0 : tracing::info!("✅ Migration complete");
638 :
639 0 : Ok(())
640 0 : }
641 :
642 0 : async fn maybe_refresh_observed(&mut self) -> Result<(), ReconcileError> {
643 : // If the attached node has uncertain state, read it from the pageserver before proceeding: this
644 : // is important to avoid spurious generation increments.
645 : //
646 : // We don't need to do this for secondary/detach locations because it's harmless to just PUT their
647 : // location conf, whereas for attached locations it can interrupt clients if we spuriously destroy/recreate
648 : // the `Timeline` object in the pageserver.
649 :
650 0 : let Some(attached_node) = self.intent.attached.as_ref() else {
651 : // Nothing to do
652 0 : return Ok(());
653 : };
654 :
655 0 : if matches!(
656 0 : self.observed.locations.get(&attached_node.get_id()),
657 : Some(ObservedStateLocation { conf: None })
658 : ) {
659 0 : let tenant_shard_id = self.tenant_shard_id;
660 0 : let observed_conf = match attached_node
661 0 : .with_client_retries(
662 0 : |client| async move { client.get_location_config(tenant_shard_id).await },
663 0 : &self.service_config.jwt_token,
664 0 : 1,
665 0 : 1,
666 0 : Duration::from_secs(5),
667 0 : &self.cancel,
668 0 : )
669 0 : .await
670 : {
671 0 : Some(Ok(observed)) => Some(observed),
672 0 : Some(Err(mgmt_api::Error::ApiError(status, _msg)))
673 0 : if status == StatusCode::NOT_FOUND =>
674 0 : {
675 0 : None
676 : }
677 0 : Some(Err(e)) => return Err(e.into()),
678 0 : None => return Err(ReconcileError::Cancel),
679 : };
680 0 : tracing::info!("Scanned location configuration on {attached_node}: {observed_conf:?}");
681 0 : match observed_conf {
682 0 : Some(conf) => {
683 0 : // Pageserver returned a state: update it in observed. This may still be an indeterminate (None) state,
684 0 : // if internally the pageserver's TenantSlot was being mutated (e.g. some long running API call is still running)
685 0 : self.observed
686 0 : .locations
687 0 : .insert(attached_node.get_id(), ObservedStateLocation { conf });
688 0 : }
689 0 : None => {
690 0 : // Pageserver returned 404: we have confirmation that there is no state for this shard on that pageserver.
691 0 : self.observed.locations.remove(&attached_node.get_id());
692 0 : }
693 : }
694 0 : }
695 :
696 0 : Ok(())
697 0 : }
698 :
699 : /// Reconciling a tenant makes API calls to pageservers until the observed state
700 : /// matches the intended state.
701 : ///
702 : /// First we apply special case handling (e.g. for live migrations), and then a
703 : /// general case reconciliation where we walk through the intent by pageserver
704 : /// and call out to the pageserver to apply the desired state.
705 0 : pub(crate) async fn reconcile(&mut self) -> Result<(), ReconcileError> {
706 0 : // Prepare: if we have uncertain `observed` state for our would-be attachment location, then refresh it
707 0 : self.maybe_refresh_observed().await?;
708 :
709 : // Special case: live migration
710 0 : self.maybe_live_migrate().await?;
711 :
712 : // If the attached pageserver is not attached, do so now.
713 0 : if let Some(node) = self.intent.attached.as_ref() {
714 : // If we are in an attached policy, then generation must have been set (null generations
715 : // are only present when a tenant is initially loaded with a secondary policy)
716 0 : debug_assert!(self.generation.is_some());
717 0 : let Some(generation) = self.generation else {
718 0 : return Err(ReconcileError::Other(anyhow::anyhow!(
719 0 : "Attempted to attach with NULL generation"
720 0 : )));
721 : };
722 :
723 0 : let mut wanted_conf = attached_location_conf(
724 0 : generation,
725 0 : &self.shard,
726 0 : &self.config,
727 0 : &self.placement_policy,
728 0 : );
729 0 : match self.observed.locations.get(&node.get_id()) {
730 0 : Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {
731 0 : // Nothing to do
732 0 : tracing::info!(node_id=%node.get_id(), "Observed configuration already correct.")
733 : }
734 0 : observed => {
735 : // In all cases other than a matching observed configuration, we will
736 : // reconcile this location. This includes locations with different configurations, as well
737 : // as locations with unknown (None) observed state.
738 :
739 : // Incrementing generation is the safe general case, but is inefficient for changes that only
740 : // modify some details (e.g. the tenant's config).
741 0 : let increment_generation = match observed {
742 0 : None => true,
743 0 : Some(ObservedStateLocation { conf: None }) => true,
744 : Some(ObservedStateLocation {
745 0 : conf: Some(observed),
746 0 : }) => {
747 0 : let generations_match = observed.generation == wanted_conf.generation;
748 0 :
749 0 : // We may skip incrementing the generation if the location is already in the expected mode and
750 0 : // generation. In principle it would also be safe to skip from certain other modes (e.g. AttachedStale),
751 0 : // but such states are handled inside `live_migrate`, and if we see that state here we're cleaning up
752 0 : // after a restart/crash, so fall back to the universally safe path of incrementing generation.
753 0 : !generations_match || (observed.mode != wanted_conf.mode)
754 : }
755 : };
756 :
757 0 : if increment_generation {
758 0 : let generation = self
759 0 : .persistence
760 0 : .increment_generation(self.tenant_shard_id, node.get_id())
761 0 : .await?;
762 0 : self.generation = Some(generation);
763 0 : wanted_conf.generation = generation.into();
764 0 : }
765 0 : tracing::info!(node_id=%node.get_id(), "Observed configuration requires update.");
766 :
767 : // Because `node` comes from a ref to &self, clone it before calling into a &mut self
768 : // function: this could be avoided by refactoring the state mutated by location_config into
769 : // a separate type to Self.
770 0 : let node = node.clone();
771 0 :
772 0 : // Use lazy=true, because we may run many of Self concurrently, and do not want to
773 0 : // overload the pageserver with logical size calculations.
774 0 : self.location_config(&node, wanted_conf, None, true).await?;
775 0 : self.compute_notify().await?;
776 : }
777 : }
778 0 : }
779 :
780 : // Configure secondary locations: if these were previously attached this
781 : // implicitly downgrades them from attached to secondary.
782 0 : let mut changes = Vec::new();
783 0 : for node in &self.intent.secondary {
784 0 : let wanted_conf = secondary_location_conf(&self.shard, &self.config);
785 0 : match self.observed.locations.get(&node.get_id()) {
786 0 : Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {
787 0 : // Nothing to do
788 0 : tracing::info!(node_id=%node.get_id(), "Observed configuration already correct.")
789 : }
790 : _ => {
791 : // In all cases other than a matching observed configuration, we will
792 : // reconcile this location.
793 0 : tracing::info!(node_id=%node.get_id(), "Observed configuration requires update.");
794 0 : changes.push((node.clone(), wanted_conf))
795 : }
796 : }
797 : }
798 :
799 : // Detach any extraneous pageservers that are no longer referenced
800 : // by our intent.
801 0 : for node in &self.detach {
802 0 : changes.push((
803 0 : node.clone(),
804 0 : LocationConfig {
805 0 : mode: LocationConfigMode::Detached,
806 0 : generation: None,
807 0 : secondary_conf: None,
808 0 : shard_number: self.shard.number.0,
809 0 : shard_count: self.shard.count.literal(),
810 0 : shard_stripe_size: self.shard.stripe_size.0,
811 0 : tenant_conf: self.config.clone(),
812 0 : },
813 0 : ));
814 0 : }
815 :
816 0 : for (node, conf) in changes {
817 0 : if self.cancel.is_cancelled() {
818 0 : return Err(ReconcileError::Cancel);
819 0 : }
820 0 : self.location_config(&node, conf, None, false).await?;
821 : }
822 :
823 : // The condition below identifies a detach. We must have no attached intent and
824 : // must have been attached to something previously. Pass this information to
825 : // the [`ComputeHook`] such that it can update its tenant-wide state.
826 0 : if self.intent.attached.is_none() && !self.detach.is_empty() {
827 0 : // TODO: Consider notifying control plane about detaches. This would avoid situations
828 0 : // where the compute tries to start-up with a stale set of pageservers.
829 0 : self.compute_hook
830 0 : .handle_detach(self.tenant_shard_id, self.shard.stripe_size);
831 0 : }
832 :
833 0 : failpoint_support::sleep_millis_async!("sleep-on-reconcile-epilogue");
834 :
835 0 : Ok(())
836 0 : }
837 :
838 0 : pub(crate) async fn compute_notify(&mut self) -> Result<(), NotifyError> {
839 : // Whenever a particular Reconciler emits a notification, it is always notifying for the intended
840 : // destination.
841 0 : if let Some(node) = &self.intent.attached {
842 0 : let result = self
843 0 : .compute_hook
844 0 : .notify(
845 0 : self.tenant_shard_id,
846 0 : node.get_id(),
847 0 : self.shard.stripe_size,
848 0 : &self.cancel,
849 0 : )
850 0 : .await;
851 0 : if let Err(e) = &result {
852 : // It is up to the caller whether they want to drop out on this error, but they don't have to:
853 : // in general we should avoid letting unavailability of the cloud control plane stop us from
854 : // making progress.
855 0 : if !matches!(e, NotifyError::ShuttingDown) {
856 0 : tracing::warn!("Failed to notify compute of attached pageserver {node}: {e}");
857 0 : }
858 :
859 : // Set this flag so that our ReconcileResult marks the shard as needing a
860 : // compute notification retry at some point.
861 0 : self.compute_notify_failure = true;
862 0 : }
863 0 : result
864 : } else {
865 0 : Ok(())
866 : }
867 0 : }
868 : }
869 :
870 : /// We tweak the externally-set TenantConfig while configuring
871 : /// locations, using our awareness of whether secondary locations
872 : /// are in use to automatically enable/disable heatmap uploads.
873 0 : fn ha_aware_config(config: &TenantConfig, has_secondaries: bool) -> TenantConfig {
874 0 : let mut config = config.clone();
875 0 : if has_secondaries {
876 0 : if config.heatmap_period.is_none() {
877 0 : config.heatmap_period = Some(DEFAULT_HEATMAP_PERIOD.to_string());
878 0 : }
879 0 : } else {
880 0 : config.heatmap_period = None;
881 0 : }
882 0 : config
883 0 : }
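// For example, assuming the externally-set config leaves `heatmap_period` unset:
// `ha_aware_config(&config, true)` returns it as `Some("60s".to_string())` (DEFAULT_HEATMAP_PERIOD above),
// while `ha_aware_config(&config, false)` forces it to `None`, disabling heatmap uploads for a
// tenant with no secondary locations.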
884 :
885 0 : pub(crate) fn attached_location_conf(
886 0 : generation: Generation,
887 0 : shard: &ShardIdentity,
888 0 : config: &TenantConfig,
889 0 : policy: &PlacementPolicy,
890 0 : ) -> LocationConfig {
891 0 : let has_secondaries = match policy {
892 : PlacementPolicy::Attached(0) | PlacementPolicy::Detached | PlacementPolicy::Secondary => {
893 0 : false
894 : }
895 0 : PlacementPolicy::Attached(_) => true,
896 : };
897 :
898 0 : LocationConfig {
899 0 : mode: LocationConfigMode::AttachedSingle,
900 0 : generation: generation.into(),
901 0 : secondary_conf: None,
902 0 : shard_number: shard.number.0,
903 0 : shard_count: shard.count.literal(),
904 0 : shard_stripe_size: shard.stripe_size.0,
905 0 : tenant_conf: ha_aware_config(config, has_secondaries),
906 0 : }
907 0 : }
908 :
909 0 : pub(crate) fn secondary_location_conf(
910 0 : shard: &ShardIdentity,
911 0 : config: &TenantConfig,
912 0 : ) -> LocationConfig {
913 0 : LocationConfig {
914 0 : mode: LocationConfigMode::Secondary,
915 0 : generation: None,
916 0 : secondary_conf: Some(LocationConfigSecondary { warm: true }),
917 0 : shard_number: shard.number.0,
918 0 : shard_count: shard.count.literal(),
919 0 : shard_stripe_size: shard.stripe_size.0,
920 0 : tenant_conf: ha_aware_config(config, true),
921 0 : }
922 0 : }