LCOV - code coverage report
Current view: top level - storage_controller/src - reconciler.rs (source / functions)
Test: a2f0f8a80fbf1089336086fa360ce27fa555cb1a.info
Test Date: 2024-11-20 17:59:39
Coverage: Lines: 0.0 % (0 of 643 hit)    Functions: 0.0 % (0 of 53 hit)

            Line data    Source code
       1              : use crate::pageserver_client::PageserverClient;
       2              : use crate::persistence::Persistence;
       3              : use crate::service;
       4              : use pageserver_api::controller_api::PlacementPolicy;
       5              : use pageserver_api::models::{
       6              :     LocationConfig, LocationConfigMode, LocationConfigSecondary, TenantConfig,
       7              : };
       8              : use pageserver_api::shard::{ShardIdentity, TenantShardId};
       9              : use pageserver_client::mgmt_api;
      10              : use reqwest::StatusCode;
      11              : use std::collections::HashMap;
      12              : use std::sync::Arc;
      13              : use std::time::{Duration, Instant};
      14              : use tokio_util::sync::CancellationToken;
      15              : use utils::backoff::exponential_backoff;
      16              : use utils::failpoint_support;
      17              : use utils::generation::Generation;
      18              : use utils::id::{NodeId, TimelineId};
      19              : use utils::lsn::Lsn;
      20              : use utils::pausable_failpoint;
      21              : use utils::sync::gate::GateGuard;
      22              : 
      23              : use crate::compute_hook::{ComputeHook, NotifyError};
      24              : use crate::node::Node;
      25              : use crate::tenant_shard::{IntentState, ObservedState, ObservedStateDelta, ObservedStateLocation};
      26              : 
      27              : const DEFAULT_HEATMAP_PERIOD: &str = "60s";
      28              : 
      29              : /// Object with the lifetime of the background reconcile task that is created
      30              : /// for tenants which have a difference between their intent and observed states.
      31              : pub(super) struct Reconciler {
      32              :     /// See [`crate::tenant_shard::TenantShard`] for the meanings of these fields: they are a snapshot
      33              :     /// of a tenant's state from when we spawned a reconcile task.
      34              :     pub(super) tenant_shard_id: TenantShardId,
      35              :     pub(crate) shard: ShardIdentity,
      36              :     pub(crate) placement_policy: PlacementPolicy,
      37              :     pub(crate) generation: Option<Generation>,
      38              :     pub(crate) intent: TargetState,
      39              : 
      40              :     /// Nodes not referenced by [`Self::intent`], from which we should try
      41              :     /// to detach this tenant shard.
      42              :     pub(crate) detach: Vec<Node>,
      43              : 
      44              :     /// Configuration specific to this reconciler
      45              :     pub(crate) reconciler_config: ReconcilerConfig,
      46              : 
      47              :     pub(crate) config: TenantConfig,
      48              : 
      49              :     /// Observed state from the point of view of the reconciler.
      50              :     /// This gets updated as the reconciliation makes progress.
      51              :     pub(crate) observed: ObservedState,
      52              : 
      53              :     /// Snapshot of the observed state at the point when the reconciler
      54              :     /// was spawned.
      55              :     pub(crate) original_observed: ObservedState,
      56              : 
      57              :     pub(crate) service_config: service::Config,
      58              : 
      59              :     /// A hook to notify the running postgres instances when we change the location
      60              :     /// of a tenant.  Use this via [`Self::compute_notify`] to update our failure flag
      61              :     /// and guarantee eventual retries.
      62              :     pub(crate) compute_hook: Arc<ComputeHook>,
      63              : 
      64              :     /// To avoid stalling if the cloud control plane is unavailable, we may proceed
      65              :     /// past failures in [`ComputeHook::notify`], but we _must_ remember that we failed
      66              :     /// so that we can set [`crate::tenant_shard::TenantShard::pending_compute_notification`] to ensure a later retry.
      67              :     pub(crate) compute_notify_failure: bool,
      68              : 
       69              :     /// The Reconciler is responsible for keeping alive the semaphore units that limit
       70              :     /// how many reconcile tasks we will run concurrently.
      71              :     pub(crate) _resource_units: ReconcileUnits,
      72              : 
      73              :     /// A means to abort background reconciliation: it is essential to
      74              :     /// call this when something changes in the original TenantShard that
      75              :     /// will make this reconciliation impossible or unnecessary, for
      76              :     /// example when a pageserver node goes offline, or the PlacementPolicy for
      77              :     /// the tenant is changed.
      78              :     pub(crate) cancel: CancellationToken,
      79              : 
      80              :     /// Reconcilers are registered with a Gate so that during a graceful shutdown we
      81              :     /// can wait for all the reconcilers to respond to their cancellation tokens.
      82              :     pub(crate) _gate_guard: GateGuard,
      83              : 
      84              :     /// Access to persistent storage for updating generation numbers
      85              :     pub(crate) persistence: Arc<Persistence>,
      86              : }
      87              : 
      88              : pub(crate) struct ReconcilerConfigBuilder {
      89              :     config: ReconcilerConfig,
      90              : }
      91              : 
      92              : impl ReconcilerConfigBuilder {
      93            0 :     pub(crate) fn new() -> Self {
      94            0 :         Self {
      95            0 :             config: ReconcilerConfig::default(),
      96            0 :         }
      97            0 :     }
      98              : 
      99            0 :     pub(crate) fn secondary_warmup_timeout(self, value: Duration) -> Self {
     100            0 :         Self {
     101            0 :             config: ReconcilerConfig {
     102            0 :                 secondary_warmup_timeout: Some(value),
     103            0 :                 ..self.config
     104            0 :             },
     105            0 :         }
     106            0 :     }
     107              : 
     108            0 :     pub(crate) fn secondary_download_request_timeout(self, value: Duration) -> Self {
     109            0 :         Self {
     110            0 :             config: ReconcilerConfig {
     111            0 :                 secondary_download_request_timeout: Some(value),
     112            0 :                 ..self.config
     113            0 :             },
     114            0 :         }
     115            0 :     }
     116              : 
     117            0 :     pub(crate) fn build(self) -> ReconcilerConfig {
     118            0 :         self.config
     119            0 :     }
     120              : }
     121              : 
     122              : #[derive(Default, Debug, Copy, Clone)]
     123              : pub(crate) struct ReconcilerConfig {
      124              :     // During a live migration, give up on warming up the secondary
      125              :     // after this timeout.
     126              :     secondary_warmup_timeout: Option<Duration>,
     127              : 
     128              :     // During live migrations this is the amount of time that
      129              :     // the pageserver will hold our poll.
     130              :     secondary_download_request_timeout: Option<Duration>,
     131              : }
     132              : 
     133              : impl ReconcilerConfig {
     134            0 :     pub(crate) fn get_secondary_warmup_timeout(&self) -> Duration {
     135              :         const SECONDARY_WARMUP_TIMEOUT_DEFAULT: Duration = Duration::from_secs(300);
     136            0 :         self.secondary_warmup_timeout
     137            0 :             .unwrap_or(SECONDARY_WARMUP_TIMEOUT_DEFAULT)
     138            0 :     }
     139              : 
     140            0 :     pub(crate) fn get_secondary_download_request_timeout(&self) -> Duration {
     141              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT_DEFAULT: Duration = Duration::from_secs(20);
     142            0 :         self.secondary_download_request_timeout
     143            0 :             .unwrap_or(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT_DEFAULT)
     144            0 :     }
     145              : }
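
Illustrative usage of the builder and getters above (a sketch, not part of reconciler.rs): any field left unset on ReconcilerConfigBuilder falls back to the defaults returned by the getters, 300 seconds for the warmup timeout and 20 seconds per download request.

    let config = ReconcilerConfigBuilder::new()
        .secondary_warmup_timeout(Duration::from_secs(600))
        .build();
    // The overridden value is returned as-is.
    assert_eq!(config.get_secondary_warmup_timeout(), Duration::from_secs(600));
    // Not overridden, so the 20-second default applies.
    assert_eq!(
        config.get_secondary_download_request_timeout(),
        Duration::from_secs(20)
    );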
     146              : 
     147              : /// RAII resource units granted to a Reconciler, which it should keep alive until it finishes doing I/O
     148              : pub(crate) struct ReconcileUnits {
     149              :     _sem_units: tokio::sync::OwnedSemaphorePermit,
     150              : }
     151              : 
     152              : impl ReconcileUnits {
     153            0 :     pub(crate) fn new(sem_units: tokio::sync::OwnedSemaphorePermit) -> Self {
     154            0 :         Self {
     155            0 :             _sem_units: sem_units,
     156            0 :         }
     157            0 :     }
     158              : }
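
A minimal sketch of how these RAII units would be obtained (assuming the service holds an Arc<tokio::sync::Semaphore> sized to the reconcile concurrency limit; the semaphore name and limit here are hypothetical):

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    // Hypothetical limit; the real value comes from service configuration.
    let reconcile_concurrency = Arc::new(Semaphore::new(16));

    // acquire_owned() ties the permit's lifetime to the Reconciler through ReconcileUnits,
    // so the concurrency slot is released only when the Reconciler is dropped.
    let permit = reconcile_concurrency
        .clone()
        .acquire_owned()
        .await
        .expect("semaphore is never closed");
    let _resource_units = ReconcileUnits::new(permit);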
     159              : 
     160              : /// This is a snapshot of [`crate::tenant_shard::IntentState`], but it does not do any
     161              : /// reference counting for Scheduler.  The IntentState is what the scheduler works with,
     162              : /// and the TargetState is just the instruction for a particular Reconciler run.
     163              : #[derive(Debug)]
     164              : pub(crate) struct TargetState {
     165              :     pub(crate) attached: Option<Node>,
     166              :     pub(crate) secondary: Vec<Node>,
     167              : }
     168              : 
     169              : impl TargetState {
     170            0 :     pub(crate) fn from_intent(nodes: &HashMap<NodeId, Node>, intent: &IntentState) -> Self {
     171            0 :         Self {
     172            0 :             attached: intent.get_attached().map(|n| {
     173            0 :                 nodes
     174            0 :                     .get(&n)
     175            0 :                     .expect("Intent attached referenced non-existent node")
     176            0 :                     .clone()
     177            0 :             }),
     178            0 :             secondary: intent
     179            0 :                 .get_secondary()
     180            0 :                 .iter()
     181            0 :                 .map(|n| {
     182            0 :                     nodes
     183            0 :                         .get(n)
     184            0 :                         .expect("Intent secondary referenced non-existent node")
     185            0 :                         .clone()
     186            0 :                 })
     187            0 :                 .collect(),
     188            0 :         }
     189            0 :     }
     190              : }
     191              : 
     192            0 : #[derive(thiserror::Error, Debug)]
     193              : pub(crate) enum ReconcileError {
     194              :     #[error(transparent)]
     195              :     Remote(#[from] mgmt_api::Error),
     196              :     #[error(transparent)]
     197              :     Notify(#[from] NotifyError),
     198              :     #[error("Cancelled")]
     199              :     Cancel,
     200              :     #[error(transparent)]
     201              :     Other(#[from] anyhow::Error),
     202              : }
     203              : 
     204              : impl Reconciler {
     205            0 :     async fn location_config(
     206            0 :         &mut self,
     207            0 :         node: &Node,
     208            0 :         config: LocationConfig,
     209            0 :         flush_ms: Option<Duration>,
     210            0 :         lazy: bool,
     211            0 :     ) -> Result<(), ReconcileError> {
     212            0 :         if !node.is_available() && config.mode == LocationConfigMode::Detached {
      213              :             // Attempts to detach from offline nodes may be simulated without doing I/O: a node which is offline
     214              :             // will get fully reconciled wrt the shard's intent state when it is reactivated, irrespective of
     215              :             // what we put into `observed`, in [`crate::service::Service::node_activate_reconcile`]
     216            0 :             tracing::info!("Node {node} is unavailable during detach: proceeding anyway, it will be detached on next activation");
     217            0 :             self.observed.locations.remove(&node.get_id());
     218            0 :             return Ok(());
     219            0 :         }
     220            0 : 
     221            0 :         self.observed
     222            0 :             .locations
     223            0 :             .insert(node.get_id(), ObservedStateLocation { conf: None });
     224            0 : 
     225            0 :         // TODO: amend locations that use long-polling: they will hit this timeout.
     226            0 :         let timeout = Duration::from_secs(25);
     227            0 : 
     228            0 :         tracing::info!("location_config({node}) calling: {:?}", config);
     229            0 :         let tenant_shard_id = self.tenant_shard_id;
     230            0 :         let config_ref = &config;
     231            0 :         match node
     232            0 :             .with_client_retries(
     233            0 :                 |client| async move {
     234            0 :                     let config = config_ref.clone();
     235            0 :                     client
     236            0 :                         .location_config(tenant_shard_id, config.clone(), flush_ms, lazy)
     237            0 :                         .await
     238            0 :                 },
     239            0 :                 &self.service_config.jwt_token,
     240            0 :                 1,
     241            0 :                 3,
     242            0 :                 timeout,
     243            0 :                 &self.cancel,
     244            0 :             )
     245            0 :             .await
     246              :         {
     247            0 :             Some(Ok(_)) => {}
     248            0 :             Some(Err(e)) => return Err(e.into()),
     249            0 :             None => return Err(ReconcileError::Cancel),
     250              :         };
     251            0 :         tracing::info!("location_config({node}) complete: {:?}", config);
     252              : 
     253            0 :         match config.mode {
     254            0 :             LocationConfigMode::Detached => {
     255            0 :                 self.observed.locations.remove(&node.get_id());
     256            0 :             }
     257            0 :             _ => {
     258            0 :                 self.observed
     259            0 :                     .locations
     260            0 :                     .insert(node.get_id(), ObservedStateLocation { conf: Some(config) });
     261            0 :             }
     262              :         }
     263              : 
     264            0 :         Ok(())
     265            0 :     }
     266              : 
     267            0 :     fn get_node(&self, node_id: &NodeId) -> Option<&Node> {
     268            0 :         if let Some(node) = self.intent.attached.as_ref() {
     269            0 :             if node.get_id() == *node_id {
     270            0 :                 return Some(node);
     271            0 :             }
     272            0 :         }
     273              : 
     274            0 :         if let Some(node) = self
     275            0 :             .intent
     276            0 :             .secondary
     277            0 :             .iter()
     278            0 :             .find(|n| n.get_id() == *node_id)
     279              :         {
     280            0 :             return Some(node);
     281            0 :         }
     282              : 
     283            0 :         if let Some(node) = self.detach.iter().find(|n| n.get_id() == *node_id) {
     284            0 :             return Some(node);
     285            0 :         }
     286            0 : 
     287            0 :         None
     288            0 :     }
     289              : 
     290            0 :     async fn maybe_live_migrate(&mut self) -> Result<(), ReconcileError> {
     291            0 :         let destination = if let Some(node) = &self.intent.attached {
     292            0 :             match self.observed.locations.get(&node.get_id()) {
     293            0 :                 Some(conf) => {
     294              :                     // We will do a live migration only if the intended destination is not
     295              :                     // currently in an attached state.
     296            0 :                     match &conf.conf {
     297            0 :                         Some(conf) if conf.mode == LocationConfigMode::Secondary => {
     298            0 :                             // Fall through to do a live migration
     299            0 :                             node
     300              :                         }
     301              :                         None | Some(_) => {
     302              :                             // Attached or uncertain: don't do a live migration, proceed
     303              :                             // with a general-case reconciliation
     304            0 :                             tracing::info!("maybe_live_migrate: destination is None or attached");
     305            0 :                             return Ok(());
     306              :                         }
     307              :                     }
     308              :                 }
     309              :                 None => {
     310              :                     // Our destination is not attached: maybe live migrate if some other
     311              :                     // node is currently attached.  Fall through.
     312            0 :                     node
     313              :                 }
     314              :             }
     315              :         } else {
     316              :             // No intent to be attached
     317            0 :             tracing::info!("maybe_live_migrate: no attached intent");
     318            0 :             return Ok(());
     319              :         };
     320              : 
     321            0 :         let mut origin = None;
     322            0 :         for (node_id, state) in &self.observed.locations {
     323            0 :             if let Some(observed_conf) = &state.conf {
     324            0 :                 if observed_conf.mode == LocationConfigMode::AttachedSingle {
     325              :                     // We will only attempt live migration if the origin is not offline: this
     326              :                     // avoids trying to do it while reconciling after responding to an HA failover.
     327            0 :                     if let Some(node) = self.get_node(node_id) {
     328            0 :                         if node.is_available() {
     329            0 :                             origin = Some(node.clone());
     330            0 :                             break;
     331            0 :                         }
     332            0 :                     }
     333            0 :                 }
     334            0 :             }
     335              :         }
     336              : 
     337            0 :         let Some(origin) = origin else {
     338            0 :             tracing::info!("maybe_live_migrate: no origin found");
     339            0 :             return Ok(());
     340              :         };
     341              : 
     342              :         // We have an origin and a destination: proceed to do the live migration
     343            0 :         tracing::info!("Live migrating {}->{}", origin, destination);
     344            0 :         self.live_migrate(origin, destination.clone()).await?;
     345              : 
     346            0 :         Ok(())
     347            0 :     }
     348              : 
     349            0 :     async fn get_lsns(
     350            0 :         &self,
     351            0 :         tenant_shard_id: TenantShardId,
     352            0 :         node: &Node,
     353            0 :     ) -> anyhow::Result<HashMap<TimelineId, Lsn>> {
     354            0 :         let client = PageserverClient::new(
     355            0 :             node.get_id(),
     356            0 :             node.base_url(),
     357            0 :             self.service_config.jwt_token.as_deref(),
     358            0 :         );
     359              : 
     360            0 :         let timelines = client.timeline_list(&tenant_shard_id).await?;
     361            0 :         Ok(timelines
     362            0 :             .into_iter()
     363            0 :             .map(|t| (t.timeline_id, t.last_record_lsn))
     364            0 :             .collect())
     365            0 :     }
     366              : 
     367            0 :     async fn secondary_download(
     368            0 :         &self,
     369            0 :         tenant_shard_id: TenantShardId,
     370            0 :         node: &Node,
     371            0 :     ) -> Result<(), ReconcileError> {
      372            0 :         // This is not the timeout for a request, but the total amount of time we're willing to wait
      373            0 :         // for a secondary location to get up to date before we proceed with the migration anyway.
     374            0 :         let total_download_timeout = self.reconciler_config.get_secondary_warmup_timeout();
     375            0 : 
      376            0 :         // This is the long-polling interval for the secondary download requests we send to the destination pageserver
     377            0 :         // during a migration.
     378            0 :         let request_download_timeout = self
     379            0 :             .reconciler_config
     380            0 :             .get_secondary_download_request_timeout();
     381            0 : 
     382            0 :         let started_at = Instant::now();
     383              : 
     384              :         loop {
     385            0 :             let (status, progress) = match node
     386            0 :                 .with_client_retries(
     387            0 :                     |client| async move {
     388            0 :                         client
     389            0 :                             .tenant_secondary_download(
     390            0 :                                 tenant_shard_id,
     391            0 :                                 Some(request_download_timeout),
     392            0 :                             )
     393            0 :                             .await
     394            0 :                     },
     395            0 :                     &self.service_config.jwt_token,
     396            0 :                     1,
     397            0 :                     3,
     398            0 :                     request_download_timeout * 2,
     399            0 :                     &self.cancel,
     400            0 :                 )
     401            0 :                 .await
     402              :             {
     403            0 :                 None => Err(ReconcileError::Cancel),
     404            0 :                 Some(Ok(v)) => Ok(v),
     405            0 :                 Some(Err(e)) => {
     406            0 :                     // Give up, but proceed: it's unfortunate if we couldn't freshen the destination before
     407            0 :                     // attaching, but we should not let an issue with a secondary location stop us proceeding
     408            0 :                     // with a live migration.
      409            0 :                     tracing::warn!("Failed to prepare by downloading layers on node {node}: {e}");
     410            0 :                     return Ok(());
     411              :                 }
     412            0 :             }?;
     413              : 
     414            0 :             if status == StatusCode::OK {
     415            0 :                 tracing::info!(
     416            0 :                     "Downloads to {} complete: {}/{} layers, {}/{} bytes",
     417              :                     node,
     418              :                     progress.layers_downloaded,
     419              :                     progress.layers_total,
     420              :                     progress.bytes_downloaded,
     421              :                     progress.bytes_total
     422              :                 );
     423            0 :                 return Ok(());
     424            0 :             } else if status == StatusCode::ACCEPTED {
     425            0 :                 let total_runtime = started_at.elapsed();
     426            0 :                 if total_runtime > total_download_timeout {
     427            0 :                     tracing::warn!("Timed out after {}ms downloading layers to {node}.  Progress so far: {}/{} layers, {}/{} bytes",
     428            0 :                         total_runtime.as_millis(),
     429              :                         progress.layers_downloaded,
     430              :                         progress.layers_total,
     431              :                         progress.bytes_downloaded,
     432              :                         progress.bytes_total
     433              :                     );
     434              :                     // Give up, but proceed: an incompletely warmed destination doesn't prevent migration working,
     435              :                     // it just makes the I/O performance for users less good.
     436            0 :                     return Ok(());
     437            0 :                 }
     438            0 : 
     439            0 :                 // Log and proceed around the loop to retry.  We don't sleep between requests, because our HTTP call
     440            0 :                 // to the pageserver is a long-poll.
     441            0 :                 tracing::info!(
     442            0 :                     "Downloads to {} not yet complete: {}/{} layers, {}/{} bytes",
     443              :                     node,
     444              :                     progress.layers_downloaded,
     445              :                     progress.layers_total,
     446              :                     progress.bytes_downloaded,
     447              :                     progress.bytes_total
     448              :                 );
     449            0 :             }
     450              :         }
     451            0 :     }
     452              : 
     453              :     /// This function does _not_ mutate any state, so it is cancellation safe.
     454              :     ///
     455              :     /// This function does not respect [`Self::cancel`], callers should handle that.
     456            0 :     async fn await_lsn(
     457            0 :         &self,
     458            0 :         tenant_shard_id: TenantShardId,
     459            0 :         node: &Node,
     460            0 :         baseline: HashMap<TimelineId, Lsn>,
     461            0 :     ) -> anyhow::Result<()> {
     462              :         loop {
     463            0 :             let latest = match self.get_lsns(tenant_shard_id, node).await {
     464            0 :                 Ok(l) => l,
     465            0 :                 Err(e) => {
     466            0 :                     tracing::info!("🕑 Can't get LSNs on node {node} yet, waiting ({e})",);
     467            0 :                     tokio::time::sleep(Duration::from_millis(500)).await;
     468            0 :                     continue;
     469              :                 }
     470              :             };
     471              : 
     472            0 :             let mut any_behind: bool = false;
     473            0 :             for (timeline_id, baseline_lsn) in &baseline {
     474            0 :                 match latest.get(timeline_id) {
     475            0 :                     Some(latest_lsn) => {
     476            0 :                         tracing::info!(timeline_id = %timeline_id, "🕑 LSN origin {baseline_lsn} vs destination {latest_lsn}");
     477            0 :                         if latest_lsn < baseline_lsn {
     478            0 :                             any_behind = true;
     479            0 :                         }
     480              :                     }
     481            0 :                     None => {
     482            0 :                         // Timeline was deleted in the meantime - ignore it
     483            0 :                     }
     484              :                 }
     485              :             }
     486              : 
     487            0 :             if !any_behind {
     488            0 :                 tracing::info!("✅ LSN caught up.  Proceeding...");
     489            0 :                 break;
     490              :             } else {
     491            0 :                 tokio::time::sleep(Duration::from_millis(500)).await;
     492              :             }
     493              :         }
     494              : 
     495            0 :         Ok(())
     496            0 :     }
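
Because await_lsn loops until the destination has caught up and does not check Self::cancel itself, callers are expected to race it against the cancellation token, as live_migrate does further down in this file; a sketch:

    tokio::select! {
        r = self.await_lsn(self.tenant_shard_id, &dest_ps, baseline) => { r?; }
        _ = self.cancel.cancelled() => { return Err(ReconcileError::Cancel); }
    }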
     497              : 
     498            0 :     pub async fn live_migrate(
     499            0 :         &mut self,
     500            0 :         origin_ps: Node,
     501            0 :         dest_ps: Node,
     502            0 :     ) -> Result<(), ReconcileError> {
      503            0 :         // `maybe_live_migrate` is responsible for the sanity of these inputs
     504            0 :         assert!(origin_ps.get_id() != dest_ps.get_id());
     505              : 
     506            0 :         fn build_location_config(
     507            0 :             shard: &ShardIdentity,
     508            0 :             config: &TenantConfig,
     509            0 :             mode: LocationConfigMode,
     510            0 :             generation: Option<Generation>,
     511            0 :             secondary_conf: Option<LocationConfigSecondary>,
     512            0 :         ) -> LocationConfig {
     513            0 :             LocationConfig {
     514            0 :                 mode,
     515            0 :                 generation: generation.map(|g| g.into().unwrap()),
     516            0 :                 secondary_conf,
     517            0 :                 tenant_conf: config.clone(),
     518            0 :                 shard_number: shard.number.0,
     519            0 :                 shard_count: shard.count.literal(),
     520            0 :                 shard_stripe_size: shard.stripe_size.0,
     521            0 :             }
     522            0 :         }
     523              : 
     524            0 :         tracing::info!("🔁 Switching origin node {origin_ps} to stale mode",);
     525              : 
     526              :         // FIXME: it is incorrect to use self.generation here, we should use the generation
     527              :         // from the ObservedState of the origin pageserver (it might be older than self.generation)
     528            0 :         let stale_conf = build_location_config(
     529            0 :             &self.shard,
     530            0 :             &self.config,
     531            0 :             LocationConfigMode::AttachedStale,
     532            0 :             self.generation,
     533            0 :             None,
     534            0 :         );
     535            0 :         self.location_config(&origin_ps, stale_conf, Some(Duration::from_secs(10)), false)
     536            0 :             .await?;
     537              : 
     538            0 :         let baseline_lsns = Some(self.get_lsns(self.tenant_shard_id, &origin_ps).await?);
     539              : 
     540              :         // If we are migrating to a destination that has a secondary location, warm it up first
     541            0 :         if let Some(destination_conf) = self.observed.locations.get(&dest_ps.get_id()) {
     542            0 :             if let Some(destination_conf) = &destination_conf.conf {
     543            0 :                 if destination_conf.mode == LocationConfigMode::Secondary {
     544            0 :                     tracing::info!("🔁 Downloading latest layers to destination node {dest_ps}",);
     545            0 :                     self.secondary_download(self.tenant_shard_id, &dest_ps)
     546            0 :                         .await?;
     547            0 :                 }
     548            0 :             }
     549            0 :         }
     550              : 
     551            0 :         pausable_failpoint!("reconciler-live-migrate-pre-generation-inc");
     552              : 
     553              :         // Increment generation before attaching to new pageserver
     554              :         self.generation = Some(
     555            0 :             self.persistence
     556            0 :                 .increment_generation(self.tenant_shard_id, dest_ps.get_id())
     557            0 :                 .await?,
     558              :         );
     559              : 
     560            0 :         let dest_conf = build_location_config(
     561            0 :             &self.shard,
     562            0 :             &self.config,
     563            0 :             LocationConfigMode::AttachedMulti,
     564            0 :             self.generation,
     565            0 :             None,
     566            0 :         );
     567            0 : 
     568            0 :         tracing::info!("🔁 Attaching to pageserver {dest_ps}");
     569            0 :         self.location_config(&dest_ps, dest_conf, None, false)
     570            0 :             .await?;
     571              : 
     572            0 :         pausable_failpoint!("reconciler-live-migrate-pre-await-lsn");
     573              : 
     574            0 :         if let Some(baseline) = baseline_lsns {
     575            0 :             tracing::info!("🕑 Waiting for LSN to catch up...");
     576            0 :             tokio::select! {
     577            0 :                 r = self.await_lsn(self.tenant_shard_id, &dest_ps, baseline) => {r?;}
     578            0 :                 _ = self.cancel.cancelled() => {return Err(ReconcileError::Cancel)}
     579              :             };
     580            0 :         }
     581              : 
     582            0 :         tracing::info!("🔁 Notifying compute to use pageserver {dest_ps}");
     583              : 
     584              :         // During a live migration it is unhelpful to proceed if we couldn't notify compute: if we detach
     585              :         // the origin without notifying compute, we will render the tenant unavailable.
     586            0 :         self.compute_notify_blocking(&origin_ps).await?;
     587            0 :         pausable_failpoint!("reconciler-live-migrate-post-notify");
     588              : 
     589              :         // Downgrade the origin to secondary.  If the tenant's policy is PlacementPolicy::Attached(0), then
     590              :         // this location will be deleted in the general case reconciliation that runs after this.
     591            0 :         let origin_secondary_conf = build_location_config(
     592            0 :             &self.shard,
     593            0 :             &self.config,
     594            0 :             LocationConfigMode::Secondary,
     595            0 :             None,
     596            0 :             Some(LocationConfigSecondary { warm: true }),
     597            0 :         );
     598            0 :         self.location_config(&origin_ps, origin_secondary_conf.clone(), None, false)
     599            0 :             .await?;
     600              :         // TODO: we should also be setting the ObservedState on earlier API calls, in case we fail
     601              :         // partway through.  In fact, all location conf API calls should be in a wrapper that sets
     602              :         // the observed state to None, then runs, then sets it to what we wrote.
     603            0 :         self.observed.locations.insert(
     604            0 :             origin_ps.get_id(),
     605            0 :             ObservedStateLocation {
     606            0 :                 conf: Some(origin_secondary_conf),
     607            0 :             },
     608            0 :         );
     609            0 : 
     610            0 :         pausable_failpoint!("reconciler-live-migrate-post-detach");
     611              : 
     612            0 :         tracing::info!("🔁 Switching to AttachedSingle mode on node {dest_ps}",);
     613            0 :         let dest_final_conf = build_location_config(
     614            0 :             &self.shard,
     615            0 :             &self.config,
     616            0 :             LocationConfigMode::AttachedSingle,
     617            0 :             self.generation,
     618            0 :             None,
     619            0 :         );
     620            0 :         self.location_config(&dest_ps, dest_final_conf.clone(), None, false)
     621            0 :             .await?;
     622            0 :         self.observed.locations.insert(
     623            0 :             dest_ps.get_id(),
     624            0 :             ObservedStateLocation {
     625            0 :                 conf: Some(dest_final_conf),
     626            0 :             },
     627            0 :         );
     628            0 : 
     629            0 :         tracing::info!("✅ Migration complete");
     630              : 
     631            0 :         Ok(())
     632            0 :     }
     633              : 
     634            0 :     async fn maybe_refresh_observed(&mut self) -> Result<(), ReconcileError> {
     635              :         // If the attached node has uncertain state, read it from the pageserver before proceeding: this
     636              :         // is important to avoid spurious generation increments.
     637              :         //
     638              :         // We don't need to do this for secondary/detach locations because it's harmless to just PUT their
     639              :         // location conf, whereas for attached locations it can interrupt clients if we spuriously destroy/recreate
     640              :         // the `Timeline` object in the pageserver.
     641              : 
     642            0 :         let Some(attached_node) = self.intent.attached.as_ref() else {
     643              :             // Nothing to do
     644            0 :             return Ok(());
     645              :         };
     646              : 
     647            0 :         if matches!(
     648            0 :             self.observed.locations.get(&attached_node.get_id()),
     649              :             Some(ObservedStateLocation { conf: None })
     650              :         ) {
     651            0 :             let tenant_shard_id = self.tenant_shard_id;
     652            0 :             let observed_conf = match attached_node
     653            0 :                 .with_client_retries(
     654            0 :                     |client| async move { client.get_location_config(tenant_shard_id).await },
     655            0 :                     &self.service_config.jwt_token,
     656            0 :                     1,
     657            0 :                     1,
     658            0 :                     Duration::from_secs(5),
     659            0 :                     &self.cancel,
     660            0 :                 )
     661            0 :                 .await
     662              :             {
     663            0 :                 Some(Ok(observed)) => Some(observed),
     664            0 :                 Some(Err(mgmt_api::Error::ApiError(status, _msg)))
     665            0 :                     if status == StatusCode::NOT_FOUND =>
     666            0 :                 {
     667            0 :                     None
     668              :                 }
     669            0 :                 Some(Err(e)) => return Err(e.into()),
     670            0 :                 None => return Err(ReconcileError::Cancel),
     671              :             };
     672            0 :             tracing::info!("Scanned location configuration on {attached_node}: {observed_conf:?}");
     673            0 :             match observed_conf {
     674            0 :                 Some(conf) => {
     675            0 :                     // Pageserver returned a state: update it in observed.  This may still be an indeterminate (None) state,
     676            0 :                     // if internally the pageserver's TenantSlot was being mutated (e.g. some long running API call is still running)
     677            0 :                     self.observed
     678            0 :                         .locations
     679            0 :                         .insert(attached_node.get_id(), ObservedStateLocation { conf });
     680            0 :                 }
     681            0 :                 None => {
     682            0 :                     // Pageserver returned 404: we have confirmation that there is no state for this shard on that pageserver.
     683            0 :                     self.observed.locations.remove(&attached_node.get_id());
     684            0 :                 }
     685              :             }
     686            0 :         }
     687              : 
     688            0 :         Ok(())
     689            0 :     }
     690              : 
     691              :     /// Reconciling a tenant makes API calls to pageservers until the observed state
     692              :     /// matches the intended state.
     693              :     ///
     694              :     /// First we apply special case handling (e.g. for live migrations), and then a
     695              :     /// general case reconciliation where we walk through the intent by pageserver
     696              :     /// and call out to the pageserver to apply the desired state.
     697            0 :     pub(crate) async fn reconcile(&mut self) -> Result<(), ReconcileError> {
      698            0 :         // Prepare: if we have uncertain `observed` state for our would-be attachment location, then refresh it
     699            0 :         self.maybe_refresh_observed().await?;
     700              : 
     701              :         // Special case: live migration
     702            0 :         self.maybe_live_migrate().await?;
     703              : 
      704              :         // If the intended attached pageserver is not yet attached, attach it now.
     705            0 :         if let Some(node) = self.intent.attached.as_ref() {
     706              :             // If we are in an attached policy, then generation must have been set (null generations
     707              :             // are only present when a tenant is initially loaded with a secondary policy)
     708            0 :             debug_assert!(self.generation.is_some());
     709            0 :             let Some(generation) = self.generation else {
     710            0 :                 return Err(ReconcileError::Other(anyhow::anyhow!(
     711            0 :                     "Attempted to attach with NULL generation"
     712            0 :                 )));
     713              :             };
     714              : 
     715            0 :             let mut wanted_conf = attached_location_conf(
     716            0 :                 generation,
     717            0 :                 &self.shard,
     718            0 :                 &self.config,
     719            0 :                 &self.placement_policy,
     720            0 :             );
     721            0 :             match self.observed.locations.get(&node.get_id()) {
     722            0 :                 Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {
     723            0 :                     // Nothing to do
     724            0 :                     tracing::info!(node_id=%node.get_id(), "Observed configuration already correct.")
     725              :                 }
     726            0 :                 observed => {
     727              :                     // In all cases other than a matching observed configuration, we will
     728              :                     // reconcile this location.  This includes locations with different configurations, as well
     729              :                     // as locations with unknown (None) observed state.
     730              : 
     731              :                     // Incrementing generation is the safe general case, but is inefficient for changes that only
     732              :                     // modify some details (e.g. the tenant's config).
     733            0 :                     let increment_generation = match observed {
     734            0 :                         None => true,
     735            0 :                         Some(ObservedStateLocation { conf: None }) => true,
     736              :                         Some(ObservedStateLocation {
     737            0 :                             conf: Some(observed),
     738            0 :                         }) => {
     739            0 :                             let generations_match = observed.generation == wanted_conf.generation;
     740            0 : 
     741            0 :                             // We may skip incrementing the generation if the location is already in the expected mode and
     742            0 :                             // generation.  In principle it would also be safe to skip from certain other modes (e.g. AttachedStale),
     743            0 :                             // but such states are handled inside `live_migrate`, and if we see that state here we're cleaning up
     744            0 :                             // after a restart/crash, so fall back to the universally safe path of incrementing generation.
     745            0 :                             !generations_match || (observed.mode != wanted_conf.mode)
     746              :                         }
     747              :                     };
     748              : 
     749            0 :                     if increment_generation {
     750            0 :                         let generation = self
     751            0 :                             .persistence
     752            0 :                             .increment_generation(self.tenant_shard_id, node.get_id())
     753            0 :                             .await?;
     754            0 :                         self.generation = Some(generation);
     755            0 :                         wanted_conf.generation = generation.into();
     756            0 :                     }
     757            0 :                     tracing::info!(node_id=%node.get_id(), "Observed configuration requires update.");
     758              : 
     759              :                     // Because `node` comes from a ref to &self, clone it before calling into a &mut self
     760              :                     // function: this could be avoided by refactoring the state mutated by location_config into
     761              :                     // a separate type to Self.
     762            0 :                     let node = node.clone();
     763            0 : 
     764            0 :                     // Use lazy=true, because we may run many of Self concurrently, and do not want to
     765            0 :                     // overload the pageserver with logical size calculations.
     766            0 :                     self.location_config(&node, wanted_conf, None, true).await?;
     767            0 :                     self.compute_notify().await?;
     768              :                 }
     769              :             }
     770            0 :         }
     771              : 
     772              :         // Configure secondary locations: if these were previously attached this
     773              :         // implicitly downgrades them from attached to secondary.
     774            0 :         let mut changes = Vec::new();
     775            0 :         for node in &self.intent.secondary {
     776            0 :             let wanted_conf = secondary_location_conf(&self.shard, &self.config);
     777            0 :             match self.observed.locations.get(&node.get_id()) {
     778            0 :                 Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {
     779            0 :                     // Nothing to do
     780            0 :                     tracing::info!(node_id=%node.get_id(), "Observed configuration already correct.")
     781              :                 }
     782              :                 _ => {
     783              :                     // In all cases other than a matching observed configuration, we will
     784              :                     // reconcile this location.
     785            0 :                     tracing::info!(node_id=%node.get_id(), "Observed configuration requires update.");
     786            0 :                     changes.push((node.clone(), wanted_conf))
     787              :                 }
     788              :             }
     789              :         }
     790              : 
     791              :         // Detach any extraneous pageservers that are no longer referenced
     792              :         // by our intent.
     793            0 :         for node in &self.detach {
     794            0 :             changes.push((
     795            0 :                 node.clone(),
     796            0 :                 LocationConfig {
     797            0 :                     mode: LocationConfigMode::Detached,
     798            0 :                     generation: None,
     799            0 :                     secondary_conf: None,
     800            0 :                     shard_number: self.shard.number.0,
     801            0 :                     shard_count: self.shard.count.literal(),
     802            0 :                     shard_stripe_size: self.shard.stripe_size.0,
     803            0 :                     tenant_conf: self.config.clone(),
     804            0 :                 },
     805            0 :             ));
     806            0 :         }
     807              : 
     808            0 :         for (node, conf) in changes {
     809            0 :             if self.cancel.is_cancelled() {
     810            0 :                 return Err(ReconcileError::Cancel);
     811            0 :             }
     812            0 :             self.location_config(&node, conf, None, false).await?;
     813              :         }
     814              : 
     815              :         // The condition below identifies a detach. We must have no attached intent and
     816              :         // must have been attached to something previously. Pass this information to
     817              :         // the [`ComputeHook`] such that it can update its tenant-wide state.
     818            0 :         if self.intent.attached.is_none() && !self.detach.is_empty() {
     819            0 :             // TODO: Consider notifying control plane about detaches. This would avoid situations
     820            0 :             // where the compute tries to start-up with a stale set of pageservers.
     821            0 :             self.compute_hook
     822            0 :                 .handle_detach(self.tenant_shard_id, self.shard.stripe_size);
     823            0 :         }
     824              : 
     825            0 :         failpoint_support::sleep_millis_async!("sleep-on-reconcile-epilogue");
     826              : 
     827            0 :         Ok(())
     828            0 :     }
     829              : 
     830            0 :     pub(crate) async fn compute_notify(&mut self) -> Result<(), NotifyError> {
     831              :         // Whenever a particular Reconciler emits a notification, it is always notifying for the intended
     832              :         // destination.
     833            0 :         if let Some(node) = &self.intent.attached {
     834            0 :             let result = self
     835            0 :                 .compute_hook
     836            0 :                 .notify(
     837            0 :                     self.tenant_shard_id,
     838            0 :                     node.get_id(),
     839            0 :                     self.shard.stripe_size,
     840            0 :                     &self.cancel,
     841            0 :                 )
     842            0 :                 .await;
     843            0 :             if let Err(e) = &result {
     844              :                 // It is up to the caller whether they want to drop out on this error, but they don't have to:
     845              :                 // in general we should avoid letting unavailability of the cloud control plane stop us from
     846              :                 // making progress.
     847            0 :                 if !matches!(e, NotifyError::ShuttingDown) {
     848            0 :                     tracing::warn!("Failed to notify compute of attached pageserver {node}: {e}");
     849            0 :                 }
     850              : 
     851              :                 // Set this flag so that in our ReconcileResult we will set the flag on the shard that it
     852              :                 // needs to retry at some point.
     853            0 :                 self.compute_notify_failure = true;
     854            0 :             }
     855            0 :             result
     856              :         } else {
     857            0 :             Ok(())
     858              :         }
     859            0 :     }
     860              : 
     861              :     /// Compare the observed state snapshot from when the reconcile was created
     862              :     /// with the final observed state in order to generate observed state deltas.
     863            0 :     pub(crate) fn observed_deltas(&self) -> Vec<ObservedStateDelta> {
     864            0 :         let mut deltas = Vec::default();
     865              : 
     866            0 :         for (node_id, location) in &self.observed.locations {
     867            0 :             let previous_location = self.original_observed.locations.get(node_id);
     868            0 :             let do_upsert = match previous_location {
     869              :                 // Location config changed for node
     870            0 :                 Some(prev) if location.conf != prev.conf => true,
     871              :                 // New location config for node
     872            0 :                 None => true,
     873              :                 // Location config has not changed for node
     874            0 :                 _ => false,
     875              :             };
     876              : 
     877            0 :             if do_upsert {
     878            0 :                 deltas.push(ObservedStateDelta::Upsert(Box::new((
     879            0 :                     *node_id,
     880            0 :                     location.clone(),
     881            0 :                 ))));
     882            0 :             }
     883              :         }
     884              : 
     885            0 :         for node_id in self.original_observed.locations.keys() {
     886            0 :             if !self.observed.locations.contains_key(node_id) {
     887            0 :                 deltas.push(ObservedStateDelta::Delete(*node_id));
     888            0 :             }
     889              :         }
     890              : 
     891            0 :         deltas
     892            0 :     }
     893              : 
     894              :     /// Keep trying to notify the compute indefinitely, only dropping out if:
     895              :     /// - the node `origin` becomes unavailable -> Ok(())
     896              :     /// - the node `origin` no longer has our tenant shard attached -> Ok(())
      897              :     /// - our cancellation token fires -> Err(ReconcileError::Cancel)
     898              :     ///
     899              :     /// This is used during live migration, where we do not wish to detach
     900              :     /// an origin location until the compute definitely knows about the new
     901              :     /// location.
     902              :     ///
     903              :     /// In cases where the origin node becomes unavailable, we return success, indicating
     904              :     /// to the caller that they should continue irrespective of whether the compute was notified,
     905              :     /// because the origin node is unusable anyway.  Notification will be retried later via the
     906              :     /// [`Self::compute_notify_failure`] flag.
     907            0 :     async fn compute_notify_blocking(&mut self, origin: &Node) -> Result<(), ReconcileError> {
     908            0 :         let mut notify_attempts = 0;
     909            0 :         while let Err(e) = self.compute_notify().await {
     910            0 :             match e {
     911            0 :                 NotifyError::Fatal(_) => return Err(ReconcileError::Notify(e)),
     912            0 :                 NotifyError::ShuttingDown => return Err(ReconcileError::Cancel),
     913              :                 _ => {
     914            0 :                     tracing::warn!(
     915            0 :                         "Live migration blocked by compute notification error, retrying: {e}"
     916              :                     );
     917              :                 }
     918              :             }
     919              : 
     920              :             // Did the origin pageserver become unavailable?
     921            0 :             if !origin.is_available() {
     922            0 :                 tracing::info!("Giving up on compute notification because {origin} is unavailable");
     923            0 :                 break;
     924            0 :             }
     925            0 : 
     926            0 :             // Does the origin pageserver still host the shard we are interested in?  We should only
     927            0 :             // continue waiting for compute notification to be acked if the old location is still usable.
     928            0 :             let tenant_shard_id = self.tenant_shard_id;
     929            0 :             match origin
     930            0 :                 .with_client_retries(
     931            0 :                     |client| async move { client.get_location_config(tenant_shard_id).await },
     932            0 :                     &self.service_config.jwt_token,
     933            0 :                     1,
     934            0 :                     3,
     935            0 :                     Duration::from_secs(5),
     936            0 :                     &self.cancel,
     937            0 :                 )
     938            0 :                 .await
     939              :             {
     940            0 :                 Some(Ok(Some(location_conf))) => {
     941            0 :                     if matches!(
     942            0 :                         location_conf.mode,
     943              :                         LocationConfigMode::AttachedMulti
     944              :                             | LocationConfigMode::AttachedSingle
     945              :                             | LocationConfigMode::AttachedStale
     946              :                     ) {
     947            0 :                         tracing::debug!(
     948            0 :                             "Still attached to {origin}, will wait & retry compute notification"
     949              :                         );
     950              :                     } else {
     951            0 :                         tracing::info!(
     952            0 :                             "Giving up on compute notification because {origin} is in state {:?}",
     953              :                             location_conf.mode
     954              :                         );
     955            0 :                         return Ok(());
     956              :                     }
     957              :                     // Fall through
     958              :                 }
     959              :                 Some(Ok(None)) => {
     960            0 :                     tracing::info!(
     961            0 :                         "No longer attached to {origin}, giving up on compute notification"
     962              :                     );
     963            0 :                     return Ok(());
     964              :                 }
     965            0 :                 Some(Err(e)) => {
     966            0 :                     match e {
     967              :                         mgmt_api::Error::Cancelled => {
     968            0 :                             tracing::info!(
     969            0 :                                 "Giving up on compute notification because {origin} is unavailable"
     970              :                             );
     971            0 :                             return Ok(());
     972              :                         }
     973              :                         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _) => {
     974            0 :                             tracing::info!(
     975            0 :                                 "No longer attached to {origin}, giving up on compute notification"
     976              :                             );
     977            0 :                             return Ok(());
     978              :                         }
     979            0 :                         e => {
     980            0 :                             // Other API errors are unexpected here.
     981            0 :                             tracing::warn!("Unexpected error checking location on {origin}: {e}");
     982              : 
     983              :                             // Fall through, we will retry compute notification.
     984              :                         }
     985              :                     }
     986              :                 }
     987            0 :                 None => return Err(ReconcileError::Cancel),
     988              :             };
     989              : 
     990            0 :             exponential_backoff(
     991            0 :                 notify_attempts,
     992            0 :                 // Generous waits: control plane operations which might be blocking us usually complete on the order
      993            0 :                 // of hundreds to thousands of milliseconds, so there is no point in busy polling.
     994            0 :                 1.0,
     995            0 :                 10.0,
     996            0 :                 &self.cancel,
     997            0 :             )
     998            0 :             .await;
     999            0 :             notify_attempts += 1;
    1000              :         }
    1001              : 
    1002            0 :         Ok(())
    1003            0 :     }
    1004              : }
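
`compute_notify_blocking` combines three ingredients: retry a fallible notification, probe whether waiting is still worthwhile, and back off between attempts. The standalone sketch below shows the same shape with `tokio::time::sleep` standing in for `utils::backoff::exponential_backoff`; all names in it are illustrative assumptions, not part of this file.

    async fn retry_until_acked_or_pointless<Op, OpFut, Probe, ProbeFut>(
        mut op: Op,
        mut still_worth_waiting: Probe,
    ) where
        Op: FnMut() -> OpFut,
        OpFut: std::future::Future<Output = Result<(), String>>,
        Probe: FnMut() -> ProbeFut,
        ProbeFut: std::future::Future<Output = bool>,
    {
        let mut attempt: i32 = 0;
        while let Err(e) = op().await {
            tracing::warn!("operation failed, retrying: {e}");
            // Stop waiting if the probe says progress no longer depends on this ack,
            // e.g. the origin became unavailable or no longer hosts the shard.
            if !still_worth_waiting().await {
                break;
            }
            // Capped exponential backoff: 1s, 2s, 4s, ... up to 10s between attempts.
            let delay = std::time::Duration::from_secs_f64(f64::min(10.0, 2f64.powi(attempt)));
            tokio::time::sleep(delay).await;
            attempt += 1;
        }
    }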
    1005              : 
    1006              : /// We tweak the externally-set TenantConfig while configuring
    1007              : /// locations, using our awareness of whether secondary locations
    1008              : /// are in use to automatically enable/disable heatmap uploads.
    1009            0 : fn ha_aware_config(config: &TenantConfig, has_secondaries: bool) -> TenantConfig {
    1010            0 :     let mut config = config.clone();
    1011            0 :     if has_secondaries {
    1012            0 :         if config.heatmap_period.is_none() {
    1013            0 :             config.heatmap_period = Some(DEFAULT_HEATMAP_PERIOD.to_string());
    1014            0 :         }
    1015            0 :     } else {
    1016            0 :         config.heatmap_period = None;
    1017            0 :     }
    1018            0 :     config
    1019            0 : }
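
A small illustrative check of the behaviour described above (hypothetical, assuming `base` is a TenantConfig whose heatmap_period was left unset by the caller):

    fn ha_aware_config_example(base: &TenantConfig) {
        // With secondaries, an unset heatmap period is defaulted so the attached
        // location starts uploading heatmaps for secondaries to download.
        assert_eq!(
            ha_aware_config(base, true).heatmap_period.as_deref(),
            Some(DEFAULT_HEATMAP_PERIOD)
        );
        // Without secondaries, heatmap uploads are switched off entirely.
        assert_eq!(ha_aware_config(base, false).heatmap_period, None);
    }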
    1020              : 
    1021            0 : pub(crate) fn attached_location_conf(
    1022            0 :     generation: Generation,
    1023            0 :     shard: &ShardIdentity,
    1024            0 :     config: &TenantConfig,
    1025            0 :     policy: &PlacementPolicy,
    1026            0 : ) -> LocationConfig {
    1027            0 :     let has_secondaries = match policy {
    1028              :         PlacementPolicy::Attached(0) | PlacementPolicy::Detached | PlacementPolicy::Secondary => {
    1029            0 :             false
    1030              :         }
    1031            0 :         PlacementPolicy::Attached(_) => true,
    1032              :     };
    1033              : 
    1034            0 :     LocationConfig {
    1035            0 :         mode: LocationConfigMode::AttachedSingle,
    1036            0 :         generation: generation.into(),
    1037            0 :         secondary_conf: None,
    1038            0 :         shard_number: shard.number.0,
    1039            0 :         shard_count: shard.count.literal(),
    1040            0 :         shard_stripe_size: shard.stripe_size.0,
    1041            0 :         tenant_conf: ha_aware_config(config, has_secondaries),
    1042            0 :     }
    1043            0 : }
    1044              : 
    1045            0 : pub(crate) fn secondary_location_conf(
    1046            0 :     shard: &ShardIdentity,
    1047            0 :     config: &TenantConfig,
    1048            0 : ) -> LocationConfig {
    1049            0 :     LocationConfig {
    1050            0 :         mode: LocationConfigMode::Secondary,
    1051            0 :         generation: None,
    1052            0 :         secondary_conf: Some(LocationConfigSecondary { warm: true }),
    1053            0 :         shard_number: shard.number.0,
    1054            0 :         shard_count: shard.count.literal(),
    1055            0 :         shard_stripe_size: shard.stripe_size.0,
    1056            0 :         tenant_conf: ha_aware_config(config, true),
    1057            0 :     }
    1058            0 : }
        

Generated by: LCOV version 2.1-beta