LCOV - code coverage report
Current view: top level - pageserver/src/tenant - mgr.rs (source / functions)
Test: aca806cab4756d7eb6a304846130f4a73a5d5393.info
Test Date: 2025-04-24 20:31:15

            Coverage     Hit   Total
Lines:        16.6 %     212    1276
Functions:    14.5 %      17     117

            Line data    Source code
       1              : //! This module acts as a switchboard to access different repositories managed by this
       2              : //! page server.
       3              : 
       4              : use std::borrow::Cow;
       5              : use std::cmp::Ordering;
       6              : use std::collections::{BTreeMap, HashMap, HashSet};
       7              : use std::ops::Deref;
       8              : use std::sync::Arc;
       9              : use std::time::Duration;
      10              : 
      11              : use anyhow::Context;
      12              : use camino::{Utf8DirEntry, Utf8Path, Utf8PathBuf};
      13              : use futures::StreamExt;
      14              : use itertools::Itertools;
      15              : use once_cell::sync::Lazy;
      16              : use pageserver_api::key::Key;
      17              : use pageserver_api::models::{DetachBehavior, LocationConfigMode};
      18              : use pageserver_api::shard::{
      19              :     ShardCount, ShardIdentity, ShardIndex, ShardNumber, ShardStripeSize, TenantShardId,
      20              : };
      21              : use pageserver_api::upcall_api::ReAttachResponseTenant;
      22              : use rand::Rng;
      23              : use rand::distributions::Alphanumeric;
      24              : use remote_storage::TimeoutOrCancel;
      25              : use sysinfo::SystemExt;
      26              : use tokio::fs;
      27              : use tokio::task::JoinSet;
      28              : use tokio_util::sync::CancellationToken;
      29              : use tracing::*;
      30              : use utils::crashsafe::path_with_suffix_extension;
      31              : use utils::fs_ext::PathExt;
      32              : use utils::generation::Generation;
      33              : use utils::id::{TenantId, TimelineId};
      34              : use utils::{backoff, completion, crashsafe};
      35              : 
      36              : use super::remote_timeline_client::remote_tenant_path;
      37              : use super::secondary::SecondaryTenant;
      38              : use super::timeline::detach_ancestor::{self, PreparedTimelineDetach};
      39              : use super::{GlobalShutDown, TenantSharedResources};
      40              : use crate::config::PageServerConf;
      41              : use crate::context::{DownloadBehavior, RequestContext};
      42              : use crate::controller_upcall_client::{
      43              :     RetryForeverError, StorageControllerUpcallApi, StorageControllerUpcallClient,
      44              : };
      45              : use crate::deletion_queue::DeletionQueueClient;
      46              : use crate::http::routes::ACTIVE_TENANT_TIMEOUT;
      47              : use crate::metrics::{TENANT, TENANT_MANAGER as METRICS};
      48              : use crate::task_mgr::{BACKGROUND_RUNTIME, TaskKind};
      49              : use crate::tenant::config::{
      50              :     AttachedLocationConfig, AttachmentMode, LocationConf, LocationMode, SecondaryLocationConfig,
      51              : };
      52              : use crate::tenant::span::debug_assert_current_span_has_tenant_id;
      53              : use crate::tenant::storage_layer::inmemory_layer;
      54              : use crate::tenant::timeline::ShutdownMode;
      55              : use crate::tenant::{
      56              :     AttachedTenantConf, GcError, LoadConfigError, SpawnMode, TenantShard, TenantState,
      57              : };
      58              : use crate::virtual_file::MaybeFatalIo;
      59              : use crate::{InitializationOrder, TEMP_FILE_SUFFIX};
      60              : 
      61              : /// A tenant that appears in TenantsMap may be either
      62              : /// - `Attached`: has a full Tenant object, and is eligible to serve
      63              : ///   reads and ingest WAL.
      64              : /// - `Secondary`: is only keeping a local cache warm.
      65              : ///
      66              : /// Secondary is a totally distinct state rather than being a mode of a `Tenant`, because
      67              : /// that way we avoid having to carefully switch a tenant's ingestion etc on and off during
      68              : /// its lifetime, and we can preserve some important safety invariants like `Tenant` always
      69              : /// having a properly acquired generation (Secondary doesn't need a generation).
      70              : #[derive(Clone)]
      71              : pub(crate) enum TenantSlot {
      72              :     Attached(Arc<TenantShard>),
      73              :     Secondary(Arc<SecondaryTenant>),
      74              :     /// In this state, other administrative operations acting on the TenantId should
      75              :     /// block, or return a retry indicator equivalent to HTTP 503.
      76              :     InProgress(utils::completion::Barrier),
      77              : }
      78              : 
      79              : impl std::fmt::Debug for TenantSlot {
      80            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      81            0 :         match self {
      82            0 :             Self::Attached(tenant) => write!(f, "Attached({})", tenant.current_state()),
      83            0 :             Self::Secondary(_) => write!(f, "Secondary"),
      84            0 :             Self::InProgress(_) => write!(f, "InProgress"),
      85              :         }
      86            0 :     }
      87              : }
      88              : 
      89              : impl TenantSlot {
      90              :     /// Return the `Tenant` in this slot if attached, else None
      91            0 :     fn get_attached(&self) -> Option<&Arc<TenantShard>> {
      92            0 :         match self {
      93            0 :             Self::Attached(t) => Some(t),
      94            0 :             Self::Secondary(_) => None,
      95            0 :             Self::InProgress(_) => None,
      96              :         }
      97            0 :     }
      98              : }
      99              : 
     100              : /// The tenants known to the pageserver.
     101              : /// The enum variants are used to distinguish the different states that the pageserver can be in.
     102              : pub(crate) enum TenantsMap {
     103              :     /// [`init_tenant_mgr`] is not done yet.
     104              :     Initializing,
     105              :     /// [`init_tenant_mgr`] is done, all on-disk tenants have been loaded.
     106              :     /// New tenants can be added using [`tenant_map_acquire_slot`].
     107              :     Open(BTreeMap<TenantShardId, TenantSlot>),
     108              :     /// The pageserver has entered shutdown mode via [`TenantManager::shutdown`].
     109              :     /// Existing tenants are still accessible, but no new tenants can be created.
     110              :     ShuttingDown(BTreeMap<TenantShardId, TenantSlot>),
     111              : }
     112              : 
     113              : /// When resolving a TenantId to a shard, we may be looking for the 0th
     114              : /// shard, or we might be looking for whichever shard holds a particular page.
     115              : #[derive(Copy, Clone)]
     116              : pub(crate) enum ShardSelector {
     117              :     /// Only return the 0th shard, if it is present.  If a non-0th shard is present,
     118              :     /// ignore it.
     119              :     Zero,
     120              :     /// Pick the shard that holds this key
     121              :     Page(Key),
     122              :     /// The shard ID is known: pick the given shard
     123              :     Known(ShardIndex),
     124              : }
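                      : 
                      : // Illustrative sketch (not part of the original source): how a caller might
                      : // pick a selector.  Administrative operations usually address shard zero,
                      : // while a page read resolves whichever shard owns the key; `key` here is a
                      : // hypothetical Option<Key>.
                      : //
                      : // ```ignore
                      : // let selector = match key {
                      : //     Some(key) => ShardSelector::Page(key),
                      : //     None => ShardSelector::Zero,
                      : // };
                      : // ```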
     125              : 
     126              : /// A convenience for use with the re_attach ControllerUpcallClient function: rather
     127              : /// than the serializable struct, we build this enum that encapsulates
     128              : /// the invariant that attached tenants always have generations.
     129              : ///
     130              : /// This represents the subset of a LocationConfig that we receive during re-attach.
     131              : pub(crate) enum TenantStartupMode {
     132              :     Attached((AttachmentMode, Generation)),
     133              :     Secondary,
     134              : }
     135              : 
     136              : impl TenantStartupMode {
     137              :     /// Return the generation & mode that should be used when starting
     138              :     /// this tenant.
     139              :     ///
     140              :     /// If this returns None, the re-attach struct is in an invalid state and
     141              :     /// should be ignored in the response.
     142            0 :     fn from_reattach_tenant(rart: ReAttachResponseTenant) -> Option<Self> {
     143            0 :         match (rart.mode, rart.r#gen) {
     144            0 :             (LocationConfigMode::Detached, _) => None,
     145            0 :             (LocationConfigMode::Secondary, _) => Some(Self::Secondary),
     146            0 :             (LocationConfigMode::AttachedMulti, Some(g)) => {
     147            0 :                 Some(Self::Attached((AttachmentMode::Multi, Generation::new(g))))
     148              :             }
     149            0 :             (LocationConfigMode::AttachedSingle, Some(g)) => {
     150            0 :                 Some(Self::Attached((AttachmentMode::Single, Generation::new(g))))
     151              :             }
     152            0 :             (LocationConfigMode::AttachedStale, Some(g)) => {
     153            0 :                 Some(Self::Attached((AttachmentMode::Stale, Generation::new(g))))
     154              :             }
     155              :             _ => {
     156            0 :                 tracing::warn!(
     157            0 :                     "Received invalid re-attach state for tenant {}: {rart:?}",
     158              :                     rart.id
     159              :                 );
     160            0 :                 None
     161              :             }
     162              :         }
     163            0 :     }
     164              : }
     165              : 
     166              : /// Result type for resolving a TenantId to a specific shard
     167              : pub(crate) enum ShardResolveResult {
     168              :     NotFound,
     169              :     Found(Arc<TenantShard>),
     170              :     // Wait for this barrier, then query again
     171              :     InProgress(utils::completion::Barrier),
     172              : }
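                      : 
                      : // Sketch of the intended caller protocol (illustrative, not part of the
                      : // original source; `resolve` stands in for the manager's lookup function):
                      : // wait on InProgress barriers and retry until the slot settles.
                      : //
                      : // ```ignore
                      : // let shard = loop {
                      : //     match resolve(tenant_id, selector) {
                      : //         ShardResolveResult::Found(shard) => break shard,
                      : //         ShardResolveResult::NotFound => anyhow::bail!("tenant not found"),
                      : //         ShardResolveResult::InProgress(barrier) => barrier.wait().await,
                      : //     }
                      : // };
                      : // ```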
     173              : 
     174              : impl TenantsMap {
     175              :     /// Convenience function for typical usage, where we want to get a `Tenant` object, for
     176              :     /// working with attached tenants.  If the TenantId is in the map but in Secondary state,
     177              :     /// None is returned.
     178            0 :     pub(crate) fn get(&self, tenant_shard_id: &TenantShardId) -> Option<&Arc<TenantShard>> {
     179            0 :         match self {
     180            0 :             TenantsMap::Initializing => None,
     181            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => {
     182            0 :                 m.get(tenant_shard_id).and_then(|slot| slot.get_attached())
     183              :             }
     184              :         }
     185            0 :     }
     186              : 
     187              :     #[cfg(all(debug_assertions, not(test)))]
     188            0 :     pub(crate) fn len(&self) -> usize {
     189            0 :         match self {
     190            0 :             TenantsMap::Initializing => 0,
     191            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m.len(),
     192              :         }
     193            0 :     }
     194              : }
     195              : 
     196              : /// Precursor to deletion of a tenant dir: we do a fast rename to a tmp path, and then
     197              : /// the slower actual deletion in the background.
     198              : ///
     199              : /// This is "safe" in that it won't leave behind a partially deleted directory
     200              : /// at the original path, because we rename with TEMP_FILE_SUFFIX before we start deleting
     201              : /// the contents.
     202              : ///
     203              : /// This is pageserver-specific, as it relies on future processes after a crash to check
     204              : /// for TEMP_FILE_SUFFIX when loading things.
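                      : ///
                      : /// # Example (illustrative sketch, not part of the original source)
                      : ///
                      : /// ```ignore
                      : /// // Rename the dir aside, then let the background purge delete it lazily,
                      : /// // as init_tenant_mgr does for tenants detached in the re-attach response.
                      : /// let tmp_path = safe_rename_tenant_dir(&tenant_dir_path).await?;
                      : /// background_purges.spawn(tmp_path);
                      : /// ```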
     205            0 : async fn safe_rename_tenant_dir(path: impl AsRef<Utf8Path>) -> std::io::Result<Utf8PathBuf> {
     206            0 :     let parent = path
     207            0 :         .as_ref()
     208            0 :         .parent()
     209            0 :         // It is invalid to call this function with a relative path.  Tenant directories
     210            0 :         // should always have a parent.
     211            0 :         .ok_or(std::io::Error::new(
     212            0 :             std::io::ErrorKind::InvalidInput,
     213            0 :             "Path must be absolute",
     214            0 :         ))?;
     215            0 :     let rand_suffix = rand::thread_rng()
     216            0 :         .sample_iter(&Alphanumeric)
     217            0 :         .take(8)
     218            0 :         .map(char::from)
     219            0 :         .collect::<String>()
     220            0 :         + TEMP_FILE_SUFFIX;
     221            0 :     let tmp_path = path_with_suffix_extension(&path, &rand_suffix);
     222            0 :     fs::rename(path.as_ref(), &tmp_path).await?;
     223            0 :     fs::File::open(parent)
     224            0 :         .await?
     225            0 :         .sync_all()
     226            0 :         .await
     227            0 :         .maybe_fatal_err("safe_rename_tenant_dir")?;
     228            0 :     Ok(tmp_path)
     229            0 : }
     230              : 
     231              : /// See [`Self::spawn`].
     232              : #[derive(Clone, Default)]
     233              : pub struct BackgroundPurges(tokio_util::task::TaskTracker);
     234              : 
     235              : impl BackgroundPurges {
     236              :     /// When we have moved a tenant's content to a temporary directory, we may delete it lazily in
     237              :     /// the background, and thereby avoid blocking any API requests on this deletion completing.
     238              :     ///
     239              :     /// Although we are cleaning up the tenant, this task is not meant to be bound by the lifetime of the tenant in memory.
     240              :     /// Hence the [`BackgroundPurges`] type, which keeps track of these tasks.
     241            0 :     pub fn spawn(&self, tmp_path: Utf8PathBuf) {
     242            0 :         // Because on shutdown we close and wait, we are misusing TaskTracker a bit.
     243            0 :         //
     244            0 :         // So first acquire a token, then check whether the tracker has been closed. The tracker might get closed
     245            0 :         // right after, but at least the shutdown will wait for what we are spawning next.
     246            0 :         let token = self.0.token();
     247            0 : 
     248            0 :         if self.0.is_closed() {
     249            0 :             warn!(
     250              :                 %tmp_path,
     251            0 :                 "trying to spawn background purge during shutdown, ignoring"
     252              :             );
     253            0 :             return;
     254            0 :         }
     255              : 
     256            0 :         let span = info_span!(parent: None, "background_purge", %tmp_path);
     257              : 
     258            0 :         let task = move || {
     259            0 :             let _token = token;
     260            0 :             let _entered = span.entered();
     261            0 :             if let Err(error) = std::fs::remove_dir_all(tmp_path.as_path()) {
     262              :                 // should we fatal_io_error here?
     263            0 :                 warn!(%error, "failed to purge tenant directory");
     264            0 :             }
     265            0 :         };
     266              : 
     267            0 :         BACKGROUND_RUNTIME.spawn_blocking(task);
     268            0 :     }
     269              : 
     270              :     /// When this future completes, all background purges have completed.
     271              :     /// The first poll of the future will already lock out new background purges spawned via [`Self::spawn`].
     272              :     ///
     273              :     /// Concurrent calls will coalesce.
     274              :     ///
     275              :     /// # Cancellation-Safety
     276              :     ///
     277              :     /// If this future is dropped before polled to completion, concurrent and subsequent
     278              :     /// instances of this future will continue to be correct.
     279              :     #[instrument(skip_all)]
     280              :     pub async fn shutdown(&self) {
     281              :         // forbid new tasks (can be called many times)
     282              :         self.0.close();
     283              :         self.0.wait().await;
     284              :     }
     285              : }
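                      : 
                      : // Illustrative lifecycle (sketch, not part of the original source):
                      : //
                      : // ```ignore
                      : // let purges = BackgroundPurges::default();
                      : // purges.spawn(tmp_path);  // lazily delete an already-renamed directory
                      : // // ... later, during pageserver shutdown:
                      : // purges.shutdown().await; // locks out new spawns, waits for in-flight purges
                      : // ```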
     286              : 
     287              : static TENANTS: Lazy<std::sync::RwLock<TenantsMap>> =
     288           12 :     Lazy::new(|| std::sync::RwLock::new(TenantsMap::Initializing));
     289              : 
     290              : /// Responsible for storing and mutating the collection of all tenants
     291              : /// that this pageserver has state for.
     292              : ///
     293              : /// Every Tenant and SecondaryTenant instance lives inside the TenantManager.
     294              : ///
     295              : /// The most important role of the TenantManager is to prevent conflicts: e.g. trying to attach
     296              : /// the same tenant twice concurrently, or trying to configure the same tenant into secondary
     297              : /// and attached modes concurrently.
     298              : pub struct TenantManager {
     299              :     conf: &'static PageServerConf,
     300              :     // TODO: currently this is a &'static pointing to TENANTS.  When we finish refactoring
     301              :     // out of that static variable, the TenantManager can own this.
     302              :     // See https://github.com/neondatabase/neon/issues/5796
     303              :     tenants: &'static std::sync::RwLock<TenantsMap>,
     304              :     resources: TenantSharedResources,
     305              : 
     306              :     // Long-running operations that happen outside of a [`Tenant`] lifetime should respect this token.
     307              :     // This is for edge cases like tenant deletion.  In normal cases (within a Tenant lifetime),
     308              :     // tenants have their own cancellation tokens, which we fire individually in [`Self::shutdown`], or
     309              :     // when the tenant detaches.
     310              :     cancel: CancellationToken,
     311              : 
     312              :     background_purges: BackgroundPurges,
     313              : }
     314              : 
     315            0 : fn emergency_generations(
     316            0 :     tenant_confs: &HashMap<TenantShardId, Result<LocationConf, LoadConfigError>>,
     317            0 : ) -> HashMap<TenantShardId, TenantStartupMode> {
     318            0 :     tenant_confs
     319            0 :         .iter()
     320            0 :         .filter_map(|(tid, lc)| {
     321            0 :             let lc = match lc {
     322            0 :                 Ok(lc) => lc,
     323            0 :                 Err(_) => return None,
     324              :             };
     325              :             Some((
     326            0 :                 *tid,
     327            0 :                 match &lc.mode {
     328            0 :                     LocationMode::Attached(alc) => {
     329            0 :                         TenantStartupMode::Attached((alc.attach_mode, alc.generation))
     330              :                     }
     331            0 :                     LocationMode::Secondary(_) => TenantStartupMode::Secondary,
     332              :                 },
     333              :             ))
     334            0 :         })
     335            0 :         .collect()
     336            0 : }
     337              : 
     338            0 : async fn init_load_generations(
     339            0 :     conf: &'static PageServerConf,
     340            0 :     tenant_confs: &HashMap<TenantShardId, Result<LocationConf, LoadConfigError>>,
     341            0 :     resources: &TenantSharedResources,
     342            0 :     cancel: &CancellationToken,
     343            0 : ) -> anyhow::Result<Option<HashMap<TenantShardId, TenantStartupMode>>> {
     344            0 :     let generations = if conf.control_plane_emergency_mode {
     345            0 :         error!(
     346            0 :             "Emergency mode!  Tenants will be attached unsafely using their last known generation"
     347              :         );
     348            0 :         emergency_generations(tenant_confs)
     349            0 :     } else if let Some(client) = StorageControllerUpcallClient::new(conf, cancel)? {
     350            0 :         info!("Calling {} API to re-attach tenants", client.base_url());
     351              :         // If we are configured to use the control plane API, then it is the source of truth for what tenants to load.
     352            0 :         match client.re_attach(conf).await {
     353            0 :             Ok(tenants) => tenants
     354            0 :                 .into_iter()
     355            0 :                 .flat_map(|(id, rart)| {
     356            0 :                     TenantStartupMode::from_reattach_tenant(rart).map(|tsm| (id, tsm))
     357            0 :                 })
     358            0 :                 .collect(),
     359              :             Err(RetryForeverError::ShuttingDown) => {
     360            0 :                 anyhow::bail!("Shut down while waiting for control plane re-attach response")
     361              :             }
     362              :         }
     363              :     } else {
     364            0 :         info!("Control plane API not configured, tenant generations are disabled");
     365            0 :         return Ok(None);
     366              :     };
     367              : 
     368              :     // The deletion queue needs to know about the startup attachment state to decide which (if any) stored
     369              :     // deletion list entries may still be valid.  We provide that by pushing a recovery operation into
     370              :     // the queue. Sequential processing of the queue ensures that recovery is done before any new tenant deletions
     371              :     // are processed, even though we don't block on recovery completing here.
     372            0 :     let attached_tenants = generations
     373            0 :         .iter()
     374            0 :         .flat_map(|(id, start_mode)| {
     375            0 :             match start_mode {
     376            0 :                 TenantStartupMode::Attached((_mode, generation)) => Some(generation),
     377            0 :                 TenantStartupMode::Secondary => None,
     378              :             }
     379            0 :             .map(|gen_| (*id, *gen_))
     380            0 :         })
     381            0 :         .collect();
     382            0 :     resources.deletion_queue_client.recover(attached_tenants)?;
     383              : 
     384            0 :     Ok(Some(generations))
     385            0 : }
     386              : 
     387              : /// Given a directory discovered in the pageserver's tenants/ directory, attempt
     388              : /// to load a tenant config from it.
     389              : ///
     390              : /// If we cleaned up something expected (like an empty dir or a temp dir), return None.
     391            0 : fn load_tenant_config(
     392            0 :     conf: &'static PageServerConf,
     393            0 :     tenant_shard_id: TenantShardId,
     394            0 :     dentry: Utf8DirEntry,
     395            0 : ) -> Option<Result<LocationConf, LoadConfigError>> {
     396            0 :     let tenant_dir_path = dentry.path().to_path_buf();
     397            0 :     if crate::is_temporary(&tenant_dir_path) {
     398            0 :         info!("Found temporary tenant directory, removing: {tenant_dir_path}");
     399              :         // No need to use safe_remove_tenant_dir_all because this is already
     400              :         // a temporary path
     401            0 :         std::fs::remove_dir_all(&tenant_dir_path).fatal_err("delete temporary tenant dir");
     402            0 :         return None;
     403            0 :     }
     404            0 : 
     405            0 :     // This case happens if we crash during attachment before writing a config into the dir
     406            0 :     let is_empty = tenant_dir_path
     407            0 :         .is_empty_dir()
     408            0 :         .fatal_err("Checking for empty tenant dir");
     409            0 :     if is_empty {
     410            0 :         info!("removing empty tenant directory {tenant_dir_path:?}");
     411            0 :         std::fs::remove_dir(&tenant_dir_path).fatal_err("delete empty tenant dir");
     412            0 :         return None;
     413            0 :     }
     414            0 : 
     415            0 :     Some(TenantShard::load_tenant_config(conf, &tenant_shard_id))
     416            0 : }
     417              : 
     418              : /// Initial stage of load: walk the local tenants directory, clean up any temp files,
     419              : /// and load configurations for the tenants we found.
     420              : ///
     421              : /// Do this in parallel, because we expect 10k+ tenants, so serial execution can take
     422              : /// seconds even on reasonably fast drives.
     423            0 : async fn init_load_tenant_configs(
     424            0 :     conf: &'static PageServerConf,
     425            0 : ) -> HashMap<TenantShardId, Result<LocationConf, LoadConfigError>> {
     426            0 :     let tenants_dir = conf.tenants_path();
     427              : 
     428            0 :     let dentries = tokio::task::spawn_blocking(move || -> Vec<Utf8DirEntry> {
     429            0 :         let context = format!("read tenants dir {tenants_dir}");
     430            0 :         let dir_entries = tenants_dir.read_dir_utf8().fatal_err(&context);
     431            0 : 
     432            0 :         dir_entries
     433            0 :             .collect::<Result<Vec<_>, std::io::Error>>()
     434            0 :             .fatal_err(&context)
     435            0 :     })
     436            0 :     .await
     437            0 :     .expect("Config load task panicked");
     438            0 : 
     439            0 :     let mut configs = HashMap::new();
     440            0 : 
     441            0 :     let mut join_set = JoinSet::new();
     442            0 :     for dentry in dentries {
     443            0 :         let tenant_shard_id = match dentry.file_name().parse::<TenantShardId>() {
     444            0 :             Ok(id) => id,
     445              :             Err(_) => {
     446            0 :                 warn!(
     447            0 :                     "Invalid tenant path (garbage in our repo directory?): '{}'",
     448            0 :                     dentry.file_name()
     449              :                 );
     450            0 :                 continue;
     451              :             }
     452              :         };
     453              : 
     454            0 :         join_set.spawn_blocking(move || {
     455            0 :             (
     456            0 :                 tenant_shard_id,
     457            0 :                 load_tenant_config(conf, tenant_shard_id, dentry),
     458            0 :             )
     459            0 :         });
     460            0 :     }
     461              : 
     462            0 :     while let Some(r) = join_set.join_next().await {
     463            0 :         let (tenant_shard_id, tenant_config) = r.expect("Panic in config load task");
     464            0 :         if let Some(tenant_config) = tenant_config {
     465            0 :             configs.insert(tenant_shard_id, tenant_config);
     466            0 :         }
     467              :     }
     468              : 
     469            0 :     configs
     470            0 : }
     471              : 
     472              : #[derive(Debug, thiserror::Error)]
     473              : pub(crate) enum DeleteTenantError {
     474              :     #[error("Tenant map slot error {0}")]
     475              :     SlotError(#[from] TenantSlotError),
     476              : 
     477              :     #[error("Cancelled")]
     478              :     Cancelled,
     479              : 
     480              :     #[error(transparent)]
     481              :     Other(#[from] anyhow::Error),
     482              : }
     483              : 
     484              : /// Initialize repositories with locally available timelines.
     485              : /// Timelines that are only partially available locally (remote storage has more data than this pageserver)
     486              : /// are scheduled for download and added to the tenant once download is completed.
     487              : #[instrument(skip_all)]
     488              : pub async fn init_tenant_mgr(
     489              :     conf: &'static PageServerConf,
     490              :     background_purges: BackgroundPurges,
     491              :     resources: TenantSharedResources,
     492              :     init_order: InitializationOrder,
     493              :     cancel: CancellationToken,
     494              : ) -> anyhow::Result<TenantManager> {
     495              :     let mut tenants = BTreeMap::new();
     496              : 
     497              :     let ctx = RequestContext::todo_child(TaskKind::Startup, DownloadBehavior::Warn);
     498              : 
     499              :     // Initialize dynamic limits that depend on system resources
     500              :     let system_memory =
     501              :         sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_memory())
     502              :             .total_memory();
     503              :     let max_ephemeral_layer_bytes =
     504              :         conf.ephemeral_bytes_per_memory_kb as u64 * (system_memory / 1024);
     505              :     tracing::info!(
     506              :         "Initialized ephemeral layer size limit to {max_ephemeral_layer_bytes}, for {system_memory} bytes of memory"
     507              :     );
     508              :     inmemory_layer::GLOBAL_RESOURCES.max_dirty_bytes.store(
     509              :         max_ephemeral_layer_bytes,
     510              :         std::sync::atomic::Ordering::Relaxed,
     511              :     );
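                      :     // Worked example (illustrative): with ephemeral_bytes_per_memory_kb = 512
                      :     // and 16 GiB of RAM (16_777_216 KiB), the limit is
                      :     // 512 * 16_777_216 = 8_589_934_592 bytes, i.e. 8 GiB of ephemeral layers.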
     512              : 
     513              :     // Scan local filesystem for attached tenants
     514              :     let tenant_configs = init_load_tenant_configs(conf).await;
     515              : 
     516              :     // Determine which tenants are to be secondary or attached, and in which generation
     517              :     let tenant_modes = init_load_generations(conf, &tenant_configs, &resources, &cancel).await?;
     518              : 
     519              :     tracing::info!(
     520              :         "Attaching {} tenants at startup, warming up {} at a time",
     521              :         tenant_configs.len(),
     522              :         conf.concurrent_tenant_warmup.initial_permits()
     523              :     );
     524              :     TENANT.startup_scheduled.inc_by(tenant_configs.len() as u64);
     525              : 
     526              :     // Accumulate futures for writing tenant configs, so that we can execute in parallel
     527              :     let mut config_write_futs = Vec::new();
     528              : 
     529              :     // Update the location configs according to the re-attach response and persist them to disk
     530              :     tracing::info!("Updating {} location configs", tenant_configs.len());
     531              :     for (tenant_shard_id, location_conf) in tenant_configs {
     532              :         let tenant_dir_path = conf.tenant_path(&tenant_shard_id);
     533              : 
     534              :         let mut location_conf = match location_conf {
     535              :             Ok(l) => l,
     536              :             Err(e) => {
     537              :                 // This should only happen in the case of a serialization bug or critical local I/O error: we cannot load this tenant
     538              :                 error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Failed to load tenant config: {e:#}");
     539              :                 continue;
     540              :             }
     541              :         };
     542              : 
     543              :         // FIXME: if we were attached, and get demoted to secondary on re-attach, we
     544              :         // don't have a place to get a config.
     545              :         // (https://github.com/neondatabase/neon/issues/5377)
     546              :         const DEFAULT_SECONDARY_CONF: SecondaryLocationConfig =
     547              :             SecondaryLocationConfig { warm: true };
     548              : 
     549              :         if let Some(tenant_modes) = &tenant_modes {
     550              :             // We have a generation map: treat it as the authority for whether
     551              :             // this tenant is really attached.
     552              :             match tenant_modes.get(&tenant_shard_id) {
     553              :                 None => {
     554              :                     info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Detaching tenant, control plane omitted it in re-attach response");
     555              : 
     556              :                     match safe_rename_tenant_dir(&tenant_dir_path).await {
     557              :                         Ok(tmp_path) => {
     558              :                             background_purges.spawn(tmp_path);
     559              :                         }
     560              :                         Err(e) => {
     561              :                             error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
     562              :                             "Failed to move detached tenant directory '{tenant_dir_path}': {e:?}");
     563              :                         }
     564              :                     };
     565              : 
     566              :                     // We deleted local content: move on to next tenant, don't try and spawn this one.
     567              :                     continue;
     568              :                 }
     569              :                 Some(TenantStartupMode::Secondary) => {
     570              :                     if !matches!(location_conf.mode, LocationMode::Secondary(_)) {
     571              :                         location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF);
     572              :                     }
     573              :                 }
     574              :                 Some(TenantStartupMode::Attached((attach_mode, generation))) => {
     575              :                     let old_gen_higher = match &location_conf.mode {
     576              :                         LocationMode::Attached(AttachedLocationConfig {
     577              :                             generation: old_generation,
     578              :                             attach_mode: _attach_mode,
     579              :                         }) => {
     580              :                             if old_generation > generation {
     581              :                                 Some(old_generation)
     582              :                             } else {
     583              :                                 None
     584              :                             }
     585              :                         }
     586              :                         _ => None,
     587              :                     };
     588              :                     if let Some(old_generation) = old_gen_higher {
     589              :                         tracing::error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
     590              :                             "Control plane gave decreasing generation ({generation:?}) in re-attach response for tenant that was attached in generation {:?}, demoting to secondary",
     591              :                             old_generation
     592              :                         );
     593              : 
     594              :                         // We cannot safely attach this tenant given a bogus generation number, but let's avoid throwing away
     595              :                         // local disk content: demote to secondary rather than detaching.
     596              :                         location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF);
     597              :                     } else {
     598              :                         location_conf.attach_in_generation(*attach_mode, *generation);
     599              :                     }
     600              :                 }
     601              :             }
     602              :         } else {
     603              :             // Legacy mode: no generation information, any tenant present
     604              :             // on local disk may activate
     605              :             info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Starting tenant in legacy mode, no generation",);
     606              :         };
     607              : 
     608              :         // Presence of a generation number implies attachment: attach the tenant
     609              :         // if it wasn't already, and apply the generation number.
     610            0 :         config_write_futs.push(async move {
     611            0 :             let r =
     612            0 :                 TenantShard::persist_tenant_config(conf, &tenant_shard_id, &location_conf).await;
     613            0 :             (tenant_shard_id, location_conf, r)
     614            0 :         });
     615              :     }
     616              : 
     617              :     // Execute config writes with concurrency, to avoid bottlenecking on local FS write latency
     618              :     tracing::info!(
     619              :         "Writing {} location config files...",
     620              :         config_write_futs.len()
     621              :     );
     622              :     let config_write_results = futures::stream::iter(config_write_futs)
     623              :         .buffer_unordered(16)
     624              :         .collect::<Vec<_>>()
     625              :         .await;
     626              : 
     627              :     tracing::info!(
     628              :         "Spawning {} tenant shard locations...",
     629              :         config_write_results.len()
     630              :     );
     631              :     // For those shards that have live configurations, construct `Tenant` or `SecondaryTenant` objects and start them running
     632              :     for (tenant_shard_id, location_conf, config_write_result) in config_write_results {
     633              :         // Writing a config to local disk is foundational to startup up tenants: panic if we can't.
     634              :         config_write_result.fatal_err("write tenant shard config file");
     635              : 
     636              :         let tenant_dir_path = conf.tenant_path(&tenant_shard_id);
     637              :         let shard_identity = location_conf.shard;
     638              :         let slot = match location_conf.mode {
     639              :             LocationMode::Attached(attached_conf) => TenantSlot::Attached(
     640              :                 tenant_spawn(
     641              :                     conf,
     642              :                     tenant_shard_id,
     643              :                     &tenant_dir_path,
     644              :                     resources.clone(),
     645              :                     AttachedTenantConf::new(location_conf.tenant_conf, attached_conf),
     646              :                     shard_identity,
     647              :                     Some(init_order.clone()),
     648              :                     SpawnMode::Lazy,
     649              :                     &ctx,
     650              :                 )
     651              :                 .expect("global shutdown during init_tenant_mgr cannot happen"),
     652              :             ),
     653              :             LocationMode::Secondary(secondary_conf) => {
     654              :                 info!(
     655              :                     tenant_id = %tenant_shard_id.tenant_id,
     656              :                     shard_id = %tenant_shard_id.shard_slug(),
     657              :                     "Starting secondary tenant"
     658              :                 );
     659              :                 TenantSlot::Secondary(SecondaryTenant::new(
     660              :                     tenant_shard_id,
     661              :                     shard_identity,
     662              :                     location_conf.tenant_conf,
     663              :                     &secondary_conf,
     664              :                 ))
     665              :             }
     666              :         };
     667              : 
     668              :         METRICS.slot_inserted(&slot);
     669              :         tenants.insert(tenant_shard_id, slot);
     670              :     }
     671              : 
     672              :     info!("Processed {} local tenants at startup", tenants.len());
     673              : 
     674              :     let mut tenants_map = TENANTS.write().unwrap();
     675              :     assert!(matches!(&*tenants_map, &TenantsMap::Initializing));
     676              : 
     677              :     *tenants_map = TenantsMap::Open(tenants);
     678              : 
     679              :     Ok(TenantManager {
     680              :         conf,
     681              :         tenants: &TENANTS,
     682              :         resources,
     683              :         cancel: CancellationToken::new(),
     684              :         background_purges,
     685              :     })
     686              : }
     687              : 
     688              : /// Wrapper for Tenant::spawn that checks invariants before running
     689              : #[allow(clippy::too_many_arguments)]
     690            0 : fn tenant_spawn(
     691            0 :     conf: &'static PageServerConf,
     692            0 :     tenant_shard_id: TenantShardId,
     693            0 :     tenant_path: &Utf8Path,
     694            0 :     resources: TenantSharedResources,
     695            0 :     location_conf: AttachedTenantConf,
     696            0 :     shard_identity: ShardIdentity,
     697            0 :     init_order: Option<InitializationOrder>,
     698            0 :     mode: SpawnMode,
     699            0 :     ctx: &RequestContext,
     700            0 : ) -> Result<Arc<TenantShard>, GlobalShutDown> {
     701            0 :     // All these conditions should have been satisfied by our caller: the tenant dir exists, is a well-formed
     702            0 :     // path, and contains a configuration file.  Assertions that do synchronous I/O are limited to debug mode
     703            0 :     // to avoid impacting prod runtime performance.
     704            0 :     assert!(!crate::is_temporary(tenant_path));
     705            0 :     debug_assert!(tenant_path.is_dir());
     706            0 :     debug_assert!(
     707            0 :         conf.tenant_location_config_path(&tenant_shard_id)
     708            0 :             .try_exists()
     709            0 :             .unwrap()
     710              :     );
     711              : 
     712            0 :     TenantShard::spawn(
     713            0 :         conf,
     714            0 :         tenant_shard_id,
     715            0 :         resources,
     716            0 :         location_conf,
     717            0 :         shard_identity,
     718            0 :         init_order,
     719            0 :         mode,
     720            0 :         ctx,
     721            0 :     )
     722            0 : }
     723              : 
     724           12 : async fn shutdown_all_tenants0(tenants: &std::sync::RwLock<TenantsMap>) {
     725           12 :     let mut join_set = JoinSet::new();
     726            0 : 
     727            0 :     #[cfg(all(debug_assertions, not(test)))]
     728            0 :     {
     729            0 :         // Check that our metrics properly tracked the size of the tenants map.  This is a convenient location to check,
     730            0 :         // as it happens implicitly at the end of tests etc.
     731            0 :         let m = tenants.read().unwrap();
     732            0 :         debug_assert_eq!(METRICS.slots_total(), m.len() as u64);
     733              :     }
     734              : 
     735              :     // Atomically, 1. create the shutdown tasks and 2. prevent creation of new tenants.
     736           12 :     let (total_in_progress, total_attached) = {
     737           12 :         let mut m = tenants.write().unwrap();
     738           12 :         match &mut *m {
     739              :             TenantsMap::Initializing => {
     740            0 :                 *m = TenantsMap::ShuttingDown(BTreeMap::default());
     741            0 :                 info!("tenants map is empty");
     742            0 :                 return;
     743              :             }
     744           12 :             TenantsMap::Open(tenants) => {
     745           12 :                 let mut shutdown_state = BTreeMap::new();
     746           12 :                 let mut total_in_progress = 0;
     747           12 :                 let mut total_attached = 0;
     748              : 
     749           12 :                 for (tenant_shard_id, v) in std::mem::take(tenants).into_iter() {
     750           12 :                     match v {
     751            0 :                         TenantSlot::Attached(t) => {
     752            0 :                             shutdown_state.insert(tenant_shard_id, TenantSlot::Attached(t.clone()));
     753            0 :                             join_set.spawn(
     754            0 :                                 async move {
     755            0 :                                     let res = {
     756            0 :                                         let (_guard, shutdown_progress) = completion::channel();
     757            0 :                                         t.shutdown(shutdown_progress, ShutdownMode::FreezeAndFlush).await
     758              :                                     };
     759              : 
     760            0 :                                     if let Err(other_progress) = res {
     761              :                                         // join the other shutdown already in progress
     762            0 :                                         other_progress.wait().await;
     763            0 :                                     }
     764              : 
     765              :                                     // we cannot afford per-tenant logging here, because if S3 is degraded, we are
     766              :                                     // going to log too many lines
     767            0 :                                     debug!("tenant successfully stopped");
     768            0 :                                 }
     769            0 :                                 .instrument(info_span!("shutdown", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())),
     770              :                             );
     771              : 
     772            0 :                             total_attached += 1;
     773              :                         }
     774            0 :                         TenantSlot::Secondary(state) => {
     775            0 :                             // We don't need to wait for this individually per-tenant: the
     776            0 :                             // downloader task will be waited on eventually, this cancel
     777            0 :                             // is just to encourage it to drop out if it is doing work
     778            0 :                             // for this tenant right now.
     779            0 :                             state.cancel.cancel();
     780            0 : 
     781            0 :                             shutdown_state.insert(tenant_shard_id, TenantSlot::Secondary(state));
     782            0 :                         }
     783           12 :                         TenantSlot::InProgress(notify) => {
     784           12 :                             // InProgress tenants are not visible in TenantsMap::ShuttingDown: we will
     785           12 :                             // wait for their notifications to fire in this function.
     786           12 :                             join_set.spawn(async move {
     787           12 :                                 notify.wait().await;
     788           12 :                             });
     789           12 : 
     790           12 :                             total_in_progress += 1;
     791           12 :                         }
     792              :                     }
     793              :                 }
     794           12 :                 *m = TenantsMap::ShuttingDown(shutdown_state);
     795           12 :                 (total_in_progress, total_attached)
     796              :             }
     797              :             TenantsMap::ShuttingDown(_) => {
     798            0 :                 error!(
     799            0 :                     "already shutting down, this function isn't supposed to be called more than once"
     800              :                 );
     801            0 :                 return;
     802              :             }
     803              :         }
     804              :     };
     805              : 
     806           12 :     let started_at = std::time::Instant::now();
     807           12 : 
     808           12 :     info!(
     809            0 :         "Waiting for {} InProgress tenants and {} Attached tenants to shut down",
     810              :         total_in_progress, total_attached
     811              :     );
     812              : 
     813           12 :     let total = join_set.len();
     814           12 :     let mut panicked = 0;
     815           12 :     let mut buffering = true;
     816              :     const BUFFER_FOR: std::time::Duration = std::time::Duration::from_millis(500);
     817           12 :     let mut buffered = std::pin::pin!(tokio::time::sleep(BUFFER_FOR));
     818              : 
     819           36 :     while !join_set.is_empty() {
     820           24 :         tokio::select! {
     821           24 :             Some(joined) = join_set.join_next() => {
     822            0 :                 match joined {
     823           12 :                     Ok(()) => {},
     824            0 :                     Err(join_error) if join_error.is_cancelled() => {
     825            0 :                         unreachable!("we are not cancelling any of the tasks");
     826              :                     }
     827            0 :                     Err(join_error) if join_error.is_panic() => {
     828            0 :                         // cannot really do anything, as this panic is likely a bug
     829            0 :                         panicked += 1;
     830            0 :                     }
     831            0 :                     Err(join_error) => {
     832            0 :                         warn!("unknown kind of JoinError: {join_error}");
     833              :                     }
     834              :                 }
     835           12 :                 if !buffering {
     836           12 :                     // buffer so that every 500ms since the first update (or starting) we'll log
     837           12 :                     // how far away we are; this is because we will get SIGKILL'd at 10s, and we
     838           12 :                     // are not able to log *then*.
     839           12 :                     buffering = true;
     840           12 :                     buffered.as_mut().reset(tokio::time::Instant::now() + BUFFER_FOR);
     841           12 :                 }
     842              :             },
     843           24 :             _ = &mut buffered, if buffering => {
     844           12 :                 buffering = false;
     845           12 :                 info!(remaining = join_set.len(), total, elapsed_ms = started_at.elapsed().as_millis(), "waiting for tenants to shutdown");
     846              :             }
     847              :         }
     848              :     }
     849              : 
     850           12 :     if panicked > 0 {
     851            0 :         warn!(
     852              :             panicked,
     853            0 :             total, "observed panicks while shutting down tenants"
     854              :         );
     855           12 :     }
     856              : 
     857              :     // caller will log how long we took
     858           12 : }
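                      : 
                      : // Condensed sketch of the wait loop above (illustrative, not part of the
                      : // original source): each completion re-arms a 500ms timer, and when the
                      : // timer fires we log a progress heartbeat, since the process may be
                      : // SIGKILL'd before a final summary could ever be written.
                      : //
                      : // ```ignore
                      : // let mut deadline = std::pin::pin!(tokio::time::sleep(BUFFER_FOR));
                      : // let mut armed = true;
                      : // while !join_set.is_empty() {
                      : //     tokio::select! {
                      : //         Some(_res) = join_set.join_next() => {
                      : //             if !armed {
                      : //                 armed = true;
                      : //                 deadline.as_mut().reset(tokio::time::Instant::now() + BUFFER_FOR);
                      : //             }
                      : //         }
                      : //         _ = &mut deadline, if armed => {
                      : //             armed = false;
                      : //             info!(remaining = join_set.len(), "waiting for tenants to shut down");
                      : //         }
                      : //     }
                      : // }
                      : // ```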
     859              : 
     860              : #[derive(thiserror::Error, Debug)]
     861              : pub(crate) enum UpsertLocationError {
     862              :     #[error("Bad config request: {0}")]
     863              :     BadRequest(anyhow::Error),
     864              : 
     865              :     #[error("Cannot change config in this state: {0}")]
     866              :     Unavailable(#[from] TenantMapError),
     867              : 
     868              :     #[error("Tenant is already being modified")]
     869              :     InProgress,
     870              : 
     871              :     #[error("Failed to flush: {0}")]
     872              :     Flush(anyhow::Error),
     873              : 
     874              :     /// This error variant is for unexpected situations (soft assertions) where the system is in an unexpected state.
     875              :     #[error("Internal error: {0}")]
     876              :     InternalError(anyhow::Error),
     877              : }
     878              : 
     879              : impl TenantManager {
     880              :     /// Convenience function so that anyone with a TenantManager can get at the global configuration, without
     881              :     /// having to pass it around everywhere as a separate object.
     882            0 :     pub(crate) fn get_conf(&self) -> &'static PageServerConf {
     883            0 :         self.conf
     884            0 :     }
     885              : 
     886              :     /// Gets the attached tenant from the in-memory data, erroring if it's absent, in secondary mode, or currently
     887              :     /// undergoing a state change (i.e. slot is InProgress).
     888              :     ///
      889              :     /// The returned TenantShard is not guaranteed to be active: check its status after obtaining it, or
     890              :     /// use [`TenantShard::wait_to_become_active`] before using it if you will do I/O on it.
     891            0 :     pub(crate) fn get_attached_tenant_shard(
     892            0 :         &self,
     893            0 :         tenant_shard_id: TenantShardId,
     894            0 :     ) -> Result<Arc<TenantShard>, GetTenantError> {
     895            0 :         let locked = self.tenants.read().unwrap();
     896              : 
     897            0 :         let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)?;
     898              : 
     899            0 :         match peek_slot {
     900            0 :             Some(TenantSlot::Attached(tenant)) => Ok(Arc::clone(tenant)),
     901            0 :             Some(TenantSlot::InProgress(_)) => Err(GetTenantError::NotActive(tenant_shard_id)),
     902              :             None | Some(TenantSlot::Secondary(_)) => {
     903            0 :                 Err(GetTenantError::ShardNotFound(tenant_shard_id))
     904              :             }
     905              :         }
     906            0 :     }
     907              : 
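    // Illustrative caller sketch (added for this document, not in the original source): obtain
    // the shard, then wait for it to become active before doing I/O, as the doc comment above
    // advises. The helper name and the anyhow error conversion are assumptions for illustration.
    pub(crate) async fn get_active_tenant_shard_example(
        &self,
        tenant_shard_id: TenantShardId,
    ) -> anyhow::Result<Arc<TenantShard>> {
        let tenant = self.get_attached_tenant_shard(tenant_shard_id)?;
        tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
        Ok(tenant)
    }
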
     908            0 :     pub(crate) fn get_secondary_tenant_shard(
     909            0 :         &self,
     910            0 :         tenant_shard_id: TenantShardId,
     911            0 :     ) -> Option<Arc<SecondaryTenant>> {
     912            0 :         let locked = self.tenants.read().unwrap();
     913            0 : 
     914            0 :         let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
     915            0 :             .ok()
     916            0 :             .flatten();
     917              : 
     918            0 :         match peek_slot {
     919            0 :             Some(TenantSlot::Secondary(s)) => Some(s.clone()),
     920            0 :             _ => None,
     921              :         }
     922            0 :     }
     923              : 
     924              :     /// Whether the `TenantManager` is responsible for the tenant shard
     925            0 :     pub(crate) fn manages_tenant_shard(&self, tenant_shard_id: TenantShardId) -> bool {
     926            0 :         let locked = self.tenants.read().unwrap();
     927            0 : 
     928            0 :         let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
     929            0 :             .ok()
     930            0 :             .flatten();
     931            0 : 
     932            0 :         peek_slot.is_some()
     933            0 :     }
     934              : 
     935              :     #[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
     936              :     pub(crate) async fn upsert_location(
     937              :         &self,
     938              :         tenant_shard_id: TenantShardId,
     939              :         new_location_config: LocationConf,
     940              :         flush: Option<Duration>,
     941              :         mut spawn_mode: SpawnMode,
     942              :         ctx: &RequestContext,
     943              :     ) -> Result<Option<Arc<TenantShard>>, UpsertLocationError> {
     944              :         debug_assert_current_span_has_tenant_id();
     945              :         info!("configuring tenant location to state {new_location_config:?}");
     946              : 
     947              :         enum FastPathModified {
     948              :             Attached(Arc<TenantShard>),
     949              :             Secondary(Arc<SecondaryTenant>),
     950              :         }
     951              : 
     952              :         // Special case fast-path for updates to existing slots: if our upsert is only updating configuration,
     953              :         // then we do not need to set the slot to InProgress, we can just call into the
      954              :         // existing tenant.
     955              :         let fast_path_taken = {
     956              :             let locked = self.tenants.read().unwrap();
     957              :             let peek_slot =
     958              :                 tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Write)?;
     959              :             match (&new_location_config.mode, peek_slot) {
     960              :                 (LocationMode::Attached(attach_conf), Some(TenantSlot::Attached(tenant))) => {
     961              :                     match attach_conf.generation.cmp(&tenant.generation) {
     962              :                         Ordering::Equal => {
     963              :                             // A transition from Attached to Attached in the same generation, we may
     964              :                             // take our fast path and just provide the updated configuration
     965              :                             // to the tenant.
     966              :                             tenant.set_new_location_config(
     967              :                                 AttachedTenantConf::try_from(new_location_config.clone())
     968              :                                     .map_err(UpsertLocationError::BadRequest)?,
     969              :                             );
     970              : 
     971              :                             Some(FastPathModified::Attached(tenant.clone()))
     972              :                         }
     973              :                         Ordering::Less => {
     974              :                             return Err(UpsertLocationError::BadRequest(anyhow::anyhow!(
     975              :                                 "Generation {:?} is less than existing {:?}",
     976              :                                 attach_conf.generation,
     977              :                                 tenant.generation
     978              :                             )));
     979              :                         }
     980              :                         Ordering::Greater => {
     981              :                             // Generation advanced, fall through to general case of replacing `Tenant` object
     982              :                             None
     983              :                         }
     984              :                     }
     985              :                 }
     986              :                 (
     987              :                     LocationMode::Secondary(secondary_conf),
     988              :                     Some(TenantSlot::Secondary(secondary_tenant)),
     989              :                 ) => {
     990              :                     secondary_tenant.set_config(secondary_conf);
     991              :                     secondary_tenant.set_tenant_conf(&new_location_config.tenant_conf);
     992              :                     Some(FastPathModified::Secondary(secondary_tenant.clone()))
     993              :                 }
     994              :                 _ => {
     995              :                     // Not an Attached->Attached transition, fall through to general case
     996              :                     None
     997              :                 }
     998              :             }
     999              :         };
    1000              : 
    1001              :         // Fast-path continued: having dropped out of the self.tenants lock, do the async
    1002              :         // phase of writing config and/or waiting for flush, before returning.
    1003              :         match fast_path_taken {
    1004              :             Some(FastPathModified::Attached(tenant)) => {
    1005              :                 TenantShard::persist_tenant_config(
    1006              :                     self.conf,
    1007              :                     &tenant_shard_id,
    1008              :                     &new_location_config,
    1009              :                 )
    1010              :                 .await
    1011              :                 .fatal_err("write tenant shard config");
    1012              : 
    1013              :                 // Transition to AttachedStale means we may well hold a valid generation
    1014              :                 // still, and have been requested to go stale as part of a migration.  If
    1015              :                 // the caller set `flush`, then flush to remote storage.
    1016              :                 if let LocationMode::Attached(AttachedLocationConfig {
    1017              :                     generation: _,
    1018              :                     attach_mode: AttachmentMode::Stale,
    1019              :                 }) = &new_location_config.mode
    1020              :                 {
    1021              :                     if let Some(flush_timeout) = flush {
    1022              :                         match tokio::time::timeout(flush_timeout, tenant.flush_remote()).await {
    1023              :                             Ok(Err(e)) => {
    1024              :                                 return Err(UpsertLocationError::Flush(e));
    1025              :                             }
    1026              :                             Ok(Ok(_)) => return Ok(Some(tenant)),
    1027              :                             Err(_) => {
    1028              :                                 tracing::warn!(
    1029              :                                     timeout_ms = flush_timeout.as_millis(),
    1030              :                                     "Timed out waiting for flush to remote storage, proceeding anyway."
    1031              :                                 )
    1032              :                             }
    1033              :                         }
    1034              :                     }
    1035              :                 }
    1036              : 
    1037              :                 return Ok(Some(tenant));
    1038              :             }
    1039              :             Some(FastPathModified::Secondary(_secondary_tenant)) => {
    1040              :                 TenantShard::persist_tenant_config(
    1041              :                     self.conf,
    1042              :                     &tenant_shard_id,
    1043              :                     &new_location_config,
    1044              :                 )
    1045              :                 .await
    1046              :                 .fatal_err("write tenant shard config");
    1047              : 
    1048              :                 return Ok(None);
    1049              :             }
    1050              :             None => {
     1051              :                 // Proceed with the general case procedure, where we will shut down & remove any existing
    1052              :                 // slot contents and replace with a fresh one
    1053              :             }
    1054              :         };
    1055              : 
    1056              :         // General case for upserts to TenantsMap, excluding the case above: we will substitute an
    1057              :         // InProgress value to the slot while we make whatever changes are required.  The state for
    1058              :         // the tenant is inaccessible to the outside world while we are doing this, but that is sensible:
    1059              :         // the state is ill-defined while we're in transition.  Transitions are async, but fast: we do
    1060              :         // not do significant I/O, and shutdowns should be prompt via cancellation tokens.
    1061              :         let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)
    1062            0 :             .map_err(|e| match e {
    1063              :                 TenantSlotError::NotFound(_) => {
    1064            0 :                     unreachable!("Called with mode Any")
    1065              :                 }
    1066            0 :                 TenantSlotError::InProgress => UpsertLocationError::InProgress,
    1067            0 :                 TenantSlotError::MapState(s) => UpsertLocationError::Unavailable(s),
    1068            0 :             })?;
    1069              : 
    1070              :         match slot_guard.get_old_value() {
    1071              :             Some(TenantSlot::Attached(tenant)) => {
    1072              :                 // The case where we keep a Tenant alive was covered above in the special case
    1073              :                 // for Attached->Attached transitions in the same generation.  By this point,
    1074              :                 // if we see an attached tenant we know it will be discarded and should be
    1075              :                 // shut down.
    1076              :                 let (_guard, progress) = utils::completion::channel();
    1077              : 
    1078              :                 match tenant.get_attach_mode() {
    1079              :                     AttachmentMode::Single | AttachmentMode::Multi => {
    1080              :                         // Before we leave our state as the presumed holder of the latest generation,
    1081              :                         // flush any outstanding deletions to reduce the risk of leaking objects.
    1082              :                         self.resources.deletion_queue_client.flush_advisory()
    1083              :                     }
    1084              :                     AttachmentMode::Stale => {
     1085              :                         // If we're stale there's no point trying to flush deletions
    1086              :                     }
    1087              :                 };
    1088              : 
    1089              :                 info!("Shutting down attached tenant");
    1090              :                 match tenant.shutdown(progress, ShutdownMode::Hard).await {
    1091              :                     Ok(()) => {}
    1092              :                     Err(barrier) => {
    1093              :                         info!("Shutdown already in progress, waiting for it to complete");
    1094              :                         barrier.wait().await;
    1095              :                     }
    1096              :                 }
    1097              :                 slot_guard.drop_old_value().expect("We just shut it down");
    1098              : 
     1099              :                 // Edge case: we were called with SpawnMode::Create, but a Tenant already existed, so the
     1100              :                 // caller's assumption that they are creating a fresh tenant is wrong.  We must switch to
    1101              :                 // Eager mode so that when starting this Tenant we properly probe remote storage for timelines,
    1102              :                 // rather than assuming it to be empty.
    1103              :                 spawn_mode = SpawnMode::Eager;
    1104              :             }
    1105              :             Some(TenantSlot::Secondary(state)) => {
    1106              :                 info!("Shutting down secondary tenant");
    1107              :                 state.shutdown().await;
    1108              :             }
    1109              :             Some(TenantSlot::InProgress(_)) => {
    1110              :                 // This should never happen: acquire_slot should error out
    1111              :                 // if the contents of a slot were InProgress.
    1112              :                 return Err(UpsertLocationError::InternalError(anyhow::anyhow!(
    1113              :                     "Acquired an InProgress slot, this is a bug."
    1114              :                 )));
    1115              :             }
    1116              :             None => {
    1117              :                 // Slot was vacant, nothing needs shutting down.
    1118              :             }
    1119              :         }
    1120              : 
    1121              :         let tenant_path = self.conf.tenant_path(&tenant_shard_id);
    1122              :         let timelines_path = self.conf.timelines_path(&tenant_shard_id);
    1123              : 
    1124              :         // Directory structure is the same for attached and secondary modes:
    1125              :         // create it if it doesn't exist.  Timeline load/creation expects the
    1126              :         // timelines/ subdir to already exist.
    1127              :         //
    1128              :         // Does not need to be fsync'd because local storage is just a cache.
    1129              :         tokio::fs::create_dir_all(&timelines_path)
    1130              :             .await
    1131              :             .fatal_err("create timelines/ dir");
    1132              : 
    1133              :         // Before activating either secondary or attached mode, persist the
    1134              :         // configuration, so that on restart we will re-attach (or re-start
    1135              :         // secondary) on the tenant.
    1136              :         TenantShard::persist_tenant_config(self.conf, &tenant_shard_id, &new_location_config)
    1137              :             .await
    1138              :             .fatal_err("write tenant shard config");
    1139              : 
    1140              :         let new_slot = match &new_location_config.mode {
    1141              :             LocationMode::Secondary(secondary_config) => {
    1142              :                 let shard_identity = new_location_config.shard;
    1143              :                 TenantSlot::Secondary(SecondaryTenant::new(
    1144              :                     tenant_shard_id,
    1145              :                     shard_identity,
    1146              :                     new_location_config.tenant_conf,
    1147              :                     secondary_config,
    1148              :                 ))
    1149              :             }
    1150              :             LocationMode::Attached(_attach_config) => {
    1151              :                 let shard_identity = new_location_config.shard;
    1152              : 
    1153              :                 // Testing hack: if we are configured with no control plane, then drop the generation
    1154              :                 // from upserts.  This enables creating generation-less tenants even though neon_local
    1155              :                 // always uses generations when calling the location conf API.
    1156              :                 let attached_conf = if cfg!(feature = "testing") {
    1157              :                     let mut conf = AttachedTenantConf::try_from(new_location_config)
    1158              :                         .map_err(UpsertLocationError::BadRequest)?;
    1159              :                     if self.conf.control_plane_api.is_none() {
    1160              :                         conf.location.generation = Generation::none();
    1161              :                     }
    1162              :                     conf
    1163              :                 } else {
    1164              :                     AttachedTenantConf::try_from(new_location_config)
    1165              :                         .map_err(UpsertLocationError::BadRequest)?
    1166              :                 };
    1167              : 
    1168              :                 let tenant = tenant_spawn(
    1169              :                     self.conf,
    1170              :                     tenant_shard_id,
    1171              :                     &tenant_path,
    1172              :                     self.resources.clone(),
    1173              :                     attached_conf,
    1174              :                     shard_identity,
    1175              :                     None,
    1176              :                     spawn_mode,
    1177              :                     ctx,
    1178              :                 )
    1179            0 :                 .map_err(|_: GlobalShutDown| {
    1180            0 :                     UpsertLocationError::Unavailable(TenantMapError::ShuttingDown)
    1181            0 :                 })?;
    1182              : 
    1183              :                 TenantSlot::Attached(tenant)
    1184              :             }
    1185              :         };
    1186              : 
    1187              :         let attached_tenant = if let TenantSlot::Attached(tenant) = &new_slot {
    1188              :             Some(tenant.clone())
    1189              :         } else {
    1190              :             None
    1191              :         };
    1192              : 
    1193              :         match slot_guard.upsert(new_slot) {
    1194              :             Err(TenantSlotUpsertError::InternalError(e)) => {
    1195              :                 Err(UpsertLocationError::InternalError(anyhow::anyhow!(e)))
    1196              :             }
    1197              :             Err(TenantSlotUpsertError::MapState(e)) => Err(UpsertLocationError::Unavailable(e)),
    1198              :             Err(TenantSlotUpsertError::ShuttingDown((new_slot, _completion))) => {
    1199              :                 // If we just called tenant_spawn() on a new tenant, and can't insert it into our map, then
    1200              :                 // we must not leak it: this would violate the invariant that after shutdown_all_tenants, all tenants
    1201              :                 // are shutdown.
     1202              :                 // are shut down.
    1203              :                 // We must shut it down inline here.
    1204              :                 match new_slot {
    1205              :                     TenantSlot::InProgress(_) => {
    1206              :                         // Unreachable because we never insert an InProgress
    1207              :                         unreachable!()
    1208              :                     }
    1209              :                     TenantSlot::Attached(tenant) => {
    1210              :                         let (_guard, progress) = utils::completion::channel();
    1211              :                         info!(
    1212              :                             "Shutting down just-spawned tenant, because tenant manager is shut down"
    1213              :                         );
    1214              :                         match tenant.shutdown(progress, ShutdownMode::Hard).await {
    1215              :                             Ok(()) => {
    1216              :                                 info!("Finished shutting down just-spawned tenant");
    1217              :                             }
    1218              :                             Err(barrier) => {
    1219              :                                 info!("Shutdown already in progress, waiting for it to complete");
    1220              :                                 barrier.wait().await;
    1221              :                             }
    1222              :                         }
    1223              :                     }
    1224              :                     TenantSlot::Secondary(secondary_tenant) => {
    1225              :                         secondary_tenant.shutdown().await;
    1226              :                     }
    1227              :                 }
    1228              : 
    1229              :                 Err(UpsertLocationError::Unavailable(
    1230              :                     TenantMapError::ShuttingDown,
    1231              :                 ))
    1232              :             }
    1233              :             Ok(()) => Ok(attached_tenant),
    1234              :         }
    1235              :     }
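
    // Hedged usage sketch (not in the original source): apply a location config received from
    // the storage controller, spawning eagerly and without a flush timeout. The helper name and
    // the choice of arguments are assumptions for illustration only.
    pub(crate) async fn apply_location_example(
        &self,
        tenant_shard_id: TenantShardId,
        location_conf: LocationConf,
        ctx: &RequestContext,
    ) -> Result<(), UpsertLocationError> {
        // None: do not wait for a remote flush; SpawnMode::Eager: probe remote storage for timelines.
        let _attached = self
            .upsert_location(tenant_shard_id, location_conf, None, SpawnMode::Eager, ctx)
            .await?;
        Ok(())
    }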
    1236              : 
    1237              :     /// Resetting a tenant is equivalent to detaching it, then attaching it again with the same
    1238              :     /// LocationConf that was last used to attach it.  Optionally, the local file cache may be
    1239              :     /// dropped before re-attaching.
    1240              :     ///
    1241              :     /// This is not part of a tenant's normal lifecycle: it is used for debug/support, in situations
    1242              :     /// where an issue is identified that would go away with a restart of the tenant.
    1243              :     ///
    1244              :     /// This does not have any special "force" shutdown of a tenant: it relies on the tenant's tasks
    1245              :     /// to respect the cancellation tokens used in normal shutdown().
    1246              :     #[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %drop_cache))]
    1247              :     pub(crate) async fn reset_tenant(
    1248              :         &self,
    1249              :         tenant_shard_id: TenantShardId,
    1250              :         drop_cache: bool,
    1251              :         ctx: &RequestContext,
    1252              :     ) -> anyhow::Result<()> {
    1253              :         let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
    1254              :         let Some(old_slot) = slot_guard.get_old_value() else {
    1255              :             anyhow::bail!("Tenant not found when trying to reset");
    1256              :         };
    1257              : 
    1258              :         let Some(tenant) = old_slot.get_attached() else {
    1259              :             slot_guard.revert();
    1260              :             anyhow::bail!("Tenant is not in attached state");
    1261              :         };
    1262              : 
    1263              :         let (_guard, progress) = utils::completion::channel();
    1264              :         match tenant.shutdown(progress, ShutdownMode::Hard).await {
    1265              :             Ok(()) => {
    1266              :                 slot_guard.drop_old_value()?;
    1267              :             }
    1268              :             Err(_barrier) => {
    1269              :                 slot_guard.revert();
    1270              :                 anyhow::bail!("Cannot reset Tenant, already shutting down");
    1271              :             }
    1272              :         }
    1273              : 
    1274              :         let tenant_path = self.conf.tenant_path(&tenant_shard_id);
    1275              :         let timelines_path = self.conf.timelines_path(&tenant_shard_id);
    1276              :         let config = TenantShard::load_tenant_config(self.conf, &tenant_shard_id)?;
    1277              : 
    1278              :         if drop_cache {
    1279              :             tracing::info!("Dropping local file cache");
    1280              : 
    1281              :             match tokio::fs::read_dir(&timelines_path).await {
    1282              :                 Err(e) => {
    1283              :                     tracing::warn!("Failed to list timelines while dropping cache: {}", e);
    1284              :                 }
    1285              :                 Ok(mut entries) => {
    1286              :                     while let Some(entry) = entries.next_entry().await? {
    1287              :                         tokio::fs::remove_dir_all(entry.path()).await?;
    1288              :                     }
    1289              :                 }
    1290              :             }
    1291              :         }
    1292              : 
    1293              :         let shard_identity = config.shard;
    1294              :         let tenant = tenant_spawn(
    1295              :             self.conf,
    1296              :             tenant_shard_id,
    1297              :             &tenant_path,
    1298              :             self.resources.clone(),
    1299              :             AttachedTenantConf::try_from(config)?,
    1300              :             shard_identity,
    1301              :             None,
    1302              :             SpawnMode::Eager,
    1303              :             ctx,
    1304              :         )?;
    1305              : 
    1306              :         slot_guard.upsert(TenantSlot::Attached(tenant))?;
    1307              : 
    1308              :         Ok(())
    1309              :     }
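
    // Minimal sketch (hypothetical support tooling, not in the original source): reset a shard
    // and drop its local timeline cache so layers are re-downloaded on demand afterwards.
    pub(crate) async fn reset_and_drop_cache_example(
        &self,
        tenant_shard_id: TenantShardId,
        ctx: &RequestContext,
    ) -> anyhow::Result<()> {
        self.reset_tenant(tenant_shard_id, /* drop_cache */ true, ctx).await
    }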
    1310              : 
    1311            0 :     pub(crate) fn get_attached_active_tenant_shards(&self) -> Vec<Arc<TenantShard>> {
    1312            0 :         let locked = self.tenants.read().unwrap();
    1313            0 :         match &*locked {
    1314            0 :             TenantsMap::Initializing => Vec::new(),
    1315            0 :             TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => map
    1316            0 :                 .values()
    1317            0 :                 .filter_map(|slot| {
    1318            0 :                     slot.get_attached()
    1319            0 :                         .and_then(|t| if t.is_active() { Some(t.clone()) } else { None })
    1320            0 :                 })
    1321            0 :                 .collect(),
    1322              :         }
    1323            0 :     }
    1324              :     // Do some synchronous work for all tenant slots in Secondary state.  The provided
    1325              :     // callback should be small and fast, as it will be called inside the global
    1326              :     // TenantsMap lock.
    1327            0 :     pub(crate) fn foreach_secondary_tenants<F>(&self, mut func: F)
    1328            0 :     where
    1329            0 :         // TODO: let the callback return a hint to drop out of the loop early
    1330            0 :         F: FnMut(&TenantShardId, &Arc<SecondaryTenant>),
    1331            0 :     {
    1332            0 :         let locked = self.tenants.read().unwrap();
    1333              : 
    1334            0 :         let map = match &*locked {
    1335            0 :             TenantsMap::Initializing | TenantsMap::ShuttingDown(_) => return,
    1336            0 :             TenantsMap::Open(m) => m,
    1337              :         };
    1338              : 
    1339            0 :         for (tenant_id, slot) in map {
    1340            0 :             if let TenantSlot::Secondary(state) = slot {
    1341              :                 // Only expose secondary tenants that are not currently shutting down
    1342            0 :                 if !state.cancel.is_cancelled() {
    1343            0 :                     func(tenant_id, state)
    1344            0 :                 }
    1345            0 :             }
    1346              :         }
    1347            0 :     }
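
    // Illustrative sketch (not in the original source): collect the IDs of all secondary shards.
    // The callback runs under the global TenantsMap lock, so it only pushes into a local Vec and
    // does no I/O, as the comment above requires. The helper name is an assumption.
    pub(crate) fn secondary_shard_ids_example(&self) -> Vec<TenantShardId> {
        let mut ids = Vec::new();
        self.foreach_secondary_tenants(|tenant_shard_id, _secondary| {
            ids.push(*tenant_shard_id);
        });
        ids
    }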
    1348              : 
    1349              :     /// Total list of all tenant slots: this includes attached, secondary, and InProgress.
    1350            0 :     pub(crate) fn list(&self) -> Vec<(TenantShardId, TenantSlot)> {
    1351            0 :         let locked = self.tenants.read().unwrap();
    1352            0 :         match &*locked {
    1353            0 :             TenantsMap::Initializing => Vec::new(),
    1354            0 :             TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => {
    1355            0 :                 map.iter().map(|(k, v)| (*k, v.clone())).collect()
    1356              :             }
    1357              :         }
    1358            0 :     }
    1359              : 
    1360            0 :     pub(crate) fn get(&self, tenant_shard_id: TenantShardId) -> Option<TenantSlot> {
    1361            0 :         let locked = self.tenants.read().unwrap();
    1362            0 :         match &*locked {
    1363            0 :             TenantsMap::Initializing => None,
    1364            0 :             TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => {
    1365            0 :                 map.get(&tenant_shard_id).cloned()
    1366              :             }
    1367              :         }
    1368            0 :     }
    1369              : 
    1370              :     /// If a tenant is attached, detach it.  Then remove its data from remote storage.
    1371              :     ///
    1372              :     /// A tenant is considered deleted once it is gone from remote storage.  It is the caller's
     1373              :     /// responsibility to avoid trying to attach the tenant again or use it in any way once deletion
    1374              :     /// has started: this operation is not atomic, and must be retried until it succeeds.
    1375              :     ///
    1376              :     /// As a special case, if an unsharded tenant ID is given for a sharded tenant, it will remove
    1377              :     /// all tenant shards in remote storage (removing all paths with the tenant prefix). The storage
    1378              :     /// controller uses this to purge all remote tenant data, including any stale parent shards that
    1379              :     /// may remain after splits. Ideally, this special case would be handled elsewhere. See:
    1380              :     /// <https://github.com/neondatabase/neon/pull/9394>.
    1381            0 :     pub(crate) async fn delete_tenant(
    1382            0 :         &self,
    1383            0 :         tenant_shard_id: TenantShardId,
    1384            0 :     ) -> Result<(), DeleteTenantError> {
    1385            0 :         super::span::debug_assert_current_span_has_tenant_id();
    1386              : 
    1387            0 :         async fn delete_local(
    1388            0 :             conf: &PageServerConf,
    1389            0 :             background_purges: &BackgroundPurges,
    1390            0 :             tenant_shard_id: &TenantShardId,
    1391            0 :         ) -> anyhow::Result<()> {
    1392            0 :             let local_tenant_directory = conf.tenant_path(tenant_shard_id);
    1393            0 :             let tmp_dir = safe_rename_tenant_dir(&local_tenant_directory)
    1394            0 :                 .await
    1395            0 :                 .with_context(|| {
    1396            0 :                     format!("local tenant directory {local_tenant_directory:?} rename")
    1397            0 :                 })?;
    1398            0 :             background_purges.spawn(tmp_dir);
    1399            0 :             Ok(())
    1400            0 :         }
    1401              : 
    1402            0 :         let slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
    1403            0 :         match &slot_guard.old_value {
    1404            0 :             Some(TenantSlot::Attached(tenant)) => {
    1405            0 :                 // Legacy deletion flow: the tenant remains attached, goes to Stopping state, and
    1406            0 :                 // deletion will be resumed across restarts.
    1407            0 :                 let tenant = tenant.clone();
    1408            0 :                 let (_guard, progress) = utils::completion::channel();
    1409            0 :                 match tenant.shutdown(progress, ShutdownMode::Hard).await {
    1410            0 :                     Ok(()) => {}
    1411            0 :                     Err(barrier) => {
    1412            0 :                         info!("Shutdown already in progress, waiting for it to complete");
    1413            0 :                         barrier.wait().await;
    1414              :                     }
    1415              :                 }
    1416            0 :                 delete_local(self.conf, &self.background_purges, &tenant_shard_id).await?;
    1417              :             }
    1418            0 :             Some(TenantSlot::Secondary(secondary_tenant)) => {
    1419            0 :                 secondary_tenant.shutdown().await;
    1420              : 
    1421            0 :                 delete_local(self.conf, &self.background_purges, &tenant_shard_id).await?;
    1422              :             }
    1423            0 :             Some(TenantSlot::InProgress(_)) => unreachable!(),
    1424            0 :             None => {}
    1425              :         };
    1426              : 
    1427              :         // Fall through: local state for this tenant is no longer present, proceed with remote delete.
    1428              :         // - We use a retry wrapper here so that common transient S3 errors (e.g. 503, 429) do not result
    1429              :         //   in 500 responses to delete requests.
    1430              :         // - We keep the `SlotGuard` during this I/O, so that if a concurrent delete request comes in, it will
    1431              :         //   503/retry, rather than kicking off a wasteful concurrent deletion.
    1432              :         // NB: this also deletes partial prefixes, i.e. a <tenant_id> path will delete all
    1433              :         // <tenant_id>_<shard_id>/* objects. See method comment for why.
    1434            0 :         backoff::retry(
    1435            0 :             || async move {
    1436            0 :                 self.resources
    1437            0 :                     .remote_storage
    1438            0 :                     .delete_prefix(&remote_tenant_path(&tenant_shard_id), &self.cancel)
    1439            0 :                     .await
    1440            0 :             },
    1441            0 :             |_| false, // backoff::retry handles cancellation
    1442            0 :             1,
    1443            0 :             3,
    1444            0 :             &format!("delete_tenant[tenant_shard_id={tenant_shard_id}]"),
    1445            0 :             &self.cancel,
    1446            0 :         )
    1447            0 :         .await
    1448            0 :         .unwrap_or(Err(TimeoutOrCancel::Cancel.into()))
    1449            0 :         .map_err(|err| {
    1450            0 :             if TimeoutOrCancel::caused_by_cancel(&err) {
    1451            0 :                 return DeleteTenantError::Cancelled;
    1452            0 :             }
    1453            0 :             DeleteTenantError::Other(err)
    1454            0 :         })
    1455            0 :     }
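
    // Hedged sketch of the caller-side contract described in the doc comment above: deletion is
    // not atomic, so a hypothetical caller retries until it succeeds (or is cancelled). The
    // helper name and the fixed 1s pause between attempts are assumptions for illustration.
    pub(crate) async fn delete_tenant_until_done_example(
        &self,
        tenant_shard_id: TenantShardId,
    ) -> Result<(), DeleteTenantError> {
        loop {
            match self.delete_tenant(tenant_shard_id).await {
                Ok(()) => return Ok(()),
                Err(DeleteTenantError::Cancelled) => return Err(DeleteTenantError::Cancelled),
                Err(e) => {
                    warn!("delete_tenant failed, will retry: {e}");
                    tokio::time::sleep(Duration::from_secs(1)).await;
                }
            }
        }
    }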
    1456              : 
    1457              :     #[instrument(skip_all, fields(tenant_id=%tenant.get_tenant_shard_id().tenant_id, shard_id=%tenant.get_tenant_shard_id().shard_slug(), new_shard_count=%new_shard_count.literal()))]
    1458              :     pub(crate) async fn shard_split(
    1459              :         &self,
    1460              :         tenant: Arc<TenantShard>,
    1461              :         new_shard_count: ShardCount,
    1462              :         new_stripe_size: Option<ShardStripeSize>,
    1463              :         ctx: &RequestContext,
    1464              :     ) -> anyhow::Result<Vec<TenantShardId>> {
    1465              :         let tenant_shard_id = *tenant.get_tenant_shard_id();
    1466              :         let r = self
    1467              :             .do_shard_split(tenant, new_shard_count, new_stripe_size, ctx)
    1468              :             .await;
    1469              :         if r.is_err() {
    1470              :             // Shard splitting might have left the original shard in a partially shut down state (it
    1471              :             // stops the shard's remote timeline client).  Reset it to ensure we leave things in
    1472              :             // a working state.
    1473              :             if self.get(tenant_shard_id).is_some() {
    1474              :                 tracing::warn!("Resetting after shard split failure");
    1475              :                 if let Err(e) = self.reset_tenant(tenant_shard_id, false, ctx).await {
    1476              :                     // Log this error because our return value will still be the original error, not this one.  This is
    1477              :                     // a severe error: if this happens, we might be leaving behind a tenant that is not fully functional
    1478              :                     // (e.g. has uploads disabled).  We can't do anything else: if reset fails then shutting the tenant down or
    1479              :                     // setting it broken probably won't help either.
    1480              :                     tracing::error!("Failed to reset: {e}");
    1481              :                 }
    1482              :             }
    1483              :         }
    1484              : 
    1485              :         r
    1486              :     }
    1487              : 
    1488            0 :     pub(crate) async fn do_shard_split(
    1489            0 :         &self,
    1490            0 :         tenant: Arc<TenantShard>,
    1491            0 :         new_shard_count: ShardCount,
    1492            0 :         new_stripe_size: Option<ShardStripeSize>,
    1493            0 :         ctx: &RequestContext,
    1494            0 :     ) -> anyhow::Result<Vec<TenantShardId>> {
    1495            0 :         let tenant_shard_id = *tenant.get_tenant_shard_id();
    1496            0 : 
    1497            0 :         // Validate the incoming request
    1498            0 :         if new_shard_count.count() <= tenant_shard_id.shard_count.count() {
    1499            0 :             anyhow::bail!("Requested shard count is not an increase");
    1500            0 :         }
    1501            0 :         let expansion_factor = new_shard_count.count() / tenant_shard_id.shard_count.count();
    1502            0 :         if !expansion_factor.is_power_of_two() {
    1503            0 :             anyhow::bail!("Requested split is not a power of two");
    1504            0 :         }
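        // Worked example of the validation above (illustrative comment, not in the original
        // source): splitting shard_count 2 -> 8 gives expansion_factor 4, a power of two, and is
        // accepted; 2 -> 6 gives 3 and is rejected; any target <= the current count is rejected
        // as "not an increase".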
    1505              : 
    1506            0 :         if let Some(new_stripe_size) = new_stripe_size {
    1507            0 :             if tenant.get_shard_stripe_size() != new_stripe_size
    1508            0 :                 && tenant_shard_id.shard_count.count() > 1
    1509              :             {
    1510              :                 // This tenant already has multiple shards, it is illegal to try and change its stripe size
    1511            0 :                 anyhow::bail!(
    1512            0 :                     "Shard stripe size may not be modified once tenant has multiple shards"
    1513            0 :                 );
    1514            0 :             }
    1515            0 :         }
    1516              : 
    1517              :         // Plan: identify what the new child shards will be
    1518            0 :         let child_shards = tenant_shard_id.split(new_shard_count);
    1519            0 :         tracing::info!(
    1520            0 :             "Shard {} splits into: {}",
    1521            0 :             tenant_shard_id.to_index(),
    1522            0 :             child_shards
    1523            0 :                 .iter()
    1524            0 :                 .map(|id| format!("{}", id.to_index()))
    1525            0 :                 .join(",")
    1526              :         );
    1527              : 
    1528            0 :         fail::fail_point!("shard-split-pre-prepare", |_| Err(anyhow::anyhow!(
    1529            0 :             "failpoint"
    1530            0 :         )));
    1531              : 
    1532            0 :         let parent_shard_identity = tenant.shard_identity;
    1533            0 :         let parent_tenant_conf = tenant.get_tenant_conf();
    1534            0 :         let parent_generation = tenant.generation;
    1535              : 
    1536              :         // Phase 1: Write out child shards' remote index files, in the parent tenant's current generation
    1537            0 :         if let Err(e) = tenant.split_prepare(&child_shards).await {
    1538              :             // If [`Tenant::split_prepare`] fails, we must reload the tenant, because it might
    1539              :             // have been left in a partially-shut-down state.
    1540            0 :             tracing::warn!("Failed to prepare for split: {e}, reloading Tenant before returning");
    1541            0 :             return Err(e);
    1542            0 :         }
    1543            0 : 
    1544            0 :         fail::fail_point!("shard-split-post-prepare", |_| Err(anyhow::anyhow!(
    1545            0 :             "failpoint"
    1546            0 :         )));
    1547              : 
    1548            0 :         self.resources.deletion_queue_client.flush_advisory();
    1549            0 : 
    1550            0 :         // Phase 2: Put the parent shard to InProgress and grab a reference to the parent Tenant
    1551            0 :         drop(tenant);
    1552            0 :         let mut parent_slot_guard =
    1553            0 :             tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
    1554            0 :         let parent = match parent_slot_guard.get_old_value() {
    1555            0 :             Some(TenantSlot::Attached(t)) => t,
    1556            0 :             Some(TenantSlot::Secondary(_)) => anyhow::bail!("Tenant location in secondary mode"),
    1557              :             Some(TenantSlot::InProgress(_)) => {
     1558              :                 // tenant_map_acquire_slot never returns InProgress; if a slot was InProgress
    1559              :                 // it would return an error.
    1560            0 :                 unreachable!()
    1561              :             }
    1562              :             None => {
    1563              :                 // We don't actually need the parent shard to still be attached to do our work, but it's
    1564              :                 // a weird enough situation that the caller probably didn't want us to continue working
    1565              :                 // if they had detached the tenant they requested the split on.
    1566            0 :                 anyhow::bail!("Detached parent shard in the middle of split!")
    1567              :             }
    1568              :         };
    1569            0 :         fail::fail_point!("shard-split-pre-hardlink", |_| Err(anyhow::anyhow!(
    1570            0 :             "failpoint"
    1571            0 :         )));
    1572              :         // Optimization: hardlink layers from the parent into the children, so that they don't have to
    1573              :         // re-download & duplicate the data referenced in their initial IndexPart
    1574            0 :         self.shard_split_hardlink(parent, child_shards.clone())
    1575            0 :             .await?;
    1576            0 :         fail::fail_point!("shard-split-post-hardlink", |_| Err(anyhow::anyhow!(
    1577            0 :             "failpoint"
    1578            0 :         )));
    1579              : 
    1580              :         // Take a snapshot of where the parent's WAL ingest had got to: we will wait for
    1581              :         // child shards to reach this point.
    1582            0 :         let mut target_lsns = HashMap::new();
    1583            0 :         for timeline in parent.timelines.lock().unwrap().clone().values() {
    1584            0 :             target_lsns.insert(timeline.timeline_id, timeline.get_last_record_lsn());
    1585            0 :         }
    1586              : 
    1587              :         // TODO: we should have the parent shard stop its WAL ingest here, it's a waste of resources
    1588              :         // and could slow down the children trying to catch up.
    1589              : 
    1590              :         // Phase 3: Spawn the child shards
    1591            0 :         for child_shard in &child_shards {
    1592            0 :             let mut child_shard_identity = parent_shard_identity;
    1593            0 :             if let Some(new_stripe_size) = new_stripe_size {
    1594            0 :                 child_shard_identity.stripe_size = new_stripe_size;
    1595            0 :             }
    1596            0 :             child_shard_identity.count = child_shard.shard_count;
    1597            0 :             child_shard_identity.number = child_shard.shard_number;
    1598            0 : 
    1599            0 :             let child_location_conf = LocationConf {
    1600            0 :                 mode: LocationMode::Attached(AttachedLocationConfig {
    1601            0 :                     generation: parent_generation,
    1602            0 :                     attach_mode: AttachmentMode::Single,
    1603            0 :                 }),
    1604            0 :                 shard: child_shard_identity,
    1605            0 :                 tenant_conf: parent_tenant_conf.clone(),
    1606            0 :             };
    1607            0 : 
    1608            0 :             self.upsert_location(
    1609            0 :                 *child_shard,
    1610            0 :                 child_location_conf,
    1611            0 :                 None,
    1612            0 :                 SpawnMode::Eager,
    1613            0 :                 ctx,
    1614            0 :             )
    1615            0 :             .await?;
    1616              :         }
    1617              : 
    1618            0 :         fail::fail_point!("shard-split-post-child-conf", |_| Err(anyhow::anyhow!(
    1619            0 :             "failpoint"
    1620            0 :         )));
    1621              : 
     1622              :         // Phase 4: wait for child shards' WAL ingest to catch up to target LSN
    1623            0 :         for child_shard_id in &child_shards {
    1624            0 :             let child_shard_id = *child_shard_id;
    1625            0 :             let child_shard = {
    1626            0 :                 let locked = self.tenants.read().unwrap();
    1627            0 :                 let peek_slot =
    1628            0 :                     tenant_map_peek_slot(&locked, &child_shard_id, TenantSlotPeekMode::Read)?;
    1629            0 :                 peek_slot.and_then(|s| s.get_attached()).cloned()
    1630              :             };
    1631            0 :             if let Some(t) = child_shard {
    1632              :                 // Wait for the child shard to become active: this should be very quick because it only
    1633              :                 // has to download the index_part that we just uploaded when creating it.
    1634            0 :                 if let Err(e) = t.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await {
    1635              :                     // This is not fatal: we have durably created the child shard.  It just makes the
     1636              :                     // split operation less seamless for clients, as we may detach the parent
    1637              :                     // shard before the child shards are fully ready to serve requests.
    1638            0 :                     tracing::warn!("Failed to wait for shard {child_shard_id} to activate: {e}");
    1639            0 :                     continue;
    1640            0 :                 }
    1641            0 : 
    1642            0 :                 let timelines = t.timelines.lock().unwrap().clone();
    1643            0 :                 for timeline in timelines.values() {
    1644            0 :                     let Some(target_lsn) = target_lsns.get(&timeline.timeline_id) else {
    1645            0 :                         continue;
    1646              :                     };
    1647              : 
    1648            0 :                     tracing::info!(
    1649            0 :                         "Waiting for child shard {}/{} to reach target lsn {}...",
    1650            0 :                         child_shard_id,
    1651            0 :                         timeline.timeline_id,
    1652              :                         target_lsn
    1653              :                     );
    1654              : 
    1655            0 :                     fail::fail_point!("shard-split-lsn-wait", |_| Err(anyhow::anyhow!(
    1656            0 :                         "failpoint"
    1657            0 :                     )));
    1658            0 :                     if let Err(e) = timeline
    1659            0 :                         .wait_lsn(
    1660            0 :                             *target_lsn,
    1661            0 :                             crate::tenant::timeline::WaitLsnWaiter::Tenant,
    1662            0 :                             crate::tenant::timeline::WaitLsnTimeout::Default,
    1663            0 :                             ctx,
    1664            0 :                         )
    1665            0 :                         .await
    1666              :                     {
    1667              :                         // Failure here might mean shutdown, in any case this part is an optimization
    1668              :                         // and we shouldn't hold up the split operation.
    1669            0 :                         tracing::warn!(
    1670            0 :                             "Failed to wait for timeline {} to reach lsn {target_lsn}: {e}",
    1671            0 :                             timeline.timeline_id
    1672              :                         );
    1673              :                     } else {
    1674            0 :                         tracing::info!(
    1675            0 :                             "Child shard {}/{} reached target lsn {}",
    1676            0 :                             child_shard_id,
    1677            0 :                             timeline.timeline_id,
    1678              :                             target_lsn
    1679              :                         );
    1680              :                     }
    1681              :                 }
    1682            0 :             }
    1683              :         }
    1684              : 
    1685              :         // Phase 5: Shut down the parent shard, and erase it from disk
    1686            0 :         let (_guard, progress) = completion::channel();
    1687            0 :         match parent.shutdown(progress, ShutdownMode::Hard).await {
    1688            0 :             Ok(()) => {}
    1689            0 :             Err(other) => {
    1690            0 :                 other.wait().await;
    1691              :             }
    1692              :         }
    1693            0 :         let local_tenant_directory = self.conf.tenant_path(&tenant_shard_id);
    1694            0 :         let tmp_path = safe_rename_tenant_dir(&local_tenant_directory)
    1695            0 :             .await
    1696            0 :             .with_context(|| format!("local tenant directory {local_tenant_directory:?} rename"))?;
    1697            0 :         self.background_purges.spawn(tmp_path);
    1698            0 : 
    1699            0 :         fail::fail_point!("shard-split-pre-finish", |_| Err(anyhow::anyhow!(
    1700            0 :             "failpoint"
    1701            0 :         )));
    1702              : 
    1703            0 :         parent_slot_guard.drop_old_value()?;
    1704              : 
    1705              :         // Phase 6: Release the InProgress on the parent shard
    1706            0 :         drop(parent_slot_guard);
    1707            0 : 
    1708            0 :         Ok(child_shards)
    1709            0 :     }
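
The split path above is guarded by `fail_point!` hooks ("shard-split-lsn-wait", "shard-split-pre-finish") so tests can abort it mid-flight. A minimal sketch of arming one from a test, assuming the standard `fail` crate `cfg`/`remove` API; the split invocation itself is elided:

    #[cfg(test)]
    mod shard_split_failpoint_sketch {
        #[test]
        fn split_fails_at_failpoint() {
            // Arm the failpoint: under the "return" action, the `|_| Err(...)`
            // closure at the fail_point! site fires and the split errors out.
            fail::cfg("shard-split-pre-finish", "return").unwrap();

            // ... drive a shard split here and assert it fails with "failpoint" ...

            // Disarm so other tests are unaffected.
            fail::remove("shard-split-pre-finish");
        }
    }
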
    1710              : 
    1711              :     /// Part of [`Self::shard_split`]: hard link parent shard layers into child shards, as an optimization
    1712              :     /// to avoid the children downloading them again.
    1713              :     ///
    1714              :     /// For each resident layer in the parent shard, we will hard link it into all of the child shards.
    1715            0 :     async fn shard_split_hardlink(
    1716            0 :         &self,
    1717            0 :         parent_shard: &TenantShard,
    1718            0 :         child_shards: Vec<TenantShardId>,
    1719            0 :     ) -> anyhow::Result<()> {
    1720            0 :         debug_assert_current_span_has_tenant_id();
    1721            0 : 
    1722            0 :         let parent_path = self.conf.tenant_path(parent_shard.get_tenant_shard_id());
    1723            0 :         let (parent_timelines, parent_layers) = {
    1724            0 :             let mut parent_layers = Vec::new();
    1725            0 :             let timelines = parent_shard.timelines.lock().unwrap().clone();
    1726            0 :             let parent_timelines = timelines.keys().cloned().collect::<Vec<_>>();
    1727            0 :             for timeline in timelines.values() {
    1728            0 :                 tracing::info!(timeline_id=%timeline.timeline_id, "Loading list of layers to hardlink");
    1729            0 :                 let layers = timeline.layers.read().await;
    1730              : 
    1731            0 :                 for layer in layers.likely_resident_layers() {
    1732            0 :                     let relative_path = layer
    1733            0 :                         .local_path()
    1734            0 :                         .strip_prefix(&parent_path)
    1735            0 :                         .context("Removing prefix from parent layer path")?;
    1736            0 :                     parent_layers.push(relative_path.to_owned());
    1737              :                 }
    1738              :             }
    1739              : 
    1740            0 :             if parent_layers.is_empty() {
    1741            0 :                 tracing::info!("Ancestor shard has no resident layer to hard link");
    1742            0 :             }
    1743              : 
    1744            0 :             (parent_timelines, parent_layers)
    1745            0 :         };
    1746            0 : 
    1747            0 :         let mut child_prefixes = Vec::new();
    1748            0 :         let mut create_dirs = Vec::new();
    1749              : 
    1750            0 :         for child in child_shards {
    1751            0 :             let child_prefix = self.conf.tenant_path(&child);
    1752            0 :             create_dirs.push(child_prefix.clone());
    1753            0 :             create_dirs.extend(
    1754            0 :                 parent_timelines
    1755            0 :                     .iter()
    1756            0 :                     .map(|t| self.conf.timeline_path(&child, t)),
    1757            0 :             );
    1758            0 : 
    1759            0 :             child_prefixes.push(child_prefix);
    1760            0 :         }
    1761              : 
    1762              :         // Since we will do a large number of small filesystem metadata operations, batch them into
    1763              :         // spawn_blocking calls rather than doing each one as a tokio::fs round-trip.
    1764            0 :         let span = tracing::Span::current();
    1765            0 :         let jh = tokio::task::spawn_blocking(move || -> anyhow::Result<usize> {
    1766            0 :             // Run this synchronous code in the same log context as the outer function that spawned it.
    1767            0 :             let _span = span.enter();
    1768            0 : 
    1769            0 :             tracing::info!("Creating {} directories", create_dirs.len());
    1770            0 :             for dir in &create_dirs {
    1771            0 :                 if let Err(e) = std::fs::create_dir_all(dir) {
    1772              :                     // Ignore AlreadyExists errors, drop out on all other errors
    1773            0 :                     match e.kind() {
    1774            0 :                         std::io::ErrorKind::AlreadyExists => {}
    1775              :                         _ => {
    1776            0 :                             return Err(anyhow::anyhow!(e).context(format!("Creating {dir}")));
    1777              :                         }
    1778              :                     }
    1779            0 :                 }
    1780              :             }
    1781              : 
    1782            0 :             for child_prefix in child_prefixes {
    1783            0 :                 tracing::info!(
    1784            0 :                     "Hard-linking {} parent layers into child path {}",
    1785            0 :                     parent_layers.len(),
    1786              :                     child_prefix
    1787              :                 );
    1788            0 :                 for relative_layer in &parent_layers {
    1789            0 :                     let parent_path = parent_path.join(relative_layer);
    1790            0 :                     let child_path = child_prefix.join(relative_layer);
    1791            0 :                     if let Err(e) = std::fs::hard_link(&parent_path, &child_path) {
    1792            0 :                         match e.kind() {
    1793            0 :                             std::io::ErrorKind::AlreadyExists => {}
    1794              :                             std::io::ErrorKind::NotFound => {
    1795            0 :                                 tracing::info!(
    1796            0 :                                     "Layer {} not found during hard-linking, evicted during split?",
    1797              :                                     relative_layer
    1798              :                                 );
    1799              :                             }
    1800              :                             _ => {
    1801            0 :                                 return Err(anyhow::anyhow!(e).context(format!(
    1802            0 :                                     "Hard linking {relative_layer} into {child_prefix}"
    1803            0 :                                 )));
    1804              :                             }
    1805              :                         }
    1806            0 :                     }
    1807              :                 }
    1808              :             }
    1809              : 
    1810              :             // Durability is not required for correctness, but if we crashed during split and
     1811              :             // then restarted with empty timeline dirs, it would be very inefficient to
    1812              :             // re-populate from remote storage.
    1813            0 :             tracing::info!("fsyncing {} directories", create_dirs.len());
    1814            0 :             for dir in create_dirs {
    1815            0 :                 if let Err(e) = crashsafe::fsync(&dir) {
    1816              :                     // Something removed a newly created timeline dir out from underneath us?  Extremely
    1817              :                     // unexpected, but not worth panic'ing over as this whole function is just an
    1818              :                     // optimization.
    1819            0 :                     tracing::warn!("Failed to fsync directory {dir}: {e}")
    1820            0 :                 }
    1821              :             }
    1822              : 
    1823            0 :             Ok(parent_layers.len())
    1824            0 :         });
    1825            0 : 
    1826            0 :         match jh.await {
    1827            0 :             Ok(Ok(layer_count)) => {
    1828            0 :                 tracing::info!(count = layer_count, "Hard linked layers into child shards");
    1829              :             }
    1830            0 :             Ok(Err(e)) => {
    1831            0 :                 // This is an optimization, so we tolerate failure.
    1832            0 :                 tracing::warn!("Error hard-linking layers, proceeding anyway: {e}")
    1833              :             }
    1834            0 :             Err(e) => {
    1835            0 :                 // This is something totally unexpected like a panic, so bail out.
    1836            0 :                 anyhow::bail!("Error joining hard linking task: {e}");
    1837              :             }
    1838              :         }
    1839              : 
    1840            0 :         Ok(())
    1841            0 :     }
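
The function above batches its many small metadata operations (mkdir, hard_link, fsync) into one spawn_blocking call instead of paying a tokio::fs round-trip per operation. A self-contained sketch of that batching shape, with illustrative paths and the same AlreadyExists/NotFound tolerance:

    use std::path::PathBuf;

    async fn hardlink_batch(
        src_root: PathBuf,
        dst_root: PathBuf,
        rel_paths: Vec<PathBuf>,
    ) -> anyhow::Result<usize> {
        tokio::task::spawn_blocking(move || {
            let mut linked: usize = 0;
            for rel in &rel_paths {
                match std::fs::hard_link(src_root.join(rel), dst_root.join(rel)) {
                    Ok(()) => linked += 1,
                    // Tolerate idempotent re-runs and races with eviction, as above.
                    Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {}
                    Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
                    Err(e) => return Err(anyhow::anyhow!(e)),
                }
            }
            Ok(linked)
        })
        .await? // a JoinError here means the blocking task panicked
    }
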
    1842              : 
    1843              :     ///
    1844              :     /// Shut down all tenants. This runs as part of pageserver shutdown.
    1845              :     ///
    1846              :     /// NB: We leave the tenants in the map, so that they remain accessible through
    1847              :     /// the management API until we shut it down. If we removed the shut-down tenants
    1848              :     /// from the tenants map, the management API would return 404 for these tenants,
    1849              :     /// because TenantsMap::get() now returns `None`.
     1850              :     /// That could easily be misinterpreted by the control plane, the consumer of the
    1851              :     /// management API. For example, it could attach the tenant on a different pageserver.
    1852              :     /// We would then be in split-brain once this pageserver restarts.
    1853              :     #[instrument(skip_all)]
    1854              :     pub(crate) async fn shutdown(&self) {
    1855              :         self.cancel.cancel();
    1856              : 
    1857              :         shutdown_all_tenants0(self.tenants).await
    1858              :     }
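
`shutdown` cancels the manager-wide token before draining tenants, so in-flight work can observe cancellation promptly. A self-contained sketch of that token fan-out with `tokio_util` (the spawned task is a stand-in for tenant work):

    use tokio_util::sync::CancellationToken;

    #[tokio::main]
    async fn main() {
        let root = CancellationToken::new();
        let child = root.child_token(); // fires when (or before) the root fires
        let worker = tokio::spawn(async move {
            child.cancelled().await;
            println!("worker observed shutdown");
        });
        root.cancel(); // analogous to self.cancel.cancel() above
        worker.await.unwrap();
    }
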
    1859              : 
    1860            0 :     pub(crate) async fn detach_tenant(
    1861            0 :         &self,
    1862            0 :         conf: &'static PageServerConf,
    1863            0 :         tenant_shard_id: TenantShardId,
    1864            0 :         deletion_queue_client: &DeletionQueueClient,
    1865            0 :     ) -> Result<(), TenantStateError> {
    1866            0 :         let tmp_path = self
    1867            0 :             .detach_tenant0(conf, tenant_shard_id, deletion_queue_client)
    1868            0 :             .await?;
    1869            0 :         self.background_purges.spawn(tmp_path);
    1870            0 : 
    1871            0 :         Ok(())
    1872            0 :     }
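
Detach returns quickly because the slow recursive delete is deferred: the directory is first renamed out of the way, then purged in the background. A minimal sketch of that shape (the suffix and helper name here are illustrative, not the actual `safe_rename_tenant_dir`):

    use std::path::{Path, PathBuf};

    async fn detach_dir(dir: &Path) -> anyhow::Result<PathBuf> {
        // Atomic rename on the same filesystem frees the original path at once.
        let tmp = dir.with_extension("___deleted"); // hypothetical suffix
        tokio::fs::rename(dir, &tmp).await?;
        let purge = tmp.clone();
        tokio::spawn(async move {
            // The slow recursive delete runs off the request path.
            if let Err(e) = tokio::fs::remove_dir_all(&purge).await {
                tracing::warn!("background purge of {purge:?} failed: {e}");
            }
        });
        Ok(tmp)
    }
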
    1873              : 
    1874            0 :     async fn detach_tenant0(
    1875            0 :         &self,
    1876            0 :         conf: &'static PageServerConf,
    1877            0 :         tenant_shard_id: TenantShardId,
    1878            0 :         deletion_queue_client: &DeletionQueueClient,
    1879            0 :     ) -> Result<Utf8PathBuf, TenantStateError> {
    1880            0 :         let tenant_dir_rename_operation = |tenant_id_to_clean: TenantShardId| async move {
    1881            0 :             let local_tenant_directory = conf.tenant_path(&tenant_id_to_clean);
    1882            0 :             safe_rename_tenant_dir(&local_tenant_directory)
    1883            0 :                 .await
    1884            0 :                 .with_context(|| {
    1885            0 :                     format!("local tenant directory {local_tenant_directory:?} rename")
    1886            0 :                 })
    1887            0 :         };
    1888              : 
    1889            0 :         let removal_result = remove_tenant_from_memory(
    1890            0 :             self.tenants,
    1891            0 :             tenant_shard_id,
    1892            0 :             tenant_dir_rename_operation(tenant_shard_id),
    1893            0 :         )
    1894            0 :         .await;
    1895              : 
    1896              :         // Flush pending deletions, so that they have a good chance of passing validation
    1897              :         // before this tenant is potentially re-attached elsewhere.
    1898            0 :         deletion_queue_client.flush_advisory();
    1899            0 : 
    1900            0 :         removal_result
    1901            0 :     }
    1902              : 
    1903            0 :     pub(crate) fn list_tenants(
    1904            0 :         &self,
    1905            0 :     ) -> Result<Vec<(TenantShardId, TenantState, Generation)>, TenantMapListError> {
    1906            0 :         let tenants = self.tenants.read().unwrap();
    1907            0 :         let m = match &*tenants {
    1908            0 :             TenantsMap::Initializing => return Err(TenantMapListError::Initializing),
    1909            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m,
    1910            0 :         };
    1911            0 :         Ok(m.iter()
    1912            0 :             .filter_map(|(id, tenant)| match tenant {
    1913            0 :                 TenantSlot::Attached(tenant) => {
    1914            0 :                     Some((*id, tenant.current_state(), tenant.generation()))
    1915              :                 }
    1916            0 :                 TenantSlot::Secondary(_) => None,
    1917            0 :                 TenantSlot::InProgress(_) => None,
    1918            0 :             })
    1919            0 :             .collect())
    1920            0 :     }
    1921              : 
    1922              :     /// Completes an earlier prepared timeline detach ancestor.
    1923            0 :     pub(crate) async fn complete_detaching_timeline_ancestor(
    1924            0 :         &self,
    1925            0 :         tenant_shard_id: TenantShardId,
    1926            0 :         timeline_id: TimelineId,
    1927            0 :         prepared: PreparedTimelineDetach,
    1928            0 :         behavior: DetachBehavior,
    1929            0 :         mut attempt: detach_ancestor::Attempt,
    1930            0 :         ctx: &RequestContext,
    1931            0 :     ) -> Result<HashSet<TimelineId>, detach_ancestor::Error> {
    1932              :         use detach_ancestor::Error;
    1933              : 
    1934            0 :         let slot_guard =
    1935            0 :             tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist).map_err(
    1936            0 :                 |e| {
    1937              :                     use TenantSlotError::*;
    1938              : 
    1939            0 :                     match e {
    1940            0 :                         MapState(TenantMapError::ShuttingDown) => Error::ShuttingDown,
    1941            0 :                         NotFound(_) | InProgress | MapState(_) => Error::DetachReparent(e.into()),
    1942              :                     }
    1943            0 :                 },
    1944            0 :             )?;
    1945              : 
    1946            0 :         let tenant = {
    1947            0 :             let old_slot = slot_guard
    1948            0 :                 .get_old_value()
    1949            0 :                 .as_ref()
    1950            0 :                 .expect("requested MustExist");
    1951              : 
    1952            0 :             let Some(tenant) = old_slot.get_attached() else {
    1953            0 :                 return Err(Error::DetachReparent(anyhow::anyhow!(
    1954            0 :                     "Tenant is not in attached state"
    1955            0 :                 )));
    1956              :             };
    1957              : 
    1958            0 :             if !tenant.is_active() {
    1959            0 :                 return Err(Error::DetachReparent(anyhow::anyhow!(
    1960            0 :                     "Tenant is not active"
    1961            0 :                 )));
    1962            0 :             }
    1963            0 : 
    1964            0 :             tenant.clone()
    1965              :         };
    1966              : 
    1967            0 :         let timeline = tenant
    1968            0 :             .get_timeline(timeline_id, true)
    1969            0 :             .map_err(Error::NotFound)?;
    1970              : 
    1971            0 :         let resp = timeline
    1972            0 :             .detach_from_ancestor_and_reparent(
    1973            0 :                 &tenant,
    1974            0 :                 prepared,
    1975            0 :                 attempt.ancestor_timeline_id,
    1976            0 :                 attempt.ancestor_lsn,
    1977            0 :                 behavior,
    1978            0 :                 ctx,
    1979            0 :             )
    1980            0 :             .await?;
    1981              : 
    1982            0 :         let mut slot_guard = slot_guard;
    1983              : 
    1984            0 :         let tenant = if resp.reset_tenant_required() {
    1985            0 :             attempt.before_reset_tenant();
    1986            0 : 
    1987            0 :             let (_guard, progress) = utils::completion::channel();
    1988            0 :             match tenant.shutdown(progress, ShutdownMode::Reload).await {
    1989            0 :                 Ok(()) => {
    1990            0 :                     slot_guard.drop_old_value().expect("it was just shutdown");
    1991            0 :                 }
    1992            0 :                 Err(_barrier) => {
    1993            0 :                     slot_guard.revert();
     1994            0 :                     // This really should not happen, unless a shutdown without acquiring the
     1995            0 :                     // tenant slot was already in progress; regardless, on restart the attempt
     1996            0 :                     // tracking will reset to retryable.
    1997            0 :                     return Err(Error::ShuttingDown);
    1998              :                 }
    1999              :             }
    2000              : 
    2001            0 :             let tenant_path = self.conf.tenant_path(&tenant_shard_id);
    2002            0 :             let config = TenantShard::load_tenant_config(self.conf, &tenant_shard_id)
    2003            0 :                 .map_err(|e| Error::DetachReparent(e.into()))?;
    2004              : 
    2005            0 :             let shard_identity = config.shard;
    2006            0 :             let tenant = tenant_spawn(
    2007            0 :                 self.conf,
    2008            0 :                 tenant_shard_id,
    2009            0 :                 &tenant_path,
    2010            0 :                 self.resources.clone(),
    2011            0 :                 AttachedTenantConf::try_from(config).map_err(Error::DetachReparent)?,
    2012            0 :                 shard_identity,
    2013            0 :                 None,
    2014            0 :                 SpawnMode::Eager,
    2015            0 :                 ctx,
    2016            0 :             )
    2017            0 :             .map_err(|_| Error::ShuttingDown)?;
    2018              : 
    2019              :             {
    2020            0 :                 let mut g = tenant.ongoing_timeline_detach.lock().unwrap();
    2021            0 :                 assert!(
    2022            0 :                     g.is_none(),
    2023            0 :                     "there cannot be any new timeline detach ancestor on newly created tenant"
    2024              :                 );
    2025            0 :                 *g = Some((attempt.timeline_id, attempt.new_barrier()));
    2026            0 :             }
    2027            0 : 
    2028            0 :             // if we bail out here, we will not allow a new attempt, which should be fine.
     2029            0 :             // If we bail out here, we will not allow a new attempt, which should be fine:
     2030            0 :             // the pageserver should be shutting down regardless. tenant_reset would help,
     2031            0 :             // unless it runs into the same problem.
    2032            0 :                 .upsert(TenantSlot::Attached(tenant.clone()))
    2033            0 :                 .map_err(|e| match e {
    2034            0 :                     TenantSlotUpsertError::ShuttingDown(_) => Error::ShuttingDown,
    2035            0 :                     other => Error::DetachReparent(other.into()),
    2036            0 :                 })?;
    2037            0 :             tenant
    2038              :         } else {
    2039            0 :             tracing::info!("skipping tenant_reset as no changes made required it");
    2040            0 :             tenant
    2041              :         };
    2042              : 
    2043            0 :         if let Some(reparented) = resp.completed() {
    2044              :             // finally ask the restarted tenant to complete the detach
    2045              :             //
    2046              :             // rationale for 9999s: we don't really have a timetable here; if retried, the caller
     2047              :             // will get a 503.
    2048            0 :             tenant
    2049            0 :                 .wait_to_become_active(std::time::Duration::from_secs(9999))
    2050            0 :                 .await
    2051            0 :                 .map_err(|e| {
    2052              :                     use GetActiveTenantError::{Cancelled, WillNotBecomeActive};
    2053              :                     use pageserver_api::models::TenantState;
    2054            0 :                     match e {
    2055              :                         Cancelled | WillNotBecomeActive(TenantState::Stopping { .. }) => {
    2056            0 :                             Error::ShuttingDown
    2057              :                         }
    2058            0 :                         other => Error::Complete(other.into()),
    2059              :                     }
    2060            0 :                 })?;
    2061              : 
    2062            0 :             utils::pausable_failpoint!(
    2063            0 :                 "timeline-detach-ancestor::after_activating_before_finding-pausable"
    2064            0 :             );
    2065              : 
    2066            0 :             let timeline = tenant
    2067            0 :                 .get_timeline(attempt.timeline_id, true)
    2068            0 :                 .map_err(Error::NotFound)?;
    2069              : 
    2070            0 :             timeline
    2071            0 :                 .complete_detaching_timeline_ancestor(&tenant, attempt, ctx)
    2072            0 :                 .await
    2073            0 :                 .map(|()| reparented)
    2074              :         } else {
    2075              :             // at least the latest versions have now been downloaded and refreshed; be ready to
    2076              :             // retry another time.
    2077            0 :             Err(Error::FailedToReparentAll)
    2078              :         }
    2079            0 :     }
    2080              : 
    2081              :     /// A page service client sends a TenantId, and to look up the correct Tenant we must
    2082              :     /// resolve this to a fully qualified TenantShardId.
    2083              :     ///
    2084              :     /// During shard splits: we shall see parent shards in InProgress state and skip them, and
    2085              :     /// instead match on child shards which should appear in Attached state.  Very early in a shard
    2086              :     /// split, or in other cases where a shard is InProgress, we will return our own InProgress result
    2087              :     /// to instruct the caller to wait for that to finish before querying again.
    2088            0 :     pub(crate) fn resolve_attached_shard(
    2089            0 :         &self,
    2090            0 :         tenant_id: &TenantId,
    2091            0 :         selector: ShardSelector,
    2092            0 :     ) -> ShardResolveResult {
    2093            0 :         let tenants = self.tenants.read().unwrap();
    2094            0 :         let mut want_shard = None;
    2095            0 :         let mut any_in_progress = None;
    2096            0 : 
    2097            0 :         match &*tenants {
    2098            0 :             TenantsMap::Initializing => ShardResolveResult::NotFound,
    2099            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => {
    2100            0 :                 for slot in m.range(TenantShardId::tenant_range(*tenant_id)) {
    2101              :                     // Ignore all slots that don't contain an attached tenant
    2102            0 :                     let tenant = match &slot.1 {
    2103            0 :                         TenantSlot::Attached(t) => t,
    2104            0 :                         TenantSlot::InProgress(barrier) => {
    2105            0 :                             // We might still find a usable shard, but in case we don't, remember that
    2106            0 :                             // we saw at least one InProgress slot, so that we can distinguish this case
    2107            0 :                             // from a simple NotFound in our return value.
    2108            0 :                             any_in_progress = Some(barrier.clone());
    2109            0 :                             continue;
    2110              :                         }
    2111            0 :                         _ => continue,
    2112              :                     };
    2113              : 
    2114            0 :                     match selector {
    2115            0 :                         ShardSelector::Zero if slot.0.shard_number == ShardNumber(0) => {
    2116            0 :                             return ShardResolveResult::Found(tenant.clone());
    2117              :                         }
    2118            0 :                         ShardSelector::Page(key) => {
    2119            0 :                             // First slot we see for this tenant, calculate the expected shard number
    2120            0 :                             // for the key: we will use this for checking if this and subsequent
    2121            0 :                             // slots contain the key, rather than recalculating the hash each time.
    2122            0 :                             if want_shard.is_none() {
    2123            0 :                                 want_shard = Some(tenant.shard_identity.get_shard_number(&key));
    2124            0 :                             }
    2125              : 
    2126            0 :                             if Some(tenant.shard_identity.number) == want_shard {
    2127            0 :                                 return ShardResolveResult::Found(tenant.clone());
    2128            0 :                             }
    2129              :                         }
    2130            0 :                         ShardSelector::Known(shard)
    2131            0 :                             if tenant.shard_identity.shard_index() == shard =>
    2132            0 :                         {
    2133            0 :                             return ShardResolveResult::Found(tenant.clone());
    2134              :                         }
    2135            0 :                         _ => continue,
    2136              :                     }
    2137              :                 }
    2138              : 
    2139              :                 // Fall through: we didn't find a slot that was in Attached state & matched our selector.  If
    2140              :                 // we found one or more InProgress slot, indicate to caller that they should retry later.  Otherwise
    2141              :                 // this requested shard simply isn't found.
    2142            0 :                 if let Some(barrier) = any_in_progress {
    2143            0 :                     ShardResolveResult::InProgress(barrier)
    2144              :                 } else {
    2145            0 :                     ShardResolveResult::NotFound
    2146              :                 }
    2147              :             }
    2148              :         }
    2149            0 :     }
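
A caller is expected to treat `InProgress` as "retry after the transition settles". A sketch of that loop, assuming `Barrier::wait().await` semantics and the `Found(Arc<TenantShard>)` payload used above:

    async fn resolve_with_retry(
        mgr: &TenantManager,
        tenant_id: TenantId,
    ) -> Option<Arc<TenantShard>> {
        loop {
            match mgr.resolve_attached_shard(&tenant_id, ShardSelector::Zero) {
                ShardResolveResult::Found(shard) => return Some(shard),
                ShardResolveResult::NotFound => return None,
                ShardResolveResult::InProgress(barrier) => {
                    // Slot mid-transition (e.g. during a shard split): wait for
                    // it to settle, then re-run the lookup.
                    barrier.wait().await;
                }
            }
        }
    }
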
    2150              : 
    2151              :     /// Calculate the tenant shards' contributions to this pageserver's utilization metrics.  The
    2152              :     /// returned values are:
    2153              :     ///  - the number of bytes of local disk space this pageserver's shards are requesting, i.e.
    2154              :     ///    how much space they would use if not impacted by disk usage eviction.
    2155              :     ///  - the number of tenant shards currently on this pageserver, including attached
    2156              :     ///    and secondary.
    2157              :     ///
    2158              :     /// This function is quite expensive: callers are expected to cache the result and
    2159              :     /// limit how often they call it.
    2160            0 :     pub(crate) fn calculate_utilization(&self) -> Result<(u64, u32), TenantMapListError> {
    2161            0 :         let tenants = self.tenants.read().unwrap();
    2162            0 :         let m = match &*tenants {
    2163            0 :             TenantsMap::Initializing => return Err(TenantMapListError::Initializing),
    2164            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m,
    2165            0 :         };
    2166            0 :         let shard_count = m.len();
    2167            0 :         let mut wanted_bytes = 0;
    2168              : 
    2169            0 :         for tenant_slot in m.values() {
    2170            0 :             match tenant_slot {
    2171            0 :                 TenantSlot::InProgress(_barrier) => {
    2172            0 :                     // While a slot is being changed, we can't know how much storage it wants.  This
    2173            0 :                     // means this function's output can fluctuate if a lot of changes are going on
    2174            0 :                     // (such as transitions from secondary to attached).
    2175            0 :                     //
    2176            0 :                     // We could wait for the barrier and retry, but it's important that the utilization
    2177            0 :                     // API is responsive, and the data quality impact is not very significant.
    2178            0 :                     continue;
    2179              :                 }
    2180            0 :                 TenantSlot::Attached(tenant) => {
    2181            0 :                     wanted_bytes += tenant.local_storage_wanted();
    2182            0 :                 }
    2183            0 :                 TenantSlot::Secondary(secondary) => {
    2184            0 :                     let progress = secondary.progress.lock().unwrap();
    2185            0 :                     wanted_bytes += if progress.heatmap_mtime.is_some() {
    2186              :                         // If we have heatmap info, then we will 'want' the sum
    2187              :                         // of the size of layers in the heatmap: this is how much space
    2188              :                         // we would use if not doing any eviction.
    2189            0 :                         progress.bytes_total
    2190              :                     } else {
    2191              :                         // In the absence of heatmap info, assume that the secondary location simply
    2192              :                         // needs as much space as it is currently using.
    2193            0 :                         secondary.resident_size_metric.get()
    2194              :                     }
    2195              :                 }
    2196              :             }
    2197              :         }
    2198              : 
    2199            0 :         Ok((wanted_bytes, shard_count as u32))
    2200            0 :     }
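
Since this walks every slot under the map lock, a caller might memoize the result with a short TTL rather than recomputing per request. A self-contained sketch (the TTL policy is illustrative):

    use std::sync::Mutex;
    use std::time::{Duration, Instant};

    struct CachedUtilization {
        ttl: Duration,
        last: Mutex<Option<(Instant, (u64, u32))>>,
    }

    impl CachedUtilization {
        fn get(&self, compute: impl FnOnce() -> (u64, u32)) -> (u64, u32) {
            let mut last = self.last.lock().unwrap();
            match *last {
                // Serve the cached value while it is fresh.
                Some((at, v)) if at.elapsed() < self.ttl => v,
                _ => {
                    let v = compute();
                    *last = Some((Instant::now(), v));
                    v
                }
            }
        }
    }
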
    2201              : 
    2202              :     #[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id))]
    2203              :     pub(crate) async fn immediate_gc(
    2204              :         &self,
    2205              :         tenant_shard_id: TenantShardId,
    2206              :         timeline_id: TimelineId,
    2207              :         gc_req: TimelineGcRequest,
    2208              :         cancel: CancellationToken,
    2209              :         ctx: &RequestContext,
    2210              :     ) -> Result<GcResult, ApiError> {
    2211              :         let tenant = {
    2212              :             let guard = self.tenants.read().unwrap();
    2213              :             guard
    2214              :                 .get(&tenant_shard_id)
    2215              :                 .cloned()
    2216            0 :                 .with_context(|| format!("tenant {tenant_shard_id}"))
    2217            0 :                 .map_err(|e| ApiError::NotFound(e.into()))?
    2218              :         };
    2219              : 
    2220            0 :         let gc_horizon = gc_req.gc_horizon.unwrap_or_else(|| tenant.get_gc_horizon());
    2221              :         // Use tenant's pitr setting
    2222              :         let pitr = tenant.get_pitr_interval();
    2223              : 
    2224              :         tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
    2225              : 
    2226              :         // Run in task_mgr to avoid race with tenant_detach operation
    2227              :         let ctx: RequestContext =
    2228              :             ctx.detached_child(TaskKind::GarbageCollector, DownloadBehavior::Download);
    2229              : 
    2230            0 :         let _gate_guard = tenant.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    2231              : 
    2232              :         fail::fail_point!("immediate_gc_task_pre");
    2233              : 
    2234              :         #[allow(unused_mut)]
    2235              :         let mut result = tenant
    2236              :             .gc_iteration(Some(timeline_id), gc_horizon, pitr, &cancel, &ctx)
    2237              :             .await;
    2238              :         // FIXME: `gc_iteration` can return an error for multiple reasons; we should handle it
    2239              :         // better once the types support it.
    2240              : 
    2241              :         #[cfg(feature = "testing")]
    2242              :         {
    2243              :             // we need to synchronize with drop completion for python tests without polling for
    2244              :             // log messages
    2245              :             if let Ok(result) = result.as_mut() {
    2246              :                 let mut js = tokio::task::JoinSet::new();
    2247              :                 for layer in std::mem::take(&mut result.doomed_layers) {
    2248              :                     js.spawn(layer.wait_drop());
    2249              :                 }
    2250              :                 tracing::info!(
    2251              :                     total = js.len(),
    2252              :                     "starting to wait for the gc'd layers to be dropped"
    2253              :                 );
    2254              :                 while let Some(res) = js.join_next().await {
    2255              :                     res.expect("wait_drop should not panic");
    2256              :                 }
    2257              :             }
    2258              : 
    2259              :             let timeline = tenant.get_timeline(timeline_id, false).ok();
    2260            0 :             let rtc = timeline.as_ref().map(|x| &x.remote_client);
    2261              : 
    2262              :             if let Some(rtc) = rtc {
    2263              :                 // layer drops schedule actions on remote timeline client to actually do the
    2264              :                 // deletions; don't care about the shutdown error, just exit fast
    2265              :                 drop(rtc.wait_completion().await);
    2266              :             }
    2267              :         }
    2268              : 
    2269            0 :         result.map_err(|e| match e {
    2270            0 :             GcError::TenantCancelled | GcError::TimelineCancelled => ApiError::ShuttingDown,
    2271              :             GcError::TimelineNotFound => {
    2272            0 :                 ApiError::NotFound(anyhow::anyhow!("Timeline not found").into())
    2273              :             }
    2274            0 :             other => ApiError::InternalServerError(anyhow::anyhow!(other)),
    2275            0 :         })
    2276              :     }
    2277              : }
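
The `testing`-only block above drains a `JoinSet` so the request only returns after all doomed layers are actually dropped. The general drain shape, self-contained with toy tasks standing in for `layer.wait_drop()`:

    #[tokio::main]
    async fn main() {
        let mut js = tokio::task::JoinSet::new();
        for i in 0..4u32 {
            js.spawn(async move { i * 2 }); // stand-in for layer.wait_drop()
        }
        while let Some(res) = js.join_next().await {
            let v = res.expect("task should not panic");
            println!("completed: {v}");
        }
    }
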
    2278              : 
    2279              : #[derive(Debug, thiserror::Error)]
    2280              : pub(crate) enum GetTenantError {
     2281              :     /// NotFound carries a TenantId rather than a TenantShardId, because this error type is used from
    2282              :     /// getters that use a TenantId and a ShardSelector, not just getters that target a specific shard.
    2283              :     #[error("Tenant {0} not found")]
    2284              :     NotFound(TenantId),
    2285              : 
    2286              :     #[error("Tenant {0} not found")]
    2287              :     ShardNotFound(TenantShardId),
    2288              : 
    2289              :     #[error("Tenant {0} is not active")]
    2290              :     NotActive(TenantShardId),
    2291              : 
    2292              :     // Initializing or shutting down: cannot authoritatively say whether we have this tenant
    2293              :     #[error("Tenant map is not available: {0}")]
    2294              :     MapState(#[from] TenantMapError),
    2295              : }
    2296              : 
    2297              : #[derive(thiserror::Error, Debug)]
    2298              : pub(crate) enum GetActiveTenantError {
    2299              :     /// We may time out either while TenantSlot is InProgress, or while the Tenant
    2300              :     /// is in a non-Active state
    2301              :     #[error(
    2302              :         "Timed out waiting {wait_time:?} for tenant active state. Latest state: {latest_state:?}"
    2303              :     )]
    2304              :     WaitForActiveTimeout {
    2305              :         latest_state: Option<TenantState>,
    2306              :         wait_time: Duration,
    2307              :     },
    2308              : 
    2309              :     /// The TenantSlot is absent, or in secondary mode
    2310              :     #[error(transparent)]
    2311              :     NotFound(#[from] GetTenantError),
    2312              : 
    2313              :     /// Cancellation token fired while we were waiting
    2314              :     #[error("cancelled")]
    2315              :     Cancelled,
    2316              : 
    2317              :     /// Tenant exists, but is in a state that cannot become active (e.g. Stopping, Broken)
    2318              :     #[error("will not become active.  Current state: {0}")]
    2319              :     WillNotBecomeActive(TenantState),
    2320              : 
    2321              :     /// Broken is logically a subset of WillNotBecomeActive, but a distinct error is useful as
    2322              :     /// WillNotBecomeActive is a permitted error under some circumstances, whereas broken should
    2323              :     /// never happen.
    2324              :     #[error("Tenant is broken: {0}")]
    2325              :     Broken(String),
    2326              : 
    2327              :     #[error("reconnect to switch tenant id")]
    2328              :     SwitchedTenant,
    2329              : }
    2330              : 
    2331              : #[derive(Debug, thiserror::Error)]
    2332              : pub(crate) enum DeleteTimelineError {
    2333              :     #[error("Tenant {0}")]
    2334              :     Tenant(#[from] GetTenantError),
    2335              : 
    2336              :     #[error("Timeline {0}")]
    2337              :     Timeline(#[from] crate::tenant::DeleteTimelineError),
    2338              : }
    2339              : 
    2340              : #[derive(Debug, thiserror::Error)]
    2341              : pub(crate) enum TenantStateError {
    2342              :     #[error("Tenant {0} is stopping")]
    2343              :     IsStopping(TenantShardId),
    2344              :     #[error(transparent)]
    2345              :     SlotError(#[from] TenantSlotError),
    2346              :     #[error(transparent)]
    2347              :     SlotUpsertError(#[from] TenantSlotUpsertError),
    2348              :     #[error(transparent)]
    2349              :     Other(#[from] anyhow::Error),
    2350              : }
    2351              : 
    2352              : #[derive(Debug, thiserror::Error)]
    2353              : pub(crate) enum TenantMapListError {
     2354              :     #[error("tenant map is still initializing")]
    2355              :     Initializing,
    2356              : }
    2357              : 
    2358              : #[derive(Debug, thiserror::Error)]
    2359              : pub(crate) enum TenantMapInsertError {
    2360              :     #[error(transparent)]
    2361              :     SlotError(#[from] TenantSlotError),
    2362              :     #[error(transparent)]
    2363              :     SlotUpsertError(#[from] TenantSlotUpsertError),
    2364              :     #[error(transparent)]
    2365              :     Other(#[from] anyhow::Error),
    2366              : }
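
The error enums in this module layer with `thiserror`: a superset error forwards an inner error's `Display` via `#[error(transparent)]` and gains an automatic conversion via `#[from]`, so `?` lifts the inner error into the outer one. A self-contained illustration of the same shape, with toy names:

    use thiserror::Error;

    #[derive(Debug, Error)]
    enum MapError {
        #[error("map is still initializing")]
        StillInitializing,
    }

    #[derive(Debug, Error)]
    enum SlotError {
        #[error("slot not found")]
        NotFound,
        #[error(transparent)]
        Map(#[from] MapError),
    }

    fn lookup() -> Result<(), SlotError> {
        let r: Result<(), MapError> = Err(MapError::StillInitializing);
        r?; // auto-converts to SlotError::Map via the #[from] impl
        Ok(())
    }
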
    2367              : 
    2368              : /// Superset of TenantMapError: issues that can occur when acquiring a slot
    2369              : /// for a particular tenant ID.
    2370              : #[derive(Debug, thiserror::Error)]
    2371              : pub(crate) enum TenantSlotError {
    2372              :     /// When acquiring a slot with the expectation that the tenant already exists.
    2373              :     #[error("Tenant {0} not found")]
    2374              :     NotFound(TenantShardId),
    2375              : 
    2376              :     // Tried to read a slot that is currently being mutated by another administrative
    2377              :     // operation.
    2378              :     #[error("tenant has a state change in progress, try again later")]
    2379              :     InProgress,
    2380              : 
    2381              :     #[error(transparent)]
    2382              :     MapState(#[from] TenantMapError),
    2383              : }
    2384              : 
    2385              : /// Superset of TenantMapError: issues that can occur when using a SlotGuard
    2386              : /// to insert a new value.
    2387              : #[derive(thiserror::Error)]
    2388              : pub(crate) enum TenantSlotUpsertError {
    2389              :     /// An error where the slot is in an unexpected state, indicating a code bug
    2390              :     #[error("Internal error updating Tenant")]
    2391              :     InternalError(Cow<'static, str>),
    2392              : 
    2393              :     #[error(transparent)]
    2394              :     MapState(TenantMapError),
    2395              : 
    2396              :     // If we encounter TenantManager shutdown during upsert, we must carry the Completion
    2397              :     // from the SlotGuard, so that the caller can hold it while they clean up: otherwise
    2398              :     // TenantManager shutdown might race ahead before we're done cleaning up any Tenant that
    2399              :     // was protected by the SlotGuard.
    2400              :     #[error("Shutting down")]
    2401              :     ShuttingDown((TenantSlot, utils::completion::Completion)),
    2402              : }
    2403              : 
    2404              : impl std::fmt::Debug for TenantSlotUpsertError {
    2405            0 :     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    2406            0 :         match self {
    2407            0 :             Self::InternalError(reason) => write!(f, "Internal Error {reason}"),
    2408            0 :             Self::MapState(map_error) => write!(f, "Tenant map state: {map_error:?}"),
    2409            0 :             Self::ShuttingDown(_completion) => write!(f, "Tenant map shutting down"),
    2410              :         }
    2411            0 :     }
    2412              : }
    2413              : 
    2414              : #[derive(Debug, thiserror::Error)]
    2415              : enum TenantSlotDropError {
    2416              :     /// It is only legal to drop a TenantSlot if its contents are fully shut down
    2417              :     #[error("Tenant was not shut down")]
    2418              :     NotShutdown,
    2419              : }
    2420              : 
    2421              : /// Errors that can happen any time we are walking the tenant map to try and acquire
    2422              : /// the TenantSlot for a particular tenant.
    2423              : #[derive(Debug, thiserror::Error)]
    2424              : pub(crate) enum TenantMapError {
    2425              :     // Tried to read while initializing
    2426              :     #[error("tenant map is still initializing")]
    2427              :     StillInitializing,
    2428              : 
    2429              :     // Tried to read while shutting down
    2430              :     #[error("tenant map is shutting down")]
    2431              :     ShuttingDown,
    2432              : }
    2433              : 
    2434              : /// Guards a particular tenant_id's content in the TenantsMap.
    2435              : ///
    2436              : /// While this structure exists, the TenantsMap will contain a [`TenantSlot::InProgress`]
    2437              : /// for this tenant, which acts as a marker for any operations targeting
    2438              : /// this tenant to retry later, or wait for the InProgress state to end.
    2439              : ///
    2440              : /// This structure enforces the important invariant that we do not have overlapping
     2441              : /// tasks that will try to use local storage for the same tenant ID: we enforce that
     2442              : /// the previous contents of a slot have been shut down before the slot can be
     2443              : /// left empty or used for something else.
    2444              : ///
    2445              : /// Holders of a SlotGuard should explicitly dispose of it, using either `upsert`
    2446              : /// to provide a new value, or `revert` to put the slot back into its initial
    2447              : /// state.  If the SlotGuard is dropped without calling either of these, then
    2448              : /// we will leave the slot empty if our `old_value` is already shut down, else
    2449              : /// we will replace the slot with `old_value` (equivalent to doing a revert).
    2450              : ///
    2451              : /// The `old_value` may be dropped before the SlotGuard is dropped, by calling
    2452              : /// `drop_old_value`.  It is an error to call this without shutting down
     2453              : /// the contents of `old_value`.
    2454              : pub(crate) struct SlotGuard {
    2455              :     tenant_shard_id: TenantShardId,
    2456              :     old_value: Option<TenantSlot>,
    2457              :     upserted: bool,
    2458              : 
    2459              :     /// [`TenantSlot::InProgress`] carries the corresponding Barrier: it will
    2460              :     /// release any waiters as soon as this SlotGuard is dropped.
    2461              :     completion: utils::completion::Completion,
    2462              : }
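
The doc comment above describes an RAII pattern: while the guard lives, the map holds an InProgress marker, and dropping the guard without `upsert` restores the prior state. A toy, self-contained version of that shape (SlotGuard adds shutdown checks, metrics, and a completion Barrier on top):

    use std::collections::BTreeMap;
    use std::sync::{Arc, Mutex};

    enum Slot {
        InProgress,
        Value(String),
    }

    struct Guard {
        map: Arc<Mutex<BTreeMap<u32, Slot>>>,
        key: u32,
        old: Option<Slot>,
        upserted: bool,
    }

    impl Guard {
        fn acquire(map: Arc<Mutex<BTreeMap<u32, Slot>>>, key: u32) -> Self {
            // Mark the slot InProgress; remember whatever was there before.
            let old = map.lock().unwrap().insert(key, Slot::InProgress);
            Guard { map, key, old, upserted: false }
        }

        fn upsert(mut self, new: Slot) {
            self.map.lock().unwrap().insert(self.key, new);
            self.upserted = true; // Drop below becomes a no-op
        }
    }

    impl Drop for Guard {
        fn drop(&mut self) {
            if self.upserted {
                return;
            }
            let mut m = self.map.lock().unwrap();
            match self.old.take() {
                Some(v) => {
                    m.insert(self.key, v); // revert to the previous state
                }
                None => {
                    m.remove(&self.key); // leave the slot empty
                }
            }
        }
    }

    fn main() {
        let map = Arc::new(Mutex::new(BTreeMap::new()));
        let guard = Guard::acquire(map.clone(), 1);
        // Other actors now see Slot::InProgress under key 1 and must retry/wait.
        guard.upsert(Slot::Value("attached".into()));
        assert!(matches!(map.lock().unwrap().get(&1), Some(Slot::Value(_))));
    }
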
    2463              : 
    2464              : impl SlotGuard {
    2465           12 :     fn new(
    2466           12 :         tenant_shard_id: TenantShardId,
    2467           12 :         old_value: Option<TenantSlot>,
    2468           12 :         completion: utils::completion::Completion,
    2469           12 :     ) -> Self {
    2470           12 :         Self {
    2471           12 :             tenant_shard_id,
    2472           12 :             old_value,
    2473           12 :             upserted: false,
    2474           12 :             completion,
    2475           12 :         }
    2476           12 :     }
    2477              : 
    2478              :     /// Get any value that was present in the slot before we acquired ownership
    2479              :     /// of it: in state transitions, this will be the old state.
    2480              :     ///
    2481              :     // FIXME: get_ prefix
    2482              :     // FIXME: this should be .as_ref() -- unsure why no clippy
    2483           12 :     fn get_old_value(&self) -> &Option<TenantSlot> {
    2484           12 :         &self.old_value
    2485           12 :     }
    2486              : 
    2487              :     /// Emplace a new value in the slot.  This consumes the guard, and after
    2488              :     /// returning, the slot is no longer protected from concurrent changes.
    2489            0 :     fn upsert(mut self, new_value: TenantSlot) -> Result<(), TenantSlotUpsertError> {
    2490            0 :         if !self.old_value_is_shutdown() {
    2491              :             // This is a bug: callers should never try to drop an old value without
    2492              :             // shutting it down
    2493            0 :             return Err(TenantSlotUpsertError::InternalError(
    2494            0 :                 "Old TenantSlot value not shut down".into(),
    2495            0 :             ));
    2496            0 :         }
    2497              : 
    2498            0 :         let replaced = {
    2499            0 :             let mut locked = TENANTS.write().unwrap();
    2500            0 : 
    2501            0 :             if let TenantSlot::InProgress(_) = new_value {
    2502              :                 // It is never expected to try and upsert InProgress via this path: it should
    2503              :                 // only be written via the tenant_map_acquire_slot path.  If we hit this it's a bug.
    2504            0 :                 return Err(TenantSlotUpsertError::InternalError(
    2505            0 :                     "Attempt to upsert an InProgress state".into(),
    2506            0 :                 ));
    2507            0 :             }
    2508              : 
    2509            0 :             let m = match &mut *locked {
    2510              :                 TenantsMap::Initializing => {
    2511            0 :                     return Err(TenantSlotUpsertError::MapState(
    2512            0 :                         TenantMapError::StillInitializing,
    2513            0 :                     ));
    2514              :                 }
    2515              :                 TenantsMap::ShuttingDown(_) => {
    2516            0 :                     return Err(TenantSlotUpsertError::ShuttingDown((
    2517            0 :                         new_value,
    2518            0 :                         self.completion.clone(),
    2519            0 :                     )));
    2520              :                 }
    2521            0 :                 TenantsMap::Open(m) => m,
    2522            0 :             };
    2523            0 : 
    2524            0 :             METRICS.slot_inserted(&new_value);
    2525            0 : 
    2526            0 :             let replaced = m.insert(self.tenant_shard_id, new_value);
    2527            0 :             self.upserted = true;
    2528            0 :             if let Some(replaced) = replaced.as_ref() {
    2529            0 :                 METRICS.slot_removed(replaced);
    2530            0 :             }
    2531              : 
    2532            0 :             replaced
    2533              :         };
    2534              : 
    2535              :         // Sanity check: on an upsert we should always be replacing an InProgress marker
    2536            0 :         match replaced {
    2537              :             Some(TenantSlot::InProgress(_)) => {
    2538              :                 // Expected case: we find our InProgress in the map: nothing should have
    2539              :                 // replaced it because the code that acquires slots will not grant another
    2540              :                 // one for the same TenantId.
    2541            0 :                 Ok(())
    2542              :             }
    2543              :             None => {
    2544            0 :                 METRICS.unexpected_errors.inc();
    2545            0 :                 error!(
    2546              :                     tenant_shard_id = %self.tenant_shard_id,
    2547            0 :                     "Missing InProgress marker during tenant upsert, this is a bug."
    2548              :                 );
    2549            0 :                 Err(TenantSlotUpsertError::InternalError(
    2550            0 :                     "Missing InProgress marker during tenant upsert".into(),
    2551            0 :                 ))
    2552              :             }
    2553            0 :             Some(slot) => {
    2554            0 :                 METRICS.unexpected_errors.inc();
    2555            0 :                 error!(tenant_shard_id=%self.tenant_shard_id, "Unexpected contents of TenantSlot during upsert, this is a bug.  Contents: {:?}", slot);
    2556            0 :                 Err(TenantSlotUpsertError::InternalError(
    2557            0 :                     "Unexpected contents of TenantSlot".into(),
    2558            0 :                 ))
    2559              :             }
    2560              :         }
    2561            0 :     }
    2562              : 
    2563              :     /// Replace the InProgress slot with whatever was in the guard when we started
    2564            0 :     fn revert(mut self) {
    2565            0 :         if let Some(value) = self.old_value.take() {
    2566            0 :             match self.upsert(value) {
    2567            0 :                 Err(TenantSlotUpsertError::InternalError(_)) => {
    2568            0 :                     // We already logged the error, nothing else we can do.
    2569            0 :                 }
    2570              :                 Err(
    2571              :                     TenantSlotUpsertError::MapState(_) | TenantSlotUpsertError::ShuttingDown(_),
    2572            0 :                 ) => {
    2573            0 :                     // If the map is shutting down, we need not replace anything
    2574            0 :                 }
    2575            0 :                 Ok(()) => {}
    2576              :             }
    2577            0 :         }
    2578            0 :     }
    2579              : 
    2580              :     /// We may never drop our old value until it is cleanly shut down: otherwise we might leave
    2581              :     /// rogue background tasks that would write to the local tenant directory that this guard
    2582              :     /// is responsible for protecting
    2583           12 :     fn old_value_is_shutdown(&self) -> bool {
    2584           12 :         match self.old_value.as_ref() {
    2585           12 :             Some(TenantSlot::Attached(tenant)) => tenant.gate.close_complete(),
    2586            0 :             Some(TenantSlot::Secondary(secondary_tenant)) => secondary_tenant.gate.close_complete(),
    2587              :             Some(TenantSlot::InProgress(_)) => {
    2588              :                 // A SlotGuard cannot be constructed for a slot that was already InProgress
    2589            0 :                 unreachable!()
    2590              :             }
    2591            0 :             None => true,
    2592              :         }
    2593           12 :     }
    2594              : 
    2595              :     /// The guard holder is done with the old value of the slot: they are obliged to have
    2596              :     /// already shut it down before we reach this point.
    2597           12 :     fn drop_old_value(&mut self) -> Result<(), TenantSlotDropError> {
    2598           12 :         if !self.old_value_is_shutdown() {
    2599            0 :             Err(TenantSlotDropError::NotShutdown)
    2600              :         } else {
    2601           12 :             self.old_value.take();
    2602           12 :             Ok(())
    2603              :         }
    2604           12 :     }
    2605              : }
    2606              : 
    2607              : impl Drop for SlotGuard {
    2608           12 :     fn drop(&mut self) {
    2609           12 :         if self.upserted {
    2610            0 :             return;
    2611           12 :         }
    2612           12 :         // Our old value is already shutdown, or it never existed: it is safe
    2613           12 :         // for us to fully release the TenantSlot back into an empty state
    2614           12 : 
    2615           12 :         let mut locked = TENANTS.write().unwrap();
    2616              : 
    2617           12 :         let m = match &mut *locked {
    2618              :             TenantsMap::Initializing => {
    2619              :                 // There is no map, this should never happen.
    2620           12 :                 return;
    2621              :             }
    2622              :             TenantsMap::ShuttingDown(_) => {
    2623              :                 // When we transition to shutdown, InProgress elements are removed
    2624              :                 // from the map, so we do not need to clean up our InProgress marker.
    2625              :                 // See [`shutdown_all_tenants0`]
    2626            0 :                 return;
    2627              :             }
    2628            0 :             TenantsMap::Open(m) => m,
    2629              :         };
    2630              : 
    2631              :         use std::collections::btree_map::Entry;
    2632            0 :         match m.entry(self.tenant_shard_id) {
    2633            0 :             Entry::Occupied(mut entry) => {
    2634            0 :                 if !matches!(entry.get(), TenantSlot::InProgress(_)) {
    2635            0 :                     METRICS.unexpected_errors.inc();
    2636            0 :                     error!(tenant_shard_id=%self.tenant_shard_id, "Unexpected contents of TenantSlot during drop, this is a bug.  Contents: {:?}", entry.get());
    2637            0 :                 }
    2638              : 
    2639            0 :                 if self.old_value_is_shutdown() {
    2640            0 :                     METRICS.slot_removed(entry.get());
    2641            0 :                     entry.remove();
    2642            0 :                 } else {
    2643            0 :                     let inserting = self.old_value.take().unwrap();
    2644            0 :                     METRICS.slot_inserted(&inserting);
    2645            0 :                     let replaced = entry.insert(inserting);
    2646            0 :                     METRICS.slot_removed(&replaced);
    2647            0 :                 }
    2648              :             }
    2649              :             Entry::Vacant(_) => {
    2650            0 :                 METRICS.unexpected_errors.inc();
    2651            0 :                 error!(
    2652              :                     tenant_shard_id = %self.tenant_shard_id,
    2653            0 :                     "Missing InProgress marker during SlotGuard drop, this is a bug."
    2654              :                 );
    2655              :             }
    2656              :         }
    2657           12 :     }
    2658              : }
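
The SlotGuard methods above form a small protocol: acquiring a slot parks an InProgress marker in the map and hands the previous contents to the guard; the holder must shut that old value down before discarding it, and then either publishes a replacement via upsert or puts the old value back via revert. The following is a hedged sketch of that lifecycle (not part of mgr.rs): `example_replace_slot` is a hypothetical caller inside this module, and error handling is kept minimal.

    /// Hedged sketch: drive the SlotGuard protocol for an existing tenant slot.
    async fn example_replace_slot(tenant_shard_id: TenantShardId) -> Result<(), TenantStateError> {
        let mut guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist)?;
        let (_completion_guard, progress) = completion::channel();

        // The previous occupant must be cleanly shut down before the guard may discard it.
        if let Some(TenantSlot::Attached(tenant)) = guard.get_old_value() {
            if tenant.shutdown(progress, ShutdownMode::Hard).await.is_err() {
                // Another shutdown/detach already owns the tenant: restore the slot and bail.
                guard.revert();
                return Err(TenantStateError::IsStopping(tenant_shard_id));
            }
        }

        // Safe now: the old value is shut down, so the guard may drop it. Alternatively the
        // caller could publish a replacement with upsert(); if the guard were simply dropped
        // without either, Drop above would remove the InProgress marker for us.
        guard.drop_old_value().expect("shut down above");
        Ok(())
    }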
    2659              : 
    2660              : enum TenantSlotPeekMode {
    2661              :     /// In Read mode, peek will be permitted to see the slots even if the pageserver is shutting down
    2662              :     Read,
    2663              :     /// In Write mode, trying to peek at a slot while the pageserver is shutting down is an error
    2664              :     Write,
    2665              : }
    2666              : 
    2667            0 : fn tenant_map_peek_slot<'a>(
    2668            0 :     tenants: &'a std::sync::RwLockReadGuard<'a, TenantsMap>,
    2669            0 :     tenant_shard_id: &TenantShardId,
    2670            0 :     mode: TenantSlotPeekMode,
    2671            0 : ) -> Result<Option<&'a TenantSlot>, TenantMapError> {
    2672            0 :     match tenants.deref() {
    2673            0 :         TenantsMap::Initializing => Err(TenantMapError::StillInitializing),
    2674            0 :         TenantsMap::ShuttingDown(m) => match mode {
    2675              :             TenantSlotPeekMode::Read => Ok(Some(
    2676              :                 // When reading in ShuttingDown state, we must translate None results
    2677              :                 // into a ShuttingDown error, because absence of a tenant shard ID in the map
    2678              :                 // isn't a reliable indicator of the tenant being gone: it might have been
    2679              :                 // InProgress when shutdown started, and cleaned up from that state such
    2680              :                 // that it's now no longer in the map.  Callers will have to wait until
    2681              :                 // we next start up to get a proper answer.  This avoids incorrect 404 API responses.
    2682            0 :                 m.get(tenant_shard_id).ok_or(TenantMapError::ShuttingDown)?,
    2683              :             )),
    2684            0 :             TenantSlotPeekMode::Write => Err(TenantMapError::ShuttingDown),
    2685              :         },
    2686            0 :         TenantsMap::Open(m) => Ok(m.get(tenant_shard_id)),
    2687              :     }
    2688            0 : }
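
Read-only queries never mutate the map: the caller takes the read lock and peeks at the slot. A hedged sketch of that pattern follows (not part of mgr.rs); `example_peek_status` and the per-variant handling are hypothetical placeholders.

    /// Hedged sketch: read-mode peek for a status-style query.
    fn example_peek_status(tenant_shard_id: TenantShardId) -> Result<(), TenantMapError> {
        let locked = TENANTS.read().unwrap();
        // Read mode still answers during shutdown (as a ShuttingDown error rather than a
        // misleading "not found"); Write mode would refuse outright.
        match tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)? {
            Some(TenantSlot::Attached(_tenant)) => { /* report the attached tenant's state */ }
            Some(TenantSlot::Secondary(_)) | Some(TenantSlot::InProgress(_)) | None => {
                /* report secondary / transitioning / absent */
            }
        }
        Ok(())
    }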
    2689              : 
    2690              : enum TenantSlotAcquireMode {
    2691              :     /// Acquire the slot irrespective of current state, or whether it already exists
    2692              :     Any,
    2693              :     /// Return an error if trying to acquire a slot and it doesn't already exist
    2694              :     MustExist,
    2695              : }
    2696              : 
    2697            0 : fn tenant_map_acquire_slot(
    2698            0 :     tenant_shard_id: &TenantShardId,
    2699            0 :     mode: TenantSlotAcquireMode,
    2700            0 : ) -> Result<SlotGuard, TenantSlotError> {
    2701            0 :     tenant_map_acquire_slot_impl(tenant_shard_id, &TENANTS, mode)
    2702            0 : }
    2703              : 
    2704           12 : fn tenant_map_acquire_slot_impl(
    2705           12 :     tenant_shard_id: &TenantShardId,
    2706           12 :     tenants: &std::sync::RwLock<TenantsMap>,
    2707           12 :     mode: TenantSlotAcquireMode,
    2708           12 : ) -> Result<SlotGuard, TenantSlotError> {
    2709              :     use TenantSlotAcquireMode::*;
    2710           12 :     METRICS.tenant_slot_writes.inc();
    2711           12 : 
    2712           12 :     let mut locked = tenants.write().unwrap();
    2713           12 :     let span = tracing::info_span!("acquire_slot", tenant_id=%tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug());
    2714           12 :     let _guard = span.enter();
    2715              : 
    2716           12 :     let m = match &mut *locked {
    2717            0 :         TenantsMap::Initializing => return Err(TenantMapError::StillInitializing.into()),
    2718            0 :         TenantsMap::ShuttingDown(_) => return Err(TenantMapError::ShuttingDown.into()),
    2719           12 :         TenantsMap::Open(m) => m,
    2720              :     };
    2721              : 
    2722              :     use std::collections::btree_map::Entry;
    2723              : 
    2724           12 :     let entry = m.entry(*tenant_shard_id);
    2725           12 : 
    2726           12 :     match entry {
    2727            0 :         Entry::Vacant(v) => match mode {
    2728              :             MustExist => {
    2729            0 :                 tracing::debug!("Vacant && MustExist: return NotFound");
    2730            0 :                 Err(TenantSlotError::NotFound(*tenant_shard_id))
    2731              :             }
    2732              :             _ => {
    2733            0 :                 let (completion, barrier) = utils::completion::channel();
    2734            0 :                 let inserting = TenantSlot::InProgress(barrier);
    2735            0 :                 METRICS.slot_inserted(&inserting);
    2736            0 :                 v.insert(inserting);
    2737            0 :                 tracing::debug!("Vacant, inserted InProgress");
    2738            0 :                 Ok(SlotGuard::new(*tenant_shard_id, None, completion))
    2739              :             }
    2740              :         },
    2741           12 :         Entry::Occupied(mut o) => {
    2742           12 :             // Apply mode-driven checks
    2743           12 :             match (o.get(), mode) {
    2744              :                 (TenantSlot::InProgress(_), _) => {
    2745            0 :                     tracing::debug!("Occupied, failing for InProgress");
    2746            0 :                     Err(TenantSlotError::InProgress)
    2747              :                 }
    2748              :                 _ => {
    2749              :                     // Happy case: the slot was not in any state that violated our mode
    2750           12 :                     let (completion, barrier) = utils::completion::channel();
    2751           12 :                     let in_progress = TenantSlot::InProgress(barrier);
    2752           12 :                     METRICS.slot_inserted(&in_progress);
    2753           12 :                     let old_value = o.insert(in_progress);
    2754           12 :                     METRICS.slot_removed(&old_value);
    2755           12 :                     tracing::debug!("Occupied, replaced with InProgress");
    2756           12 :                     Ok(SlotGuard::new(
    2757           12 :                         *tenant_shard_id,
    2758           12 :                         Some(old_value),
    2759           12 :                         completion,
    2760           12 :                     ))
    2761              :                 }
    2762              :             }
    2763              :         }
    2764              :     }
    2765           12 : }
    2766              : 
    2767              : /// Stops and removes the tenant from memory if it's not [`TenantState::Stopping`] already; bails otherwise.
    2768              : /// Allows removing other tenant resources manually via `tenant_cleanup`.
    2769              : /// If the cleanup fails, the tenant stays in memory in [`TenantState::Broken`] state, and another removal
    2770              : /// operation will be needed to remove it.
    2771           12 : async fn remove_tenant_from_memory<V, F>(
    2772           12 :     tenants: &std::sync::RwLock<TenantsMap>,
    2773           12 :     tenant_shard_id: TenantShardId,
    2774           12 :     tenant_cleanup: F,
    2775           12 : ) -> Result<V, TenantStateError>
    2776           12 : where
    2777           12 :     F: std::future::Future<Output = anyhow::Result<V>>,
    2778           12 : {
    2779           12 :     let mut slot_guard =
    2780           12 :         tenant_map_acquire_slot_impl(&tenant_shard_id, tenants, TenantSlotAcquireMode::MustExist)?;
    2781              : 
    2782              :     // allow pageserver shutdown to await for our completion
    2783           12 :     let (_guard, progress) = completion::channel();
    2784              : 
    2785              :     // The SlotGuard allows us to manipulate the Tenant object without fear of some
    2786              :     // concurrent API request doing something else for the same tenant ID.
    2787           12 :     let attached_tenant = match slot_guard.get_old_value() {
    2788           12 :         Some(TenantSlot::Attached(tenant)) => {
    2789           12 :             // whenever we remove a tenant from memory, we don't want to flush and wait for upload
    2790           12 :             let shutdown_mode = ShutdownMode::Hard;
    2791           12 : 
    2792           12 :             // shutdown is sure to transition the tenant to Stopping and wait for all tasks to complete, so
    2793           12 :             // that we can safely continue to cleanup.
    2794           12 :             match tenant.shutdown(progress, shutdown_mode).await {
    2795           12 :                 Ok(()) => {}
    2796            0 :                 Err(_other) => {
    2797            0 :                     // if pageserver shutdown or another detach/ignore is already ongoing, we don't want to
    2798            0 :                     // wait for it; return an error right away instead, because these are distinct requests.
    2799            0 :                     slot_guard.revert();
    2800            0 :                     return Err(TenantStateError::IsStopping(tenant_shard_id));
    2801              :                 }
    2802              :             }
    2803           12 :             Some(tenant)
    2804              :         }
    2805            0 :         Some(TenantSlot::Secondary(secondary_state)) => {
    2806            0 :             tracing::info!("Shutting down in secondary mode");
    2807            0 :             secondary_state.shutdown().await;
    2808            0 :             None
    2809              :         }
    2810              :         Some(TenantSlot::InProgress(_)) => {
    2811              :             // Acquiring a slot guarantees its old value was not InProgress
    2812            0 :             unreachable!();
    2813              :         }
    2814            0 :         None => None,
    2815              :     };
    2816              : 
    2817           12 :     match tenant_cleanup
    2818           12 :         .await
    2819           12 :         .with_context(|| format!("Failed to run cleanup for tenant {tenant_shard_id}"))
    2820              :     {
    2821           12 :         Ok(hook_value) => {
    2822           12 :             // Success: drop the old TenantSlot::Attached.
    2823           12 :             slot_guard
    2824           12 :                 .drop_old_value()
    2825           12 :                 .expect("We just called shutdown");
    2826           12 : 
    2827           12 :             Ok(hook_value)
    2828              :         }
    2829            0 :         Err(e) => {
    2830              :             // If we had a Tenant, set it to Broken and put it back in the TenantsMap
    2831            0 :             if let Some(attached_tenant) = attached_tenant {
    2832            0 :                 attached_tenant.set_broken(e.to_string()).await;
    2833            0 :             }
    2834              :             // Leave the broken tenant in the map
    2835            0 :             slot_guard.revert();
    2836            0 : 
    2837            0 :             Err(TenantStateError::Other(e))
    2838              :         }
    2839              :     }
    2840           12 : }
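
remove_tenant_from_memory separates in-memory removal from whatever on-disk or remote cleanup the caller wants: the cleanup future only runs once the tenant's tasks have been shut down, and a cleanup failure leaves the tenant Broken rather than silently gone. A hedged sketch of a caller follows (not part of mgr.rs); `example_detach` is hypothetical and `conf.tenant_path` is assumed here only as a plausible way to resolve the local directory.

    /// Hedged sketch: detach-style removal that deletes local state only after all
    /// tenant tasks have stopped.
    async fn example_detach(
        conf: &'static PageServerConf,
        tenant_shard_id: TenantShardId,
    ) -> Result<(), TenantStateError> {
        let local_path = conf.tenant_path(&tenant_shard_id); // assumed helper on PageServerConf
        remove_tenant_from_memory(&TENANTS, tenant_shard_id, async move {
            // Runs only after shutdown of the attached/secondary tenant has completed.
            fs::remove_dir_all(&local_path).await?;
            anyhow::Ok(())
        })
        .await
    }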
    2841              : 
    2842              : use http_utils::error::ApiError;
    2843              : use pageserver_api::models::TimelineGcRequest;
    2844              : 
    2845              : use crate::tenant::gc_result::GcResult;
    2846              : 
    2847              : #[cfg(test)]
    2848              : mod tests {
    2849              :     use std::collections::BTreeMap;
    2850              :     use std::sync::Arc;
    2851              : 
    2852              :     use tracing::Instrument;
    2853              : 
    2854              :     use super::super::harness::TenantHarness;
    2855              :     use super::TenantsMap;
    2856              :     use crate::tenant::mgr::TenantSlot;
    2857              : 
    2858              :     #[tokio::test(start_paused = true)]
    2859           12 :     async fn shutdown_awaits_in_progress_tenant() {
    2860           12 :         // Test that if an InProgress tenant is in the map during shutdown, the shutdown will gracefully
    2861           12 :         // wait for it to complete before proceeding.
    2862           12 : 
    2863           12 :         let h = TenantHarness::create("shutdown_awaits_in_progress_tenant")
    2864           12 :             .await
    2865           12 :             .unwrap();
    2866           12 :         let (t, _ctx) = h.load().await;
    2867           12 : 
    2868           12 :         // the harness loads the tenant to Active; activation is forced, and nothing is running on the tenant
    2869           12 : 
    2870           12 :         let id = t.tenant_shard_id();
    2871           12 : 
    2872           12 :         // tenant harness configures the logging and we cannot escape it
    2873           12 :         let span = h.span();
    2874           12 :         let _e = span.enter();
    2875           12 : 
    2876           12 :         let tenants = BTreeMap::from([(id, TenantSlot::Attached(t.clone()))]);
    2877           12 :         let tenants = Arc::new(std::sync::RwLock::new(TenantsMap::Open(tenants)));
    2878           12 : 
    2879           12 :         // Invoke remove_tenant_from_memory with a cleanup hook that blocks until we manually
    2880           12 :         // permit it to proceed: that will stick the tenant in InProgress
    2881           12 : 
    2882           12 :         let (until_cleanup_completed, can_complete_cleanup) = utils::completion::channel();
    2883           12 :         let (until_cleanup_started, cleanup_started) = utils::completion::channel();
    2884           12 :         let mut remove_tenant_from_memory_task = {
    2885           12 :             let jh = tokio::spawn({
    2886           12 :                 let tenants = tenants.clone();
    2887           12 :                 async move {
    2888           12 :                     let cleanup = async move {
    2889           12 :                         drop(until_cleanup_started);
    2890           12 :                         can_complete_cleanup.wait().await;
    2891           12 :                         anyhow::Ok(())
    2892           12 :                     };
    2893           12 :                     super::remove_tenant_from_memory(&tenants, id, cleanup).await
    2894           12 :                 }
    2895           12 :                 .instrument(h.span())
    2896           12 :             });
    2897           12 : 
    2898           12 :             // now the long cleanup should be in place, with the stopping state
    2899           12 :             cleanup_started.wait().await;
    2900           12 :             jh
    2901           12 :         };
    2902           12 : 
    2903           12 :         let mut shutdown_task = {
    2904           12 :             let (until_shutdown_started, shutdown_started) = utils::completion::channel();
    2905           12 : 
    2906           12 :             let shutdown_task = tokio::spawn(async move {
    2907           12 :                 drop(until_shutdown_started);
    2908           12 :                 super::shutdown_all_tenants0(&tenants).await;
    2909           12 :             });
    2910           12 : 
    2911           12 :             shutdown_started.wait().await;
    2912           12 :             shutdown_task
    2913           12 :         };
    2914           12 : 
    2915           12 :         let long_time = std::time::Duration::from_secs(15);
    2916           12 :         tokio::select! {
    2917           12 :             _ = &mut shutdown_task => unreachable!("shutdown should block on remove_tenant_from_memory completing"),
    2918           12 :             _ = &mut remove_tenant_from_memory_task => unreachable!("remove_tenant_from_memory_task should not complete until explicitly unblocked"),
    2919           12 :             _ = tokio::time::sleep(long_time) => {},
    2920           12 :         }
    2921           12 : 
    2922           12 :         drop(until_cleanup_completed);
    2923           12 : 
    2924           12 :         // Now that we allow it to proceed, shutdown should complete immediately
    2925           12 :         remove_tenant_from_memory_task.await.unwrap().unwrap();
    2926           12 :         shutdown_task.await.unwrap();
    2927           12 :     }
    2928              : }
        

Generated by: LCOV version 2.1-beta