LCOV - code coverage report
Current view: top level - pageserver/src/tenant - mgr.rs (source / functions)
Test: 36bb8dd7c7efcb53483d1a7d9f7cb33e8406dcf0.info          Lines:     14.6 %  (206 of 1408 hit)
Test Date: 2024-04-08 10:22:05                               Functions:  8.5 %  (21 of 247 hit)

            Line data    Source code
       1              : //! This module acts as a switchboard to access different repositories managed by this
       2              : //! page server.
       3              : 
       4              : use camino::{Utf8DirEntry, Utf8Path, Utf8PathBuf};
       5              : use itertools::Itertools;
       6              : use pageserver_api::key::Key;
       7              : use pageserver_api::models::LocationConfigMode;
       8              : use pageserver_api::shard::{
       9              :     ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      10              : };
      11              : use pageserver_api::upcall_api::ReAttachResponseTenant;
      12              : use rand::{distributions::Alphanumeric, Rng};
      13              : use std::borrow::Cow;
      14              : use std::cmp::Ordering;
      15              : use std::collections::{BTreeMap, HashMap};
      16              : use std::ops::Deref;
      17              : use std::sync::Arc;
      18              : use std::time::{Duration, Instant};
      19              : use sysinfo::SystemExt;
      20              : use tokio::fs;
      21              : use utils::timeout::{timeout_cancellable, TimeoutCancellableError};
      22              : 
      23              : use anyhow::Context;
      24              : use once_cell::sync::Lazy;
      25              : use tokio::task::JoinSet;
      26              : use tokio_util::sync::CancellationToken;
      27              : use tracing::*;
      28              : 
      29              : use remote_storage::GenericRemoteStorage;
      30              : use utils::{completion, crashsafe};
      31              : 
      32              : use crate::config::PageServerConf;
      33              : use crate::context::{DownloadBehavior, RequestContext};
      34              : use crate::control_plane_client::{
      35              :     ControlPlaneClient, ControlPlaneGenerationsApi, RetryForeverError,
      36              : };
      37              : use crate::deletion_queue::DeletionQueueClient;
      38              : use crate::http::routes::ACTIVE_TENANT_TIMEOUT;
      39              : use crate::metrics::{TENANT, TENANT_MANAGER as METRICS};
      40              : use crate::task_mgr::{self, TaskKind};
      41              : use crate::tenant::config::{
      42              :     AttachedLocationConfig, AttachmentMode, LocationConf, LocationMode, SecondaryLocationConfig,
      43              : };
      44              : use crate::tenant::delete::DeleteTenantFlow;
      45              : use crate::tenant::span::debug_assert_current_span_has_tenant_id;
      46              : use crate::tenant::storage_layer::inmemory_layer;
      47              : use crate::tenant::timeline::ShutdownMode;
      48              : use crate::tenant::{AttachedTenantConf, SpawnMode, Tenant, TenantState};
      49              : use crate::{InitializationOrder, IGNORED_TENANT_FILE_NAME, METADATA_FILE_NAME, TEMP_FILE_SUFFIX};
      50              : 
      51              : use utils::crashsafe::path_with_suffix_extension;
      52              : use utils::fs_ext::PathExt;
      53              : use utils::generation::Generation;
      54              : use utils::id::{TenantId, TimelineId};
      55              : 
      56              : use super::delete::DeleteTenantError;
      57              : use super::secondary::SecondaryTenant;
      58              : use super::TenantSharedResources;
      59              : 
       60              : /// A tenant that appears in TenantsMap may be either:
       61              : /// - `Attached`: has a full Tenant object, is eligible to serve
       62              : ///    reads and ingest WAL.
      63              : /// - `Secondary`: is only keeping a local cache warm.
      64              : ///
      65              : /// Secondary is a totally distinct state rather than being a mode of a `Tenant`, because
      66              : /// that way we avoid having to carefully switch a tenant's ingestion etc on and off during
      67              : /// its lifetime, and we can preserve some important safety invariants like `Tenant` always
       68              : /// having a properly acquired generation (Secondary doesn't need a generation).
      69              : #[derive(Clone)]
      70              : pub(crate) enum TenantSlot {
      71              :     Attached(Arc<Tenant>),
      72              :     Secondary(Arc<SecondaryTenant>),
      73              :     /// In this state, other administrative operations acting on the TenantId should
      74              :     /// block, or return a retry indicator equivalent to HTTP 503.
      75              :     InProgress(utils::completion::Barrier),
      76              : }
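// Illustration, not part of mgr.rs: one way an HTTP handler could act on the three
// slot states described above. `SlotState` and `status_for` are stand-ins invented
// for this sketch; only the retry-on-InProgress behaviour comes from the doc comment.
enum SlotState {
    Attached,   // full `Tenant`: serve the request
    Secondary,  // warm cache only: not attached here
    InProgress, // administrative operation underway
}

fn status_for(state: &SlotState) -> u16 {
    match state {
        SlotState::Attached => 200,
        // The exact code for "present but secondary" is API-specific; 409 is one choice.
        SlotState::Secondary => 409,
        // Per the doc comment: block, or return a retry indicator equivalent to HTTP 503.
        SlotState::InProgress => 503,
    }
}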
      77              : 
      78              : impl std::fmt::Debug for TenantSlot {
      79            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      80            0 :         match self {
      81            0 :             Self::Attached(tenant) => write!(f, "Attached({})", tenant.current_state()),
      82            0 :             Self::Secondary(_) => write!(f, "Secondary"),
      83            0 :             Self::InProgress(_) => write!(f, "InProgress"),
      84              :         }
      85            0 :     }
      86              : }
      87              : 
      88              : impl TenantSlot {
      89              :     /// Return the `Tenant` in this slot if attached, else None
      90            0 :     fn get_attached(&self) -> Option<&Arc<Tenant>> {
      91            0 :         match self {
      92            0 :             Self::Attached(t) => Some(t),
      93            0 :             Self::Secondary(_) => None,
      94            0 :             Self::InProgress(_) => None,
      95              :         }
      96            0 :     }
      97              : }
      98              : 
      99              : /// The tenants known to the pageserver.
     100              : /// The enum variants are used to distinguish the different states that the pageserver can be in.
     101              : pub(crate) enum TenantsMap {
     102              :     /// [`init_tenant_mgr`] is not done yet.
     103              :     Initializing,
     104              :     /// [`init_tenant_mgr`] is done, all on-disk tenants have been loaded.
     105              :     /// New tenants can be added using [`tenant_map_acquire_slot`].
     106              :     Open(BTreeMap<TenantShardId, TenantSlot>),
     107              :     /// The pageserver has entered shutdown mode via [`TenantManager::shutdown`].
     108              :     /// Existing tenants are still accessible, but no new tenants can be created.
     109              :     ShuttingDown(BTreeMap<TenantShardId, TenantSlot>),
     110              : }
     111              : 
     112              : pub(crate) enum TenantsMapRemoveResult {
     113              :     Occupied(TenantSlot),
     114              :     Vacant,
     115              :     InProgress(utils::completion::Barrier),
     116              : }
     117              : 
     118              : /// When resolving a TenantId to a shard, we may be looking for the 0th
     119              : /// shard, or we might be looking for whichever shard holds a particular page.
     120              : pub(crate) enum ShardSelector {
     121              :     /// Only return the 0th shard, if it is present.  If a non-0th shard is present,
     122              :     /// ignore it.
     123              :     Zero,
     124              :     /// Pick the first shard we find for the TenantId
     125              :     First,
     126              :     /// Pick the shard that holds this key
     127              :     Page(Key),
     128              : }
     129              : 
     130              : /// A convenience for use with the re_attach ControlPlaneClient function: rather
     131              : /// than the serializable struct, we build this enum that encapsulates
     132              : /// the invariant that attached tenants always have generations.
     133              : ///
     134              : /// This represents the subset of a LocationConfig that we receive during re-attach.
     135              : pub(crate) enum TenantStartupMode {
     136              :     Attached((AttachmentMode, Generation)),
     137              :     Secondary,
     138              : }
     139              : 
     140              : impl TenantStartupMode {
     141              :     /// Return the generation & mode that should be used when starting
     142              :     /// this tenant.
     143              :     ///
     144              :     /// If this returns None, the re-attach struct is in an invalid state and
     145              :     /// should be ignored in the response.
     146            0 :     fn from_reattach_tenant(rart: ReAttachResponseTenant) -> Option<Self> {
     147            0 :         match (rart.mode, rart.gen) {
     148            0 :             (LocationConfigMode::Detached, _) => None,
     149            0 :             (LocationConfigMode::Secondary, _) => Some(Self::Secondary),
     150            0 :             (LocationConfigMode::AttachedMulti, Some(g)) => {
     151            0 :                 Some(Self::Attached((AttachmentMode::Multi, Generation::new(g))))
     152              :             }
     153            0 :             (LocationConfigMode::AttachedSingle, Some(g)) => {
     154            0 :                 Some(Self::Attached((AttachmentMode::Single, Generation::new(g))))
     155              :             }
     156            0 :             (LocationConfigMode::AttachedStale, Some(g)) => {
     157            0 :                 Some(Self::Attached((AttachmentMode::Stale, Generation::new(g))))
     158              :             }
     159              :             _ => {
     160            0 :                 tracing::warn!(
     161            0 :                     "Received invalid re-attach state for tenant {}: {rart:?}",
     162            0 :                     rart.id
     163            0 :                 );
     164            0 :                 None
     165              :             }
     166              :         }
     167            0 :     }
     168              : }
     169              : 
     170              : impl TenantsMap {
     171              :     /// Convenience function for typical usage, where we want to get a `Tenant` object, for
     172              :     /// working with attached tenants.  If the TenantId is in the map but in Secondary state,
     173              :     /// None is returned.
     174            0 :     pub(crate) fn get(&self, tenant_shard_id: &TenantShardId) -> Option<&Arc<Tenant>> {
     175            0 :         match self {
     176            0 :             TenantsMap::Initializing => None,
     177            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => {
     178            0 :                 m.get(tenant_shard_id).and_then(|slot| slot.get_attached())
     179              :             }
     180              :         }
     181            0 :     }
     182              : 
     183              :     /// A page service client sends a TenantId, and to look up the correct Tenant we must
     184              :     /// resolve this to a fully qualified TenantShardId.
     185            0 :     fn resolve_attached_shard(
     186            0 :         &self,
     187            0 :         tenant_id: &TenantId,
     188            0 :         selector: ShardSelector,
     189            0 :     ) -> Option<TenantShardId> {
     190            0 :         let mut want_shard = None;
     191            0 :         match self {
     192            0 :             TenantsMap::Initializing => None,
     193            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => {
     194            0 :                 for slot in m.range(TenantShardId::tenant_range(*tenant_id)) {
     195              :                     // Ignore all slots that don't contain an attached tenant
     196            0 :                     let tenant = match &slot.1 {
     197            0 :                         TenantSlot::Attached(t) => t,
     198            0 :                         _ => continue,
     199              :                     };
     200              : 
     201            0 :                     match selector {
     202            0 :                         ShardSelector::First => return Some(*slot.0),
     203            0 :                         ShardSelector::Zero if slot.0.shard_number == ShardNumber(0) => {
     204            0 :                             return Some(*slot.0)
     205              :                         }
     206            0 :                         ShardSelector::Page(key) => {
     207            0 :                             // First slot we see for this tenant, calculate the expected shard number
     208            0 :                             // for the key: we will use this for checking if this and subsequent
     209            0 :                             // slots contain the key, rather than recalculating the hash each time.
     210            0 :                             if want_shard.is_none() {
     211            0 :                                 want_shard = Some(tenant.shard_identity.get_shard_number(&key));
     212            0 :                             }
     213              : 
     214            0 :                             if Some(tenant.shard_identity.number) == want_shard {
     215            0 :                                 return Some(*slot.0);
     216            0 :                             }
     217              :                         }
     218            0 :                         _ => continue,
     219              :                     }
     220              :                 }
     221              : 
     222              :                 // Fall through: we didn't find an acceptable shard
     223            0 :                 None
     224              :             }
     225              :         }
     226            0 :     }
     227              : 
     228              :     /// Only for use from DeleteTenantFlow.  This method directly removes a TenantSlot from the map.
     229              :     ///
     230              :     /// The normal way to remove a tenant is using a SlotGuard, which will gracefully remove the guarded
      231              : /// slot if the enclosed tenant is shut down.
     232            0 :     pub(crate) fn remove(&mut self, tenant_shard_id: TenantShardId) -> TenantsMapRemoveResult {
     233            0 :         use std::collections::btree_map::Entry;
     234            0 :         match self {
     235            0 :             TenantsMap::Initializing => TenantsMapRemoveResult::Vacant,
     236            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => match m.entry(tenant_shard_id) {
     237            0 :                 Entry::Occupied(entry) => match entry.get() {
     238            0 :                     TenantSlot::InProgress(barrier) => {
     239            0 :                         TenantsMapRemoveResult::InProgress(barrier.clone())
     240              :                     }
     241            0 :                     _ => TenantsMapRemoveResult::Occupied(entry.remove()),
     242              :                 },
     243            0 :                 Entry::Vacant(_entry) => TenantsMapRemoveResult::Vacant,
     244              :             },
     245              :         }
     246            0 :     }
     247              : 
     248            0 :     pub(crate) fn len(&self) -> usize {
     249            0 :         match self {
     250            0 :             TenantsMap::Initializing => 0,
     251            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m.len(),
     252              :         }
     253            0 :     }
     254              : }
     255              : 
      256              : /// This is "safe" in that it won't leave behind a partially deleted directory
      257              : /// at the original path, because we rename with TEMP_FILE_SUFFIX before starting to delete
     258              : /// the contents.
     259              : ///
     260              : /// This is pageserver-specific, as it relies on future processes after a crash to check
     261              : /// for TEMP_FILE_SUFFIX when loading things.
     262            0 : async fn safe_remove_tenant_dir_all(path: impl AsRef<Utf8Path>) -> std::io::Result<()> {
     263            0 :     let tmp_path = safe_rename_tenant_dir(path).await?;
     264            0 :     fs::remove_dir_all(tmp_path).await
     265            0 : }
     266              : 
     267            0 : async fn safe_rename_tenant_dir(path: impl AsRef<Utf8Path>) -> std::io::Result<Utf8PathBuf> {
     268            0 :     let parent = path
     269            0 :         .as_ref()
     270            0 :         .parent()
     271            0 :         // It is invalid to call this function with a relative path.  Tenant directories
     272            0 :         // should always have a parent.
     273            0 :         .ok_or(std::io::Error::new(
     274            0 :             std::io::ErrorKind::InvalidInput,
     275            0 :             "Path must be absolute",
     276            0 :         ))?;
     277            0 :     let rand_suffix = rand::thread_rng()
     278            0 :         .sample_iter(&Alphanumeric)
     279            0 :         .take(8)
     280            0 :         .map(char::from)
     281            0 :         .collect::<String>()
     282            0 :         + TEMP_FILE_SUFFIX;
     283            0 :     let tmp_path = path_with_suffix_extension(&path, &rand_suffix);
     284            0 :     fs::rename(path.as_ref(), &tmp_path).await?;
     285            0 :     fs::File::open(parent).await?.sync_all().await?;
     286            0 :     Ok(tmp_path)
     287            0 : }
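// Illustration, not part of mgr.rs: a minimal std-only sketch of the same
// rename-then-delete pattern. Assumptions: the temp suffix is recognised and cleaned
// up after a crash (as the comment above says the pageserver does), and the rename
// stays on one filesystem so it is atomic. Function and variable names are ours.
use std::io;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};

fn sketch_safe_remove_dir_all(path: &Path, temp_suffix: &str) -> io::Result<()> {
    let parent = path
        .parent()
        .ok_or_else(|| io::Error::new(io::ErrorKind::InvalidInput, "path must be absolute"))?;
    // A timestamp stands in for the random Alphanumeric suffix used above.
    let nonce = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("clock before 1970")
        .as_nanos();
    let tmp = path.with_extension(format!("{nonce}{temp_suffix}"));
    std::fs::rename(path, &tmp)?; // atomic: the original path is never half-deleted
    std::fs::File::open(parent)?.sync_all()?; // persist the rename before deleting
    std::fs::remove_dir_all(&tmp) // a crash here only leaves a temp-suffixed dir behind
}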
     288              : 
     289              : static TENANTS: Lazy<std::sync::RwLock<TenantsMap>> =
     290            2 :     Lazy::new(|| std::sync::RwLock::new(TenantsMap::Initializing));
     291              : 
     292              : /// The TenantManager is responsible for storing and mutating the collection of all tenants
     293              : /// that this pageserver process has state for.  Every Tenant and SecondaryTenant instance
     294              : /// lives inside the TenantManager.
     295              : ///
     296              : /// The most important role of the TenantManager is to prevent conflicts: e.g. trying to attach
     297              : /// the same tenant twice concurrently, or trying to configure the same tenant into secondary
     298              : /// and attached modes concurrently.
     299              : pub struct TenantManager {
     300              :     conf: &'static PageServerConf,
      301              :     // TODO: currently this is a &'static pointing to TENANTS.  When we finish refactoring
     302              :     // out of that static variable, the TenantManager can own this.
     303              :     // See https://github.com/neondatabase/neon/issues/5796
     304              :     tenants: &'static std::sync::RwLock<TenantsMap>,
     305              :     resources: TenantSharedResources,
     306              : 
     307              :     // Long-running operations that happen outside of a [`Tenant`] lifetime should respect this token.
     308              :     // This is for edge cases like tenant deletion.  In normal cases (within a Tenant lifetime),
     309              :     // tenants have their own cancellation tokens, which we fire individually in [`Self::shutdown`], or
     310              :     // when the tenant detaches.
     311              :     cancel: CancellationToken,
     312              : }
     313              : 
     314            0 : fn emergency_generations(
     315            0 :     tenant_confs: &HashMap<TenantShardId, anyhow::Result<LocationConf>>,
     316            0 : ) -> HashMap<TenantShardId, TenantStartupMode> {
     317            0 :     tenant_confs
     318            0 :         .iter()
     319            0 :         .filter_map(|(tid, lc)| {
     320            0 :             let lc = match lc {
     321            0 :                 Ok(lc) => lc,
     322            0 :                 Err(_) => return None,
     323              :             };
     324              :             Some((
     325            0 :                 *tid,
     326            0 :                 match &lc.mode {
     327            0 :                     LocationMode::Attached(alc) => {
     328            0 :                         TenantStartupMode::Attached((alc.attach_mode, alc.generation))
     329              :                     }
     330            0 :                     LocationMode::Secondary(_) => TenantStartupMode::Secondary,
     331              :                 },
     332              :             ))
     333            0 :         })
     334            0 :         .collect()
     335            0 : }
     336              : 
     337            0 : async fn init_load_generations(
     338            0 :     conf: &'static PageServerConf,
     339            0 :     tenant_confs: &HashMap<TenantShardId, anyhow::Result<LocationConf>>,
     340            0 :     resources: &TenantSharedResources,
     341            0 :     cancel: &CancellationToken,
     342            0 : ) -> anyhow::Result<Option<HashMap<TenantShardId, TenantStartupMode>>> {
     343            0 :     let generations = if conf.control_plane_emergency_mode {
     344            0 :         error!(
     345            0 :             "Emergency mode!  Tenants will be attached unsafely using their last known generation"
     346            0 :         );
     347            0 :         emergency_generations(tenant_confs)
     348            0 :     } else if let Some(client) = ControlPlaneClient::new(conf, cancel) {
     349            0 :         info!("Calling control plane API to re-attach tenants");
     350              :         // If we are configured to use the control plane API, then it is the source of truth for what tenants to load.
     351            0 :         match client.re_attach(conf).await {
     352            0 :             Ok(tenants) => tenants
     353            0 :                 .into_iter()
     354            0 :                 .flat_map(|(id, rart)| {
     355            0 :                     TenantStartupMode::from_reattach_tenant(rart).map(|tsm| (id, tsm))
     356            0 :                 })
     357            0 :                 .collect(),
     358              :             Err(RetryForeverError::ShuttingDown) => {
     359            0 :                 anyhow::bail!("Shut down while waiting for control plane re-attach response")
     360              :             }
     361              :         }
     362              :     } else {
     363            0 :         info!("Control plane API not configured, tenant generations are disabled");
     364            0 :         return Ok(None);
     365              :     };
     366              : 
     367              :     // The deletion queue needs to know about the startup attachment state to decide which (if any) stored
     368              :     // deletion list entries may still be valid.  We provide that by pushing a recovery operation into
      369              :     // the queue. Sequential processing of the queue ensures that recovery is done before any new tenant deletions
     370              :     // are processed, even though we don't block on recovery completing here.
     371              :     //
     372              :     // Must only do this if remote storage is enabled, otherwise deletion queue
     373              :     // is not running and channel push will fail.
     374            0 :     if resources.remote_storage.is_some() {
     375            0 :         let attached_tenants = generations
     376            0 :             .iter()
     377            0 :             .flat_map(|(id, start_mode)| {
     378            0 :                 match start_mode {
     379            0 :                     TenantStartupMode::Attached((_mode, generation)) => Some(generation),
     380            0 :                     TenantStartupMode::Secondary => None,
     381              :                 }
     382            0 :                 .map(|gen| (*id, *gen))
     383            0 :             })
     384            0 :             .collect();
     385            0 :         resources.deletion_queue_client.recover(attached_tenants)?;
     386            0 :     }
     387              : 
     388            0 :     Ok(Some(generations))
     389            0 : }
     390              : 
     391              : /// Given a directory discovered in the pageserver's tenants/ directory, attempt
     392              : /// to load a tenant config from it.
     393              : ///
      394              : /// If the config file is missing, returns Ok(None).
     395            0 : fn load_tenant_config(
     396            0 :     conf: &'static PageServerConf,
     397            0 :     dentry: Utf8DirEntry,
     398            0 : ) -> anyhow::Result<Option<(TenantShardId, anyhow::Result<LocationConf>)>> {
     399            0 :     let tenant_dir_path = dentry.path().to_path_buf();
     400            0 :     if crate::is_temporary(&tenant_dir_path) {
     401            0 :         info!("Found temporary tenant directory, removing: {tenant_dir_path}");
     402              :         // No need to use safe_remove_tenant_dir_all because this is already
     403              :         // a temporary path
     404            0 :         if let Err(e) = std::fs::remove_dir_all(&tenant_dir_path) {
     405            0 :             error!(
     406            0 :                 "Failed to remove temporary directory '{}': {:?}",
     407            0 :                 tenant_dir_path, e
     408            0 :             );
     409            0 :         }
     410            0 :         return Ok(None);
     411            0 :     }
     412              : 
     413              :     // This case happens if we crash during attachment before writing a config into the dir
     414            0 :     let is_empty = tenant_dir_path
     415            0 :         .is_empty_dir()
     416            0 :         .with_context(|| format!("Failed to check whether {tenant_dir_path:?} is an empty dir"))?;
     417            0 :     if is_empty {
     418            0 :         info!("removing empty tenant directory {tenant_dir_path:?}");
     419            0 :         if let Err(e) = std::fs::remove_dir(&tenant_dir_path) {
     420            0 :             error!(
     421            0 :                 "Failed to remove empty tenant directory '{}': {e:#}",
     422            0 :                 tenant_dir_path
     423            0 :             )
     424            0 :         }
     425            0 :         return Ok(None);
     426            0 :     }
     427              : 
     428            0 :     let tenant_shard_id = match tenant_dir_path
     429            0 :         .file_name()
     430            0 :         .unwrap_or_default()
     431            0 :         .parse::<TenantShardId>()
     432              :     {
     433            0 :         Ok(id) => id,
     434              :         Err(_) => {
     435            0 :             warn!("Invalid tenant path (garbage in our repo directory?): {tenant_dir_path}",);
     436            0 :             return Ok(None);
     437              :         }
     438              :     };
     439              : 
     440              :     // Clean up legacy `metadata` files.
     441              :     // Doing it here because every single tenant directory is visited here.
     442              :     // In any later code, there's different treatment of tenant dirs
      443              :     // ... depending on whether the tenant is in the re-attach response or not
      444              :     // ... depending on whether the tenant is ignored or not
     445            0 :     assert_eq!(
     446            0 :         &conf.tenant_path(&tenant_shard_id),
     447            0 :         &tenant_dir_path,
     448            0 :         "later use of conf....path() methods would be dubious"
     449              :     );
     450            0 :     let timelines: Vec<TimelineId> = match conf.timelines_path(&tenant_shard_id).read_dir_utf8() {
     451            0 :         Ok(iter) => {
     452            0 :             let mut timelines = Vec::new();
     453            0 :             for res in iter {
     454            0 :                 let p = res?;
     455            0 :                 let Some(timeline_id) = p.file_name().parse::<TimelineId>().ok() else {
     456              :                     // skip any entries that aren't TimelineId, such as
     457              :                     // - *.___temp dirs
     458              :                     // - unfinished initdb uploads (test_non_uploaded_root_timeline_is_deleted_after_restart)
     459            0 :                     continue;
     460              :                 };
     461            0 :                 timelines.push(timeline_id);
     462              :             }
     463            0 :             timelines
     464              :         }
     465            0 :         Err(e) if e.kind() == std::io::ErrorKind::NotFound => vec![],
     466            0 :         Err(e) => return Err(anyhow::anyhow!(e)),
     467              :     };
     468            0 :     for timeline_id in timelines {
     469            0 :         let timeline_path = &conf.timeline_path(&tenant_shard_id, &timeline_id);
     470            0 :         let metadata_path = timeline_path.join(METADATA_FILE_NAME);
     471            0 :         match std::fs::remove_file(&metadata_path) {
     472              :             Ok(()) => {
     473            0 :                 crashsafe::fsync(timeline_path)
     474            0 :                     .context("fsync timeline dir after removing legacy metadata file")?;
     475            0 :                 info!("removed legacy metadata file at {metadata_path}");
     476              :             }
     477            0 :             Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
     478            0 :                 // something removed the file earlier, or it was never there
     479            0 :                 // We don't care, this software version doesn't write it again, so, we're good.
     480            0 :             }
     481            0 :             Err(e) => {
     482            0 :                 anyhow::bail!("remove legacy metadata file: {e}: {metadata_path}");
     483              :             }
     484              :         }
     485              :     }
     486              : 
     487            0 :     let tenant_ignore_mark_file = tenant_dir_path.join(IGNORED_TENANT_FILE_NAME);
     488            0 :     if tenant_ignore_mark_file.exists() {
     489            0 :         info!("Found an ignore mark file {tenant_ignore_mark_file:?}, skipping the tenant");
     490            0 :         return Ok(None);
     491            0 :     }
     492            0 : 
     493            0 :     Ok(Some((
     494            0 :         tenant_shard_id,
     495            0 :         Tenant::load_tenant_config(conf, &tenant_shard_id),
     496            0 :     )))
     497            0 : }
     498              : 
     499              : /// Initial stage of load: walk the local tenants directory, clean up any temp files,
     500              : /// and load configurations for the tenants we found.
     501              : ///
     502              : /// Do this in parallel, because we expect 10k+ tenants, so serial execution can take
     503              : /// seconds even on reasonably fast drives.
     504            0 : async fn init_load_tenant_configs(
     505            0 :     conf: &'static PageServerConf,
     506            0 : ) -> anyhow::Result<HashMap<TenantShardId, anyhow::Result<LocationConf>>> {
     507            0 :     let tenants_dir = conf.tenants_path();
     508              : 
     509            0 :     let dentries = tokio::task::spawn_blocking(move || -> anyhow::Result<Vec<Utf8DirEntry>> {
     510            0 :         let dir_entries = tenants_dir
     511            0 :             .read_dir_utf8()
     512            0 :             .with_context(|| format!("Failed to list tenants dir {tenants_dir:?}"))?;
     513              : 
     514            0 :         Ok(dir_entries.collect::<Result<Vec<_>, std::io::Error>>()?)
     515            0 :     })
     516            0 :     .await??;
     517              : 
     518            0 :     let mut configs = HashMap::new();
     519            0 : 
     520            0 :     let mut join_set = JoinSet::new();
     521            0 :     for dentry in dentries {
     522            0 :         join_set.spawn_blocking(move || load_tenant_config(conf, dentry));
     523            0 :     }
     524              : 
     525            0 :     while let Some(r) = join_set.join_next().await {
     526            0 :         if let Some((tenant_id, tenant_config)) = r?? {
     527            0 :             configs.insert(tenant_id, tenant_config);
     528            0 :         }
     529              :     }
     530              : 
     531            0 :     Ok(configs)
     532            0 : }
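// Illustration, not part of mgr.rs: the same scatter/gather shape with only std,
// tokio and anyhow. The directory is listed on one blocking task, then each entry is
// parsed on the blocking pool via a JoinSet, as in init_load_tenant_configs above.
// The "config" file name and the String payload are placeholders, not the real layout.
use std::collections::HashMap;
use std::path::PathBuf;
use tokio::task::JoinSet;

async fn sketch_load_all_configs(dir: PathBuf) -> anyhow::Result<HashMap<PathBuf, String>> {
    let entries = tokio::task::spawn_blocking(move || -> anyhow::Result<Vec<PathBuf>> {
        let mut out = Vec::new();
        for entry in std::fs::read_dir(&dir)? {
            out.push(entry?.path());
        }
        Ok(out)
    })
    .await??;

    let mut join_set = JoinSet::new();
    for path in entries {
        // One blocking task per directory entry, so many small reads overlap.
        join_set.spawn_blocking(move || -> anyhow::Result<(PathBuf, String)> {
            let config = std::fs::read_to_string(path.join("config"))?; // placeholder name
            Ok((path, config))
        });
    }

    let mut configs = HashMap::new();
    while let Some(joined) = join_set.join_next().await {
        let (path, config) = joined??;
        configs.insert(path, config);
    }
    Ok(configs)
}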
     533              : 
     534              : /// Initialize repositories with locally available timelines.
     535              : /// Timelines that are only partially available locally (remote storage has more data than this pageserver)
     536              : /// are scheduled for download and added to the tenant once download is completed.
     537            0 : #[instrument(skip_all)]
     538              : pub async fn init_tenant_mgr(
     539              :     conf: &'static PageServerConf,
     540              :     resources: TenantSharedResources,
     541              :     init_order: InitializationOrder,
     542              :     cancel: CancellationToken,
     543              : ) -> anyhow::Result<TenantManager> {
     544              :     let mut tenants = BTreeMap::new();
     545              : 
     546              :     let ctx = RequestContext::todo_child(TaskKind::Startup, DownloadBehavior::Warn);
     547              : 
     548              :     // Initialize dynamic limits that depend on system resources
     549              :     let system_memory =
     550              :         sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_memory())
     551              :             .total_memory();
     552              :     let max_ephemeral_layer_bytes =
     553              :         conf.ephemeral_bytes_per_memory_kb as u64 * (system_memory / 1024);
     554            0 :     tracing::info!("Initialized ephemeral layer size limit to {max_ephemeral_layer_bytes}, for {system_memory} bytes of memory");
     555              :     inmemory_layer::GLOBAL_RESOURCES.max_dirty_bytes.store(
     556              :         max_ephemeral_layer_bytes,
     557              :         std::sync::atomic::Ordering::Relaxed,
     558              :     );
     559              : 
     560              :     // Scan local filesystem for attached tenants
     561              :     let tenant_configs = init_load_tenant_configs(conf).await?;
     562              : 
     563              :     // Determine which tenants are to be secondary or attached, and in which generation
     564              :     let tenant_modes = init_load_generations(conf, &tenant_configs, &resources, &cancel).await?;
     565              : 
     566            0 :     tracing::info!(
     567            0 :         "Attaching {} tenants at startup, warming up {} at a time",
     568            0 :         tenant_configs.len(),
     569            0 :         conf.concurrent_tenant_warmup.initial_permits()
     570            0 :     );
     571              :     TENANT.startup_scheduled.inc_by(tenant_configs.len() as u64);
     572              : 
     573              :     // Construct `Tenant` objects and start them running
     574              :     for (tenant_shard_id, location_conf) in tenant_configs {
     575              :         let tenant_dir_path = conf.tenant_path(&tenant_shard_id);
     576              : 
     577              :         let mut location_conf = match location_conf {
     578              :             Ok(l) => l,
     579              :             Err(e) => {
     580            0 :                 warn!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Marking tenant broken, failed to {e:#}");
     581              : 
     582              :                 tenants.insert(
     583              :                     tenant_shard_id,
     584              :                     TenantSlot::Attached(Tenant::create_broken_tenant(
     585              :                         conf,
     586              :                         tenant_shard_id,
     587              :                         format!("{}", e),
     588              :                     )),
     589              :                 );
     590              :                 continue;
     591              :             }
     592              :         };
     593              : 
     594              :         // FIXME: if we were attached, and get demoted to secondary on re-attach, we
     595              :         // don't have a place to get a config.
     596              :         // (https://github.com/neondatabase/neon/issues/5377)
     597              :         const DEFAULT_SECONDARY_CONF: SecondaryLocationConfig =
     598              :             SecondaryLocationConfig { warm: true };
     599              : 
     600              :         // Update the location config according to the re-attach response
     601              :         if let Some(tenant_modes) = &tenant_modes {
     602              :             // We have a generation map: treat it as the authority for whether
     603              :             // this tenant is really attached.
     604              :             match tenant_modes.get(&tenant_shard_id) {
     605              :                 None => {
     606            0 :                     info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Detaching tenant, control plane omitted it in re-attach response");
     607              :                     if let Err(e) = safe_remove_tenant_dir_all(&tenant_dir_path).await {
     608            0 :                         error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
     609            0 :                             "Failed to remove detached tenant directory '{tenant_dir_path}': {e:?}",
     610            0 :                         );
     611              :                     }
     612              : 
     613              :                     // We deleted local content: move on to next tenant, don't try and spawn this one.
     614              :                     continue;
     615              :                 }
     616              :                 Some(TenantStartupMode::Secondary) => {
     617              :                     if !matches!(location_conf.mode, LocationMode::Secondary(_)) {
     618              :                         location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF);
     619              :                     }
     620              :                 }
     621              :                 Some(TenantStartupMode::Attached((attach_mode, generation))) => {
     622              :                     let old_gen_higher = match &location_conf.mode {
     623              :                         LocationMode::Attached(AttachedLocationConfig {
     624              :                             generation: old_generation,
     625              :                             attach_mode: _attach_mode,
     626              :                         }) => {
     627              :                             if old_generation > generation {
     628              :                                 Some(old_generation)
     629              :                             } else {
     630              :                                 None
     631              :                             }
     632              :                         }
     633              :                         _ => None,
     634              :                     };
     635              :                     if let Some(old_generation) = old_gen_higher {
     636            0 :                         tracing::error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
     637            0 :                             "Control plane gave decreasing generation ({generation:?}) in re-attach response for tenant that was attached in generation {:?}, demoting to secondary",
     638            0 :                             old_generation
     639            0 :                         );
     640              : 
     641              :                         // We cannot safely attach this tenant given a bogus generation number, but let's avoid throwing away
     642              :                         // local disk content: demote to secondary rather than detaching.
     643              :                         location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF);
     644              :                     } else {
     645              :                         location_conf.attach_in_generation(*attach_mode, *generation);
     646              :                     }
     647              :                 }
     648              :             }
     649              :         } else {
     650              :             // Legacy mode: no generation information, any tenant present
     651              :             // on local disk may activate
     652            0 :             info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Starting tenant in legacy mode, no generation",);
     653              :         };
     654              : 
     655              :         // Presence of a generation number implies attachment: attach the tenant
     656              :         // if it wasn't already, and apply the generation number.
     657              :         Tenant::persist_tenant_config(conf, &tenant_shard_id, &location_conf).await?;
     658              : 
     659              :         let shard_identity = location_conf.shard;
     660              :         let slot = match location_conf.mode {
     661              :             LocationMode::Attached(attached_conf) => {
     662              :                 match tenant_spawn(
     663              :                     conf,
     664              :                     tenant_shard_id,
     665              :                     &tenant_dir_path,
     666              :                     resources.clone(),
     667              :                     AttachedTenantConf::new(location_conf.tenant_conf, attached_conf),
     668              :                     shard_identity,
     669              :                     Some(init_order.clone()),
     670              :                     &TENANTS,
     671              :                     SpawnMode::Lazy,
     672              :                     &ctx,
     673              :                 ) {
     674              :                     Ok(tenant) => TenantSlot::Attached(tenant),
     675              :                     Err(e) => {
     676            0 :                         error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Failed to start tenant: {e:#}");
     677              :                         continue;
     678              :                     }
     679              :                 }
     680              :             }
     681              :             LocationMode::Secondary(secondary_conf) => TenantSlot::Secondary(SecondaryTenant::new(
     682              :                 tenant_shard_id,
     683              :                 shard_identity,
     684              :                 location_conf.tenant_conf,
     685              :                 &secondary_conf,
     686              :             )),
     687              :         };
     688              : 
     689              :         tenants.insert(tenant_shard_id, slot);
     690              :     }
     691              : 
     692            0 :     info!("Processed {} local tenants at startup", tenants.len());
     693              : 
     694              :     let mut tenants_map = TENANTS.write().unwrap();
     695              :     assert!(matches!(&*tenants_map, &TenantsMap::Initializing));
     696              :     METRICS.tenant_slots.set(tenants.len() as u64);
     697              :     *tenants_map = TenantsMap::Open(tenants);
     698              : 
     699              :     Ok(TenantManager {
     700              :         conf,
     701              :         tenants: &TENANTS,
     702              :         resources,
     703              :         cancel: CancellationToken::new(),
     704              :     })
     705              : }
     706              : 
     707              : /// Wrapper for Tenant::spawn that checks invariants before running, and inserts
     708              : /// a broken tenant in the map if Tenant::spawn fails.
     709              : #[allow(clippy::too_many_arguments)]
     710            0 : fn tenant_spawn(
     711            0 :     conf: &'static PageServerConf,
     712            0 :     tenant_shard_id: TenantShardId,
     713            0 :     tenant_path: &Utf8Path,
     714            0 :     resources: TenantSharedResources,
     715            0 :     location_conf: AttachedTenantConf,
     716            0 :     shard_identity: ShardIdentity,
     717            0 :     init_order: Option<InitializationOrder>,
     718            0 :     tenants: &'static std::sync::RwLock<TenantsMap>,
     719            0 :     mode: SpawnMode,
     720            0 :     ctx: &RequestContext,
     721            0 : ) -> anyhow::Result<Arc<Tenant>> {
     722            0 :     anyhow::ensure!(
     723            0 :         tenant_path.is_dir(),
      724            0 :         "Cannot load tenant from path {tenant_path:?}, it either does not exist or is not a directory"
     725              :     );
     726            0 :     anyhow::ensure!(
     727            0 :         !crate::is_temporary(tenant_path),
     728            0 :         "Cannot load tenant from temporary path {tenant_path:?}"
     729              :     );
     730            0 :     anyhow::ensure!(
     731            0 :         !tenant_path.is_empty_dir().with_context(|| {
     732            0 :             format!("Failed to check whether {tenant_path:?} is an empty dir")
     733            0 :         })?,
     734            0 :         "Cannot load tenant from empty directory {tenant_path:?}"
     735              :     );
     736              : 
     737            0 :     let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
     738            0 :     anyhow::ensure!(
     739            0 :         !conf.tenant_ignore_mark_file_path(&tenant_shard_id).exists(),
     740            0 :         "Cannot load tenant, ignore mark found at {tenant_ignore_mark:?}"
     741              :     );
     742              : 
     743            0 :     let tenant = match Tenant::spawn(
     744            0 :         conf,
     745            0 :         tenant_shard_id,
     746            0 :         resources,
     747            0 :         location_conf,
     748            0 :         shard_identity,
     749            0 :         init_order,
     750            0 :         tenants,
     751            0 :         mode,
     752            0 :         ctx,
     753            0 :     ) {
     754            0 :         Ok(tenant) => tenant,
     755            0 :         Err(e) => {
     756            0 :             error!("Failed to spawn tenant {tenant_shard_id}, reason: {e:#}");
     757            0 :             Tenant::create_broken_tenant(conf, tenant_shard_id, format!("{e:#}"))
     758              :         }
     759              :     };
     760              : 
     761            0 :     Ok(tenant)
     762            0 : }
     763              : 
     764            2 : async fn shutdown_all_tenants0(tenants: &std::sync::RwLock<TenantsMap>) {
     765            2 :     let mut join_set = JoinSet::new();
     766              : 
     767              :     // Atomically, 1. create the shutdown tasks and 2. prevent creation of new tenants.
     768            2 :     let (total_in_progress, total_attached) = {
     769            2 :         let mut m = tenants.write().unwrap();
     770            2 :         match &mut *m {
     771              :             TenantsMap::Initializing => {
     772            0 :                 *m = TenantsMap::ShuttingDown(BTreeMap::default());
     773            0 :                 info!("tenants map is empty");
     774            0 :                 return;
     775              :             }
     776            2 :             TenantsMap::Open(tenants) => {
     777            2 :                 let mut shutdown_state = BTreeMap::new();
     778            2 :                 let mut total_in_progress = 0;
     779            2 :                 let mut total_attached = 0;
     780              : 
     781            2 :                 for (tenant_shard_id, v) in std::mem::take(tenants).into_iter() {
     782            2 :                     match v {
     783            0 :                         TenantSlot::Attached(t) => {
     784            0 :                             shutdown_state.insert(tenant_shard_id, TenantSlot::Attached(t.clone()));
     785            0 :                             join_set.spawn(
     786            0 :                                 async move {
     787            0 :                                     let res = {
     788            0 :                                         let (_guard, shutdown_progress) = completion::channel();
     789            0 :                                         t.shutdown(shutdown_progress, ShutdownMode::FreezeAndFlush).await
     790              :                                     };
     791              : 
     792            0 :                                     if let Err(other_progress) = res {
     793              :                                         // join the another shutdown in progress
     794            0 :                                         other_progress.wait().await;
     795            0 :                                     }
     796              : 
     797              :                                     // we cannot afford per tenant logging here, because if s3 is degraded, we are
     798              :                                     // going to log too many lines
     799            0 :                                     debug!("tenant successfully stopped");
     800            0 :                                 }
     801            0 :                                 .instrument(info_span!("shutdown", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())),
     802              :                             );
     803              : 
     804            0 :                             total_attached += 1;
     805              :                         }
     806            0 :                         TenantSlot::Secondary(state) => {
     807            0 :                             // We don't need to wait for this individually per-tenant: the
     808            0 :                             // downloader task will be waited on eventually, this cancel
     809            0 :                             // is just to encourage it to drop out if it is doing work
     810            0 :                             // for this tenant right now.
     811            0 :                             state.cancel.cancel();
     812            0 : 
     813            0 :                             shutdown_state.insert(tenant_shard_id, TenantSlot::Secondary(state));
     814            0 :                         }
     815            2 :                         TenantSlot::InProgress(notify) => {
     816            2 :                             // InProgress tenants are not visible in TenantsMap::ShuttingDown: we will
     817            2 :                             // wait for their notifications to fire in this function.
     818            2 :                             join_set.spawn(async move {
     819            2 :                                 notify.wait().await;
     820            2 :                             });
     821            2 : 
     822            2 :                             total_in_progress += 1;
     823            2 :                         }
     824              :                     }
     825              :                 }
     826            2 :                 *m = TenantsMap::ShuttingDown(shutdown_state);
     827            2 :                 (total_in_progress, total_attached)
     828              :             }
     829              :             TenantsMap::ShuttingDown(_) => {
     830            0 :                 error!("already shutting down, this function isn't supposed to be called more than once");
     831            0 :                 return;
     832              :             }
     833              :         }
     834              :     };
     835              : 
     836            2 :     let started_at = std::time::Instant::now();
     837            2 : 
     838            2 :     info!(
     839            2 :         "Waiting for {} InProgress tenants and {} Attached tenants to shut down",
     840            2 :         total_in_progress, total_attached
     841            2 :     );
     842              : 
     843            2 :     let total = join_set.len();
     844            2 :     let mut panicked = 0;
     845            2 :     let mut buffering = true;
     846            2 :     const BUFFER_FOR: std::time::Duration = std::time::Duration::from_millis(500);
     847            2 :     let mut buffered = std::pin::pin!(tokio::time::sleep(BUFFER_FOR));
     848              : 
     849            6 :     while !join_set.is_empty() {
     850            8 :         tokio::select! {
     851            2 :             Some(joined) = join_set.join_next() => {
     852              :                 match joined {
     853              :                     Ok(()) => {},
     854              :                     Err(join_error) if join_error.is_cancelled() => {
     855              :                         unreachable!("we are not cancelling any of the tasks");
     856              :                     }
     857              :                     Err(join_error) if join_error.is_panic() => {
     858              :                         // cannot really do anything, as this panic is likely a bug
     859              :                         panicked += 1;
     860              :                     }
     861              :                     Err(join_error) => {
     862            0 :                         warn!("unknown kind of JoinError: {join_error}");
     863              :                     }
     864              :                 }
     865              :                 if !buffering {
      866              :                     // re-arm the buffer so that we log progress at most once every 500ms,
      867              :                     // counted from start or from the completion that re-armed the timer;
      868              :                     // we will get SIGKILL'd at 10s and cannot log at that point.
     869              :                     buffering = true;
     870              :                     buffered.as_mut().reset(tokio::time::Instant::now() + BUFFER_FOR);
     871              :                 }
     872              :             },
     873              :             _ = &mut buffered, if buffering => {
     874              :                 buffering = false;
     875            2 :                 info!(remaining = join_set.len(), total, elapsed_ms = started_at.elapsed().as_millis(), "waiting for tenants to shutdown");
     876              :             }
      877            0 :             total, "observed panics while shutting down tenants"
     878              :     }
     879              : 
     880            2 :     if panicked > 0 {
     881            0 :         warn!(
     882            0 :             panicked,
     883            0 :             total, "observed panicks while shutting down tenants"
     884            0 :         );
     885            2 :     }
     886              : 
     887              :     // caller will log how long we took
     888            2 : }
     889              : 
     890            0 : #[derive(thiserror::Error, Debug)]
     891              : pub(crate) enum UpsertLocationError {
     892              :     #[error("Bad config request: {0}")]
     893              :     BadRequest(anyhow::Error),
     894              : 
     895              :     #[error("Cannot change config in this state: {0}")]
     896              :     Unavailable(#[from] TenantMapError),
     897              : 
     898              :     #[error("Tenant is already being modified")]
     899              :     InProgress,
     900              : 
     901              :     #[error("Failed to flush: {0}")]
     902              :     Flush(anyhow::Error),
     903              : 
     904              :     #[error("Internal error: {0}")]
     905              :     Other(#[from] anyhow::Error),
     906              : }
     907              : 
     908              : impl TenantManager {
     909              :     /// Convenience function so that anyone with a TenantManager can get at the global configuration, without
     910              :     /// having to pass it around everywhere as a separate object.
     911            0 :     pub(crate) fn get_conf(&self) -> &'static PageServerConf {
     912            0 :         self.conf
     913            0 :     }
     914              : 
     915              :     /// Gets the attached tenant from the in-memory data, erroring if it's absent, in secondary mode, or currently
     916              :     /// undergoing a state change (i.e. slot is InProgress).
     917              :     ///
      918              :     /// The returned Tenant is not guaranteed to be active: check its status after obtaining it, or
     919              :     /// use [`Tenant::wait_to_become_active`] before using it if you will do I/O on it.
     920            0 :     pub(crate) fn get_attached_tenant_shard(
     921            0 :         &self,
     922            0 :         tenant_shard_id: TenantShardId,
     923            0 :     ) -> Result<Arc<Tenant>, GetTenantError> {
     924            0 :         let locked = self.tenants.read().unwrap();
     925              : 
     926            0 :         let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)?;
     927              : 
     928            0 :         match peek_slot {
     929            0 :             Some(TenantSlot::Attached(tenant)) => Ok(Arc::clone(tenant)),
     930            0 :             Some(TenantSlot::InProgress(_)) => Err(GetTenantError::NotActive(tenant_shard_id)),
     931              :             None | Some(TenantSlot::Secondary(_)) => {
     932            0 :                 Err(GetTenantError::NotFound(tenant_shard_id.tenant_id))
     933              :             }
     934              :         }
     935            0 :     }
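                      :     // Hypothetical caller following the doc comment above (sketch only; this call site does
                      :     // not exist in this module, though `ACTIVE_TENANT_TIMEOUT` is already imported here):
                      :     //
                      :     //     let tenant = tenant_manager.get_attached_tenant_shard(tenant_shard_id)?;
                      :     //     tenant.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await?;
                      :     //     // only now is it safe to issue I/O against the tenant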
     936              : 
     937            0 :     pub(crate) fn get_secondary_tenant_shard(
     938            0 :         &self,
     939            0 :         tenant_shard_id: TenantShardId,
     940            0 :     ) -> Option<Arc<SecondaryTenant>> {
     941            0 :         let locked = self.tenants.read().unwrap();
     942            0 : 
     943            0 :         let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
     944            0 :             .ok()
     945            0 :             .flatten();
     946              : 
     947            0 :         match peek_slot {
     948            0 :             Some(TenantSlot::Secondary(s)) => Some(s.clone()),
     949            0 :             _ => None,
     950              :         }
     951            0 :     }
     952              : 
     953              :     /// Whether the `TenantManager` is responsible for the tenant shard
     954            0 :     pub(crate) fn manages_tenant_shard(&self, tenant_shard_id: TenantShardId) -> bool {
     955            0 :         let locked = self.tenants.read().unwrap();
     956            0 : 
     957            0 :         let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
     958            0 :             .ok()
     959            0 :             .flatten();
     960            0 : 
     961            0 :         peek_slot.is_some()
     962            0 :     }
     963              : 
     964            0 :     #[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
     965              :     pub(crate) async fn upsert_location(
     966              :         &self,
     967              :         tenant_shard_id: TenantShardId,
     968              :         new_location_config: LocationConf,
     969              :         flush: Option<Duration>,
     970              :         mut spawn_mode: SpawnMode,
     971              :         ctx: &RequestContext,
     972              :     ) -> Result<Option<Arc<Tenant>>, UpsertLocationError> {
     973              :         debug_assert_current_span_has_tenant_id();
     974            0 :         info!("configuring tenant location to state {new_location_config:?}");
     975              : 
     976              :         enum FastPathModified {
     977              :             Attached(Arc<Tenant>),
     978              :             Secondary(Arc<SecondaryTenant>),
     979              :         }
     980              : 
     981              :         // Special case fast-path for updates to existing slots: if our upsert is only updating configuration,
      982              :         // then we do not need to set the slot to InProgress; we can just call into the
      983              :         // existing tenant.
     984              :         let fast_path_taken = {
     985              :             let locked = self.tenants.read().unwrap();
     986              :             let peek_slot =
     987              :                 tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Write)?;
     988              :             match (&new_location_config.mode, peek_slot) {
     989              :                 (LocationMode::Attached(attach_conf), Some(TenantSlot::Attached(tenant))) => {
     990              :                     match attach_conf.generation.cmp(&tenant.generation) {
     991              :                         Ordering::Equal => {
     992              :                             // A transition from Attached to Attached in the same generation, we may
     993              :                             // take our fast path and just provide the updated configuration
     994              :                             // to the tenant.
     995              :                             tenant.set_new_location_config(
     996              :                                 AttachedTenantConf::try_from(new_location_config.clone())
     997              :                                     .map_err(UpsertLocationError::BadRequest)?,
     998              :                             );
     999              : 
    1000              :                             Some(FastPathModified::Attached(tenant.clone()))
    1001              :                         }
    1002              :                         Ordering::Less => {
    1003              :                             return Err(UpsertLocationError::BadRequest(anyhow::anyhow!(
    1004              :                                 "Generation {:?} is less than existing {:?}",
    1005              :                                 attach_conf.generation,
    1006              :                                 tenant.generation
    1007              :                             )));
    1008              :                         }
    1009              :                         Ordering::Greater => {
    1010              :                             // Generation advanced, fall through to general case of replacing `Tenant` object
    1011              :                             None
    1012              :                         }
    1013              :                     }
    1014              :                 }
    1015              :                 (
    1016              :                     LocationMode::Secondary(secondary_conf),
    1017              :                     Some(TenantSlot::Secondary(secondary_tenant)),
    1018              :                 ) => {
    1019              :                     secondary_tenant.set_config(secondary_conf);
    1020              :                     secondary_tenant.set_tenant_conf(&new_location_config.tenant_conf);
    1021              :                     Some(FastPathModified::Secondary(secondary_tenant.clone()))
    1022              :                 }
    1023              :                 _ => {
    1024              :                     // Not an Attached->Attached transition, fall through to general case
    1025              :                     None
    1026              :                 }
    1027              :             }
    1028              :         };
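                      :         // Worked example of the comparison above: if the existing attached tenant holds
                      :         // generation 5, an incoming config with generation 5 takes the fast path (config-only
                      :         // update), generation 4 is rejected as BadRequest, and generation 6 falls through to
                      :         // the general case below, which replaces the `Tenant` object entirely.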
    1029              : 
    1030              :         // Fast-path continued: having dropped out of the self.tenants lock, do the async
    1031              :         // phase of writing config and/or waiting for flush, before returning.
    1032              :         match fast_path_taken {
    1033              :             Some(FastPathModified::Attached(tenant)) => {
    1034              :                 Tenant::persist_tenant_config(self.conf, &tenant_shard_id, &new_location_config)
    1035              :                     .await?;
    1036              : 
    1037              :                 // Transition to AttachedStale means we may well hold a valid generation
    1038              :                 // still, and have been requested to go stale as part of a migration.  If
    1039              :                 // the caller set `flush`, then flush to remote storage.
    1040              :                 if let LocationMode::Attached(AttachedLocationConfig {
    1041              :                     generation: _,
    1042              :                     attach_mode: AttachmentMode::Stale,
    1043              :                 }) = &new_location_config.mode
    1044              :                 {
    1045              :                     if let Some(flush_timeout) = flush {
    1046              :                         match tokio::time::timeout(flush_timeout, tenant.flush_remote()).await {
    1047              :                             Ok(Err(e)) => {
    1048              :                                 return Err(UpsertLocationError::Flush(e));
    1049              :                             }
    1050              :                             Ok(Ok(_)) => return Ok(Some(tenant)),
    1051              :                             Err(_) => {
    1052            0 :                                 tracing::warn!(
    1053            0 :                                 timeout_ms = flush_timeout.as_millis(),
    1054            0 :                                 "Timed out waiting for flush to remote storage, proceeding anyway."
    1055            0 :                             )
    1056              :                             }
    1057              :                         }
    1058              :                     }
    1059              :                 }
    1060              : 
    1061              :                 return Ok(Some(tenant));
    1062              :             }
    1063              :             Some(FastPathModified::Secondary(_secondary_tenant)) => {
    1064              :                 Tenant::persist_tenant_config(self.conf, &tenant_shard_id, &new_location_config)
    1065              :                     .await?;
    1066              : 
    1067              :                 return Ok(None);
    1068              :             }
    1069              :             None => {
    1070              :                 // Proceed with the general case procedure, where we will shutdown & remove any existing
    1071              :                 // slot contents and replace with a fresh one
    1072              :             }
    1073              :         };
    1074              : 
    1075              :         // General case for upserts to TenantsMap, excluding the case above: we will substitute an
    1076              :         // InProgress value to the slot while we make whatever changes are required.  The state for
    1077              :         // the tenant is inaccessible to the outside world while we are doing this, but that is sensible:
    1078              :         // the state is ill-defined while we're in transition.  Transitions are async, but fast: we do
    1079              :         // not do significant I/O, and shutdowns should be prompt via cancellation tokens.
    1080              :         let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)
    1081            0 :             .map_err(|e| match e {
    1082              :                 TenantSlotError::AlreadyExists(_, _) | TenantSlotError::NotFound(_) => {
    1083            0 :                     unreachable!("Called with mode Any")
    1084              :                 }
    1085            0 :                 TenantSlotError::InProgress => UpsertLocationError::InProgress,
    1086            0 :                 TenantSlotError::MapState(s) => UpsertLocationError::Unavailable(s),
    1087            0 :             })?;
    1088              : 
    1089              :         match slot_guard.get_old_value() {
    1090              :             Some(TenantSlot::Attached(tenant)) => {
    1091              :                 // The case where we keep a Tenant alive was covered above in the special case
    1092              :                 // for Attached->Attached transitions in the same generation.  By this point,
    1093              :                 // if we see an attached tenant we know it will be discarded and should be
    1094              :                 // shut down.
    1095              :                 let (_guard, progress) = utils::completion::channel();
    1096              : 
    1097              :                 match tenant.get_attach_mode() {
    1098              :                     AttachmentMode::Single | AttachmentMode::Multi => {
    1099              :                         // Before we leave our state as the presumed holder of the latest generation,
    1100              :                         // flush any outstanding deletions to reduce the risk of leaking objects.
    1101              :                         self.resources.deletion_queue_client.flush_advisory()
    1102              :                     }
    1103              :                     AttachmentMode::Stale => {
     1104              :                         // If we're stale there's no point trying to flush deletions
    1105              :                     }
    1106              :                 };
    1107              : 
    1108            0 :                 info!("Shutting down attached tenant");
    1109              :                 match tenant.shutdown(progress, ShutdownMode::Hard).await {
    1110              :                     Ok(()) => {}
    1111              :                     Err(barrier) => {
    1112            0 :                         info!("Shutdown already in progress, waiting for it to complete");
    1113              :                         barrier.wait().await;
    1114              :                     }
    1115              :                 }
    1116              :                 slot_guard.drop_old_value().expect("We just shut it down");
    1117              : 
     1118              :                 // Edge case: if we were called with SpawnMode::Create but a Tenant already existed,
     1119              :                 // the caller thinks they are creating a new tenant when in fact one already exists.  We must switch to
    1120              :                 // Eager mode so that when starting this Tenant we properly probe remote storage for timelines,
    1121              :                 // rather than assuming it to be empty.
    1122              :                 spawn_mode = SpawnMode::Eager;
    1123              :             }
    1124              :             Some(TenantSlot::Secondary(state)) => {
    1125            0 :                 info!("Shutting down secondary tenant");
    1126              :                 state.shutdown().await;
    1127              :             }
    1128              :             Some(TenantSlot::InProgress(_)) => {
    1129              :                 // This should never happen: acquire_slot should error out
    1130              :                 // if the contents of a slot were InProgress.
    1131              :                 return Err(UpsertLocationError::Other(anyhow::anyhow!(
    1132              :                     "Acquired an InProgress slot, this is a bug."
    1133              :                 )));
    1134              :             }
    1135              :             None => {
    1136              :                 // Slot was vacant, nothing needs shutting down.
    1137              :             }
    1138              :         }
    1139              : 
    1140              :         let tenant_path = self.conf.tenant_path(&tenant_shard_id);
    1141              :         let timelines_path = self.conf.timelines_path(&tenant_shard_id);
    1142              : 
    1143              :         // Directory structure is the same for attached and secondary modes:
    1144              :         // create it if it doesn't exist.  Timeline load/creation expects the
    1145              :         // timelines/ subdir to already exist.
    1146              :         //
    1147              :         // Does not need to be fsync'd because local storage is just a cache.
    1148              :         tokio::fs::create_dir_all(&timelines_path)
    1149              :             .await
    1150            0 :             .with_context(|| format!("Creating {timelines_path}"))?;
    1151              : 
    1152              :         // Before activating either secondary or attached mode, persist the
    1153              :         // configuration, so that on restart we will re-attach (or re-start
    1154              :         // secondary) on the tenant.
    1155              :         Tenant::persist_tenant_config(self.conf, &tenant_shard_id, &new_location_config).await?;
    1156              : 
    1157              :         let new_slot = match &new_location_config.mode {
    1158              :             LocationMode::Secondary(secondary_config) => {
    1159              :                 let shard_identity = new_location_config.shard;
    1160              :                 TenantSlot::Secondary(SecondaryTenant::new(
    1161              :                     tenant_shard_id,
    1162              :                     shard_identity,
    1163              :                     new_location_config.tenant_conf,
    1164              :                     secondary_config,
    1165              :                 ))
    1166              :             }
    1167              :             LocationMode::Attached(_attach_config) => {
    1168              :                 let shard_identity = new_location_config.shard;
    1169              : 
    1170              :                 // Testing hack: if we are configured with no control plane, then drop the generation
    1171              :                 // from upserts.  This enables creating generation-less tenants even though neon_local
    1172              :                 // always uses generations when calling the location conf API.
    1173              :                 let attached_conf = if cfg!(feature = "testing") {
    1174              :                     let mut conf = AttachedTenantConf::try_from(new_location_config)?;
    1175              :                     if self.conf.control_plane_api.is_none() {
    1176              :                         conf.location.generation = Generation::none();
    1177              :                     }
    1178              :                     conf
    1179              :                 } else {
    1180              :                     AttachedTenantConf::try_from(new_location_config)?
    1181              :                 };
    1182              : 
    1183              :                 let tenant = tenant_spawn(
    1184              :                     self.conf,
    1185              :                     tenant_shard_id,
    1186              :                     &tenant_path,
    1187              :                     self.resources.clone(),
    1188              :                     attached_conf,
    1189              :                     shard_identity,
    1190              :                     None,
    1191              :                     self.tenants,
    1192              :                     spawn_mode,
    1193              :                     ctx,
    1194              :                 )?;
    1195              : 
    1196              :                 TenantSlot::Attached(tenant)
    1197              :             }
    1198              :         };
    1199              : 
    1200              :         let attached_tenant = if let TenantSlot::Attached(tenant) = &new_slot {
    1201              :             Some(tenant.clone())
    1202              :         } else {
    1203              :             None
    1204              :         };
    1205              : 
    1206              :         match slot_guard.upsert(new_slot) {
    1207              :             Err(TenantSlotUpsertError::InternalError(e)) => {
    1208              :                 Err(UpsertLocationError::Other(anyhow::anyhow!(e)))
    1209              :             }
    1210              :             Err(TenantSlotUpsertError::MapState(e)) => Err(UpsertLocationError::Unavailable(e)),
    1211              :             Err(TenantSlotUpsertError::ShuttingDown((new_slot, _completion))) => {
    1212              :                 // If we just called tenant_spawn() on a new tenant, and can't insert it into our map, then
    1213              :                 // we must not leak it: this would violate the invariant that after shutdown_all_tenants, all tenants
    1214              :                 // are shutdown.
     1215              :                 // are shut down.
    1216              :                 // We must shut it down inline here.
    1217              :                 match new_slot {
    1218              :                     TenantSlot::InProgress(_) => {
    1219              :                         // Unreachable because we never insert an InProgress
    1220              :                         unreachable!()
    1221              :                     }
    1222              :                     TenantSlot::Attached(tenant) => {
    1223              :                         let (_guard, progress) = utils::completion::channel();
    1224            0 :                         info!("Shutting down just-spawned tenant, because tenant manager is shut down");
    1225              :                         match tenant.shutdown(progress, ShutdownMode::Hard).await {
    1226              :                             Ok(()) => {
    1227            0 :                                 info!("Finished shutting down just-spawned tenant");
    1228              :                             }
    1229              :                             Err(barrier) => {
    1230            0 :                                 info!("Shutdown already in progress, waiting for it to complete");
    1231              :                                 barrier.wait().await;
    1232              :                             }
    1233              :                         }
    1234              :                     }
    1235              :                     TenantSlot::Secondary(secondary_tenant) => {
    1236              :                         secondary_tenant.shutdown().await;
    1237              :                     }
    1238              :                 }
    1239              : 
    1240              :                 Err(UpsertLocationError::Unavailable(
    1241              :                     TenantMapError::ShuttingDown,
    1242              :                 ))
    1243              :             }
    1244              :             Ok(()) => Ok(attached_tenant),
    1245              :         }
    1246              :     }
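                      :     // Hypothetical call site for `upsert_location` (sketch only; the construction mirrors the
                      :     // one used in `do_shard_split` below, and `generation`, `shard_identity` and `tenant_conf`
                      :     // are assumed to be in scope):
                      :     //
                      :     //     let location_conf = LocationConf {
                      :     //         mode: LocationMode::Attached(AttachedLocationConfig {
                      :     //             generation,
                      :     //             attach_mode: AttachmentMode::Single,
                      :     //         }),
                      :     //         shard: shard_identity,
                      :     //         tenant_conf,
                      :     //     };
                      :     //     tenant_manager
                      :     //         .upsert_location(tenant_shard_id, location_conf, None, SpawnMode::Eager, &ctx)
                      :     //         .await?;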
    1247              : 
    1248              :     /// Resetting a tenant is equivalent to detaching it, then attaching it again with the same
    1249              :     /// LocationConf that was last used to attach it.  Optionally, the local file cache may be
    1250              :     /// dropped before re-attaching.
    1251              :     ///
    1252              :     /// This is not part of a tenant's normal lifecycle: it is used for debug/support, in situations
    1253              :     /// where an issue is identified that would go away with a restart of the tenant.
    1254              :     ///
    1255              :     /// This does not have any special "force" shutdown of a tenant: it relies on the tenant's tasks
    1256              :     /// to respect the cancellation tokens used in normal shutdown().
    1257            0 :     #[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %drop_cache))]
    1258              :     pub(crate) async fn reset_tenant(
    1259              :         &self,
    1260              :         tenant_shard_id: TenantShardId,
    1261              :         drop_cache: bool,
    1262              :         ctx: &RequestContext,
    1263              :     ) -> anyhow::Result<()> {
    1264              :         let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
    1265              :         let Some(old_slot) = slot_guard.get_old_value() else {
    1266              :             anyhow::bail!("Tenant not found when trying to reset");
    1267              :         };
    1268              : 
    1269              :         let Some(tenant) = old_slot.get_attached() else {
    1270              :             slot_guard.revert();
    1271              :             anyhow::bail!("Tenant is not in attached state");
    1272              :         };
    1273              : 
    1274              :         let (_guard, progress) = utils::completion::channel();
    1275              :         match tenant.shutdown(progress, ShutdownMode::Hard).await {
    1276              :             Ok(()) => {
    1277              :                 slot_guard.drop_old_value()?;
    1278              :             }
    1279              :             Err(_barrier) => {
    1280              :                 slot_guard.revert();
    1281              :                 anyhow::bail!("Cannot reset Tenant, already shutting down");
    1282              :             }
    1283              :         }
    1284              : 
    1285              :         let tenant_path = self.conf.tenant_path(&tenant_shard_id);
    1286              :         let timelines_path = self.conf.timelines_path(&tenant_shard_id);
    1287              :         let config = Tenant::load_tenant_config(self.conf, &tenant_shard_id)?;
    1288              : 
    1289              :         if drop_cache {
    1290            0 :             tracing::info!("Dropping local file cache");
    1291              : 
    1292              :             match tokio::fs::read_dir(&timelines_path).await {
    1293              :                 Err(e) => {
    1294            0 :                     tracing::warn!("Failed to list timelines while dropping cache: {}", e);
    1295              :                 }
    1296              :                 Ok(mut entries) => {
    1297              :                     while let Some(entry) = entries.next_entry().await? {
    1298              :                         tokio::fs::remove_dir_all(entry.path()).await?;
    1299              :                     }
    1300              :                 }
    1301              :             }
    1302              :         }
    1303              : 
    1304              :         let shard_identity = config.shard;
    1305              :         let tenant = tenant_spawn(
    1306              :             self.conf,
    1307              :             tenant_shard_id,
    1308              :             &tenant_path,
    1309              :             self.resources.clone(),
    1310              :             AttachedTenantConf::try_from(config)?,
    1311              :             shard_identity,
    1312              :             None,
    1313              :             self.tenants,
    1314              :             SpawnMode::Eager,
    1315              :             ctx,
    1316              :         )?;
    1317              : 
    1318              :         slot_guard.upsert(TenantSlot::Attached(tenant))?;
    1319              : 
    1320              :         Ok(())
    1321              :     }
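                      :     // Hypothetical support invocation (sketch only): detach and re-attach the shard, clearing
                      :     // its local timeline cache first so that layers are re-downloaded on demand.
                      :     //
                      :     //     tenant_manager.reset_tenant(tenant_shard_id, /* drop_cache */ true, &ctx).await?;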
    1322              : 
    1323            0 :     pub(crate) fn get_attached_active_tenant_shards(&self) -> Vec<Arc<Tenant>> {
    1324            0 :         let locked = self.tenants.read().unwrap();
    1325            0 :         match &*locked {
    1326            0 :             TenantsMap::Initializing => Vec::new(),
    1327            0 :             TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => map
    1328            0 :                 .values()
    1329            0 :                 .filter_map(|slot| {
    1330            0 :                     slot.get_attached()
    1331            0 :                         .and_then(|t| if t.is_active() { Some(t.clone()) } else { None })
    1332            0 :                 })
    1333            0 :                 .collect(),
    1334              :         }
    1335            0 :     }
    1336              :     // Do some synchronous work for all tenant slots in Secondary state.  The provided
    1337              :     // callback should be small and fast, as it will be called inside the global
    1338              :     // TenantsMap lock.
    1339            0 :     pub(crate) fn foreach_secondary_tenants<F>(&self, mut func: F)
    1340            0 :     where
    1341            0 :         // TODO: let the callback return a hint to drop out of the loop early
    1342            0 :         F: FnMut(&TenantShardId, &Arc<SecondaryTenant>),
    1343            0 :     {
    1344            0 :         let locked = self.tenants.read().unwrap();
    1345              : 
    1346            0 :         let map = match &*locked {
    1347            0 :             TenantsMap::Initializing | TenantsMap::ShuttingDown(_) => return,
    1348            0 :             TenantsMap::Open(m) => m,
    1349              :         };
    1350              : 
    1351            0 :         for (tenant_id, slot) in map {
    1352            0 :             if let TenantSlot::Secondary(state) = slot {
    1353              :                 // Only expose secondary tenants that are not currently shutting down
    1354            0 :                 if !state.cancel.is_cancelled() {
    1355            0 :                     func(tenant_id, state)
    1356            0 :                 }
    1357            0 :             }
    1358              :         }
    1359            0 :     }
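                      :     // Hypothetical usage (sketch only): collect the ids of all live secondary locations,
                      :     // doing nothing under the lock beyond pushing into a Vec, per the comment above.
                      :     //
                      :     //     let mut secondary_ids = Vec::new();
                      :     //     tenant_manager.foreach_secondary_tenants(|id, _state| secondary_ids.push(*id));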
    1360              : 
    1361              :     /// Total list of all tenant slots: this includes attached, secondary, and InProgress.
    1362            0 :     pub(crate) fn list(&self) -> Vec<(TenantShardId, TenantSlot)> {
    1363            0 :         let locked = self.tenants.read().unwrap();
    1364            0 :         match &*locked {
    1365            0 :             TenantsMap::Initializing => Vec::new(),
    1366            0 :             TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => {
    1367            0 :                 map.iter().map(|(k, v)| (*k, v.clone())).collect()
    1368              :             }
    1369              :         }
    1370            0 :     }
    1371              : 
    1372            0 :     pub(crate) fn get(&self, tenant_shard_id: TenantShardId) -> Option<TenantSlot> {
    1373            0 :         let locked = self.tenants.read().unwrap();
    1374            0 :         match &*locked {
    1375            0 :             TenantsMap::Initializing => None,
    1376            0 :             TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => {
    1377            0 :                 map.get(&tenant_shard_id).cloned()
    1378              :             }
    1379              :         }
    1380            0 :     }
    1381              : 
    1382            0 :     pub(crate) async fn delete_tenant(
    1383            0 :         &self,
    1384            0 :         tenant_shard_id: TenantShardId,
    1385            0 :         activation_timeout: Duration,
    1386            0 :     ) -> Result<(), DeleteTenantError> {
    1387            0 :         super::span::debug_assert_current_span_has_tenant_id();
    1388              :         // We acquire a SlotGuard during this function to protect against concurrent
    1389              :         // changes while the ::prepare phase of DeleteTenantFlow executes, but then
    1390              :         // have to return the Tenant to the map while the background deletion runs.
    1391              :         //
    1392              :         // TODO: refactor deletion to happen outside the lifetime of a Tenant.
    1393              :         // Currently, deletion requires a reference to the tenants map in order to
    1394              :         // keep the Tenant in the map until deletion is complete, and then remove
    1395              :         // it at the end.
    1396              :         //
    1397              :         // See https://github.com/neondatabase/neon/issues/5080
    1398              : 
    1399            0 :         let slot_guard =
    1400            0 :             tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist)?;
    1401              : 
    1402              :         // unwrap is safe because we used MustExist mode when acquiring
    1403            0 :         let tenant = match slot_guard.get_old_value().as_ref().unwrap() {
    1404            0 :             TenantSlot::Attached(tenant) => tenant.clone(),
    1405              :             _ => {
    1406              :                 // Express "not attached" as equivalent to "not found"
    1407            0 :                 return Err(DeleteTenantError::NotAttached);
    1408              :             }
    1409              :         };
    1410              : 
    1411            0 :         match tenant.current_state() {
    1412            0 :             TenantState::Broken { .. } | TenantState::Stopping { .. } => {
    1413            0 :                 // If a tenant is broken or stopping, DeleteTenantFlow can
    1414            0 :                 // handle it: broken tenants proceed to delete, stopping tenants
    1415            0 :                 // are checked for deletion already in progress.
    1416            0 :             }
    1417              :             _ => {
    1418            0 :                 tenant
    1419            0 :                     .wait_to_become_active(activation_timeout)
    1420            0 :                     .await
    1421            0 :                     .map_err(|e| match e {
    1422              :                         GetActiveTenantError::WillNotBecomeActive(_)
    1423              :                         | GetActiveTenantError::Broken(_) => {
    1424            0 :                             DeleteTenantError::InvalidState(tenant.current_state())
    1425              :                         }
    1426            0 :                         GetActiveTenantError::Cancelled => DeleteTenantError::Cancelled,
    1427            0 :                         GetActiveTenantError::NotFound(_) => DeleteTenantError::NotAttached,
    1428              :                         GetActiveTenantError::WaitForActiveTimeout {
    1429            0 :                             latest_state: _latest_state,
    1430            0 :                             wait_time: _wait_time,
    1431            0 :                         } => DeleteTenantError::InvalidState(tenant.current_state()),
    1432            0 :                     })?;
    1433              :             }
    1434              :         }
    1435              : 
    1436            0 :         let result = DeleteTenantFlow::run(
    1437            0 :             self.conf,
    1438            0 :             self.resources.remote_storage.clone(),
    1439            0 :             &TENANTS,
    1440            0 :             tenant,
    1441            0 :             &self.cancel,
    1442            0 :         )
    1443            0 :         .await;
    1444              : 
     1445              :         // The Tenant goes back into the map in Stopping state; it will eventually be removed by DeleteTenantFlow
    1446            0 :         slot_guard.revert();
    1447            0 :         result
    1448            0 :     }
    1449              : 
    1450            0 :     #[instrument(skip_all, fields(tenant_id=%tenant.get_tenant_shard_id().tenant_id, shard_id=%tenant.get_tenant_shard_id().shard_slug(), new_shard_count=%new_shard_count.literal()))]
    1451              :     pub(crate) async fn shard_split(
    1452              :         &self,
    1453              :         tenant: Arc<Tenant>,
    1454              :         new_shard_count: ShardCount,
    1455              :         new_stripe_size: Option<ShardStripeSize>,
    1456              :         ctx: &RequestContext,
    1457              :     ) -> anyhow::Result<Vec<TenantShardId>> {
    1458              :         let tenant_shard_id = *tenant.get_tenant_shard_id();
    1459              :         let r = self
    1460              :             .do_shard_split(tenant, new_shard_count, new_stripe_size, ctx)
    1461              :             .await;
    1462              :         if r.is_err() {
    1463              :             // Shard splitting might have left the original shard in a partially shut down state (it
    1464              :             // stops the shard's remote timeline client).  Reset it to ensure we leave things in
    1465              :             // a working state.
    1466              :             if self.get(tenant_shard_id).is_some() {
    1467            0 :                 tracing::warn!("Resetting after shard split failure");
    1468              :                 if let Err(e) = self.reset_tenant(tenant_shard_id, false, ctx).await {
    1469              :                     // Log this error because our return value will still be the original error, not this one.  This is
    1470              :                     // a severe error: if this happens, we might be leaving behind a tenant that is not fully functional
    1471              :                     // (e.g. has uploads disabled).  We can't do anything else: if reset fails then shutting the tenant down or
    1472              :                     // setting it broken probably won't help either.
    1473            0 :                     tracing::error!("Failed to reset: {e}");
    1474              :                 }
    1475              :             }
    1476              :         }
    1477              : 
    1478              :         r
    1479              :     }
    1480              : 
    1481            0 :     pub(crate) async fn do_shard_split(
    1482            0 :         &self,
    1483            0 :         tenant: Arc<Tenant>,
    1484            0 :         new_shard_count: ShardCount,
    1485            0 :         new_stripe_size: Option<ShardStripeSize>,
    1486            0 :         ctx: &RequestContext,
    1487            0 :     ) -> anyhow::Result<Vec<TenantShardId>> {
    1488            0 :         let tenant_shard_id = *tenant.get_tenant_shard_id();
    1489            0 : 
    1490            0 :         // Validate the incoming request
    1491            0 :         if new_shard_count.count() <= tenant_shard_id.shard_count.count() {
    1492            0 :             anyhow::bail!("Requested shard count is not an increase");
    1493            0 :         }
    1494            0 :         let expansion_factor = new_shard_count.count() / tenant_shard_id.shard_count.count();
    1495            0 :         if !expansion_factor.is_power_of_two() {
    1496            0 :             anyhow::bail!("Requested split is not a power of two");
    1497            0 :         }
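                      :         // Worked example of the checks above: splitting a 2-shard tenant into 8 shards gives an
                      :         // expansion factor of 8 / 2 = 4, a power of two, so the request is accepted; a request
                      :         // for 6 shards gives a factor of 3 and is rejected, as is any count that does not
                      :         // strictly increase.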
    1498              : 
    1499            0 :         if let Some(new_stripe_size) = new_stripe_size {
    1500            0 :             if tenant.get_shard_stripe_size() != new_stripe_size
    1501            0 :                 && tenant_shard_id.shard_count.count() > 1
    1502              :             {
    1503              :                 // This tenant already has multiple shards, it is illegal to try and change its stripe size
    1504            0 :                 anyhow::bail!(
    1505            0 :                     "Shard stripe size may not be modified once tenant has multiple shards"
    1506            0 :                 );
    1507            0 :             }
    1508            0 :         }
    1509              : 
    1510              :         // Plan: identify what the new child shards will be
    1511            0 :         let child_shards = tenant_shard_id.split(new_shard_count);
    1512            0 :         tracing::info!(
    1513            0 :             "Shard {} splits into: {}",
    1514            0 :             tenant_shard_id.to_index(),
    1515            0 :             child_shards
    1516            0 :                 .iter()
    1517            0 :                 .map(|id| format!("{}", id.to_index()))
    1518            0 :                 .join(",")
    1519            0 :         );
    1520              : 
    1521            0 :         fail::fail_point!("shard-split-pre-prepare", |_| Err(anyhow::anyhow!(
    1522            0 :             "failpoint"
    1523            0 :         )));
    1524              : 
    1525            0 :         let parent_shard_identity = tenant.shard_identity;
    1526            0 :         let parent_tenant_conf = tenant.get_tenant_conf();
    1527            0 :         let parent_generation = tenant.generation;
    1528              : 
    1529              :         // Phase 1: Write out child shards' remote index files, in the parent tenant's current generation
    1530            0 :         if let Err(e) = tenant.split_prepare(&child_shards).await {
    1531              :             // If [`Tenant::split_prepare`] fails, we must reload the tenant, because it might
    1532              :             // have been left in a partially-shut-down state.
    1533            0 :             tracing::warn!("Failed to prepare for split: {e}, reloading Tenant before returning");
    1534            0 :             return Err(e);
    1535            0 :         }
    1536            0 : 
    1537            0 :         fail::fail_point!("shard-split-post-prepare", |_| Err(anyhow::anyhow!(
    1538            0 :             "failpoint"
    1539            0 :         )));
    1540              : 
    1541            0 :         self.resources.deletion_queue_client.flush_advisory();
    1542            0 : 
    1543            0 :         // Phase 2: Put the parent shard to InProgress and grab a reference to the parent Tenant
    1544            0 :         drop(tenant);
    1545            0 :         let mut parent_slot_guard =
    1546            0 :             tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
    1547            0 :         let parent = match parent_slot_guard.get_old_value() {
    1548            0 :             Some(TenantSlot::Attached(t)) => t,
    1549            0 :             Some(TenantSlot::Secondary(_)) => anyhow::bail!("Tenant location in secondary mode"),
    1550              :             Some(TenantSlot::InProgress(_)) => {
     1551              :                 // tenant_map_acquire_slot never returns InProgress: if a slot was InProgress,
    1552              :                 // it would return an error.
    1553            0 :                 unreachable!()
    1554              :             }
    1555              :             None => {
    1556              :                 // We don't actually need the parent shard to still be attached to do our work, but it's
    1557              :                 // a weird enough situation that the caller probably didn't want us to continue working
    1558              :                 // if they had detached the tenant they requested the split on.
    1559            0 :                 anyhow::bail!("Detached parent shard in the middle of split!")
    1560              :             }
    1561              :         };
    1562            0 :         fail::fail_point!("shard-split-pre-hardlink", |_| Err(anyhow::anyhow!(
    1563            0 :             "failpoint"
    1564            0 :         )));
    1565              :         // Optimization: hardlink layers from the parent into the children, so that they don't have to
    1566              :         // re-download & duplicate the data referenced in their initial IndexPart
    1567            0 :         self.shard_split_hardlink(parent, child_shards.clone())
    1568            0 :             .await?;
    1569            0 :         fail::fail_point!("shard-split-post-hardlink", |_| Err(anyhow::anyhow!(
    1570            0 :             "failpoint"
    1571            0 :         )));
    1572              : 
    1573              :         // Take a snapshot of where the parent's WAL ingest had got to: we will wait for
    1574              :         // child shards to reach this point.
    1575            0 :         let mut target_lsns = HashMap::new();
    1576            0 :         for timeline in parent.timelines.lock().unwrap().clone().values() {
    1577            0 :             target_lsns.insert(timeline.timeline_id, timeline.get_last_record_lsn());
    1578            0 :         }
    1579              : 
     1580              :         // TODO: we should have the parent shard stop its WAL ingest here; it's a waste of resources
    1581              :         // and could slow down the children trying to catch up.
    1582              : 
    1583              :         // Phase 3: Spawn the child shards
    1584            0 :         for child_shard in &child_shards {
    1585            0 :             let mut child_shard_identity = parent_shard_identity;
    1586            0 :             if let Some(new_stripe_size) = new_stripe_size {
    1587            0 :                 child_shard_identity.stripe_size = new_stripe_size;
    1588            0 :             }
    1589            0 :             child_shard_identity.count = child_shard.shard_count;
    1590            0 :             child_shard_identity.number = child_shard.shard_number;
    1591            0 : 
    1592            0 :             let child_location_conf = LocationConf {
    1593            0 :                 mode: LocationMode::Attached(AttachedLocationConfig {
    1594            0 :                     generation: parent_generation,
    1595            0 :                     attach_mode: AttachmentMode::Single,
    1596            0 :                 }),
    1597            0 :                 shard: child_shard_identity,
    1598            0 :                 tenant_conf: parent_tenant_conf.clone(),
    1599            0 :             };
    1600            0 : 
    1601            0 :             self.upsert_location(
    1602            0 :                 *child_shard,
    1603            0 :                 child_location_conf,
    1604            0 :                 None,
    1605            0 :                 SpawnMode::Eager,
    1606            0 :                 ctx,
    1607            0 :             )
    1608            0 :             .await?;
    1609              :         }
    1610              : 
    1611            0 :         fail::fail_point!("shard-split-post-child-conf", |_| Err(anyhow::anyhow!(
    1612            0 :             "failpoint"
    1613            0 :         )));
    1614              : 
     1615              :         // Phase 4: wait for child shards' WAL ingest to catch up to target LSN
    1616            0 :         for child_shard_id in &child_shards {
    1617            0 :             let child_shard_id = *child_shard_id;
    1618            0 :             let child_shard = {
    1619            0 :                 let locked = TENANTS.read().unwrap();
    1620            0 :                 let peek_slot =
    1621            0 :                     tenant_map_peek_slot(&locked, &child_shard_id, TenantSlotPeekMode::Read)?;
    1622            0 :                 peek_slot.and_then(|s| s.get_attached()).cloned()
    1623              :             };
    1624            0 :             if let Some(t) = child_shard {
    1625              :                 // Wait for the child shard to become active: this should be very quick because it only
    1626              :                 // has to download the index_part that we just uploaded when creating it.
    1627            0 :                 if let Err(e) = t.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await {
    1628              :                     // This is not fatal: we have durably created the child shard.  It just makes the
     1629              :                     // split operation less seamless for clients, as we may detach the parent
    1630              :                     // shard before the child shards are fully ready to serve requests.
    1631            0 :                     tracing::warn!("Failed to wait for shard {child_shard_id} to activate: {e}");
    1632            0 :                     continue;
    1633            0 :                 }
    1634            0 : 
    1635            0 :                 let timelines = t.timelines.lock().unwrap().clone();
    1636            0 :                 for timeline in timelines.values() {
    1637            0 :                     let Some(target_lsn) = target_lsns.get(&timeline.timeline_id) else {
    1638            0 :                         continue;
    1639              :                     };
    1640              : 
    1641            0 :                     tracing::info!(
    1642            0 :                         "Waiting for child shard {}/{} to reach target lsn {}...",
    1643            0 :                         child_shard_id,
    1644            0 :                         timeline.timeline_id,
    1645            0 :                         target_lsn
    1646            0 :                     );
    1647              : 
    1648            0 :                     fail::fail_point!("shard-split-lsn-wait", |_| Err(anyhow::anyhow!(
    1649            0 :                         "failpoint"
    1650            0 :                     )));
    1651            0 :                     if let Err(e) = timeline
    1652            0 :                         .wait_lsn(
    1653            0 :                             *target_lsn,
    1654            0 :                             crate::tenant::timeline::WaitLsnWaiter::Tenant,
    1655            0 :                             ctx,
    1656            0 :                         )
    1657            0 :                         .await
    1658              :                     {
     1659              :                         // Failure here might mean shutdown; in any case, this part is an optimization
    1660              :                         // and we shouldn't hold up the split operation.
    1661            0 :                         tracing::warn!(
    1662            0 :                             "Failed to wait for timeline {} to reach lsn {target_lsn}: {e}",
    1663            0 :                             timeline.timeline_id
    1664            0 :                         );
    1665              :                     } else {
    1666            0 :                         tracing::info!(
    1667            0 :                             "Child shard {}/{} reached target lsn {}",
    1668            0 :                             child_shard_id,
    1669            0 :                             timeline.timeline_id,
    1670            0 :                             target_lsn
    1671            0 :                         );
    1672              :                     }
    1673              :                 }
    1674            0 :             }
    1675              :         }
    1676              : 
    1677              :         // Phase 5: Shut down the parent shard, and erase it from disk
    1678            0 :         let (_guard, progress) = completion::channel();
    1679            0 :         match parent.shutdown(progress, ShutdownMode::Hard).await {
    1680            0 :             Ok(()) => {}
    1681            0 :             Err(other) => {
    1682            0 :                 other.wait().await;
    1683              :             }
    1684              :         }
    1685            0 :         let local_tenant_directory = self.conf.tenant_path(&tenant_shard_id);
    1686            0 :         let tmp_path = safe_rename_tenant_dir(&local_tenant_directory)
    1687            0 :             .await
    1688            0 :             .with_context(|| format!("local tenant directory {local_tenant_directory:?} rename"))?;
    1689            0 :         self.spawn_background_purge(tmp_path);
    1690            0 : 
    1691            0 :         fail::fail_point!("shard-split-pre-finish", |_| Err(anyhow::anyhow!(
    1692            0 :             "failpoint"
    1693            0 :         )));
    1694              : 
    1695            0 :         parent_slot_guard.drop_old_value()?;
    1696              : 
    1697              :         // Phase 6: Release the InProgress on the parent shard
    1698            0 :         drop(parent_slot_guard);
    1699            0 : 
    1700            0 :         Ok(child_shards)
    1701            0 :     }
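                      :     // Hypothetical invocation (sketch only; the `ShardCount` constructor shown is illustrative):
                      :     // split a tenant into four shards while keeping its existing stripe size.
                      :     //
                      :     //     let children = tenant_manager
                      :     //         .shard_split(tenant, ShardCount::new(4), None, &ctx)
                      :     //         .await?;
                      :     //     assert_eq!(children.len(), 4);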
    1702              : 
    1703              :     /// Part of [`Self::shard_split`]: hard link parent shard layers into child shards, as an optimization
    1704              :     /// to avoid the children downloading them again.
    1705              :     ///
    1706              :     /// For each resident layer in the parent shard, we will hard link it into all of the child shards.
    1707            0 :     async fn shard_split_hardlink(
    1708            0 :         &self,
    1709            0 :         parent_shard: &Tenant,
    1710            0 :         child_shards: Vec<TenantShardId>,
    1711            0 :     ) -> anyhow::Result<()> {
    1712            0 :         debug_assert_current_span_has_tenant_id();
    1713            0 : 
    1714            0 :         let parent_path = self.conf.tenant_path(parent_shard.get_tenant_shard_id());
    1715            0 :         let (parent_timelines, parent_layers) = {
    1716            0 :             let mut parent_layers = Vec::new();
    1717            0 :             let timelines = parent_shard.timelines.lock().unwrap().clone();
    1718            0 :             let parent_timelines = timelines.keys().cloned().collect::<Vec<_>>();
    1719            0 :             for timeline in timelines.values() {
    1720            0 :                 let timeline_layers = timeline
    1721            0 :                     .layers
    1722            0 :                     .read()
    1723            0 :                     .await
    1724            0 :                     .likely_resident_layers()
    1725            0 :                     .collect::<Vec<_>>();
    1726              : 
    1727            0 :                 for layer in timeline_layers {
    1728            0 :                     let relative_path = layer
    1729            0 :                         .local_path()
    1730            0 :                         .strip_prefix(&parent_path)
    1731            0 :                         .context("Removing prefix from parent layer path")?;
    1732            0 :                     parent_layers.push(relative_path.to_owned());
    1733              :                 }
    1734              :             }
    1735            0 :             debug_assert!(
    1736            0 :                 !parent_layers.is_empty(),
    1737            0 :                 "shutdown cannot empty the layermap"
    1738              :             );
    1739            0 :             (parent_timelines, parent_layers)
    1740            0 :         };
    1741            0 : 
    1742            0 :         let mut child_prefixes = Vec::new();
    1743            0 :         let mut create_dirs = Vec::new();
    1744              : 
    1745            0 :         for child in child_shards {
    1746            0 :             let child_prefix = self.conf.tenant_path(&child);
    1747            0 :             create_dirs.push(child_prefix.clone());
    1748            0 :             create_dirs.extend(
    1749            0 :                 parent_timelines
    1750            0 :                     .iter()
    1751            0 :                     .map(|t| self.conf.timeline_path(&child, t)),
    1752            0 :             );
    1753            0 : 
    1754            0 :             child_prefixes.push(child_prefix);
    1755            0 :         }
    1756              : 
    1757              :         // Since we will do a large number of small filesystem metadata operations, batch them into
    1758              :         // spawn_blocking calls rather than doing each one as a tokio::fs round-trip.
    1759            0 :         let jh = tokio::task::spawn_blocking(move || -> anyhow::Result<usize> {
    1760            0 :             for dir in &create_dirs {
    1761            0 :                 if let Err(e) = std::fs::create_dir_all(dir) {
    1762              :                     // Ignore AlreadyExists errors, drop out on all other errors
    1763            0 :                     match e.kind() {
    1764            0 :                         std::io::ErrorKind::AlreadyExists => {}
    1765              :                         _ => {
    1766            0 :                             return Err(anyhow::anyhow!(e).context(format!("Creating {dir}")));
    1767              :                         }
    1768              :                     }
    1769            0 :                 }
    1770              :             }
    1771              : 
    1772            0 :             for child_prefix in child_prefixes {
    1773            0 :                 for relative_layer in &parent_layers {
    1774            0 :                     let parent_path = parent_path.join(relative_layer);
    1775            0 :                     let child_path = child_prefix.join(relative_layer);
    1776            0 :                     if let Err(e) = std::fs::hard_link(&parent_path, &child_path) {
    1777            0 :                         match e.kind() {
    1778            0 :                             std::io::ErrorKind::AlreadyExists => {}
    1779              :                             std::io::ErrorKind::NotFound => {
    1780            0 :                                 tracing::info!(
    1781            0 :                                     "Layer {} not found during hard-linking, evicted during split?",
    1782            0 :                                     relative_layer
    1783            0 :                                 );
    1784              :                             }
    1785              :                             _ => {
    1786            0 :                                 return Err(anyhow::anyhow!(e).context(format!(
    1787            0 :                                     "Hard linking {relative_layer} into {child_prefix}"
    1788            0 :                                 )))
    1789              :                             }
    1790              :                         }
    1791            0 :                     }
    1792              :                 }
    1793              :             }
    1794              : 
    1795              :             // Durability is not required for correctness, but if we crashed during split and
    1796              :             // then restarted with empty timeline dirs, it would be very inefficient to
    1797              :             // re-populate from remote storage.
    1798            0 :             for dir in create_dirs {
    1799            0 :                 if let Err(e) = crashsafe::fsync(&dir) {
    1800              :                     // Something removed a newly created timeline dir out from underneath us?  Extremely
    1801              :                     // unexpected, but not worth panic'ing over as this whole function is just an
    1802              :                     // optimization.
    1803            0 :                     tracing::warn!("Failed to fsync directory {dir}: {e}")
    1804            0 :                 }
    1805              :             }
    1806              : 
    1807            0 :             Ok(parent_layers.len())
    1808            0 :         });
    1809            0 : 
    1810            0 :         match jh.await {
    1811            0 :             Ok(Ok(layer_count)) => {
    1812            0 :                 tracing::info!(count = layer_count, "Hard linked layers into child shards");
    1813              :             }
    1814            0 :             Ok(Err(e)) => {
    1815            0 :                 // This is an optimization, so we tolerate failure.
    1816            0 :                 tracing::warn!("Error hard-linking layers, proceeding anyway: {e}")
    1817              :             }
    1818            0 :             Err(e) => {
    1819            0 :                 // This is something totally unexpected like a panic, so bail out.
    1820            0 :                 anyhow::bail!("Error joining hard linking task: {e}");
    1821              :             }
    1822              :         }
    1823              : 
    1824            0 :         Ok(())
    1825            0 :     }
    1826              : 
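The hard-link fan-out above reduces to a small, reusable pattern: batch the many small filesystem metadata operations into a single `spawn_blocking` call, and treat `AlreadyExists` and `NotFound` as benign because the whole step is only an optimization. A minimal, self-contained sketch of that pattern follows (assuming only the `tokio` and `anyhow` crates; the function name `hardlink_into_children` is hypothetical and not part of the pageserver API):

    use std::path::PathBuf;

    // Hard-link every relative path from `parent` into each directory in `children`,
    // doing all of the blocking filesystem work inside one spawn_blocking call.
    async fn hardlink_into_children(
        parent: PathBuf,
        children: Vec<PathBuf>,
        relative_paths: Vec<PathBuf>,
    ) -> anyhow::Result<usize> {
        tokio::task::spawn_blocking(move || -> anyhow::Result<usize> {
            let mut linked = 0;
            for child in &children {
                for rel in &relative_paths {
                    let src = parent.join(rel);
                    let dst = child.join(rel);
                    if let Some(dir) = dst.parent() {
                        std::fs::create_dir_all(dir)?;
                    }
                    match std::fs::hard_link(&src, &dst) {
                        Ok(()) => linked += 1,
                        // Benign: an earlier attempt already linked this file.
                        Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {}
                        // Benign: the source disappeared (e.g. was evicted) in the meantime.
                        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
                        Err(e) => return Err(anyhow::anyhow!(e)),
                    }
                }
            }
            Ok(linked)
        })
        .await? // a panic in the blocking task surfaces as a JoinError here
    }
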
    1827              :     ///
    1828              :     /// Shut down all tenants. This runs as part of pageserver shutdown.
    1829              :     ///
    1830              :     /// NB: We leave the tenants in the map, so that they remain accessible through
    1831              :     /// the management API until we shut it down. If we removed the shut-down tenants
    1832              :     /// from the tenants map, the management API would return 404 for these tenants,
    1833              : /// because TenantsMap::get() would then return `None` for them.
    1834              : /// That could easily be misinterpreted by the control plane, the consumer of the
    1835              : /// management API. For example, it could attach the tenant to a different pageserver.
    1836              :     /// We would then be in split-brain once this pageserver restarts.
    1837            0 :     #[instrument(skip_all)]
    1838              :     pub(crate) async fn shutdown(&self) {
    1839              :         self.cancel.cancel();
    1840              : 
    1841              :         shutdown_all_tenants0(self.tenants).await
    1842              :     }
    1843              : 
    1844              :     /// When we have moved a tenant's content to a temporary directory, we may delete it lazily in
    1845              :     /// the background, and thereby avoid blocking any API requests on this deletion completing.
    1846            0 :     fn spawn_background_purge(&self, tmp_path: Utf8PathBuf) {
    1847            0 :         // Although we are cleaning up the tenant, this task is not meant to be bound by the lifetime of the tenant in memory.
    1848            0 :         // After a tenant is detached, there are no more task_mgr tasks for that tenant_id.
    1849            0 :         let task_tenant_id = None;
    1850            0 : 
    1851            0 :         task_mgr::spawn(
    1852            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
    1853            0 :             TaskKind::MgmtRequest,
    1854            0 :             task_tenant_id,
    1855            0 :             None,
    1856            0 :             "tenant_files_delete",
    1857            0 :             false,
    1858            0 :             async move {
    1859            0 :                 fs::remove_dir_all(tmp_path.as_path())
    1860            0 :                     .await
    1861            0 :                     .with_context(|| format!("tenant directory {:?} deletion", tmp_path))
    1862            0 :             },
    1863            0 :         );
    1864            0 :     }
    1865              : 
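Callers such as the detach path below pair this helper with a rename: the tenant directory is first renamed to a temporary name, so its canonical path disappears immediately, and only the renamed directory is purged lazily. A minimal sketch of that pattern, with hypothetical names and without the pageserver's task_mgr plumbing (it must run inside a tokio runtime because it spawns a task):

    use std::path::PathBuf;

    // Rename the directory out of the way synchronously, then delete the renamed
    // directory lazily so callers are not blocked on a large recursive removal.
    fn schedule_dir_purge(dir: PathBuf) -> anyhow::Result<()> {
        let tmp = dir.with_extension("___deleting");
        std::fs::rename(&dir, &tmp)?;
        tokio::spawn(async move {
            if let Err(e) = tokio::fs::remove_dir_all(&tmp).await {
                eprintln!("background purge of {tmp:?} failed: {e}");
            }
        });
        Ok(())
    }
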
    1866            0 :     pub(crate) async fn detach_tenant(
    1867            0 :         &self,
    1868            0 :         conf: &'static PageServerConf,
    1869            0 :         tenant_shard_id: TenantShardId,
    1870            0 :         detach_ignored: bool,
    1871            0 :         deletion_queue_client: &DeletionQueueClient,
    1872            0 :     ) -> Result<(), TenantStateError> {
    1873            0 :         let tmp_path = self
    1874            0 :             .detach_tenant0(
    1875            0 :                 conf,
    1876            0 :                 &TENANTS,
    1877            0 :                 tenant_shard_id,
    1878            0 :                 detach_ignored,
    1879            0 :                 deletion_queue_client,
    1880            0 :             )
    1881            0 :             .await?;
    1882            0 :         self.spawn_background_purge(tmp_path);
    1883            0 : 
    1884            0 :         Ok(())
    1885            0 :     }
    1886              : 
    1887            0 :     async fn detach_tenant0(
    1888            0 :         &self,
    1889            0 :         conf: &'static PageServerConf,
    1890            0 :         tenants: &std::sync::RwLock<TenantsMap>,
    1891            0 :         tenant_shard_id: TenantShardId,
    1892            0 :         detach_ignored: bool,
    1893            0 :         deletion_queue_client: &DeletionQueueClient,
    1894            0 :     ) -> Result<Utf8PathBuf, TenantStateError> {
    1895            0 :         let tenant_dir_rename_operation = |tenant_id_to_clean: TenantShardId| async move {
    1896            0 :             let local_tenant_directory = conf.tenant_path(&tenant_id_to_clean);
    1897            0 :             safe_rename_tenant_dir(&local_tenant_directory)
    1898            0 :                 .await
    1899            0 :                 .with_context(|| {
    1900            0 :                     format!("local tenant directory {local_tenant_directory:?} rename")
    1901            0 :                 })
    1902            0 :         };
    1903              : 
    1904            0 :         let removal_result = remove_tenant_from_memory(
    1905            0 :             tenants,
    1906            0 :             tenant_shard_id,
    1907            0 :             tenant_dir_rename_operation(tenant_shard_id),
    1908            0 :         )
    1909            0 :         .await;
    1910              : 
    1911              :         // Flush pending deletions, so that they have a good chance of passing validation
    1912              :         // before this tenant is potentially re-attached elsewhere.
    1913            0 :         deletion_queue_client.flush_advisory();
    1914            0 : 
    1915            0 :         // Ignored tenants are not present in memory, so the removal-from-memory operation above will bail.
    1916            0 :         // Before returning that error, check for the ignored-tenant case: then we only need to clean up its local files.
    1917            0 :         if detach_ignored
    1918            0 :             && matches!(
    1919            0 :                 removal_result,
    1920              :                 Err(TenantStateError::SlotError(TenantSlotError::NotFound(_)))
    1921              :             )
    1922              :         {
    1923            0 :             let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
    1924            0 :             if tenant_ignore_mark.exists() {
    1925            0 :                 info!("Detaching an ignored tenant");
    1926            0 :                 let tmp_path = tenant_dir_rename_operation(tenant_shard_id)
    1927            0 :                     .await
    1928            0 :                     .with_context(|| {
    1929            0 :                         format!("Ignored tenant {tenant_shard_id} local directory rename")
    1930            0 :                     })?;
    1931            0 :                 return Ok(tmp_path);
    1932            0 :             }
    1933            0 :         }
    1934              : 
    1935            0 :         removal_result
    1936            0 :     }
    1937              : 
    1938            0 :     pub(crate) fn list_tenants(
    1939            0 :         &self,
    1940            0 :     ) -> Result<Vec<(TenantShardId, TenantState, Generation)>, TenantMapListError> {
    1941            0 :         let tenants = TENANTS.read().unwrap();
    1942            0 :         let m = match &*tenants {
    1943            0 :             TenantsMap::Initializing => return Err(TenantMapListError::Initializing),
    1944            0 :             TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m,
    1945            0 :         };
    1946            0 :         Ok(m.iter()
    1947            0 :             .filter_map(|(id, tenant)| match tenant {
    1948            0 :                 TenantSlot::Attached(tenant) => {
    1949            0 :                     Some((*id, tenant.current_state(), tenant.generation()))
    1950              :                 }
    1951            0 :                 TenantSlot::Secondary(_) => None,
    1952            0 :                 TenantSlot::InProgress(_) => None,
    1953            0 :             })
    1954            0 :             .collect())
    1955            0 :     }
    1956              : }
    1957              : 
    1958            0 : #[derive(Debug, thiserror::Error)]
    1959              : pub(crate) enum GetTenantError {
    1960              :     /// NotFound is a TenantId rather than TenantShardId, because this error type is used from
    1961              :     /// getters that use a TenantId and a ShardSelector, not just getters that target a specific shard.
    1962              :     #[error("Tenant {0} not found")]
    1963              :     NotFound(TenantId),
    1964              : 
    1965              :     #[error("Tenant {0} is not active")]
    1966              :     NotActive(TenantShardId),
    1967              : 
    1968              :     // Initializing or shutting down: cannot authoritatively say whether we have this tenant
    1969              :     #[error("Tenant map is not available: {0}")]
    1970              :     MapState(#[from] TenantMapError),
    1971              : }
    1972              : 
    1973            0 : #[derive(thiserror::Error, Debug)]
    1974              : pub(crate) enum GetActiveTenantError {
    1975              :     /// We may time out either while TenantSlot is InProgress, or while the Tenant
    1976              :     /// is in a non-Active state
    1977              :     #[error(
    1978              :         "Timed out waiting {wait_time:?} for tenant active state. Latest state: {latest_state:?}"
    1979              :     )]
    1980              :     WaitForActiveTimeout {
    1981              :         latest_state: Option<TenantState>,
    1982              :         wait_time: Duration,
    1983              :     },
    1984              : 
    1985              :     /// The TenantSlot is absent, or in secondary mode
    1986              :     #[error(transparent)]
    1987              :     NotFound(#[from] GetTenantError),
    1988              : 
    1989              :     /// Cancellation token fired while we were waiting
    1990              :     #[error("cancelled")]
    1991              :     Cancelled,
    1992              : 
    1993              :     /// Tenant exists, but is in a state that cannot become active (e.g. Stopping, Broken)
    1994              :     #[error("will not become active.  Current state: {0}")]
    1995              :     WillNotBecomeActive(TenantState),
    1996              : 
    1997              :     /// Broken is logically a subset of WillNotBecomeActive, but a distinct error is useful as
    1998              :     /// WillNotBecomeActive is a permitted error under some circumstances, whereas broken should
    1999              :     /// never happen.
    2000              :     #[error("Tenant is broken: {0}")]
    2001              :     Broken(String),
    2002              : }
    2003              : 
    2004              : /// Get a [`Tenant`] in its active state. If the tenant_id is currently in [`TenantSlot::InProgress`]
    2005              : /// state, then wait for up to `timeout`.  If the [`Tenant`] is not currently in [`TenantState::Active`],
    2006              : /// then wait for up to `timeout` (minus however long we waited for the slot).
    2007            0 : pub(crate) async fn get_active_tenant_with_timeout(
    2008            0 :     tenant_id: TenantId,
    2009            0 :     shard_selector: ShardSelector,
    2010            0 :     timeout: Duration,
    2011            0 :     cancel: &CancellationToken,
    2012            0 : ) -> Result<Arc<Tenant>, GetActiveTenantError> {
    2013            0 :     enum WaitFor {
    2014            0 :         Barrier(utils::completion::Barrier),
    2015            0 :         Tenant(Arc<Tenant>),
    2016            0 :     }
    2017            0 : 
    2018            0 :     let wait_start = Instant::now();
    2019            0 :     let deadline = wait_start + timeout;
    2020              : 
    2021            0 :     let (wait_for, tenant_shard_id) = {
    2022            0 :         let locked = TENANTS.read().unwrap();
    2023              : 
    2024              :         // Resolve TenantId to TenantShardId
    2025            0 :         let tenant_shard_id = locked
    2026            0 :             .resolve_attached_shard(&tenant_id, shard_selector)
    2027            0 :             .ok_or(GetActiveTenantError::NotFound(GetTenantError::NotFound(
    2028            0 :                 tenant_id,
    2029            0 :             )))?;
    2030              : 
    2031            0 :         let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
    2032            0 :             .map_err(GetTenantError::MapState)?;
    2033            0 :         match peek_slot {
    2034            0 :             Some(TenantSlot::Attached(tenant)) => {
    2035            0 :                 match tenant.current_state() {
    2036              :                     TenantState::Active => {
    2037              :                         // Fast path: we don't need to do any async waiting.
    2038            0 :                         return Ok(tenant.clone());
    2039              :                     }
    2040              :                     _ => {
    2041            0 :                         tenant.activate_now();
    2042            0 :                         (WaitFor::Tenant(tenant.clone()), tenant_shard_id)
    2043              :                     }
    2044              :                 }
    2045              :             }
    2046              :             Some(TenantSlot::Secondary(_)) => {
    2047            0 :                 return Err(GetActiveTenantError::NotFound(GetTenantError::NotActive(
    2048            0 :                     tenant_shard_id,
    2049            0 :                 )))
    2050              :             }
    2051            0 :             Some(TenantSlot::InProgress(barrier)) => {
    2052            0 :                 (WaitFor::Barrier(barrier.clone()), tenant_shard_id)
    2053              :             }
    2054              :             None => {
    2055            0 :                 return Err(GetActiveTenantError::NotFound(GetTenantError::NotFound(
    2056            0 :                     tenant_id,
    2057            0 :                 )))
    2058              :             }
    2059              :         }
    2060              :     };
    2061              : 
    2062            0 :     let tenant = match wait_for {
    2063            0 :         WaitFor::Barrier(barrier) => {
    2064            0 :             tracing::debug!("Waiting for tenant InProgress state to pass...");
    2065            0 :             timeout_cancellable(
    2066            0 :                 deadline.duration_since(Instant::now()),
    2067            0 :                 cancel,
    2068            0 :                 barrier.wait(),
    2069            0 :             )
    2070            0 :             .await
    2071            0 :             .map_err(|e| match e {
    2072            0 :                 TimeoutCancellableError::Timeout => GetActiveTenantError::WaitForActiveTimeout {
    2073            0 :                     latest_state: None,
    2074            0 :                     wait_time: wait_start.elapsed(),
    2075            0 :                 },
    2076            0 :                 TimeoutCancellableError::Cancelled => GetActiveTenantError::Cancelled,
    2077            0 :             })?;
    2078              :             {
    2079            0 :                 let locked = TENANTS.read().unwrap();
    2080            0 :                 let peek_slot =
    2081            0 :                     tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
    2082            0 :                         .map_err(GetTenantError::MapState)?;
    2083            0 :                 match peek_slot {
    2084            0 :                     Some(TenantSlot::Attached(tenant)) => tenant.clone(),
    2085              :                     _ => {
    2086            0 :                         return Err(GetActiveTenantError::NotFound(GetTenantError::NotActive(
    2087            0 :                             tenant_shard_id,
    2088            0 :                         )))
    2089              :                     }
    2090              :                 }
    2091              :             }
    2092              :         }
    2093            0 :         WaitFor::Tenant(tenant) => tenant,
    2094              :     };
    2095              : 
    2096            0 :     tracing::debug!("Waiting for tenant to enter active state...");
    2097            0 :     tenant
    2098            0 :         .wait_to_become_active(deadline.duration_since(Instant::now()))
    2099            0 :         .await?;
    2100            0 :     Ok(tenant)
    2101            0 : }
    2102              : 
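The `timeout_cancellable` call above layers a deadline and a cancellation token over the barrier wait. The same behaviour can be written directly with `tokio::select!`; here is a minimal sketch under that assumption (the error enum and function name are hypothetical; only the `tokio` and `tokio_util` crates are assumed):

    use std::time::Duration;
    use tokio_util::sync::CancellationToken;

    enum WaitError {
        Timeout,
        Cancelled,
    }

    // Run `fut` until it completes, the timeout elapses, or the token is cancelled,
    // whichever happens first.
    async fn wait_with_timeout_and_cancel<F: std::future::Future>(
        timeout: Duration,
        cancel: &CancellationToken,
        fut: F,
    ) -> Result<F::Output, WaitError> {
        tokio::select! {
            out = fut => Ok(out),
            _ = tokio::time::sleep(timeout) => Err(WaitError::Timeout),
            _ = cancel.cancelled() => Err(WaitError::Cancelled),
        }
    }
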
    2103            0 : #[derive(Debug, thiserror::Error)]
    2104              : pub(crate) enum DeleteTimelineError {
    2105              :     #[error("Tenant {0}")]
    2106              :     Tenant(#[from] GetTenantError),
    2107              : 
    2108              :     #[error("Timeline {0}")]
    2109              :     Timeline(#[from] crate::tenant::DeleteTimelineError),
    2110              : }
    2111              : 
    2112            0 : #[derive(Debug, thiserror::Error)]
    2113              : pub(crate) enum TenantStateError {
    2114              :     #[error("Tenant {0} is stopping")]
    2115              :     IsStopping(TenantShardId),
    2116              :     #[error(transparent)]
    2117              :     SlotError(#[from] TenantSlotError),
    2118              :     #[error(transparent)]
    2119              :     SlotUpsertError(#[from] TenantSlotUpsertError),
    2120              :     #[error(transparent)]
    2121              :     Other(#[from] anyhow::Error),
    2122              : }
    2123              : 
    2124            0 : pub(crate) async fn load_tenant(
    2125            0 :     conf: &'static PageServerConf,
    2126            0 :     tenant_id: TenantId,
    2127            0 :     generation: Generation,
    2128            0 :     broker_client: storage_broker::BrokerClientChannel,
    2129            0 :     remote_storage: Option<GenericRemoteStorage>,
    2130            0 :     deletion_queue_client: DeletionQueueClient,
    2131            0 :     ctx: &RequestContext,
    2132            0 : ) -> Result<(), TenantMapInsertError> {
    2133            0 :     // This is a legacy API (replaced by `/location_conf`).  It does not support sharding
    2134            0 :     let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2135              : 
    2136            0 :     let slot_guard =
    2137            0 :         tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustNotExist)?;
    2138            0 :     let tenant_path = conf.tenant_path(&tenant_shard_id);
    2139            0 : 
    2140            0 :     let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
    2141            0 :     if tenant_ignore_mark.exists() {
    2142            0 :         std::fs::remove_file(&tenant_ignore_mark).with_context(|| {
    2143            0 :             format!(
    2144            0 :                 "Failed to remove tenant ignore mark {tenant_ignore_mark:?} during tenant loading"
    2145            0 :             )
    2146            0 :         })?;
    2147            0 :     }
    2148              : 
    2149            0 :     let resources = TenantSharedResources {
    2150            0 :         broker_client,
    2151            0 :         remote_storage,
    2152            0 :         deletion_queue_client,
    2153            0 :     };
    2154              : 
    2155            0 :     let mut location_conf =
    2156            0 :         Tenant::load_tenant_config(conf, &tenant_shard_id).map_err(TenantMapInsertError::Other)?;
    2157            0 :     location_conf.attach_in_generation(AttachmentMode::Single, generation);
    2158            0 : 
    2159            0 :     Tenant::persist_tenant_config(conf, &tenant_shard_id, &location_conf).await?;
    2160              : 
    2161            0 :     let shard_identity = location_conf.shard;
    2162            0 :     let new_tenant = tenant_spawn(
    2163            0 :         conf,
    2164            0 :         tenant_shard_id,
    2165            0 :         &tenant_path,
    2166            0 :         resources,
    2167            0 :         AttachedTenantConf::try_from(location_conf)?,
    2168            0 :         shard_identity,
    2169            0 :         None,
    2170            0 :         &TENANTS,
    2171            0 :         SpawnMode::Eager,
    2172            0 :         ctx,
    2173            0 :     )
    2174            0 :     .with_context(|| format!("Failed to schedule tenant processing in path {tenant_path:?}"))?;
    2175              : 
    2176            0 :     slot_guard.upsert(TenantSlot::Attached(new_tenant))?;
    2177            0 :     Ok(())
    2178            0 : }
    2179              : 
    2180            0 : pub(crate) async fn ignore_tenant(
    2181            0 :     conf: &'static PageServerConf,
    2182            0 :     tenant_id: TenantId,
    2183            0 : ) -> Result<(), TenantStateError> {
    2184            0 :     ignore_tenant0(conf, &TENANTS, tenant_id).await
    2185            0 : }
    2186              : 
    2187            0 : #[instrument(skip_all, fields(shard_id))]
    2188              : async fn ignore_tenant0(
    2189              :     conf: &'static PageServerConf,
    2190              :     tenants: &std::sync::RwLock<TenantsMap>,
    2191              :     tenant_id: TenantId,
    2192              : ) -> Result<(), TenantStateError> {
    2193              :     // This is a legacy API (replaced by `/location_conf`).  It does not support sharding
    2194              :     let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2195              :     tracing::Span::current().record(
    2196              :         "shard_id",
    2197              :         tracing::field::display(tenant_shard_id.shard_slug()),
    2198              :     );
    2199              : 
    2200            0 :     remove_tenant_from_memory(tenants, tenant_shard_id, async {
    2201            0 :         let ignore_mark_file = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
    2202            0 :         fs::File::create(&ignore_mark_file)
    2203            0 :             .await
    2204            0 :             .context("Failed to create ignore mark file")
    2205            0 :             .and_then(|_| {
    2206            0 :                 crashsafe::fsync_file_and_parent(&ignore_mark_file)
    2207            0 :                     .context("Failed to fsync ignore mark file")
    2208            0 :             })
    2209            0 :             .with_context(|| format!("Failed to create ignore mark for tenant {tenant_shard_id}"))?;
    2210            0 :         Ok(())
    2211            0 :     })
    2212              :     .await
    2213              : }
    2214              : 
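The ignore-mark write above follows the usual crash-safe creation recipe that `crashsafe::fsync_file_and_parent` is named after: fsync the new file, then fsync its parent directory so the new directory entry itself is durable. A minimal stand-alone sketch for Unix-like systems (hypothetical function name, std only, whereas the code above uses the async tokio equivalents):

    use std::fs::File;
    use std::io;
    use std::path::Path;

    // Create an empty marker file and make both the file and its directory entry durable.
    fn create_marker_durably(path: &Path) -> io::Result<()> {
        let file = File::create(path)?;
        file.sync_all()?; // flush the file's own data and metadata
        if let Some(parent) = path.parent() {
            // On Unix a directory can be opened and fsynced; this persists the new entry.
            File::open(parent)?.sync_all()?;
        }
        Ok(())
    }
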
    2215            0 : #[derive(Debug, thiserror::Error)]
    2216              : pub(crate) enum TenantMapListError {
    2217              :     #[error("tenant map is still initializing")]
    2218              :     Initializing,
    2219              : }
    2220              : 
    2221            0 : #[derive(Debug, thiserror::Error)]
    2222              : pub(crate) enum TenantMapInsertError {
    2223              :     #[error(transparent)]
    2224              :     SlotError(#[from] TenantSlotError),
    2225              :     #[error(transparent)]
    2226              :     SlotUpsertError(#[from] TenantSlotUpsertError),
    2227              :     #[error(transparent)]
    2228              :     Other(#[from] anyhow::Error),
    2229              : }
    2230              : 
    2231              : /// Superset of TenantMapError: issues that can occur when acquiring a slot
    2232              : /// for a particular tenant ID.
    2233            0 : #[derive(Debug, thiserror::Error)]
    2234              : pub(crate) enum TenantSlotError {
    2235              :     /// When acquiring a slot with the expectation that the tenant already exists.
    2236              :     #[error("Tenant {0} not found")]
    2237              :     NotFound(TenantShardId),
    2238              : 
    2239              :     /// When acquiring a slot with the expectation that the tenant does not already exist.
    2240              :     #[error("tenant {0} already exists, state: {1:?}")]
    2241              :     AlreadyExists(TenantShardId, TenantState),
    2242              : 
    2243              :     // Tried to read a slot that is currently being mutated by another administrative
    2244              :     // operation.
    2245              :     #[error("tenant has a state change in progress, try again later")]
    2246              :     InProgress,
    2247              : 
    2248              :     #[error(transparent)]
    2249              :     MapState(#[from] TenantMapError),
    2250              : }
    2251              : 
    2252              : /// Superset of TenantMapError: issues that can occur when using a SlotGuard
    2253              : /// to insert a new value.
    2254            0 : #[derive(thiserror::Error)]
    2255              : pub(crate) enum TenantSlotUpsertError {
    2256              :     /// An error where the slot is in an unexpected state, indicating a code bug
    2257              :     #[error("Internal error updating Tenant")]
    2258              :     InternalError(Cow<'static, str>),
    2259              : 
    2260              :     #[error(transparent)]
    2261              :     MapState(TenantMapError),
    2262              : 
    2263              :     // If we encounter TenantManager shutdown during upsert, we must carry the Completion
    2264              :     // from the SlotGuard, so that the caller can hold it while they clean up: otherwise
    2265              :     // TenantManager shutdown might race ahead before we're done cleaning up any Tenant that
    2266              :     // was protected by the SlotGuard.
    2267              :     #[error("Shutting down")]
    2268              :     ShuttingDown((TenantSlot, utils::completion::Completion)),
    2269              : }
    2270              : 
    2271              : impl std::fmt::Debug for TenantSlotUpsertError {
    2272            0 :     fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
    2273            0 :         match self {
    2274            0 :             Self::InternalError(reason) => write!(f, "Internal Error {reason}"),
    2275            0 :             Self::MapState(map_error) => write!(f, "Tenant map state: {map_error:?}"),
    2276            0 :             Self::ShuttingDown(_completion) => write!(f, "Tenant map shutting down"),
    2277              :         }
    2278            0 :     }
    2279              : }
    2280              : 
    2281            0 : #[derive(Debug, thiserror::Error)]
    2282              : enum TenantSlotDropError {
    2283              :     /// It is only legal to drop a TenantSlot if its contents are fully shut down
    2284              :     #[error("Tenant was not shut down")]
    2285              :     NotShutdown,
    2286              : }
    2287              : 
    2288              : /// Errors that can happen any time we are walking the tenant map to try and acquire
    2289              : /// the TenantSlot for a particular tenant.
    2290            0 : #[derive(Debug, thiserror::Error)]
    2291              : pub enum TenantMapError {
    2292              :     // Tried to read while initializing
    2293              :     #[error("tenant map is still initializing")]
    2294              :     StillInitializing,
    2295              : 
    2296              :     // Tried to read while shutting down
    2297              :     #[error("tenant map is shutting down")]
    2298              :     ShuttingDown,
    2299              : }
    2300              : 
    2301              : /// Guards a particular tenant_id's content in the TenantsMap.  While this
    2302              : /// structure exists, the TenantsMap will contain a [`TenantSlot::InProgress`]
    2303              : /// for this tenant, which acts as a marker for any operations targeting
    2304              : /// this tenant to retry later, or wait for the InProgress state to end.
    2305              : ///
    2306              : /// This structure enforces the important invariant that we do not have overlapping
    2307              : /// tasks that will try to use local storage for the same tenant ID: we enforce that
    2308              : /// the previous contents of a slot have been shut down before the slot can be
    2309              : /// left empty or used for something else.
    2310              : ///
    2311              : /// Holders of a SlotGuard should explicitly dispose of it, using either `upsert`
    2312              : /// to provide a new value, or `revert` to put the slot back into its initial
    2313              : /// state.  If the SlotGuard is dropped without calling either of these, then
    2314              : /// we will leave the slot empty if our `old_value` is already shut down, else
    2315              : /// we will replace the slot with `old_value` (equivalent to doing a revert).
    2316              : ///
    2317              : /// The `old_value` may be dropped before the SlotGuard is dropped, by calling
    2318              : /// `drop_old_value`.  It is an error to call this without shutting down
    2319              : /// the contents of `old_value`.
    2320              : pub struct SlotGuard {
    2321              :     tenant_shard_id: TenantShardId,
    2322              :     old_value: Option<TenantSlot>,
    2323              :     upserted: bool,
    2324              : 
    2325              :     /// [`TenantSlot::InProgress`] carries the corresponding Barrier: it will
    2326              :     /// release any waiters as soon as this SlotGuard is dropped.
    2327              :     completion: utils::completion::Completion,
    2328              : }
    2329              : 
    2330              : impl SlotGuard {
    2331            2 :     fn new(
    2332            2 :         tenant_shard_id: TenantShardId,
    2333            2 :         old_value: Option<TenantSlot>,
    2334            2 :         completion: utils::completion::Completion,
    2335            2 :     ) -> Self {
    2336            2 :         Self {
    2337            2 :             tenant_shard_id,
    2338            2 :             old_value,
    2339            2 :             upserted: false,
    2340            2 :             completion,
    2341            2 :         }
    2342            2 :     }
    2343              : 
    2344              :     /// Get any value that was present in the slot before we acquired ownership
    2345              :     /// of it: in state transitions, this will be the old state.
    2346            2 :     fn get_old_value(&self) -> &Option<TenantSlot> {
    2347            2 :         &self.old_value
    2348            2 :     }
    2349              : 
    2350              :     /// Emplace a new value in the slot.  This consumes the guard, and after
    2351              :     /// returning, the slot is no longer protected from concurrent changes.
    2352            0 :     fn upsert(mut self, new_value: TenantSlot) -> Result<(), TenantSlotUpsertError> {
    2353            0 :         if !self.old_value_is_shutdown() {
    2354              :             // This is a bug: callers should never try to drop an old value without
    2355              :             // shutting it down
    2356            0 :             return Err(TenantSlotUpsertError::InternalError(
    2357            0 :                 "Old TenantSlot value not shut down".into(),
    2358            0 :             ));
    2359            0 :         }
    2360              : 
    2361            0 :         let replaced = {
    2362            0 :             let mut locked = TENANTS.write().unwrap();
    2363            0 : 
    2364            0 :             if let TenantSlot::InProgress(_) = new_value {
    2365              :                 // It is never expected to try and upsert InProgress via this path: it should
    2366              :                 // only be written via the tenant_map_acquire_slot path.  If we hit this it's a bug.
    2367            0 :                 return Err(TenantSlotUpsertError::InternalError(
    2368            0 :                     "Attempt to upsert an InProgress state".into(),
    2369            0 :                 ));
    2370            0 :             }
    2371              : 
    2372            0 :             let m = match &mut *locked {
    2373              :                 TenantsMap::Initializing => {
    2374            0 :                     return Err(TenantSlotUpsertError::MapState(
    2375            0 :                         TenantMapError::StillInitializing,
    2376            0 :                     ))
    2377              :                 }
    2378              :                 TenantsMap::ShuttingDown(_) => {
    2379            0 :                     return Err(TenantSlotUpsertError::ShuttingDown((
    2380            0 :                         new_value,
    2381            0 :                         self.completion.clone(),
    2382            0 :                     )));
    2383              :                 }
    2384            0 :                 TenantsMap::Open(m) => m,
    2385            0 :             };
    2386            0 : 
    2387            0 :             let replaced = m.insert(self.tenant_shard_id, new_value);
    2388            0 :             self.upserted = true;
    2389            0 : 
    2390            0 :             METRICS.tenant_slots.set(m.len() as u64);
    2391            0 : 
    2392            0 :             replaced
    2393              :         };
    2394              : 
    2395              :         // Sanity check: on an upsert we should always be replacing an InProgress marker
    2396            0 :         match replaced {
    2397              :             Some(TenantSlot::InProgress(_)) => {
    2398              :                 // Expected case: we find our InProgress in the map: nothing should have
    2399              :                 // replaced it because the code that acquires slots will not grant another
    2400              :                 // one for the same TenantId.
    2401            0 :                 Ok(())
    2402              :             }
    2403              :             None => {
    2404            0 :                 METRICS.unexpected_errors.inc();
    2405            0 :                 error!(
    2406            0 :                     tenant_shard_id = %self.tenant_shard_id,
    2407            0 :                     "Missing InProgress marker during tenant upsert, this is a bug."
    2408            0 :                 );
    2409            0 :                 Err(TenantSlotUpsertError::InternalError(
    2410            0 :                     "Missing InProgress marker during tenant upsert".into(),
    2411            0 :                 ))
    2412              :             }
    2413            0 :             Some(slot) => {
    2414            0 :                 METRICS.unexpected_errors.inc();
    2415            0 :                 error!(tenant_shard_id=%self.tenant_shard_id, "Unexpected contents of TenantSlot during upsert, this is a bug.  Contents: {:?}", slot);
    2416            0 :                 Err(TenantSlotUpsertError::InternalError(
    2417            0 :                     "Unexpected contents of TenantSlot".into(),
    2418            0 :                 ))
    2419              :             }
    2420              :         }
    2421            0 :     }
    2422              : 
    2423              :     /// Replace the InProgress slot with whatever was in the guard when we started
    2424            0 :     fn revert(mut self) {
    2425            0 :         if let Some(value) = self.old_value.take() {
    2426            0 :             match self.upsert(value) {
    2427            0 :                 Err(TenantSlotUpsertError::InternalError(_)) => {
    2428            0 :                     // We already logged the error, nothing else we can do.
    2429            0 :                 }
    2430              :                 Err(
    2431              :                     TenantSlotUpsertError::MapState(_) | TenantSlotUpsertError::ShuttingDown(_),
    2432            0 :                 ) => {
    2433            0 :                     // If the map is shutting down, we need not replace anything
    2434            0 :                 }
    2435            0 :                 Ok(()) => {}
    2436              :             }
    2437            0 :         }
    2438            0 :     }
    2439              : 
    2440              :     /// We may never drop our old value until it is cleanly shut down: otherwise we might leave
    2441              :     /// rogue background tasks that would write to the local tenant directory that this guard
    2442              :     /// is responsible for protecting
    2443            2 :     fn old_value_is_shutdown(&self) -> bool {
    2444            2 :         match self.old_value.as_ref() {
    2445            2 :             Some(TenantSlot::Attached(tenant)) => tenant.gate.close_complete(),
    2446            0 :             Some(TenantSlot::Secondary(secondary_tenant)) => secondary_tenant.gate.close_complete(),
    2447              :             Some(TenantSlot::InProgress(_)) => {
    2448              :                 // A SlotGuard cannot be constructed for a slot that was already InProgress
    2449            0 :                 unreachable!()
    2450              :             }
    2451            0 :             None => true,
    2452              :         }
    2453            2 :     }
    2454              : 
    2455              :     /// The guard holder is done with the old value of the slot: they are obliged to already
    2456              :     /// shut it down before we reach this point.
    2457            2 :     fn drop_old_value(&mut self) -> Result<(), TenantSlotDropError> {
    2458            2 :         if !self.old_value_is_shutdown() {
    2459            0 :             Err(TenantSlotDropError::NotShutdown)
    2460              :         } else {
    2461            2 :             self.old_value.take();
    2462            2 :             Ok(())
    2463              :         }
    2464            2 :     }
    2465              : }
    2466              : 
    2467              : impl Drop for SlotGuard {
    2468            2 :     fn drop(&mut self) {
    2469            2 :         if self.upserted {
    2470            0 :             return;
    2471            2 :         }
    2472            2 :         // Our old value is already shutdown, or it never existed: it is safe
    2473            2 :         // for us to fully release the TenantSlot back into an empty state
    2474            2 : 
    2475            2 :         let mut locked = TENANTS.write().unwrap();
    2476              : 
    2477            2 :         let m = match &mut *locked {
    2478              :             TenantsMap::Initializing => {
    2479              :                 // There is no map, this should never happen.
    2480            2 :                 return;
    2481              :             }
    2482              :             TenantsMap::ShuttingDown(_) => {
    2483              :                 // When we transition to shutdown, InProgress elements are removed
    2484              :                 // from the map, so we do not need to clean up our InProgress marker.
    2485              :                 // See [`shutdown_all_tenants0`]
    2486            0 :                 return;
    2487              :             }
    2488            0 :             TenantsMap::Open(m) => m,
    2489            0 :         };
    2490            0 : 
    2491            0 :         use std::collections::btree_map::Entry;
    2492            0 :         match m.entry(self.tenant_shard_id) {
    2493            0 :             Entry::Occupied(mut entry) => {
    2494            0 :                 if !matches!(entry.get(), TenantSlot::InProgress(_)) {
    2495            0 :                     METRICS.unexpected_errors.inc();
    2496            0 :                     error!(tenant_shard_id=%self.tenant_shard_id, "Unexpected contents of TenantSlot during drop, this is a bug.  Contents: {:?}", entry.get());
    2497            0 :                 }
    2498              : 
    2499            0 :                 if self.old_value_is_shutdown() {
    2500            0 :                     entry.remove();
    2501            0 :                 } else {
    2502            0 :                     entry.insert(self.old_value.take().unwrap());
    2503            0 :                 }
    2504              :             }
    2505              :             Entry::Vacant(_) => {
    2506            0 :                 METRICS.unexpected_errors.inc();
    2507            0 :                 error!(
    2508            0 :                     tenant_shard_id = %self.tenant_shard_id,
    2509            0 :                     "Missing InProgress marker during SlotGuard drop, this is a bug."
    2510            0 :                 );
    2511              :             }
    2512              :         }
    2513              : 
    2514            0 :         METRICS.tenant_slots.set(m.len() as u64);
    2515            2 :     }
    2516              : }
    2517              : 
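Stripped of the tenant-specific details, `SlotGuard` is an instance of a general "in-progress marker" pattern: replace the map entry with a placeholder while one task mutates it, install the new value via an explicit upsert, and on drop either restore the old value or leave the slot empty. A minimal sketch of just that skeleton (hypothetical types; it omits the completion barrier that lets waiters block until the InProgress state ends, and the shutdown checks above):

    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

    enum Slot<T> {
        Ready(T),
        InProgress,
    }

    struct Guard<T> {
        map: Arc<Mutex<HashMap<String, Slot<T>>>>,
        key: String,
        old: Option<T>,
        upserted: bool,
    }

    impl<T> Guard<T> {
        // Replace whatever is in the slot with an InProgress marker; refuse if
        // another guard is already mutating this key.
        fn acquire(map: &Arc<Mutex<HashMap<String, Slot<T>>>>, key: &str) -> Option<Guard<T>> {
            let mut m = map.lock().unwrap();
            if matches!(m.get(key), Some(Slot::InProgress)) {
                return None;
            }
            let old = match m.insert(key.to_string(), Slot::InProgress) {
                Some(Slot::Ready(v)) => Some(v),
                _ => None,
            };
            Some(Guard {
                map: Arc::clone(map),
                key: key.to_string(),
                old,
                upserted: false,
            })
        }

        // Install a new value; after this the slot is no longer protected.
        fn upsert(mut self, new: T) {
            self.map.lock().unwrap().insert(self.key.clone(), Slot::Ready(new));
            self.upserted = true;
        }
    }

    impl<T> Drop for Guard<T> {
        fn drop(&mut self) {
            if self.upserted {
                return; // a new value was installed, nothing to undo
            }
            let mut m = self.map.lock().unwrap();
            match self.old.take() {
                Some(v) => {
                    m.insert(self.key.clone(), Slot::Ready(v)); // revert to the old value
                }
                None => {
                    m.remove(&self.key); // leave the slot empty
                }
            }
        }
    }
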
    2518              : enum TenantSlotPeekMode {
    2519              :     /// In Read mode, peek will be permitted to see the slots even if the pageserver is shutting down
    2520              :     Read,
    2521              :     /// In Write mode, trying to peek at a slot while the pageserver is shutting down is an error
    2522              :     Write,
    2523              : }
    2524              : 
    2525            0 : fn tenant_map_peek_slot<'a>(
    2526            0 :     tenants: &'a std::sync::RwLockReadGuard<'a, TenantsMap>,
    2527            0 :     tenant_shard_id: &TenantShardId,
    2528            0 :     mode: TenantSlotPeekMode,
    2529            0 : ) -> Result<Option<&'a TenantSlot>, TenantMapError> {
    2530            0 :     match tenants.deref() {
    2531            0 :         TenantsMap::Initializing => Err(TenantMapError::StillInitializing),
    2532            0 :         TenantsMap::ShuttingDown(m) => match mode {
    2533              :             TenantSlotPeekMode::Read => Ok(Some(
    2534              :                 // When reading in ShuttingDown state, we must translate None results
    2535              :                 // into a ShuttingDown error, because absence of a tenant shard ID in the map
    2536              :                 // isn't a reliable indicator of the tenant being gone: it might have been
    2537              :                 // InProgress when shutdown started, and cleaned up from that state such
    2538              :                 // that it's now no longer in the map.  Callers will have to wait until
    2539              :                 // we next start up to get a proper answer.  This avoids incorrect 404 API responses.
    2540            0 :                 m.get(tenant_shard_id).ok_or(TenantMapError::ShuttingDown)?,
    2541              :             )),
    2542            0 :             TenantSlotPeekMode::Write => Err(TenantMapError::ShuttingDown),
    2543              :         },
    2544            0 :         TenantsMap::Open(m) => Ok(m.get(tenant_shard_id)),
    2545              :     }
    2546            0 : }
    2547              : 
    2548              : enum TenantSlotAcquireMode {
    2549              :     /// Acquire the slot irrespective of current state, or whether it already exists
    2550              :     Any,
    2551              :     /// Return an error if trying to acquire a slot and it doesn't already exist
    2552              :     MustExist,
    2553              :     /// Return an error if trying to acquire a slot and it already exists
    2554              :     MustNotExist,
    2555              : }
    2556              : 
    2557            0 : fn tenant_map_acquire_slot(
    2558            0 :     tenant_shard_id: &TenantShardId,
    2559            0 :     mode: TenantSlotAcquireMode,
    2560            0 : ) -> Result<SlotGuard, TenantSlotError> {
    2561            0 :     tenant_map_acquire_slot_impl(tenant_shard_id, &TENANTS, mode)
    2562            0 : }
    2563              : 
    2564            2 : fn tenant_map_acquire_slot_impl(
    2565            2 :     tenant_shard_id: &TenantShardId,
    2566            2 :     tenants: &std::sync::RwLock<TenantsMap>,
    2567            2 :     mode: TenantSlotAcquireMode,
    2568            2 : ) -> Result<SlotGuard, TenantSlotError> {
    2569            2 :     use TenantSlotAcquireMode::*;
    2570            2 :     METRICS.tenant_slot_writes.inc();
    2571            2 : 
    2572            2 :     let mut locked = tenants.write().unwrap();
    2573            2 :     let span = tracing::info_span!("acquire_slot", tenant_id=%tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug());
    2574            2 :     let _guard = span.enter();
    2575              : 
    2576            2 :     let m = match &mut *locked {
    2577            0 :         TenantsMap::Initializing => return Err(TenantMapError::StillInitializing.into()),
    2578            0 :         TenantsMap::ShuttingDown(_) => return Err(TenantMapError::ShuttingDown.into()),
    2579            2 :         TenantsMap::Open(m) => m,
    2580            2 :     };
    2581            2 : 
    2582            2 :     use std::collections::btree_map::Entry;
    2583            2 : 
    2584            2 :     let entry = m.entry(*tenant_shard_id);
    2585            2 : 
    2586            2 :     match entry {
    2587            0 :         Entry::Vacant(v) => match mode {
    2588              :             MustExist => {
    2589            0 :                 tracing::debug!("Vacant && MustExist: return NotFound");
    2590            0 :                 Err(TenantSlotError::NotFound(*tenant_shard_id))
    2591              :             }
    2592              :             _ => {
    2593            0 :                 let (completion, barrier) = utils::completion::channel();
    2594            0 :                 v.insert(TenantSlot::InProgress(barrier));
    2595            0 :                 tracing::debug!("Vacant, inserted InProgress");
    2596            0 :                 Ok(SlotGuard::new(*tenant_shard_id, None, completion))
    2597              :             }
    2598              :         },
    2599            2 :         Entry::Occupied(mut o) => {
    2600            2 :             // Apply mode-driven checks
    2601            2 :             match (o.get(), mode) {
    2602              :                 (TenantSlot::InProgress(_), _) => {
    2603            0 :                     tracing::debug!("Occupied, failing for InProgress");
    2604            0 :                     Err(TenantSlotError::InProgress)
    2605              :                 }
    2606            0 :                 (slot, MustNotExist) => match slot {
    2607            0 :                     TenantSlot::Attached(tenant) => {
    2608            0 :                         tracing::debug!("Attached && MustNotExist, return AlreadyExists");
    2609            0 :                         Err(TenantSlotError::AlreadyExists(
    2610            0 :                             *tenant_shard_id,
    2611            0 :                             tenant.current_state(),
    2612            0 :                         ))
    2613              :                     }
    2614              :                     _ => {
    2615              :                         // FIXME: the AlreadyExists error assumes that we have a Tenant
    2616              :                         // to get the state from
    2617            0 :                         tracing::debug!("Occupied & MustNotExist, return AlreadyExists");
    2618            0 :                         Err(TenantSlotError::AlreadyExists(
    2619            0 :                             *tenant_shard_id,
    2620            0 :                             TenantState::Broken {
    2621            0 :                                 reason: "Present but not attached".to_string(),
    2622            0 :                                 backtrace: "".to_string(),
    2623            0 :                             },
    2624            0 :                         ))
    2625              :                     }
    2626              :                 },
    2627              :                 _ => {
    2628              :                     // Happy case: the slot was not in any state that violated our mode
    2629            2 :                     let (completion, barrier) = utils::completion::channel();
    2630            2 :                     let old_value = o.insert(TenantSlot::InProgress(barrier));
    2631            2 :                     tracing::debug!("Occupied, replaced with InProgress");
    2632            2 :                     Ok(SlotGuard::new(
    2633            2 :                         *tenant_shard_id,
    2634            2 :                         Some(old_value),
    2635            2 :                         completion,
    2636            2 :                     ))
    2637              :                 }
    2638              :             }
    2639              :         }
    2640              :     }
    2641            2 : }
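                      : 
                      : // A minimal usage sketch of the acquisition modes (the caller `acquire_for_creation`
                      : // is hypothetical, not an existing helper in this module): a creation path acquires
                      : // with `MustNotExist`, so a concurrent request for the same shard fails fast with
                      : // `TenantSlotError::AlreadyExists` instead of racing, while detach/delete paths
                      : // (e.g. `remove_tenant_from_memory` below) use `MustExist`. While the returned
                      : // `SlotGuard` is held, the slot reads as `TenantSlot::InProgress` and other
                      : // callers get `TenantSlotError::InProgress`.
                      : fn acquire_for_creation(
                      :     tenant_shard_id: &TenantShardId,
                      : ) -> Result<SlotGuard, TenantSlotError> {
                      :     tenant_map_acquire_slot(tenant_shard_id, TenantSlotAcquireMode::MustNotExist)
                      : }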
    2642              : 
    2643              : /// Stops and removes the tenant from memory, if it's not already [`TenantState::Stopping`]; bails otherwise.
    2644              : /// Allows removing other tenant resources manually via the `tenant_cleanup` future.
    2645              : /// If the cleanup fails, the tenant stays in memory in the [`TenantState::Broken`] state, and another removal
    2646              : /// operation is needed to remove it. A usage sketch follows the function body below.
    2647            2 : async fn remove_tenant_from_memory<V, F>(
    2648            2 :     tenants: &std::sync::RwLock<TenantsMap>,
    2649            2 :     tenant_shard_id: TenantShardId,
    2650            2 :     tenant_cleanup: F,
    2651            2 : ) -> Result<V, TenantStateError>
    2652            2 : where
    2653            2 :     F: std::future::Future<Output = anyhow::Result<V>>,
    2654            2 : {
    2655            2 :     let mut slot_guard =
    2656            2 :         tenant_map_acquire_slot_impl(&tenant_shard_id, tenants, TenantSlotAcquireMode::MustExist)?;
    2657              : 
    2658              :     // allow pageserver shutdown to await our completion
    2659            2 :     let (_guard, progress) = completion::channel();
    2660              : 
    2661              :     // The SlotGuard allows us to manipulate the Tenant object without fear of some
    2662              :     // concurrent API request doing something else for the same tenant ID.
    2663            2 :     let attached_tenant = match slot_guard.get_old_value() {
    2664            2 :         Some(TenantSlot::Attached(tenant)) => {
    2665            2 :             // whenever we remove a tenant from memory, we don't want to flush and wait for upload
    2666            2 :             let shutdown_mode = ShutdownMode::Hard;
    2667            2 : 
    2668            2 :             // shutdown is sure to transition the tenant to Stopping and wait for all tasks to complete, so
    2669            2 :             // that we can safely continue to cleanup.
    2670            2 :             match tenant.shutdown(progress, shutdown_mode).await {
    2671            2 :                 Ok(()) => {}
    2672            0 :                 Err(_other) => {
    2673            0 :                     // if pageserver shutdown or other detach/ignore is already ongoing, we don't want to
    2674            0 :                     // wait for it but return an error right away because these are distinct requests.
    2675            0 :                     slot_guard.revert();
    2676            0 :                     return Err(TenantStateError::IsStopping(tenant_shard_id));
    2677              :                 }
    2678              :             }
    2679            2 :             Some(tenant)
    2680              :         }
    2681            0 :         Some(TenantSlot::Secondary(secondary_state)) => {
    2682            0 :             tracing::info!("Shutting down in secondary mode");
    2683            0 :             secondary_state.shutdown().await;
    2684            0 :             None
    2685              :         }
    2686              :         Some(TenantSlot::InProgress(_)) => {
    2687              :             // Acquiring a slot guarantees its old value was not InProgress
    2688            0 :             unreachable!();
    2689              :         }
    2690            0 :         None => None,
    2691              :     };
    2692              : 
    2693            2 :     match tenant_cleanup
    2694            2 :         .await
    2695            2 :         .with_context(|| format!("Failed to run cleanup for tenant {tenant_shard_id}"))
    2696              :     {
    2697            2 :         Ok(hook_value) => {
    2698            2 :             // Success: drop the old TenantSlot::Attached.
    2699            2 :             slot_guard
    2700            2 :                 .drop_old_value()
    2701            2 :                 .expect("We just called shutdown");
    2702            2 : 
    2703            2 :             Ok(hook_value)
    2704              :         }
    2705            0 :         Err(e) => {
    2706              :             // If we had a Tenant, set it to Broken and put it back in the TenantsMap
    2707            0 :             if let Some(attached_tenant) = attached_tenant {
    2708            0 :                 attached_tenant.set_broken(e.to_string()).await;
    2709            0 :             }
    2710              :             // Leave the broken tenant in the map
    2711            0 :             slot_guard.revert();
    2712            0 : 
    2713            0 :             Err(TenantStateError::Other(e))
    2714              :         }
    2715              :     }
    2716            2 : }
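                      : 
                      : // A hedged sketch of the cleanup-hook pattern described above. The names
                      : // `detach_and_remove_local_files` and `remove_local_tenant_dir` are hypothetical
                      : // stand-ins, not existing helpers: the point is that the `tenant_cleanup` future
                      : // only runs after the tenant has been shut down and its slot replaced with
                      : // `TenantSlot::InProgress`, so it can safely touch per-tenant on-disk state.
                      : async fn detach_and_remove_local_files(
                      :     tenant_shard_id: TenantShardId,
                      : ) -> Result<(), TenantStateError> {
                      :     remove_tenant_from_memory(&TENANTS, tenant_shard_id, async move {
                      :         // hypothetical on-disk cleanup; an error here puts the tenant into
                      :         // `TenantState::Broken` and leaves it in the map
                      :         remove_local_tenant_dir(tenant_shard_id).await?;
                      :         anyhow::Ok(())
                      :     })
                      :     .await
                      : }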
    2717              : 
    2718              : use {
    2719              :     crate::repository::GcResult, pageserver_api::models::TimelineGcRequest,
    2720              :     utils::http::error::ApiError,
    2721              : };
    2722              : 
    2723            0 : pub(crate) fn immediate_gc(
    2724            0 :     tenant_shard_id: TenantShardId,
    2725            0 :     timeline_id: TimelineId,
    2726            0 :     gc_req: TimelineGcRequest,
    2727            0 :     cancel: CancellationToken,
    2728            0 :     ctx: &RequestContext,
    2729            0 : ) -> Result<tokio::sync::oneshot::Receiver<Result<GcResult, anyhow::Error>>, ApiError> {
    2730            0 :     let guard = TENANTS.read().unwrap();
    2731              : 
    2732            0 :     let tenant = guard
    2733            0 :         .get(&tenant_shard_id)
    2734            0 :         .cloned()
    2735            0 :         .with_context(|| format!("tenant {tenant_shard_id}"))
    2736            0 :         .map_err(|e| ApiError::NotFound(e.into()))?;
    2737              : 
    2738            0 :     let gc_horizon = gc_req.gc_horizon.unwrap_or_else(|| tenant.get_gc_horizon());
    2739            0 :     // Use tenant's pitr setting
    2740            0 :     let pitr = tenant.get_pitr_interval();
    2741            0 : 
    2742            0 :     // Run in task_mgr to avoid race with tenant_detach operation
    2743            0 :     let ctx = ctx.detached_child(TaskKind::GarbageCollector, DownloadBehavior::Download);
    2744            0 :     let (task_done, wait_task_done) = tokio::sync::oneshot::channel();
    2745            0 :     let span = info_span!("manual_gc", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id);
    2746              : 
    2747              :     // TODO: spawning is redundant now, need to hold the gate
    2748            0 :     task_mgr::spawn(
    2749            0 :         &tokio::runtime::Handle::current(),
    2750            0 :         TaskKind::GarbageCollector,
    2751            0 :         Some(tenant_shard_id),
    2752            0 :         Some(timeline_id),
    2753            0 :         &format!("timeline_gc_handler garbage collection run for tenant {tenant_shard_id} timeline {timeline_id}"),
    2754            0 :         false,
    2755            0 :         async move {
    2756            0 :             fail::fail_point!("immediate_gc_task_pre");
    2757              : 
    2758              :             #[allow(unused_mut)]
    2759            0 :             let mut result = tenant
    2760            0 :                 .gc_iteration(Some(timeline_id), gc_horizon, pitr, &cancel, &ctx)
    2761            0 :                 .await;
    2762              :                 // FIXME: `gc_iteration` can return an error for multiple reasons; we should handle it
    2763              :                 // better once the types support it.
    2764              : 
    2765              :             #[cfg(feature = "testing")]
    2766              :             {
    2767              :                 // we need to synchronize with layer drop completion so that python tests do not have to
    2768              :                 // poll for log messages
    2769            0 :                 if let Ok(result) = result.as_mut() {
    2770            0 :                     let mut js = tokio::task::JoinSet::new();
    2771            0 :                     for layer in std::mem::take(&mut result.doomed_layers) {
    2772            0 :                         js.spawn(layer.wait_drop());
    2773            0 :                     }
    2774            0 :                     tracing::info!(total = js.len(), "starting to wait for the gc'd layers to be dropped");
    2775            0 :                     while let Some(res) = js.join_next().await {
    2776            0 :                         res.expect("wait_drop should not panic");
    2777            0 :                     }
    2778            0 :                 }
    2779              : 
    2780            0 :                 let timeline = tenant.get_timeline(timeline_id, false).ok();
    2781            0 :                 let rtc = timeline.as_ref().and_then(|x| x.remote_client.as_ref());
    2782              : 
    2783            0 :                 if let Some(rtc) = rtc {
    2784              :                     // layer drops schedule actions on the remote timeline client to actually do the
    2785              :                     // deletions; we don't care about the shutdown error, just exit fast
    2786            0 :                     drop(rtc.wait_completion().await);
    2787            0 :                 }
    2788              :             }
    2789              : 
    2790            0 :             match task_done.send(result) {
    2791            0 :                 Ok(_) => (),
    2792            0 :                 Err(result) => error!("failed to send gc result: {result:?}"),
    2793              :             }
    2794            0 :             Ok(())
    2795            0 :         }
    2796            0 :         .instrument(span)
    2797            0 :     );
    2798            0 : 
    2799            0 :     // only drop the guard after we've spawned the task, so that timeline shutdown will wait for the task
    2800            0 :     drop(guard);
    2801            0 : 
    2802            0 :     Ok(wait_task_done)
    2803            0 : }
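                      : 
                      : // A hedged sketch of how a caller might consume the receiver returned above
                      : // (`handle_gc_request` is hypothetical and the error mapping is illustrative only):
                      : // `immediate_gc` merely schedules the GC task, so the caller still has to await
                      : // the oneshot channel to obtain the `GcResult`.
                      : async fn handle_gc_request(
                      :     tenant_shard_id: TenantShardId,
                      :     timeline_id: TimelineId,
                      :     gc_req: TimelineGcRequest,
                      :     cancel: CancellationToken,
                      :     ctx: &RequestContext,
                      : ) -> Result<GcResult, ApiError> {
                      :     let wait_task_done = immediate_gc(tenant_shard_id, timeline_id, gc_req, cancel, ctx)?;
                      :     let gc_result = wait_task_done
                      :         .await
                      :         .map_err(|e| ApiError::InternalServerError(anyhow::Error::new(e)))?
                      :         .map_err(ApiError::InternalServerError)?;
                      :     Ok(gc_result)
                      : }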
    2804              : 
    2805              : #[cfg(test)]
    2806              : mod tests {
    2807              :     use std::collections::BTreeMap;
    2808              :     use std::sync::Arc;
    2809              :     use tracing::Instrument;
    2810              : 
    2811              :     use crate::tenant::mgr::TenantSlot;
    2812              : 
    2813              :     use super::{super::harness::TenantHarness, TenantsMap};
    2814              : 
    2815              :     #[tokio::test(start_paused = true)]
    2816            2 :     async fn shutdown_awaits_in_progress_tenant() {
    2817            2 :         // Test that if an InProgress tenant is in the map during shutdown, the shutdown will gracefully
    2818            2 :         // wait for it to complete before proceeding.
    2819            2 : 
    2820            2 :         let h = TenantHarness::create("shutdown_awaits_in_progress_tenant").unwrap();
    2821            2 :         let (t, _ctx) = h.load().await;
    2822            2 : 
    2823            2 :         // the harness loads the tenant to Active; activation is forced and nothing is running on the tenant
    2824            2 : 
    2825            2 :         let id = t.tenant_shard_id();
    2826            2 : 
    2827            2 :         // tenant harness configures the logging and we cannot escape it
    2828            2 :         let span = h.span();
    2829            2 :         let _e = span.enter();
    2830            2 : 
    2831            2 :         let tenants = BTreeMap::from([(id, TenantSlot::Attached(t.clone()))]);
    2832            2 :         let tenants = Arc::new(std::sync::RwLock::new(TenantsMap::Open(tenants)));
    2833            2 : 
    2834            2 :         // Invoke remove_tenant_from_memory with a cleanup hook that blocks until we manually
    2835            2 :         // permit it to proceed: that will stick the tenant in InProgress
    2836            2 : 
    2837            2 :         let (until_cleanup_completed, can_complete_cleanup) = utils::completion::channel();
    2838            2 :         let (until_cleanup_started, cleanup_started) = utils::completion::channel();
    2839            2 :         let mut remove_tenant_from_memory_task = {
    2840            2 :             let jh = tokio::spawn({
    2841            2 :                 let tenants = tenants.clone();
    2842            2 :                 async move {
    2843            2 :                     let cleanup = async move {
    2844            2 :                         drop(until_cleanup_started);
    2845            2 :                         can_complete_cleanup.wait().await;
    2846            2 :                         anyhow::Ok(())
    2847            2 :                     };
    2848            2 :                     super::remove_tenant_from_memory(&tenants, id, cleanup).await
    2849            2 :                 }
    2850            2 :                 .instrument(h.span())
    2851            2 :             });
    2852            2 : 
    2853            2 :             // now the long cleanup should be in place, with the stopping state
    2854            2 :             cleanup_started.wait().await;
    2855            2 :             jh
    2856            2 :         };
    2857            2 : 
    2858            2 :         let mut shutdown_task = {
    2859            2 :             let (until_shutdown_started, shutdown_started) = utils::completion::channel();
    2860            2 : 
    2861            2 :             let shutdown_task = tokio::spawn(async move {
    2862            2 :                 drop(until_shutdown_started);
    2863            4 :                 super::shutdown_all_tenants0(&tenants).await;
    2864            2 :             });
    2865            2 : 
    2866            2 :             shutdown_started.wait().await;
    2867            2 :             shutdown_task
    2868            2 :         };
    2869            2 : 
    2870            2 :         let long_time = std::time::Duration::from_secs(15);
    2871            4 :         tokio::select! {
    2872            4 :             _ = &mut shutdown_task => unreachable!("shutdown should block on remove_tenant_from_memory completing"),
    2873            4 :             _ = &mut remove_tenant_from_memory_task => unreachable!("remove_tenant_from_memory_task should not complete until explicitly unblocked"),
    2874            4 :             _ = tokio::time::sleep(long_time) => {},
    2875            4 :         }
    2876            2 : 
    2877            2 :         drop(until_cleanup_completed);
    2878            2 : 
    2879            2 :         // Now that we allow it to proceed, shutdown should complete immediately
    2880            2 :         remove_tenant_from_memory_task.await.unwrap().unwrap();
    2881            2 :         shutdown_task.await.unwrap();
    2882            2 :     }
    2883              : }
        

Generated by: LCOV version 2.1-beta