LCOV - code coverage report
Current view: top level - storage_controller/src - service.rs (source / functions)
Test: a1cc1f33dc9899e4da66eb51e44e911a4b3bd648.info
Test Date: 2025-07-31 11:35:14
Coverage: Lines: 4.4 % (258 of 5867 hit) | Functions: 0.3 % (2 of 589 hit)

            Line data    Source code
       1              : pub mod chaos_injector;
       2              : pub mod feature_flag;
       3              : pub(crate) mod safekeeper_reconciler;
       4              : mod safekeeper_service;
       5              : mod tenant_shard_iterator;
       6              : 
       7              : use crate::hadron_token::HadronTokenGenerator;
       8              : use std::borrow::Cow;
       9              : use std::cmp::Ordering;
      10              : use std::collections::{BTreeMap, HashMap, HashSet};
      11              : use std::error::Error;
      12              : use std::num::NonZeroU32;
      13              : use std::ops::{Deref, DerefMut};
      14              : use std::path::PathBuf;
      15              : use std::str::FromStr;
      16              : use std::sync::{Arc, OnceLock};
      17              : use std::time::{Duration, Instant, SystemTime};
      18              : 
      19              : use anyhow::Context;
      20              : use control_plane::storage_controller::{
      21              :     AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
      22              : };
      23              : use diesel::result::DatabaseErrorKind;
      24              : use futures::StreamExt;
      25              : use futures::stream::FuturesUnordered;
      26              : use http_utils::error::ApiError;
      27              : use hyper::Uri;
      28              : use itertools::Itertools;
      29              : use pageserver_api::config::PostHogConfig;
      30              : use pageserver_api::controller_api::{
      31              :     AvailabilityZone, MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability,
      32              :     NodeRegisterRequest, NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy,
      33              :     ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse,
      34              :     SkSchedulingPolicy, TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard,
      35              :     TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
      36              :     TenantShardMigrateRequest, TenantShardMigrateResponse, TenantTimelineDescribeResponse,
      37              : };
      38              : use pageserver_api::models::{
      39              :     self, DetachBehavior, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
      40              :     PageserverUtilization, SecondaryProgress, ShardImportStatus, ShardParameters, TenantConfig,
      41              :     TenantConfigPatchRequest, TenantConfigRequest, TenantLocationConfigRequest,
      42              :     TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
      43              :     TenantShardSplitResponse, TenantSorting, TenantTimeTravelRequest,
      44              :     TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateResponseStorcon,
      45              :     TimelineInfo, TopTenantShardItem, TopTenantShardsRequest,
      46              : };
      47              : use pageserver_api::shard::{
      48              :     DEFAULT_STRIPE_SIZE, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      49              : };
      50              : use pageserver_api::upcall_api::{
      51              :     PutTimelineImportStatusRequest, ReAttachRequest, ReAttachResponse, ReAttachResponseTenant,
      52              :     TimelineImportStatusRequest, ValidateRequest, ValidateResponse, ValidateResponseTenant,
      53              : };
      54              : use pageserver_client::{BlockUnblock, mgmt_api};
      55              : use reqwest::{Certificate, StatusCode};
      56              : use safekeeper_api::models::SafekeeperUtilization;
      57              : use safekeeper_reconciler::SafekeeperReconcilers;
      58              : use tenant_shard_iterator::{TenantShardExclusiveIterator, create_shared_shard_iterator};
      59              : use tokio::sync::TryAcquireError;
      60              : use tokio::sync::mpsc::error::TrySendError;
      61              : use tokio_util::sync::CancellationToken;
      62              : use tracing::{Instrument, debug, error, info, info_span, instrument, warn};
      63              : use utils::completion::Barrier;
      64              : use utils::env;
      65              : use utils::generation::Generation;
      66              : use utils::id::{NodeId, TenantId, TimelineId};
      67              : use utils::lsn::Lsn;
      68              : use utils::shard::ShardIndex;
      69              : use utils::sync::gate::{Gate, GateGuard};
      70              : use utils::{failpoint_support, pausable_failpoint};
      71              : 
      72              : use crate::background_node_operations::{
      73              :     Delete, Drain, Fill, MAX_RECONCILES_PER_OPERATION, Operation, OperationError, OperationHandler,
      74              : };
      75              : use crate::compute_hook::{self, ComputeHook, NotifyError};
      76              : use crate::heartbeater::{Heartbeater, PageserverState, SafekeeperState};
      77              : use crate::id_lock_map::{
      78              :     IdLockMap, TracingExclusiveGuard, trace_exclusive_lock, trace_shared_lock,
      79              : };
      80              : use crate::leadership::Leadership;
      81              : use crate::metrics;
      82              : use crate::node::{AvailabilityTransition, Node};
      83              : use crate::operation_utils::{self, TenantShardDrain, TenantShardDrainAction};
      84              : use crate::pageserver_client::PageserverClient;
      85              : use crate::peer_client::GlobalObservedState;
      86              : use crate::persistence::split_state::SplitState;
      87              : use crate::persistence::{
      88              :     AbortShardSplitStatus, ControllerPersistence, DatabaseError, DatabaseResult,
      89              :     MetadataHealthPersistence, Persistence, ShardGenerationState, TenantFilter,
      90              :     TenantShardPersistence,
      91              : };
      92              : use crate::reconciler::{
      93              :     ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder, ReconcilerPriority,
      94              :     attached_location_conf,
      95              : };
      96              : use crate::safekeeper::Safekeeper;
      97              : use crate::scheduler::{
      98              :     AttachedShardTag, MaySchedule, ScheduleContext, ScheduleError, ScheduleMode, Scheduler,
      99              : };
     100              : use crate::tenant_shard::{
     101              :     IntentState, MigrateAttachment, ObservedState, ObservedStateDelta, ObservedStateLocation,
     102              :     ReconcileNeeded, ReconcileResult, ReconcileWaitError, ReconcilerStatus, ReconcilerWaiter,
     103              :     ScheduleOptimization, ScheduleOptimizationAction, TenantShard,
     104              : };
     105              : use crate::timeline_import::{
     106              :     FinalizingImport, ImportResult, ShardImportStatuses, TimelineImport,
     107              :     TimelineImportFinalizeError, TimelineImportState, UpcallClient,
     108              : };
     109              : 
     110              : const WAITER_OPERATION_POLL_TIMEOUT: Duration = Duration::from_millis(500);
     111              : 
     112              : // For operations that should be quick, like attaching a new tenant
     113              : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
     114              : 
     115              : // For operations that might be slow, like migrating a tenant with
     116              : // some data in it.
     117              : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     118              : 
     119              : // If we receive a call using Secondary mode initially, it will omit generation.  We will initialize
     120              : // tenant shards into this generation, and as long as it remains in this generation, we will accept
     121              : // input generation from future requests as authoritative.
     122              : const INITIAL_GENERATION: Generation = Generation::new(0);
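
An aside on the rule above (a minimal sketch, not code from this file; effective_generation is a hypothetical helper name):

    // While a shard is still at INITIAL_GENERATION, a generation carried by an
    // incoming request is taken as authoritative; once the shard has advanced
    // past it, our own record wins.
    fn effective_generation(current: Generation, requested: Option<Generation>) -> Generation {
        match requested {
            Some(requested) if current == INITIAL_GENERATION => requested,
            _ => current,
        }
    }
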
     123              : 
     124              : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
     125              : /// up on unresponsive pageservers and proceed.
     126              : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     127              : 
     128              : /// How long a node may be unresponsive to heartbeats before we declare it offline.
      129              : /// This must be long enough to cover node restarts as well as normal operations.
     130              : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
     131              : 
     132              : /// How long a node may be unresponsive to heartbeats during start up before we declare it
     133              : /// offline.
     134              : ///
     135              : /// This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
     136              : /// handling of the re-attach response may take a long time and blocks heartbeats from
     137              : /// being handled on the pageserver side.
     138              : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
     139              : 
     140              : /// How often to send heartbeats to registered nodes?
     141              : pub const HEARTBEAT_INTERVAL_DEFAULT: Duration = Duration::from_secs(5);
     142              : 
     143              : /// How long is too long for a reconciliation?
     144              : pub const LONG_RECONCILE_THRESHOLD_DEFAULT: Duration = Duration::from_secs(120);
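
A quick consistency check on the defaults above (illustrative, not part of the source): with a 5s heartbeat interval and a 30s offline grace period, a node must miss roughly six consecutive heartbeats before it can be declared offline.

    // Compile-time sanity check; Duration::as_secs is a const fn.
    const _: () = assert!(
        MAX_OFFLINE_INTERVAL_DEFAULT.as_secs() / HEARTBEAT_INTERVAL_DEFAULT.as_secs() == 6
    );
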
     145              : 
     146              : #[derive(Clone, strum_macros::Display)]
     147              : enum TenantOperations {
     148              :     Create,
     149              :     LocationConfig,
     150              :     ConfigSet,
     151              :     ConfigPatch,
     152              :     TimeTravelRemoteStorage,
     153              :     Delete,
     154              :     UpdatePolicy,
     155              :     ShardSplit,
     156              :     SecondaryDownload,
     157              :     TimelineCreate,
     158              :     TimelineDelete,
     159              :     AttachHook,
     160              :     TimelineArchivalConfig,
     161              :     TimelineDetachAncestor,
     162              :     TimelineGcBlockUnblock,
     163              :     DropDetached,
     164              :     DownloadHeatmapLayers,
     165              :     TimelineLsnLease,
     166              :     TimelineSafekeeperMigrate,
     167              : }
     168              : 
     169              : #[derive(Clone, strum_macros::Display)]
     170              : enum NodeOperations {
     171              :     Register,
     172              :     Configure,
     173              :     Delete,
     174              :     DeleteTombstone,
     175              : }
     176              : 
     177              : /// The leadership status for the storage controller process.
     178              : /// Allowed transitions are:
     179              : /// 1. Leader -> SteppedDown
     180              : /// 2. Candidate -> Leader
     181              : #[derive(
     182              :     Eq,
     183              :     PartialEq,
     184              :     Copy,
     185              :     Clone,
     186              :     strum_macros::Display,
     187              :     strum_macros::EnumIter,
     188              :     measured::FixedCardinalityLabel,
     189              : )]
     190              : #[strum(serialize_all = "snake_case")]
     191              : pub(crate) enum LeadershipStatus {
     192              :     /// This is the steady state where the storage controller can produce
     193              :     /// side effects in the cluster.
     194              :     Leader,
     195              :     /// We've been notified to step down by another candidate. No reconciliations
     196              :     /// take place in this state.
     197              :     SteppedDown,
     198              :     /// Initial state for a new storage controller instance. Will attempt to assume leadership.
     199              :     #[allow(unused)]
     200              :     Candidate,
     201              : }
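
The transition rules in the doc comment can be made executable; this is a minimal sketch (the helper is hypothetical, not part of this file):

    // Only Candidate -> Leader and Leader -> SteppedDown are permitted;
    // SteppedDown is terminal for the lifetime of the process.
    fn is_valid_transition(from: LeadershipStatus, to: LeadershipStatus) -> bool {
        use LeadershipStatus::*;
        matches!((from, to), (Candidate, Leader) | (Leader, SteppedDown))
    }
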
     202              : 
     203              : enum ShardGenerationValidity {
     204              :     Valid,
     205              :     Mismatched {
     206              :         claimed: Generation,
     207              :         actual: Option<Generation>,
     208              :     },
     209              : }
     210              : 
     211              : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
     212              : pub const PRIORITY_RECONCILER_CONCURRENCY_DEFAULT: usize = 256;
     213              : pub const SAFEKEEPER_RECONCILER_CONCURRENCY_DEFAULT: usize = 32;
     214              : 
     215              : // Number of consecutive reconciliations that have occurred for one shard,
      216              : // after which the shard is ignored when deciding whether to run optimizations.
     217              : const MAX_CONSECUTIVE_RECONCILES: usize = 10;
     218              : 
     219              : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
     220              : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
     221              : // than they're being pushed onto the queue.
     222              : const MAX_DELAYED_RECONCILES: usize = 10000;
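
The memory-safety property the comment relies on comes from tokio's bounded mpsc channel: when the queue is full, try_send fails instead of buffering without limit. A self-contained sketch of that pattern (stand-in types, not this file's code):

    use tokio::sync::mpsc::{self, error::TrySendError};

    fn enqueue_delayed<T>(tx: &mpsc::Sender<T>, item: T) {
        match tx.try_send(item) {
            Ok(()) => {}
            // Queue full: drop the request; a later background pass retries.
            Err(TrySendError::Full(_)) => {}
            // Receiver gone: we are shutting down, nothing to do.
            Err(TrySendError::Closed(_)) => {}
        }
    }
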
     223              : 
     224              : // Top level state available to all HTTP handlers
     225              : struct ServiceState {
     226              :     leadership_status: LeadershipStatus,
     227              : 
     228              :     tenants: BTreeMap<TenantShardId, TenantShard>,
     229              : 
     230              :     nodes: Arc<HashMap<NodeId, Node>>,
     231              : 
     232              :     safekeepers: Arc<HashMap<NodeId, Safekeeper>>,
     233              : 
     234              :     safekeeper_reconcilers: SafekeeperReconcilers,
     235              : 
     236              :     scheduler: Scheduler,
     237              : 
     238              :     /// Ongoing background operation on the cluster if any is running.
     239              :     /// Note that only one such operation may run at any given time,
     240              :     /// hence the type choice.
     241              :     ongoing_operation: Option<OperationHandler>,
     242              : 
     243              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     244              :     delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     245              : 
     246              :     /// Tracks ongoing timeline import finalization tasks
     247              :     imports_finalizing: BTreeMap<(TenantId, TimelineId), FinalizingImport>,
     248              : }
     249              : 
     250              : /// Transform an error from a pageserver into an error to return to callers of a storage
     251              : /// controller API.
     252            0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
     253            0 :     match e {
     254            0 :         mgmt_api::Error::SendRequest(e) => {
     255              :             // Presume errors sending requests are connectivity/availability issues
     256            0 :             ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
     257              :         }
     258            0 :         mgmt_api::Error::ReceiveErrorBody(str) => {
     259              :             // Presume errors receiving body are connectivity/availability issues
     260            0 :             ApiError::ResourceUnavailable(
     261            0 :                 format!("{node} error receiving error body: {str}").into(),
     262            0 :             )
     263              :         }
     264            0 :         mgmt_api::Error::ReceiveBody(err) if err.is_decode() => {
     265              :             // Return 500 for decoding errors.
     266            0 :             ApiError::InternalServerError(anyhow::Error::from(err).context("error decoding body"))
     267              :         }
     268            0 :         mgmt_api::Error::ReceiveBody(err) => {
     269              :             // Presume errors receiving body are connectivity/availability issues except for decoding errors
     270            0 :             let src_str = err.source().map(|e| e.to_string()).unwrap_or_default();
     271            0 :             ApiError::ResourceUnavailable(
     272            0 :                 format!("{node} error receiving error body: {err} {src_str}").into(),
     273            0 :             )
     274              :         }
     275            0 :         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
     276            0 :             ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
     277              :         }
     278            0 :         mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
     279            0 :             ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
     280              :         }
     281            0 :         mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
     282            0 :         | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
     283              :             // Auth errors talking to a pageserver are not auth errors for the caller: they are
     284              :             // internal server errors, showing that something is wrong with the pageserver or
     285              :             // storage controller's auth configuration.
     286            0 :             ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
     287              :         }
     288            0 :         mgmt_api::Error::ApiError(status @ StatusCode::TOO_MANY_REQUESTS, msg) => {
     289              :             // Pass through 429 errors: if pageserver is asking us to wait + retry, we in
     290              :             // turn ask our clients to wait + retry
      291            0 :             ApiError::Conflict(format!("{node} {status}: {msg}"))
     292              :         }
     293            0 :         mgmt_api::Error::ApiError(status, msg) => {
     294              :             // Presume general case of pageserver API errors is that we tried to do something
     295              :             // that can't be done right now.
      296            0 :             ApiError::Conflict(format!("{node} {status}: {msg}"))
     297              :         }
     298            0 :         mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
     299            0 :         mgmt_api::Error::Timeout(e) => ApiError::Timeout(e.into()),
     300              :     }
     301            0 : }
     302              : 
     303              : impl ServiceState {
     304            0 :     fn new(
     305            0 :         nodes: HashMap<NodeId, Node>,
     306            0 :         safekeepers: HashMap<NodeId, Safekeeper>,
     307            0 :         tenants: BTreeMap<TenantShardId, TenantShard>,
     308            0 :         scheduler: Scheduler,
     309            0 :         delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     310            0 :         initial_leadership_status: LeadershipStatus,
     311            0 :         reconcilers_cancel: CancellationToken,
     312            0 :     ) -> Self {
     313            0 :         metrics::update_leadership_status(initial_leadership_status);
     314              : 
     315            0 :         Self {
     316            0 :             leadership_status: initial_leadership_status,
     317            0 :             tenants,
     318            0 :             nodes: Arc::new(nodes),
     319            0 :             safekeepers: Arc::new(safekeepers),
     320            0 :             safekeeper_reconcilers: SafekeeperReconcilers::new(reconcilers_cancel),
     321            0 :             scheduler,
     322            0 :             ongoing_operation: None,
     323            0 :             delayed_reconcile_rx,
     324            0 :             imports_finalizing: Default::default(),
     325            0 :         }
     326            0 :     }
     327              : 
     328            0 :     fn parts_mut(
     329            0 :         &mut self,
     330            0 :     ) -> (
     331            0 :         &mut Arc<HashMap<NodeId, Node>>,
     332            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     333            0 :         &mut Scheduler,
     334            0 :     ) {
     335            0 :         (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
     336            0 :     }
     337              : 
     338              :     #[allow(clippy::type_complexity)]
     339            0 :     fn parts_mut_sk(
     340            0 :         &mut self,
     341            0 :     ) -> (
     342            0 :         &mut Arc<HashMap<NodeId, Node>>,
     343            0 :         &mut Arc<HashMap<NodeId, Safekeeper>>,
     344            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     345            0 :         &mut Scheduler,
     346            0 :     ) {
     347            0 :         (
     348            0 :             &mut self.nodes,
     349            0 :             &mut self.safekeepers,
     350            0 :             &mut self.tenants,
     351            0 :             &mut self.scheduler,
     352            0 :         )
     353            0 :     }
     354              : 
     355            0 :     fn get_leadership_status(&self) -> LeadershipStatus {
     356            0 :         self.leadership_status
     357            0 :     }
     358              : 
     359            0 :     fn step_down(&mut self) {
     360            0 :         self.leadership_status = LeadershipStatus::SteppedDown;
     361            0 :         metrics::update_leadership_status(self.leadership_status);
     362            0 :     }
     363              : 
     364            0 :     fn become_leader(&mut self) {
     365            0 :         self.leadership_status = LeadershipStatus::Leader;
     366            0 :         metrics::update_leadership_status(self.leadership_status);
     367            0 :     }
     368              : }
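
parts_mut and parts_mut_sk above use the standard borrow-splitting idiom: a method that returns disjoint &mut references to several fields lets callers mutate, say, the tenant map and the scheduler in the same scope without fighting the borrow checker over a single &mut self. A self-contained illustration:

    struct Split {
        left: Vec<u32>,
        right: Vec<u32>,
    }

    impl Split {
        // Disjoint field borrows are allowed to coexist.
        fn parts_mut(&mut self) -> (&mut Vec<u32>, &mut Vec<u32>) {
            (&mut self.left, &mut self.right)
        }
    }

    fn demo(s: &mut Split) {
        let (l, r) = s.parts_mut();
        l.push(1);
        r.push(2); // both mutable borrows are live here, and this compiles
    }
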
     369              : 
     370              : #[derive(Clone)]
     371              : pub struct Config {
     372              :     // All pageservers managed by one instance of this service must have
     373              :     // the same public key.  This JWT token will be used to authenticate
     374              :     // this service to the pageservers it manages.
     375              :     pub pageserver_jwt_token: Option<String>,
     376              : 
     377              :     // All safekeepers managed by one instance of this service must have
     378              :     // the same public key. This JWT token will be used to authenticate
     379              :     // this service to the safekeepers it manages.
     380              :     pub safekeeper_jwt_token: Option<String>,
     381              : 
     382              :     // This JWT token will be used to authenticate this service to the control plane.
     383              :     pub control_plane_jwt_token: Option<String>,
     384              : 
     385              :     // This JWT token will be used to authenticate with other storage controller instances
     386              :     pub peer_jwt_token: Option<String>,
     387              : 
     388              :     /// Prefix for storage API endpoints of the control plane. We use this prefix to compute
     389              :     /// URLs that we use to send pageserver and safekeeper attachment locations.
     390              :     /// If this is None, the compute hook will assume it is running in a test environment
     391              :     /// and try to invoke neon_local instead.
     392              :     pub control_plane_url: Option<String>,
     393              : 
      394              :     /// Grace period during which a pageserver may fail to respond to heartbeats
      395              :     /// while still being considered active. Once the grace period elapses, the
      396              :     /// next heartbeat failure will mark the pageserver offline.
     397              :     pub max_offline_interval: Duration,
     398              : 
     399              :     /// Extended grace period within which pageserver may not respond to heartbeats.
     400              :     /// This extended grace period kicks in after the node has been drained for restart
     401              :     /// and/or upon handling the re-attach request from a node.
     402              :     pub max_warming_up_interval: Duration,
     403              : 
     404              :     /// How many normal-priority Reconcilers may be spawned concurrently
     405              :     pub reconciler_concurrency: usize,
     406              : 
     407              :     /// How many high-priority Reconcilers may be spawned concurrently
     408              :     pub priority_reconciler_concurrency: usize,
     409              : 
     410              :     /// How many safekeeper reconciles may happen concurrently (per safekeeper)
     411              :     pub safekeeper_reconciler_concurrency: usize,
     412              : 
     413              :     /// How many API requests per second to allow per tenant, across all
     414              :     /// tenant-scoped API endpoints. Further API requests queue until ready.
     415              :     pub tenant_rate_limit: NonZeroU32,
     416              : 
     417              :     /// If a tenant shard's largest timeline (max_logical_size) exceeds this value, all tenant
     418              :     /// shards will be split in 2 until they fall below split_threshold (up to max_split_shards).
     419              :     ///
     420              :     /// This will greedily split into as many shards as necessary to fall below split_threshold, as
     421              :     /// powers of 2: if a tenant shard is 7 times larger than split_threshold, it will split into 8
     422              :     /// immediately, rather than first 2 then 4 then 8.
     423              :     ///
     424              :     /// None or 0 disables auto-splitting.
     425              :     ///
     426              :     /// TODO: consider using total logical size of all timelines instead.
     427              :     pub split_threshold: Option<u64>,
     428              : 
     429              :     /// The maximum number of shards a tenant can be split into during autosplits. Does not affect
     430              :     /// manual split requests. 0 or 1 disables autosplits, as we already have 1 shard.
     431              :     pub max_split_shards: u8,
     432              : 
     433              :     /// The size at which an unsharded tenant should initially split. Ingestion is significantly
     434              :     /// faster with multiple shards, so eagerly splitting below split_threshold will typically speed
     435              :     /// up initial ingestion of large tenants.
     436              :     ///
     437              :     /// This should be below split_threshold, but it is not required. If both split_threshold and
     438              :     /// initial_split_threshold qualify, the largest number of target shards will be used.
     439              :     ///
     440              :     /// Does not apply to already sharded tenants: changing initial_split_threshold or
     441              :     /// initial_split_shards is not retroactive for already-sharded tenants.
     442              :     ///
     443              :     /// None or 0 disables initial splits.
     444              :     pub initial_split_threshold: Option<u64>,
     445              : 
     446              :     /// The number of shards to split into when reaching initial_split_threshold. Will
     447              :     /// be clamped to max_split_shards.
     448              :     ///
     449              :     /// 0 or 1 disables initial splits. Has no effect if initial_split_threshold is disabled.
     450              :     pub initial_split_shards: u8,
     451              : 
      452              :     // TODO: make this cfg(feature = "testing")
     453              :     pub neon_local_repo_dir: Option<PathBuf>,
     454              : 
     455              :     // Maximum acceptable download lag for the secondary location
     456              :     // while draining a node. If the secondary location is lagging
     457              :     // by more than the configured amount, then the secondary is not
     458              :     // upgraded to primary.
     459              :     pub max_secondary_lag_bytes: Option<u64>,
     460              : 
     461              :     pub heartbeat_interval: Duration,
     462              : 
     463              :     pub address_for_peers: Option<Uri>,
     464              : 
     465              :     pub start_as_candidate: bool,
     466              : 
     467              :     pub long_reconcile_threshold: Duration,
     468              : 
     469              :     pub use_https_pageserver_api: bool,
     470              : 
     471              :     pub use_https_safekeeper_api: bool,
     472              : 
     473              :     pub ssl_ca_certs: Vec<Certificate>,
     474              : 
     475              :     pub timelines_onto_safekeepers: bool,
     476              : 
     477              :     pub use_local_compute_notifications: bool,
     478              : 
     479              :     /// Number of safekeepers to choose for a timeline when creating it.
      480              :     /// Safekeepers will be chosen from different availability zones.
     481              :     pub timeline_safekeeper_count: usize,
     482              : 
     483              :     /// PostHog integration config
     484              :     pub posthog_config: Option<PostHogConfig>,
     485              : 
     486              :     /// When set, actively checks and initiates heatmap downloads/uploads.
     487              :     pub kick_secondary_downloads: bool,
     488              : 
     489              :     /// Timeout used for HTTP client of split requests. [`Duration::MAX`] if None.
     490              :     pub shard_split_request_timeout: Duration,
     491              : 
     492              :     // Feature flag: Whether the storage controller should act to rectify pageserver-reported local disk loss.
     493              :     pub handle_ps_local_disk_loss: bool,
     494              : }
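
To make the split_threshold arithmetic concrete (a hypothetical helper, not this file's implementation): the greedy power-of-two rule means a tenant 7x over the threshold jumps straight to 8 shards rather than stepping through 2, then 4, then 8.

    fn target_shard_count(max_logical_size: u64, split_threshold: u64, max_split_shards: u8) -> u64 {
        if split_threshold == 0 || max_split_shards <= 1 {
            return 1; // splits disabled
        }
        let needed = max_logical_size.div_ceil(split_threshold);
        // Round up to a power of two, clamped by the configured maximum.
        needed.next_power_of_two().min(max_split_shards as u64).max(1)
    }

    // e.g. target_shard_count(7 * t, t, 8) == 8 for any t > 0.
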
     495              : 
     496              : impl From<DatabaseError> for ApiError {
     497            0 :     fn from(err: DatabaseError) -> ApiError {
     498            0 :         match err {
     499            0 :             DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
     500              :             // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
     501              :             DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
     502            0 :                 ApiError::ShuttingDown
     503              :             }
     504            0 :             DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => {
     505            0 :                 ApiError::InternalServerError(anyhow::anyhow!(reason))
     506              :             }
     507            0 :             DatabaseError::Cas(reason) => ApiError::Conflict(reason),
     508              :         }
     509            0 :     }
     510              : }
     511              : 
     512              : enum InitialShardScheduleOutcome {
     513              :     Scheduled(TenantCreateResponseShard),
     514              :     NotScheduled,
     515              :     ShardScheduleError(ScheduleError),
     516              : }
     517              : 
     518              : pub struct Service {
     519              :     inner: Arc<std::sync::RwLock<ServiceState>>,
     520              :     config: Config,
     521              :     persistence: Arc<Persistence>,
     522              : 
     523              :     // HadronTokenGenerator to generate (sign) JWTs during compute deployment and compute-spec generation.
     524              :     #[allow(unused)]
     525              :     token_generator: Option<HadronTokenGenerator>,
     526              : 
     527              :     compute_hook: Arc<ComputeHook>,
     528              :     result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
     529              : 
     530              :     heartbeater_ps: Heartbeater<Node, PageserverState>,
     531              :     heartbeater_sk: Heartbeater<Safekeeper, SafekeeperState>,
     532              : 
     533              :     // Channel for background cleanup from failed operations that require cleanup, such as shard split
     534              :     abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
     535              : 
     536              :     // Locking on a tenant granularity (covers all shards in the tenant):
     537              :     // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
     538              :     // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
     539              :     tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
     540              : 
     541              :     // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
     542              :     // that transition it to/from Active.
     543              :     node_op_locks: IdLockMap<NodeId, NodeOperations>,
     544              : 
     545              :     // Limit how many Reconcilers we will spawn concurrently for normal-priority tasks such as background reconciliations
     546              :     // and reconciliation on startup.
     547              :     reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     548              : 
     549              :     // Limit how many Reconcilers we will spawn concurrently for high-priority tasks such as tenant/timeline CRUD, which
     550              :     // a human user might be waiting for.
     551              :     priority_reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     552              : 
     553              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     554              :     /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
     555              :     ///
     556              :     /// Note that this state logically lives inside ServiceState, but carrying Sender here makes the code simpler
      557              :     /// by avoiding the need for a &mut ref to something inside the ServiceState.  This could be optimized to
     558              :     /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
     559              :     delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
     560              : 
     561              :     // Process shutdown will fire this token
     562              :     cancel: CancellationToken,
     563              : 
     564              :     // Child token of [`Service::cancel`] used by reconcilers
     565              :     reconcilers_cancel: CancellationToken,
     566              : 
     567              :     // Background tasks will hold this gate
     568              :     gate: Gate,
     569              : 
     570              :     // Reconcilers background tasks will hold this gate
     571              :     reconcilers_gate: Gate,
     572              : 
     573              :     /// This waits for initial reconciliation with pageservers to complete.  Until this barrier
     574              :     /// passes, it isn't safe to do any actions that mutate tenants.
     575              :     pub(crate) startup_complete: Barrier,
     576              : 
     577              :     /// HTTP client with proper CA certs.
     578              :     http_client: reqwest::Client,
     579              : 
     580              :     /// Handle for the step down background task if one was ever requested
     581              :     step_down_barrier: OnceLock<tokio::sync::watch::Receiver<Option<GlobalObservedState>>>,
     582              : }
     583              : 
     584              : impl From<ReconcileWaitError> for ApiError {
     585            0 :     fn from(value: ReconcileWaitError) -> Self {
     586            0 :         match value {
     587            0 :             ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
     588            0 :             e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
     589            0 :             e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
     590              :         }
     591            0 :     }
     592              : }
     593              : 
     594              : impl From<OperationError> for ApiError {
     595            0 :     fn from(value: OperationError) -> Self {
     596            0 :         match value {
     597            0 :             OperationError::NodeStateChanged(err)
     598            0 :             | OperationError::FinalizeError(err)
     599            0 :             | OperationError::ImpossibleConstraint(err) => {
     600            0 :                 ApiError::InternalServerError(anyhow::anyhow!(err))
     601              :             }
     602            0 :             OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
     603              :         }
     604            0 :     }
     605              : }
     606              : 
     607              : #[allow(clippy::large_enum_variant)]
     608              : enum TenantCreateOrUpdate {
     609              :     Create(TenantCreateRequest),
     610              :     Update(Vec<ShardUpdate>),
     611              : }
     612              : 
     613              : struct ShardSplitParams {
     614              :     old_shard_count: ShardCount,
     615              :     new_shard_count: ShardCount,
     616              :     new_stripe_size: Option<ShardStripeSize>,
     617              :     targets: Vec<ShardSplitTarget>,
     618              :     policy: PlacementPolicy,
     619              :     config: TenantConfig,
     620              :     shard_ident: ShardIdentity,
     621              :     preferred_az_id: Option<AvailabilityZone>,
     622              : }
     623              : 
     624              : // When preparing for a shard split, we may either choose to proceed with the split,
     625              : // or find that the work is already done and return NoOp.
     626              : enum ShardSplitAction {
     627              :     Split(Box<ShardSplitParams>),
     628              :     NoOp(TenantShardSplitResponse),
     629              : }
     630              : 
     631              : // A parent shard which will be split
     632              : struct ShardSplitTarget {
     633              :     parent_id: TenantShardId,
     634              :     node: Node,
     635              :     child_ids: Vec<TenantShardId>,
     636              : }
     637              : 
      638              : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
     639              : /// might not be available.  We therefore use a queue of abort operations processed in the background.
     640              : struct TenantShardSplitAbort {
     641              :     tenant_id: TenantId,
     642              :     /// The target values from the request that failed
     643              :     new_shard_count: ShardCount,
     644              :     new_stripe_size: Option<ShardStripeSize>,
     645              :     /// Until this abort op is complete, no other operations may be done on the tenant
     646              :     _tenant_lock: TracingExclusiveGuard<TenantOperations>,
     647              :     /// The reconciler gate for the duration of the split operation, and any included abort.
     648              :     _gate: GateGuard,
     649              : }
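
A sketch of the background-queue pattern the comment above describes (all names here are stand-ins): failed splits are pushed onto an unbounded channel, and a background task drains it, retrying cleanup once the nodes involved become reachable again.

    use tokio::sync::mpsc;

    // u64 stands in for the tenant id carried by the real abort message.
    async fn process_aborts(mut rx: mpsc::UnboundedReceiver<u64>) {
        while let Some(tenant_id) = rx.recv().await {
            // A real implementation would retry with backoff when cleanup
            // fails with an Unavailable-style error.
            println!("cleaning up failed shard split for tenant {tenant_id}");
        }
    }
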
     650              : 
     651              : #[derive(thiserror::Error, Debug)]
     652              : enum TenantShardSplitAbortError {
     653              :     #[error(transparent)]
     654              :     Database(#[from] DatabaseError),
     655              :     #[error(transparent)]
     656              :     Remote(#[from] mgmt_api::Error),
     657              :     #[error("Unavailable")]
     658              :     Unavailable,
     659              : }
     660              : 
     661              : /// Inputs for computing a target shard count for a tenant.
     662              : struct ShardSplitInputs {
     663              :     /// Current shard count.
     664              :     shard_count: ShardCount,
     665              :     /// Total size of largest timeline summed across all shards.
     666              :     max_logical_size: u64,
     667              :     /// Size-based split threshold. Zero if size-based splits are disabled.
     668              :     split_threshold: u64,
     669              :     /// Upper bound on target shards. 0 or 1 disables splits.
     670              :     max_split_shards: u8,
     671              :     /// Initial split threshold. Zero if initial splits are disabled.
     672              :     initial_split_threshold: u64,
     673              :     /// Number of shards for initial splits. 0 or 1 disables initial splits.
     674              :     initial_split_shards: u8,
     675              : }
     676              : 
     677              : struct ShardUpdate {
     678              :     tenant_shard_id: TenantShardId,
     679              :     placement_policy: PlacementPolicy,
     680              :     tenant_config: TenantConfig,
     681              : 
     682              :     /// If this is None, generation is not updated.
     683              :     generation: Option<Generation>,
     684              : 
     685              :     /// If this is None, scheduling policy is not updated.
     686              :     scheduling_policy: Option<ShardSchedulingPolicy>,
     687              : }
     688              : 
     689              : enum StopReconciliationsReason {
     690              :     ShuttingDown,
     691              :     SteppingDown,
     692              : }
     693              : 
     694              : impl std::fmt::Display for StopReconciliationsReason {
     695            0 :     fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
     696            0 :         let s = match self {
     697            0 :             Self::ShuttingDown => "Shutting down",
     698            0 :             Self::SteppingDown => "Stepping down",
     699              :         };
     700            0 :         write!(writer, "{s}")
     701            0 :     }
     702              : }
     703              : 
     704              : pub(crate) enum ReconcileResultRequest {
     705              :     ReconcileResult(ReconcileResult),
     706              :     Stop,
     707              : }
     708              : 
     709              : #[derive(Clone)]
     710              : pub(crate) struct MutationLocation {
     711              :     pub(crate) node: Node,
     712              :     pub(crate) generation: Generation,
     713              : }
     714              : 
     715              : #[derive(Clone)]
     716              : pub(crate) struct ShardMutationLocations {
     717              :     pub(crate) latest: MutationLocation,
     718              :     pub(crate) other: Vec<MutationLocation>,
     719              : }
     720              : 
     721              : #[derive(Default, Clone)]
     722              : pub(crate) struct TenantMutationLocations(pub BTreeMap<TenantShardId, ShardMutationLocations>);
     723              : 
     724              : struct ReconcileAllResult {
     725              :     spawned_reconciles: usize,
     726              :     stuck_reconciles: usize,
     727              :     has_delayed_reconciles: bool,
     728              : }
     729              : 
     730              : impl ReconcileAllResult {
     731            0 :     fn new(
     732            0 :         spawned_reconciles: usize,
     733            0 :         stuck_reconciles: usize,
     734            0 :         has_delayed_reconciles: bool,
     735            0 :     ) -> Self {
     736            0 :         assert!(
     737            0 :             spawned_reconciles >= stuck_reconciles,
      738            0 :             "It is impossible to have fewer spawned reconciles than stuck reconciles"
     739              :         );
     740            0 :         Self {
     741            0 :             spawned_reconciles,
     742            0 :             stuck_reconciles,
     743            0 :             has_delayed_reconciles,
     744            0 :         }
     745            0 :     }
     746              : 
     747              :     /// We can run optimizations only if we don't have any delayed reconciles and
     748              :     /// all spawned reconciles are also stuck reconciles.
     749            0 :     fn can_run_optimizations(&self) -> bool {
     750            0 :         !self.has_delayed_reconciles && self.spawned_reconciles == self.stuck_reconciles
     751            0 :     }
     752              : }
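
A worked illustration of the gating rule (using the constructor above; not a test that exists in this file): optimizations are allowed only when nothing sits in the delayed queue and every spawned reconcile is one of the long-running "stuck" ones, i.e. no fresh work is in flight.

    let all_stuck = ReconcileAllResult::new(3, 3, false);
    assert!(all_stuck.can_run_optimizations());

    let fresh_work = ReconcileAllResult::new(3, 1, false);
    assert!(!fresh_work.can_run_optimizations());
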
     753              : 
     754              : enum TenantIdOrShardId {
     755              :     TenantId(TenantId),
     756              :     TenantShardId(TenantShardId),
     757              : }
     758              : 
     759              : impl TenantIdOrShardId {
     760            0 :     fn tenant_id(&self) -> TenantId {
     761            0 :         match self {
     762            0 :             TenantIdOrShardId::TenantId(tenant_id) => *tenant_id,
     763            0 :             TenantIdOrShardId::TenantShardId(tenant_shard_id) => tenant_shard_id.tenant_id,
     764              :         }
     765            0 :     }
     766              : 
     767            0 :     fn matches(&self, tenant_shard_id: &TenantShardId) -> bool {
     768            0 :         match self {
     769            0 :             TenantIdOrShardId::TenantId(tenant_id) => tenant_shard_id.tenant_id == *tenant_id,
     770            0 :             TenantIdOrShardId::TenantShardId(this_tenant_shard_id) => {
     771            0 :                 this_tenant_shard_id == tenant_shard_id
     772              :             }
     773              :         }
     774            0 :     }
     775              : }
     776              : 
     777              : impl Service {
     778            0 :     pub fn get_config(&self) -> &Config {
     779            0 :         &self.config
     780            0 :     }
     781              : 
     782            0 :     pub fn get_http_client(&self) -> &reqwest::Client {
     783            0 :         &self.http_client
     784            0 :     }
     785              : 
     786              :     /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
     787              :     /// view of the world, and determine which pageservers are responsive.
     788              :     #[instrument(skip_all)]
     789              :     async fn startup_reconcile(
     790              :         self: &Arc<Service>,
     791              :         current_leader: Option<ControllerPersistence>,
     792              :         leader_step_down_state: Option<GlobalObservedState>,
     793              :         bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
     794              :             Result<(), (TenantShardId, NotifyError)>,
     795              :         >,
     796              :     ) {
     797              :         // Startup reconciliation does I/O to other services: whether they
     798              :         // are responsive or not, we should aim to finish within our deadline, because:
     799              :         // - If we don't, a k8s readiness hook watching /ready will kill us.
     800              :         // - While we're waiting for startup reconciliation, we are not fully
     801              :         //   available for end user operations like creating/deleting tenants and timelines.
     802              :         //
     803              :         // We set multiple deadlines to break up the time available between the phases of work: this is
     804              :         // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
     805              :         let start_at = Instant::now();
     806              :         let node_scan_deadline = start_at
     807              :             .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
     808              :             .expect("Reconcile timeout is a modest constant");
     809              : 
     810              :         let observed = if let Some(state) = leader_step_down_state {
     811              :             tracing::info!(
     812              :                 "Using observed state received from leader at {}",
     813              :                 current_leader.as_ref().unwrap().address
     814              :             );
     815              : 
     816              :             state
     817              :         } else {
     818              :             self.build_global_observed_state(node_scan_deadline).await
     819              :         };
     820              : 
     821              :         // Accumulate a list of any tenant locations that ought to be detached
     822              :         let mut cleanup = Vec::new();
     823              : 
     824              :         // Send initial heartbeat requests to all nodes loaded from the database
     825              :         let all_nodes = {
     826              :             let locked = self.inner.read().unwrap();
     827              :             locked.nodes.clone()
     828              :         };
     829              :         let (mut nodes_online, mut sks_online) =
     830              :             self.initial_heartbeat_round(all_nodes.keys()).await;
     831              : 
     832              :         // List of tenants for which we will attempt to notify compute of their location at startup
     833              :         let mut compute_notifications = Vec::new();
     834              : 
     835              :         // Populate intent and observed states for all tenants, based on reported state on pageservers
     836              :         tracing::info!("Populating tenant shards' states from initial pageserver scan...");
     837              :         let shard_count = {
     838              :             let mut locked = self.inner.write().unwrap();
     839              :             let (nodes, safekeepers, tenants, scheduler) = locked.parts_mut_sk();
     840              : 
     841              :             // Mark nodes online if they responded to us: nodes are offline by default after a restart.
     842              :             let mut new_nodes = (**nodes).clone();
     843              :             for (node_id, node) in new_nodes.iter_mut() {
     844              :                 if let Some(utilization) = nodes_online.remove(node_id) {
     845              :                     node.set_availability(NodeAvailability::Active(utilization));
     846              :                     scheduler.node_upsert(node);
     847              :                 }
     848              :             }
     849              :             *nodes = Arc::new(new_nodes);
     850              : 
     851              :             let mut new_sks = (**safekeepers).clone();
     852              :             for (node_id, node) in new_sks.iter_mut() {
     853              :                 if let Some((utilization, last_seen_at)) = sks_online.remove(node_id) {
     854              :                     node.set_availability(SafekeeperState::Available {
     855              :                         utilization,
     856              :                         last_seen_at,
     857              :                     });
     858              :                 }
     859              :             }
     860              :             *safekeepers = Arc::new(new_sks);
     861              : 
     862              :             for (tenant_shard_id, observed_state) in observed.0 {
     863              :                 let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
     864              :                     for node_id in observed_state.locations.keys() {
     865              :                         cleanup.push((tenant_shard_id, *node_id));
     866              :                     }
     867              : 
     868              :                     continue;
     869              :                 };
     870              : 
     871              :                 tenant_shard.observed = observed_state;
     872              :             }
     873              : 
     874              :             // Populate each tenant's intent state
     875              :             let mut schedule_context = ScheduleContext::default();
     876              :             for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
     877              :                 if tenant_shard_id.shard_number == ShardNumber(0) {
     878              :                     // Reset scheduling context each time we advance to the next Tenant
     879              :                     schedule_context = ScheduleContext::default();
     880              :                 }
     881              : 
     882              :                 tenant_shard.intent_from_observed(scheduler);
     883              :                 if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
     884              :                     // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
     885              :                     // not enough pageservers are available.  The tenant may well still be available
     886              :                     // to clients.
     887              :                     tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
     888              :                 } else {
     889              :                     // If we're both intending and observed to be attached at a particular node, we will
     890              :                     // emit a compute notification for this. In the case where our observed state does not
     891              :                     // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
     892              :                     if let Some(attached_at) = tenant_shard.stably_attached() {
     893              :                         compute_notifications.push(compute_hook::ShardUpdate {
     894              :                             tenant_shard_id: *tenant_shard_id,
     895              :                             node_id: attached_at,
     896              :                             stripe_size: tenant_shard.shard.stripe_size,
     897              :                             preferred_az: tenant_shard
     898              :                                 .preferred_az()
     899            0 :                                 .map(|az| Cow::Owned(az.clone())),
     900              :                         });
     901              :                     }
     902              :                 }
     903              :             }
     904              : 
     905              :             tenants.len()
     906              :         };
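
A minimal sketch (with a simplified key type, not the real TenantShardId) of the iteration pattern above: shards of one tenant are adjacent in the sorted map, so the per-tenant scheduling context can be reset whenever shard number 0 appears.

    use std::collections::BTreeMap;

    #[derive(Default)]
    struct ScheduleCtx {
        shards_seen: usize,
    }

    fn main() {
        // Key is (tenant id, shard number); a BTreeMap keeps one tenant's
        // shards contiguous, mirroring the tenants map above.
        let mut shards = BTreeMap::new();
        shards.insert((1u64, 0u8), "t1-s0");
        shards.insert((1, 1), "t1-s1");
        shards.insert((2, 0), "t2-s0");

        let mut ctx = ScheduleCtx::default();
        for ((tenant, shard_number), name) in &shards {
            if *shard_number == 0 {
                // Fresh context each time we advance to the next tenant.
                ctx = ScheduleCtx::default();
            }
            ctx.shards_seen += 1;
            println!("tenant {tenant}: {name} is shard #{} in this context", ctx.shards_seen);
        }
    }
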
     907              : 
      908              :         // Before making any observable changes to the cluster, persist self
     909              :         // as leader in database and memory.
     910              :         let leadership = Leadership::new(
     911              :             self.persistence.clone(),
     912              :             self.config.clone(),
     913              :             self.cancel.child_token(),
     914              :         );
     915              : 
     916              :         if let Err(e) = leadership.become_leader(current_leader).await {
     917              :             tracing::error!("Failed to persist self as leader: {e}. Aborting start-up ...");
     918              :             std::process::exit(1);
     919              :         }
     920              : 
     921              :         let safekeepers = self.inner.read().unwrap().safekeepers.clone();
     922              :         let sk_schedule_requests =
     923              :             match safekeeper_reconciler::load_schedule_requests(self, &safekeepers).await {
     924              :                 Ok(v) => v,
     925              :                 Err(e) => {
     926              :                     tracing::warn!(
     927              :                         "Failed to load safekeeper pending ops at startup: {e}." // Don't abort for now: " Aborting start-up..."
     928              :                     );
     929              :                     // std::process::exit(1);
     930              :                     Vec::new()
     931              :                 }
     932              :             };
     933              : 
     934              :         {
     935              :             let mut locked = self.inner.write().unwrap();
     936              :             locked.become_leader();
     937              : 
     938              :             for (sk_id, _sk) in locked.safekeepers.clone().iter() {
     939              :                 locked.safekeeper_reconcilers.start_reconciler(*sk_id, self);
     940              :             }
     941              : 
     942              :             locked
     943              :                 .safekeeper_reconcilers
     944              :                 .schedule_request_vec(sk_schedule_requests);
     945              :         }
     946              : 
     947              :         // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
     948              :         // generation_pageserver in the database.
     949              : 
     950              :         // Emit compute hook notifications for all tenants which are already stably attached.  Other tenants
     951              :         // will emit compute hook notifications when they reconcile.
     952              :         //
     953              :         // Ordering: our calls to notify_attach_background synchronously establish a relative order for these notifications vs. any later
     954              :         // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
     955              :         // calls will be correctly ordered wrt these.
     956              :         //
     957              :         // Concurrency: we call notify_attach_background for all tenants, which will create O(N) tokio tasks, but almost all of them
      958              :         // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so they are very cheap until they get that semaphore
     959              :         // unit and start doing I/O.
     960              :         tracing::info!(
     961              :             "Sending {} compute notifications",
     962              :             compute_notifications.len()
     963              :         );
     964              :         self.compute_hook.notify_attach_background(
     965              :             compute_notifications,
     966              :             bg_compute_notify_result_tx.clone(),
     967              :             &self.cancel,
     968              :         );
     969              : 
     970              :         // Finally, now that the service is up and running, launch reconcile operations for any tenants
     971              :         // which require it: under normal circumstances this should only include tenants that were in some
     972              :         // transient state before we restarted, or any tenants whose compute hooks failed above.
     973              :         tracing::info!("Checking for shards in need of reconciliation...");
     974              :         let reconcile_all_result = self.reconcile_all();
     975              :         // We will not wait for these reconciliation tasks to run here: we're now done with startup and
     976              :         // normal operations may proceed.
     977              : 
     978              :         // Clean up any tenants that were found on pageservers but are not known to us.  Do this in the
     979              :         // background because it does not need to complete in order to proceed with other work.
     980              :         if !cleanup.is_empty() {
     981              :             tracing::info!("Cleaning up {} locations in the background", cleanup.len());
     982              :             tokio::task::spawn({
     983              :                 let cleanup_self = self.clone();
     984            0 :                 async move { cleanup_self.cleanup_locations(cleanup).await }
     985              :             });
     986              :         }
     987              : 
     988              :         // Reconcile the timeline imports:
     989              :         // 1. Mark each tenant shard of tenants with an importing timeline as importing.
     990              :         // 2. Finalize the completed imports in the background. This handles the case where
     991              :         //    the previous storage controller instance shut down whilst finalizing imports.
     992              :         let imports = self.persistence.list_timeline_imports().await;
     993              :         match imports {
     994              :             Ok(mut imports) => {
     995              :                 {
     996              :                     let mut locked = self.inner.write().unwrap();
     997              :                     for import in &imports {
     998              :                         locked
     999              :                             .tenants
    1000              :                             .range_mut(TenantShardId::tenant_range(import.tenant_id))
    1001            0 :                             .for_each(|(_id, shard)| {
    1002            0 :                                 shard.importing = TimelineImportState::Importing
    1003            0 :                             });
    1004              :                     }
    1005              :                 }
    1006              : 
    1007            0 :                 imports.retain(|import| import.is_complete());
    1008              :                 tokio::task::spawn({
    1009              :                     let finalize_imports_self = self.clone();
    1010            0 :                     async move {
    1011            0 :                         finalize_imports_self
    1012            0 :                             .finalize_timeline_imports(imports)
    1013            0 :                             .await
    1014            0 :                     }
    1015              :                 });
    1016              :             }
    1017              :             Err(err) => {
    1018              :                 tracing::error!("Could not retrieve completed imports from database: {err}");
    1019              :             }
    1020              :         }
    1021              : 
    1022              :         let spawned_reconciles = reconcile_all_result.spawned_reconciles;
    1023              :         tracing::info!(
    1024              :             "Startup complete, spawned {spawned_reconciles} reconciliation tasks ({shard_count} shards total)"
    1025              :         );
    1026              :     }
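
A minimal sketch of the fan-out pattern described in the concurrency comment above, assuming the tokio crate; the API_CONCURRENCY value and the sleep standing in for the HTTP call are illustrative, not the actual ComputeHook implementation.

    use std::sync::Arc;
    use tokio::sync::Semaphore;

    #[tokio::main]
    async fn main() {
        const API_CONCURRENCY: usize = 4; // hypothetical limit, for illustration
        let semaphore = Arc::new(Semaphore::new(API_CONCURRENCY));

        let mut tasks = Vec::new();
        for shard in 0u32..16 {
            let semaphore = semaphore.clone();
            tasks.push(tokio::spawn(async move {
                // O(N) tasks exist, but at most API_CONCURRENCY do I/O at a
                // time; the rest are parked on the semaphore, costing little.
                let _permit = semaphore.acquire_owned().await.unwrap();
                tokio::time::sleep(std::time::Duration::from_millis(5)).await;
                println!("notified compute about shard {shard}");
            }));
        }
        for t in tasks {
            t.await.unwrap();
        }
    }
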
    1027              : 
    1028            0 :     async fn initial_heartbeat_round<'a>(
    1029            0 :         &self,
    1030            0 :         node_ids: impl Iterator<Item = &'a NodeId>,
    1031            0 :     ) -> (
    1032            0 :         HashMap<NodeId, PageserverUtilization>,
    1033            0 :         HashMap<NodeId, (SafekeeperUtilization, Instant)>,
    1034            0 :     ) {
    1035            0 :         assert!(!self.startup_complete.is_ready());
    1036              : 
    1037            0 :         let all_nodes = {
    1038            0 :             let locked = self.inner.read().unwrap();
    1039            0 :             locked.nodes.clone()
    1040              :         };
    1041              : 
    1042            0 :         let mut nodes_to_heartbeat = HashMap::new();
    1043            0 :         for node_id in node_ids {
    1044            0 :             match all_nodes.get(node_id) {
    1045            0 :                 Some(node) => {
    1046            0 :                     nodes_to_heartbeat.insert(*node_id, node.clone());
    1047            0 :                 }
    1048              :                 None => {
    1049            0 :                     tracing::warn!("Node {node_id} was removed during start-up");
    1050              :                 }
    1051              :             }
    1052              :         }
    1053              : 
    1054            0 :         let all_sks = {
    1055            0 :             let locked = self.inner.read().unwrap();
    1056            0 :             locked.safekeepers.clone()
    1057              :         };
    1058              : 
    1059            0 :         tracing::info!("Sending initial heartbeats...");
    1060            0 :         let (res_ps, res_sk) = tokio::join!(
    1061            0 :             self.heartbeater_ps.heartbeat(Arc::new(nodes_to_heartbeat)),
    1062            0 :             self.heartbeater_sk.heartbeat(all_sks)
    1063              :         );
    1064              : 
    1065            0 :         let mut online_nodes = HashMap::new();
    1066            0 :         if let Ok(deltas) = res_ps {
    1067            0 :             for (node_id, status) in deltas.0 {
    1068            0 :                 match status {
    1069            0 :                     PageserverState::Available { utilization, .. } => {
    1070            0 :                         online_nodes.insert(node_id, utilization);
    1071            0 :                     }
    1072            0 :                     PageserverState::Offline => {}
    1073              :                     PageserverState::WarmingUp { .. } => {
    1074            0 :                         unreachable!("Nodes are never marked warming-up during startup reconcile")
    1075              :                     }
    1076              :                 }
    1077              :             }
    1078            0 :         }
    1079              : 
    1080            0 :         let mut online_sks = HashMap::new();
    1081            0 :         if let Ok(deltas) = res_sk {
    1082            0 :             for (node_id, status) in deltas.0 {
    1083            0 :                 match status {
    1084              :                     SafekeeperState::Available {
    1085            0 :                         utilization,
    1086            0 :                         last_seen_at,
    1087            0 :                     } => {
    1088            0 :                         online_sks.insert(node_id, (utilization, last_seen_at));
    1089            0 :                     }
    1090            0 :                     SafekeeperState::Offline => {}
    1091              :                 }
    1092              :             }
    1093            0 :         }
    1094              : 
    1095            0 :         (online_nodes, online_sks)
    1096            0 :     }
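
A minimal sketch of the joined heartbeat round above, assuming the tokio crate; the function body is a stand-in, not the real heartbeater.

    use std::time::Duration;

    async fn heartbeat_round(kind: &'static str) -> usize {
        // Stand-in for an HTTP fan-out to every node of this kind.
        tokio::time::sleep(Duration::from_millis(50)).await;
        println!("{kind} heartbeat round complete");
        3 // pretend three nodes answered
    }

    #[tokio::main]
    async fn main() {
        // join! drives both rounds concurrently: total latency is the max of
        // the two rounds, not their sum.
        let (ps_online, sk_online) = tokio::join!(
            heartbeat_round("pageserver"),
            heartbeat_round("safekeeper"),
        );
        println!("online: {ps_online} pageservers, {sk_online} safekeepers");
    }
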
    1097              : 
    1098              :     /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
    1099              :     ///
     1100              :     /// The result includes only nodes which responded within the deadline.
    1101            0 :     async fn scan_node_locations(
    1102            0 :         &self,
    1103            0 :         deadline: Instant,
    1104            0 :     ) -> HashMap<NodeId, LocationConfigListResponse> {
    1105            0 :         let nodes = {
    1106            0 :             let locked = self.inner.read().unwrap();
    1107            0 :             locked.nodes.clone()
    1108              :         };
    1109              : 
    1110            0 :         let mut node_results = HashMap::new();
    1111              : 
    1112            0 :         let mut node_list_futs = FuturesUnordered::new();
    1113              : 
    1114            0 :         tracing::info!("Scanning shards on {} nodes...", nodes.len());
    1115            0 :         for node in nodes.values() {
    1116            0 :             node_list_futs.push({
    1117            0 :                 async move {
    1118            0 :                     tracing::info!("Scanning shards on node {node}...");
    1119            0 :                     let timeout = Duration::from_secs(5);
    1120            0 :                     let response = node
    1121            0 :                         .with_client_retries(
    1122            0 :                             |client| async move { client.list_location_config().await },
    1123            0 :                             &self.http_client,
    1124            0 :                             &self.config.pageserver_jwt_token,
    1125              :                             1,
    1126              :                             5,
    1127            0 :                             timeout,
    1128            0 :                             &self.cancel,
    1129              :                         )
    1130            0 :                         .await;
    1131            0 :                     (node.get_id(), response)
    1132            0 :                 }
    1133              :             });
    1134              :         }
    1135              : 
    1136              :         loop {
    1137            0 :             let (node_id, result) = tokio::select! {
    1138            0 :                 next = node_list_futs.next() => {
    1139            0 :                     match next {
    1140            0 :                         Some(result) => result,
    1141              :                         None =>{
    1142              :                             // We got results for all our nodes
    1143            0 :                             break;
    1144              :                         }
    1145              : 
    1146              :                     }
    1147              :                 },
    1148            0 :                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
    1149              :                     // Give up waiting for anyone who hasn't responded: we will yield the results that we have
    1150            0 :                     tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
    1151            0 :                     break;
    1152              :                 }
    1153              :             };
    1154              : 
    1155            0 :             let Some(list_response) = result else {
    1156            0 :                 tracing::info!("Shutdown during startup_reconcile");
    1157            0 :                 break;
    1158              :             };
    1159              : 
    1160            0 :             match list_response {
    1161            0 :                 Err(e) => {
    1162            0 :                     tracing::warn!("Could not scan node {} ({e})", node_id);
    1163              :                 }
    1164            0 :                 Ok(listing) => {
    1165            0 :                     node_results.insert(node_id, listing);
    1166            0 :                 }
    1167              :             }
    1168              :         }
    1169              : 
    1170            0 :         node_results
    1171            0 :     }
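
A minimal sketch of the deadline-bounded fan-out used above, assuming the tokio and futures crates; node ids and delays are illustrative.

    use std::time::{Duration, Instant};
    use futures::stream::{FuturesUnordered, StreamExt};

    #[tokio::main]
    async fn main() {
        let deadline = Instant::now() + Duration::from_millis(100);

        let mut futs = FuturesUnordered::new();
        for node_id in 0u32..4 {
            futs.push(async move {
                // Stand-in for a per-node listing request; node 3 is "slow".
                let ms = if node_id == 3 { 500 } else { 10 };
                tokio::time::sleep(Duration::from_millis(ms)).await;
                node_id
            });
        }

        let mut responded = Vec::new();
        loop {
            tokio::select! {
                next = futs.next() => match next {
                    Some(node_id) => responded.push(node_id),
                    None => break, // every node answered
                },
                _ = tokio::time::sleep_until(tokio::time::Instant::from_std(deadline)) => {
                    // Yield whatever we have; stragglers are dropped.
                    break;
                }
            }
        }
        println!("responded before the deadline: {responded:?}");
    }
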
    1172              : 
    1173            0 :     async fn build_global_observed_state(&self, deadline: Instant) -> GlobalObservedState {
    1174            0 :         let node_listings = self.scan_node_locations(deadline).await;
    1175            0 :         let mut observed = GlobalObservedState::default();
    1176              : 
    1177            0 :         for (node_id, location_confs) in node_listings {
    1178            0 :             tracing::info!(
    1179            0 :                 "Received {} shard statuses from pageserver {}",
    1180            0 :                 location_confs.tenant_shards.len(),
    1181              :                 node_id
    1182              :             );
    1183              : 
    1184            0 :             for (tid, location_conf) in location_confs.tenant_shards {
    1185            0 :                 let entry = observed.0.entry(tid).or_default();
    1186            0 :                 entry.locations.insert(
    1187            0 :                     node_id,
    1188            0 :                     ObservedStateLocation {
    1189            0 :                         conf: location_conf,
    1190            0 :                     },
    1191            0 :                 );
    1192            0 :             }
    1193              :         }
    1194              : 
    1195            0 :         observed
    1196            0 :     }
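
A minimal sketch (with simplified types) of how the per-node listings fold into one global map keyed by shard, as above; entry().or_default() creates the per-shard record the first time any node reports it.

    use std::collections::HashMap;

    fn main() {
        // (node id, shard ids that node reported)
        let listings = [(1u64, vec!["shard-a", "shard-b"]), (2u64, vec!["shard-a"])];

        // shard id -> nodes that reported a location for it
        let mut observed: HashMap<&str, Vec<u64>> = HashMap::new();
        for (node_id, shards) in listings {
            for shard in shards {
                observed.entry(shard).or_default().push(node_id);
            }
        }
        // "shard-a" was observed on both nodes, "shard-b" only on node 1.
        println!("{observed:?}");
    }
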
    1197              : 
    1198              :     /// Used during [`Self::startup_reconcile`] and shard splits: detach a list of unknown-to-us
    1199              :     /// tenants from pageservers.
    1200              :     ///
    1201              :     /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
     1202              :     /// tenants, then it is probably something that was incompletely deleted earlier: we will not fight with any
    1203              :     /// other task trying to attach it.
    1204              :     #[instrument(skip_all)]
    1205              :     async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
    1206              :         let nodes = self.inner.read().unwrap().nodes.clone();
    1207              : 
    1208              :         for (tenant_shard_id, node_id) in cleanup {
    1209              :             // A node reported a tenant_shard_id which is unknown to us: detach it.
    1210              :             let Some(node) = nodes.get(&node_id) else {
    1211              :                 // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
    1212              :                 // a location to clean up on a node that has since been removed.
    1213              :                 tracing::info!(
    1214              :                     "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
    1215              :                 );
    1216              :                 continue;
    1217              :             };
    1218              : 
    1219              :             if self.cancel.is_cancelled() {
    1220              :                 break;
    1221              :             }
    1222              : 
    1223              :             let client = PageserverClient::new(
    1224              :                 node.get_id(),
    1225              :                 self.http_client.clone(),
    1226              :                 node.base_url(),
    1227              :                 self.config.pageserver_jwt_token.as_deref(),
    1228              :             );
    1229              :             match client
    1230              :                 .location_config(
    1231              :                     tenant_shard_id,
    1232              :                     LocationConfig {
    1233              :                         mode: LocationConfigMode::Detached,
    1234              :                         generation: None,
    1235              :                         secondary_conf: None,
    1236              :                         shard_number: tenant_shard_id.shard_number.0,
    1237              :                         shard_count: tenant_shard_id.shard_count.literal(),
    1238              :                         shard_stripe_size: 0,
    1239              :                         tenant_conf: models::TenantConfig::default(),
    1240              :                     },
    1241              :                     None,
    1242              :                     false,
    1243              :                 )
    1244              :                 .await
    1245              :             {
    1246              :                 Ok(()) => {
    1247              :                     tracing::info!(
    1248              :                         "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
    1249              :                     );
    1250              :                 }
    1251              :                 Err(e) => {
    1252              :                     // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
    1253              :                     // break anything.
    1254              :                     tracing::error!(
    1255              :                         "Failed to detach unknown shard {tenant_shard_id} on pageserver {node_id}: {e}"
    1256              :                     );
    1257              :                 }
    1258              :             }
    1259              :         }
    1260              :     }
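
A minimal sketch of the best-effort sweep shape above, assuming the tokio and tokio-util crates; the detach stub is illustrative.

    use tokio_util::sync::CancellationToken;

    async fn detach(location: u32) -> Result<(), String> {
        if location == 2 { Err("node unreachable".into()) } else { Ok(()) }
    }

    #[tokio::main]
    async fn main() {
        let cancel = CancellationToken::new();
        for location in 0u32..4 {
            // Check for shutdown between items, not just at the start.
            if cancel.is_cancelled() {
                break;
            }
            match detach(location).await {
                Ok(()) => println!("detached {location}"),
                // Per-item failures are logged and skipped: leaving one
                // location behind is non-fatal; aborting the sweep is worse.
                Err(e) => eprintln!("failed to detach {location}: {e}"),
            }
        }
    }
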
    1261              : 
    1262              :     /// Long running background task that periodically wakes up and looks for shards that need
    1263              :     /// reconciliation.  Reconciliation is fallible, so any reconciliation tasks that fail during
    1264              :     /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
    1265              :     /// for those retries.
    1266              :     #[instrument(skip_all)]
    1267              :     async fn background_reconcile(self: &Arc<Self>) {
    1268              :         self.startup_complete.clone().wait().await;
    1269              : 
    1270              :         const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
    1271              :         let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
    1272              :         while !self.reconcilers_cancel.is_cancelled() {
    1273              :             tokio::select! {
    1274              :               _ = interval.tick() => {
    1275              :                 let reconcile_all_result = self.reconcile_all();
    1276              :                 if reconcile_all_result.can_run_optimizations() {
    1277              :                     // Run optimizer only when we didn't find any other work to do
    1278              :                     self.optimize_all().await;
    1279              :                 }
    1280              :                 // Always attempt autosplits. Sharding is crucial for bulk ingest performance, so we
    1281              :                 // must be responsive when new projects begin ingesting and reach the threshold.
    1282              :                 self.autosplit_tenants().await;
    1283              :               },
    1284              :               _ = self.reconcilers_cancel.cancelled() => return
    1285              :             }
    1286              :         }
    1287              :     }
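
A minimal sketch of the loop shape above, assuming the tokio and tokio-util crates; the period and the work done per tick are illustrative.

    use std::time::Duration;
    use tokio_util::sync::CancellationToken;

    async fn background_loop(cancel: CancellationToken) {
        let mut interval = tokio::time::interval(Duration::from_millis(20));
        while !cancel.is_cancelled() {
            tokio::select! {
                _ = interval.tick() => {
                    println!("periodic reconcile pass");
                }
                // Racing the tick against the token makes shutdown prompt
                // even mid-sleep, instead of waiting out the current period.
                _ = cancel.cancelled() => return,
            }
        }
    }

    #[tokio::main]
    async fn main() {
        let cancel = CancellationToken::new();
        let task = tokio::spawn(background_loop(cancel.clone()));
        tokio::time::sleep(Duration::from_millis(65)).await;
        cancel.cancel();
        task.await.unwrap();
    }
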
     1288              :     /// Heartbeat all storage nodes at a fixed interval.
    1289              :     #[instrument(skip_all)]
    1290              :     async fn spawn_heartbeat_driver(self: &Arc<Self>) {
    1291              :         self.startup_complete.clone().wait().await;
    1292              : 
    1293              :         let mut interval = tokio::time::interval(self.config.heartbeat_interval);
    1294              :         while !self.cancel.is_cancelled() {
    1295              :             tokio::select! {
    1296              :               _ = interval.tick() => { }
    1297              :               _ = self.cancel.cancelled() => return
    1298              :             };
    1299              : 
    1300              :             let nodes = {
    1301              :                 let locked = self.inner.read().unwrap();
    1302              :                 locked.nodes.clone()
    1303              :             };
    1304              : 
    1305              :             let safekeepers = {
    1306              :                 let locked = self.inner.read().unwrap();
    1307              :                 locked.safekeepers.clone()
    1308              :             };
    1309              : 
    1310              :             let (res_ps, res_sk) = tokio::join!(
    1311              :                 self.heartbeater_ps.heartbeat(nodes),
    1312              :                 self.heartbeater_sk.heartbeat(safekeepers)
    1313              :             );
    1314              : 
    1315              :             if let Ok(deltas) = res_ps {
    1316              :                 let mut to_handle = Vec::default();
    1317              : 
    1318              :                 for (node_id, state) in deltas.0 {
    1319              :                     let new_availability = match state {
    1320              :                         PageserverState::Available { utilization, .. } => {
    1321              :                             NodeAvailability::Active(utilization)
    1322              :                         }
    1323              :                         PageserverState::WarmingUp { started_at } => {
    1324              :                             NodeAvailability::WarmingUp(started_at)
    1325              :                         }
    1326              :                         PageserverState::Offline => {
    1327              :                             // The node might have been placed in the WarmingUp state
    1328              :                             // while the heartbeat round was on-going. Hence, filter out
    1329              :                             // offline transitions for WarmingUp nodes that are still within
    1330              :                             // their grace period.
    1331              :                             if let Ok(NodeAvailability::WarmingUp(started_at)) = self
    1332              :                                 .get_node(node_id)
    1333              :                                 .await
    1334              :                                 .as_ref()
    1335            0 :                                 .map(|n| n.get_availability())
    1336              :                             {
    1337              :                                 let now = Instant::now();
    1338              :                                 if now - *started_at >= self.config.max_warming_up_interval {
    1339              :                                     NodeAvailability::Offline
    1340              :                                 } else {
    1341              :                                     NodeAvailability::WarmingUp(*started_at)
    1342              :                                 }
    1343              :                             } else {
    1344              :                                 NodeAvailability::Offline
    1345              :                             }
    1346              :                         }
    1347              :                     };
    1348              : 
    1349              :                     let node_lock = trace_exclusive_lock(
    1350              :                         &self.node_op_locks,
    1351              :                         node_id,
    1352              :                         NodeOperations::Configure,
    1353              :                     )
    1354              :                     .await;
    1355              : 
    1356              :                     pausable_failpoint!("heartbeat-pre-node-state-configure");
    1357              : 
     1358              :                     // This is the code path for genuine availability transitions (i.e. a node
    1359              :                     // goes unavailable and/or comes back online).
    1360              :                     let res = self
    1361              :                         .node_state_configure(node_id, Some(new_availability), None, &node_lock)
    1362              :                         .await;
    1363              : 
    1364              :                     match res {
    1365              :                         Ok(transition) => {
    1366              :                             // Keep hold of the lock until the availability transitions
    1367              :                             // have been handled in
     1368              :                     // [`Service::handle_node_availability_transitions`] in order to avoid
    1369              :                             // racing with [`Service::external_node_configure`].
    1370              :                             to_handle.push((node_id, node_lock, transition));
    1371              :                         }
    1372              :                         Err(ApiError::NotFound(_)) => {
    1373              :                             // This should be rare, but legitimate since the heartbeats are done
    1374              :                             // on a snapshot of the nodes.
    1375              :                             tracing::info!("Node {} was not found after heartbeat round", node_id);
    1376              :                         }
    1377              :                         Err(ApiError::ShuttingDown) => {
    1378              :                             // No-op: we're shutting down, no need to try and update any nodes' statuses
    1379              :                         }
    1380              :                         Err(err) => {
    1381              :                             // Transition to active involves reconciling: if a node responds to a heartbeat then
    1382              :                             // becomes unavailable again, we may get an error here.
    1383              :                             tracing::error!(
    1384              :                                 "Failed to update node state {} after heartbeat round: {}",
    1385              :                                 node_id,
    1386              :                                 err
    1387              :                             );
    1388              :                         }
    1389              :                     }
    1390              :                 }
    1391              : 
    1392              :                 // We collected all the transitions above and now we handle them.
    1393              :                 let res = self.handle_node_availability_transitions(to_handle).await;
    1394              :                 if let Err(errs) = res {
    1395              :                     for (node_id, err) in errs {
    1396              :                         match err {
    1397              :                             ApiError::NotFound(_) => {
    1398              :                                 // This should be rare, but legitimate since the heartbeats are done
    1399              :                                 // on a snapshot of the nodes.
    1400              :                                 tracing::info!(
    1401              :                                     "Node {} was not found after heartbeat round",
    1402              :                                     node_id
    1403              :                                 );
    1404              :                             }
    1405              :                             err => {
    1406              :                                 tracing::error!(
    1407              :                                     "Failed to handle availability transition for {} after heartbeat round: {}",
    1408              :                                     node_id,
    1409              :                                     err
    1410              :                                 );
    1411              :                             }
    1412              :                         }
    1413              :                     }
    1414              :                 }
    1415              :             }
    1416              :             if let Ok(deltas) = res_sk {
    1417              :                 let mut to_activate = Vec::new();
    1418              :                 {
    1419              :                     let mut locked = self.inner.write().unwrap();
    1420              :                     let mut safekeepers = (*locked.safekeepers).clone();
    1421              : 
    1422              :                     for (id, state) in deltas.0 {
    1423              :                         let Some(sk) = safekeepers.get_mut(&id) else {
    1424              :                             tracing::info!(
     1425              :                                 "Couldn't update safekeeper state for id {id} from heartbeat={state:?}"
    1426              :                             );
    1427              :                             continue;
    1428              :                         };
    1429              :                         if sk.scheduling_policy() == SkSchedulingPolicy::Activating
    1430              :                             && let SafekeeperState::Available { .. } = state
    1431              :                         {
    1432              :                             to_activate.push(id);
    1433              :                         }
    1434              :                         sk.set_availability(state);
    1435              :                     }
    1436              :                     locked.safekeepers = Arc::new(safekeepers);
    1437              :                 }
    1438              :                 for sk_id in to_activate {
     1439              :                     // TODO this can race with set_scheduling_policy (can leave the DB and in-memory state inconsistent)
    1440              :                     tracing::info!("Activating safekeeper {sk_id}");
    1441              :                     match self.persistence.activate_safekeeper(sk_id.0 as i64).await {
    1442              :                         Ok(Some(())) => {}
    1443              :                         Ok(None) => {
    1444              :                             tracing::info!(
     1445              :                                 "safekeeper {sk_id} has been removed from the db, or has a scheduling policy other than active or activating"
    1446              :                             );
    1447              :                         }
    1448              :                         Err(e) => {
    1449              :                             tracing::warn!("couldn't apply activation of {sk_id} to db: {e}");
    1450              :                             continue;
    1451              :                         }
    1452              :                     }
    1453              :                     if let Err(e) = self
    1454              :                         .set_safekeeper_scheduling_policy_in_mem(sk_id, SkSchedulingPolicy::Active)
    1455              :                         .await
    1456              :                     {
    1457              :                         tracing::info!("couldn't activate safekeeper {sk_id} in memory: {e}");
    1458              :                         continue;
    1459              :                     }
    1460              :                     tracing::info!("Activation of safekeeper {sk_id} done");
    1461              :                 }
    1462              :             }
    1463              :         }
    1464              :     }
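
A minimal sketch (with a simplified enum) of the WarmingUp grace-period rule above: a missed heartbeat demotes a warming-up node to Offline only once it has been warming up longer than the configured grace interval.

    use std::time::{Duration, Instant};

    #[derive(Debug)]
    enum Availability {
        WarmingUp(Instant),
        Offline,
    }

    fn on_missed_heartbeat(current: Availability, max_warming_up: Duration) -> Availability {
        match current {
            Availability::WarmingUp(started_at) if started_at.elapsed() < max_warming_up => {
                // Still inside the grace period: suppress the Offline transition.
                Availability::WarmingUp(started_at)
            }
            _ => Availability::Offline,
        }
    }

    fn main() {
        let fresh = Availability::WarmingUp(Instant::now());
        // Stays WarmingUp: the node only just started warming up.
        println!("{:?}", on_missed_heartbeat(fresh, Duration::from_secs(300)));
    }
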
    1465              : 
    1466              :     /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
    1467              :     /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
    1468              :     /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
    1469              :     /// will indicate that reconciliation is not needed.
    1470              :     #[instrument(skip_all, fields(
    1471              :         seq=%result.sequence,
    1472              :         tenant_id=%result.tenant_shard_id.tenant_id,
    1473              :         shard_id=%result.tenant_shard_id.shard_slug(),
    1474              :     ))]
    1475              :     fn process_result(&self, result: ReconcileResult) {
    1476              :         let mut locked = self.inner.write().unwrap();
    1477              :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    1478              :         let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
    1479              :             // A reconciliation result might race with removing a tenant: drop results for
    1480              :             // tenants that aren't in our map.
    1481              :             return;
    1482              :         };
    1483              : 
    1484              :         // Usually generation should only be updated via this path, so the max() isn't
     1485              :         // needed, but it is used to handle out-of-band updates, e.g. via a test hook.
    1486              :         tenant.generation = std::cmp::max(tenant.generation, result.generation);
    1487              : 
    1488              :         // If the reconciler signals that it failed to notify compute, set this state on
    1489              :         // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
    1490              :         tenant.pending_compute_notification = result.pending_compute_notification;
    1491              : 
    1492              :         // Let the TenantShard know it is idle.
    1493              :         tenant.reconcile_complete(result.sequence);
    1494              : 
    1495              :         // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
    1496              :         // make to the tenant
    1497            0 :         let deltas = result.observed_deltas.into_iter().flat_map(|delta| {
    1500            0 :             let node = nodes.get(delta.node_id())?;
    1501              : 
    1502            0 :             if node.is_available() {
    1503            0 :                 return Some(delta);
    1504            0 :             }
    1505              : 
    1506              :             // In case a node became unavailable concurrently with the reconcile, observed
    1507              :             // locations on it are now uncertain. By convention, set them to None in order
    1508              :             // for them to get refreshed when the node comes back online.
    1509            0 :             Some(ObservedStateDelta::Upsert(Box::new((
    1510            0 :                 node.get_id(),
    1511            0 :                 ObservedStateLocation { conf: None },
    1512            0 :             ))))
    1513            0 :         });
    1514              : 
    1515              :         match result.result {
    1516              :             Ok(()) => {
    1517              :                 tenant.apply_observed_deltas(deltas);
    1518              :                 tenant.waiter.advance(result.sequence);
    1519              :             }
    1520              :             Err(e) => {
    1521              :                 match e {
    1522              :                     ReconcileError::Cancel => {
    1523              :                         tracing::info!("Reconciler was cancelled");
    1524              :                     }
    1525              :                     ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    1526              :                         // This might be due to the reconciler getting cancelled, or it might
    1527              :                         // be due to the `Node` being marked offline.
    1528              :                         tracing::info!("Reconciler cancelled during pageserver API call");
    1529              :                     }
    1530              :                     _ => {
    1531              :                         tracing::warn!("Reconcile error: {}", e);
    1532              :                     }
    1533              :                 }
    1534              : 
    1535              :                 // Ordering: populate last_error before advancing error_seq,
    1536              :                 // so that waiters will see the correct error after waiting.
    1537              :                 tenant.set_last_error(result.sequence, e);
    1538              : 
    1539              :                 // If the reconciliation failed, don't clear the observed state for places where we
    1540              :                 // detached. Instead, mark the observed state as uncertain.
    1541            0 :                 let failed_reconcile_deltas = deltas.map(|delta| {
    1542            0 :                     if let ObservedStateDelta::Delete(node_id) = delta {
    1543            0 :                         ObservedStateDelta::Upsert(Box::new((
    1544            0 :                             node_id,
    1545            0 :                             ObservedStateLocation { conf: None },
    1546            0 :                         )))
    1547              :                     } else {
    1548            0 :                         delta
    1549              :                     }
    1550            0 :                 });
    1551              :                 tenant.apply_observed_deltas(failed_reconcile_deltas);
    1552              :             }
    1553              :         }
    1554              : 
    1555              :         tenant.consecutive_reconciles_count = tenant.consecutive_reconciles_count.saturating_add(1);
    1556              : 
    1557              :         // If we just finished detaching all shards for a tenant, it might be time to drop it from memory.
    1558              :         if tenant.policy == PlacementPolicy::Detached {
    1559              :             // We may only drop a tenant from memory while holding the exclusive lock on the tenant ID: this protects us
    1560              :             // from concurrent execution wrt a request handler that might expect the tenant to remain in memory for the
    1561              :             // duration of the request.
    1562              :             let guard = self.tenant_op_locks.try_exclusive(
    1563              :                 tenant.tenant_shard_id.tenant_id,
    1564              :                 TenantOperations::DropDetached,
    1565              :             );
    1566              :             if let Some(guard) = guard {
    1567              :                 self.maybe_drop_tenant(tenant.tenant_shard_id.tenant_id, &mut locked, &guard);
    1568              :             }
    1569              :         }
    1570              : 
    1571              :         // Maybe some other work can proceed now that this job finished.
    1572              :         //
    1573              :         // Only bother with this if we have some semaphore units available in the normal-priority semaphore (these
    1574              :         // reconciles are scheduled at `[ReconcilerPriority::Normal]`).
    1575              :         if self.reconciler_concurrency.available_permits() > 0 {
    1576              :             while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
    1577              :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    1578              :                 if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
    1579              :                     shard.delayed_reconcile = false;
    1580              :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    1581              :                 }
    1582              : 
    1583              :                 if self.reconciler_concurrency.available_permits() == 0 {
    1584              :                     break;
    1585              :                 }
    1586              :             }
    1587              :         }
    1588              :     }
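
A minimal sketch (with simplified fields) of the bookkeeping above: the generation is kept monotonic with max(), and the applied sequence only advances on success, so a failed reconcile leaves waiters unreleased.

    #[derive(Default)]
    struct ShardState {
        generation: u32,
        last_applied_seq: u64,
    }

    fn process_result(shard: &mut ShardState, seq: u64, generation: u32, ok: bool) {
        // max() tolerates out-of-band generation bumps (e.g. a test hook).
        shard.generation = shard.generation.max(generation);
        if ok {
            shard.last_applied_seq = shard.last_applied_seq.max(seq);
        }
    }

    fn main() {
        let mut shard = ShardState::default();
        process_result(&mut shard, 1, 5, true);
        process_result(&mut shard, 2, 4, false); // stale generation, failed reconcile
        assert_eq!(shard.generation, 5);
        assert_eq!(shard.last_applied_seq, 1);
    }
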
    1589              : 
    1590            0 :     async fn process_results(
    1591            0 :         &self,
    1592            0 :         mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
    1593            0 :         mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
    1594            0 :             Result<(), (TenantShardId, NotifyError)>,
    1595            0 :         >,
    1596            0 :     ) {
    1597              :         loop {
    1598              :             // Wait for the next result, or for cancellation
    1599            0 :             tokio::select! {
    1600            0 :                 r = result_rx.recv() => {
    1601            0 :                     match r {
    1602            0 :                         Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
    1603            0 :                         None | Some(ReconcileResultRequest::Stop) => {break;}
    1604              :                     }
    1605              :                 }
    1606            0 :                 _ = async{
    1607            0 :                     match bg_compute_hook_result_rx.recv().await {
    1608            0 :                         Some(result) => {
    1609            0 :                             if let Err((tenant_shard_id, notify_error)) = result {
    1610            0 :                                 tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
    1611            0 :                                 let mut locked = self.inner.write().unwrap();
    1612            0 :                                 if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
    1613            0 :                                     shard.pending_compute_notification = true;
    1614            0 :                                 }
    1615              : 
    1616            0 :                             }
    1617              :                         },
    1618              :                         None => {
     1619              :                             // This channel is dead, but we don't want to terminate the outer loop: just wait for shutdown
    1620            0 :                             self.cancel.cancelled().await;
    1621              :                         }
    1622              :                     }
    1623            0 :                 } => {},
    1624            0 :                 _ = self.cancel.cancelled() => {
    1625            0 :                     break;
    1626              :                 }
    1627              :             };
    1628              :         }
    1629            0 :     }
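
A minimal sketch of the dead-channel trick above, assuming the tokio and tokio-util crates: when the background receiver closes, that select! arm parks on cancelled() instead of breaking, so the loop keeps draining the primary channel until shutdown.

    use tokio_util::sync::CancellationToken;

    #[tokio::main]
    async fn main() {
        let cancel = CancellationToken::new();
        let (tx_main, mut rx_main) = tokio::sync::mpsc::unbounded_channel::<u32>();
        let (tx_bg, mut rx_bg) = tokio::sync::mpsc::channel::<u32>(8);

        tx_main.send(1).unwrap();
        tx_bg.send(2).await.unwrap();
        drop(tx_bg); // background channel dies; the loop must keep serving rx_main

        let c = cancel.clone();
        tokio::spawn(async move {
            tokio::time::sleep(std::time::Duration::from_millis(20)).await;
            c.cancel();
        });

        loop {
            tokio::select! {
                biased;
                _ = cancel.cancelled() => break,
                Some(v) = rx_main.recv() => println!("primary result: {v}"),
                _ = async {
                    match rx_bg.recv().await {
                        Some(v) => println!("background result: {v}"),
                        // Dead channel: wait for shutdown rather than exiting.
                        None => cancel.cancelled().await,
                    }
                } => {}
            }
        }
    }
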
    1630              : 
    1631            0 :     async fn process_aborts(
    1632            0 :         &self,
    1633            0 :         mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
    1634            0 :     ) {
    1635              :         loop {
    1636              :             // Wait for the next result, or for cancellation
    1637            0 :             let op = tokio::select! {
    1638            0 :                 r = abort_rx.recv() => {
    1639            0 :                     match r {
    1640            0 :                         Some(op) => {op},
    1641            0 :                         None => {break;}
    1642              :                     }
    1643              :                 }
    1644            0 :                 _ = self.cancel.cancelled() => {
    1645            0 :                     break;
    1646              :                 }
    1647              :             };
    1648              : 
    1649              :             // Retry until shutdown: we must keep this request object alive until it is properly
    1650              :             // processed, as it holds a lock guard that prevents other operations trying to do things
    1651              :             // to the tenant while it is in a weird part-split state.
    1652            0 :             while !self.reconcilers_cancel.is_cancelled() {
    1653            0 :                 match self.abort_tenant_shard_split(&op).await {
    1654            0 :                     Ok(_) => break,
    1655            0 :                     Err(e) => {
    1656            0 :                         tracing::warn!(
    1657            0 :                             "Failed to abort shard split on {}, will retry: {e}",
    1658              :                             op.tenant_id
    1659              :                         );
    1660              : 
    1661              :                         // If a node is unavailable, we hope that it has been properly marked Offline
    1662              :                         // when we retry, so that the abort op will succeed.  If the abort op is failing
    1663              :                         // for some other reason, we will keep retrying forever, or until a human notices
    1664              :                         // and does something about it (either fixing a pageserver or restarting the controller).
    1665            0 :                         tokio::time::timeout(
    1666            0 :                             Duration::from_secs(5),
    1667            0 :                             self.reconcilers_cancel.cancelled(),
    1668            0 :                         )
    1669            0 :                         .await
    1670            0 :                         .ok();
    1671              :                     }
    1672              :                 }
    1673              :             }
    1674              :         }
    1675            0 :     }
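
A minimal sketch of the retry loop above, assuming the tokio and tokio-util crates: the timeout wrapped around cancelled() acts as a cancellable backoff, so the wait between attempts ends early if shutdown is requested.

    use std::time::Duration;
    use tokio_util::sync::CancellationToken;

    async fn try_abort(attempt: u32) -> Result<(), &'static str> {
        // Stand-in for abort_tenant_shard_split: fails twice, then succeeds.
        if attempt < 2 { Err("node unavailable") } else { Ok(()) }
    }

    #[tokio::main]
    async fn main() {
        let cancel = CancellationToken::new();
        let mut attempt = 0;
        while !cancel.is_cancelled() {
            match try_abort(attempt).await {
                Ok(()) => break,
                Err(e) => {
                    eprintln!("abort failed ({e}), will retry");
                    attempt += 1;
                    // Elapsed timeout => retry; token fired => loop exits above.
                    let _ = tokio::time::timeout(Duration::from_millis(10), cancel.cancelled()).await;
                }
            }
        }
        println!("finished after {attempt} failed attempt(s)");
    }
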
    1676              : 
    1677            0 :     pub async fn spawn(
    1678            0 :         config: Config,
    1679            0 :         persistence: Arc<Persistence>,
    1680            0 :         token_generator: Option<HadronTokenGenerator>,
    1681            0 :     ) -> anyhow::Result<Arc<Self>> {
    1682            0 :         let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
    1683            0 :         let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
    1684              : 
    1685            0 :         let leadership_cancel = CancellationToken::new();
    1686            0 :         let leadership = Leadership::new(persistence.clone(), config.clone(), leadership_cancel);
    1687            0 :         let (leader, leader_step_down_state) = leadership.step_down_current_leader().await?;
    1688              : 
    1689              :         // Apply the migrations **after** the current leader has stepped down
    1690              :         // (or we've given up waiting for it), but **before** reading from the
    1691              :         // database. The only exception is reading the current leader before
    1692              :         // migrating.
    1693            0 :         persistence.migration_run().await?;
    1694              : 
    1695            0 :         tracing::info!("Loading nodes from database...");
    1696            0 :         let nodes = persistence
    1697            0 :             .list_nodes()
    1698            0 :             .await?
    1699            0 :             .into_iter()
    1700            0 :             .map(|x| Node::from_persistent(x, config.use_https_pageserver_api))
    1701            0 :             .collect::<anyhow::Result<Vec<Node>>>()?;
    1702            0 :         let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
    1703            0 :         tracing::info!("Loaded {} nodes from database.", nodes.len());
    1704            0 :         metrics::METRICS_REGISTRY
    1705            0 :             .metrics_group
    1706            0 :             .storage_controller_pageserver_nodes
    1707            0 :             .set(nodes.len() as i64);
    1708            0 :         metrics::METRICS_REGISTRY
    1709            0 :             .metrics_group
    1710            0 :             .storage_controller_https_pageserver_nodes
    1711            0 :             .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    1712              : 
    1713            0 :         tracing::info!("Loading safekeepers from database...");
    1714            0 :         let safekeepers = persistence
    1715            0 :             .list_safekeepers()
    1716            0 :             .await?
    1717            0 :             .into_iter()
    1718            0 :             .map(|skp| {
    1719            0 :                 Safekeeper::from_persistence(
    1720            0 :                     skp,
    1721            0 :                     CancellationToken::new(),
    1722            0 :                     config.use_https_safekeeper_api,
    1723              :                 )
    1724            0 :             })
    1725            0 :             .collect::<anyhow::Result<Vec<_>>>()?;
    1726            0 :         let safekeepers: HashMap<NodeId, Safekeeper> =
    1727            0 :             safekeepers.into_iter().map(|n| (n.get_id(), n)).collect();
    1728            0 :         let count_policy = |policy| {
    1729            0 :             safekeepers
    1730            0 :                 .iter()
    1731            0 :                 .filter(|sk| sk.1.scheduling_policy() == policy)
    1732            0 :                 .count()
    1733            0 :         };
    1734            0 :         let active_sk_count = count_policy(SkSchedulingPolicy::Active);
    1735            0 :         let activating_sk_count = count_policy(SkSchedulingPolicy::Activating);
    1736            0 :         let pause_sk_count = count_policy(SkSchedulingPolicy::Pause);
    1737            0 :         let decom_sk_count = count_policy(SkSchedulingPolicy::Decomissioned);
    1738            0 :         tracing::info!(
    1739            0 :             "Loaded {} safekeepers from database. Active {active_sk_count}, activating {activating_sk_count}, \
     1740            0 :             paused {pause_sk_count}, decommissioned {decom_sk_count}.",
    1741            0 :             safekeepers.len()
    1742              :         );
    1743            0 :         metrics::METRICS_REGISTRY
    1744            0 :             .metrics_group
    1745            0 :             .storage_controller_safekeeper_nodes
    1746            0 :             .set(safekeepers.len() as i64);
    1747            0 :         metrics::METRICS_REGISTRY
    1748            0 :             .metrics_group
    1749            0 :             .storage_controller_https_safekeeper_nodes
    1750            0 :             .set(safekeepers.values().filter(|s| s.has_https_port()).count() as i64);
    1751              : 
    1752            0 :         tracing::info!("Loading shards from database...");
    1753            0 :         let mut tenant_shard_persistence = persistence.load_active_tenant_shards().await?;
    1754            0 :         tracing::info!(
    1755            0 :             "Loaded {} shards from database.",
    1756            0 :             tenant_shard_persistence.len()
    1757              :         );
    1758              : 
    1759              :         // If any shard splits were in progress, reset the database state to abort them
    1760            0 :         let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
    1761            0 :             HashMap::new();
    1762            0 :         for tsp in &mut tenant_shard_persistence {
    1763            0 :             let shard = tsp.get_shard_identity()?;
    1764            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1765            0 :             let entry = tenant_shard_count_min_max
    1766            0 :                 .entry(tenant_shard_id.tenant_id)
    1767            0 :                 .or_insert_with(|| (shard.count, shard.count));
    1768            0 :             entry.0 = std::cmp::min(entry.0, shard.count);
    1769            0 :             entry.1 = std::cmp::max(entry.1, shard.count);
    1770              :         }
    1771              : 
    1772            0 :         for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
    1773            0 :             if count_min != count_max {
    1774              :                 // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
    1775              :                 // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
     1776              :                 // be dropped later in [`Self::node_activate_reconcile`] if the node isn't available right now.
    1777            0 :                 tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
    1778            0 :                 let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
    1779              : 
    1780              :                 // We may never see the Complete status here: if the split was complete, we wouldn't have
     1781              :                 // identified this tenant as having mismatching min/max counts.
    1782            0 :                 assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
    1783              : 
    1784              :                 // Clear the splitting status in-memory, to reflect that we just aborted in the database
    1785            0 :                 tenant_shard_persistence.iter_mut().for_each(|tsp| {
    1786              :                     // Set idle split state on those shards that we will retain.
    1787            0 :                     let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
    1788            0 :                     if tsp_tenant_id == tenant_id
    1789            0 :                         && tsp.get_shard_identity().unwrap().count == count_min
    1790            0 :                     {
    1791            0 :                         tsp.splitting = SplitState::Idle;
    1792            0 :                     } else if tsp_tenant_id == tenant_id {
    1793              :                         // Leave the splitting state on the child shards: this will be used next to
    1794              :                         // drop them.
    1795            0 :                         tracing::info!(
    1796            0 :                             "Shard {tsp_tenant_id} will be dropped after shard split abort",
    1797              :                         );
    1798            0 :                     }
    1799            0 :                 });
    1800              : 
    1801              :                 // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
    1802            0 :                 tenant_shard_persistence.retain(|tsp| {
    1803            0 :                     TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
    1804            0 :                         || tsp.splitting == SplitState::Idle
    1805            0 :                 });
    1806            0 :             }
    1807              :         }
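                       : 
                       :         // After this loop, any tenant that had an in-progress split has been
                       :         // rolled back: its retained parent shards are marked `SplitState::Idle`,
                       :         // and the split's child shards have been removed from
                       :         // `tenant_shard_persistence`.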
    1808              : 
    1809            0 :         let mut tenants = BTreeMap::new();
    1810              : 
    1811            0 :         let mut scheduler = Scheduler::new(nodes.values());
    1812              : 
    1813              :         #[cfg(feature = "testing")]
    1814              :         {
    1815              :             use pageserver_api::controller_api::AvailabilityZone;
    1816              : 
    1817              :             // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
    1818              :             // tests only store the shards, not the nodes.  The nodes will be loaded shortly
     1819              :             // afterwards, when pageservers start up and register.
    1820            0 :             let mut node_ids = HashSet::new();
    1821            0 :             for tsp in &tenant_shard_persistence {
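                       :                 // The host/port values below are placeholders: these test-only
                       :                 // nodes just need to exist in the scheduler and are never dialled.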
    1822            0 :                 if let Some(node_id) = tsp.generation_pageserver {
    1823            0 :                     node_ids.insert(node_id);
    1824            0 :                 }
    1825              :             }
    1826            0 :             for node_id in node_ids {
    1827            0 :                 tracing::info!("Creating node {} in scheduler for tests", node_id);
    1828            0 :                 let node = Node::new(
    1829            0 :                     NodeId(node_id as u64),
    1830            0 :                     "".to_string(),
    1831              :                     123,
    1832            0 :                     None,
    1833            0 :                     "".to_string(),
    1834              :                     123,
    1835            0 :                     None,
    1836            0 :                     None,
    1837            0 :                     AvailabilityZone("test_az".to_string()),
    1838              :                     false,
    1839              :                 )
    1840            0 :                 .unwrap();
    1841              : 
    1842            0 :                 scheduler.node_upsert(&node);
    1843              :             }
    1844              :         }
    1845            0 :         for tsp in tenant_shard_persistence {
    1846            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1847              : 
     1848              :             // We will populate intent properly later in [`Self::startup_reconcile`]; initially, populate
    1849              :             // it with what we can infer: the node for which a generation was most recently issued.
    1850            0 :             let mut intent = IntentState::new(
    1851            0 :                 tsp.preferred_az_id
    1852            0 :                     .as_ref()
    1853            0 :                     .map(|az| AvailabilityZone(az.clone())),
    1854              :             );
    1855            0 :             if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
    1856              :             {
    1857            0 :                 if nodes.contains_key(&generation_pageserver) {
    1858            0 :                     intent.set_attached(&mut scheduler, Some(generation_pageserver));
    1859            0 :                 } else {
    1860              :                     // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
    1861              :                     // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
    1862              :                     // on different pageservers.
    1863            0 :                     tracing::warn!(
    1864            0 :                         "Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled"
    1865              :                     );
    1866              :                 }
    1867            0 :             }
    1868            0 :             let new_tenant = TenantShard::from_persistent(tsp, intent)?;
    1869              : 
    1870            0 :             tenants.insert(tenant_shard_id, new_tenant);
    1871              :         }
    1872              : 
    1873            0 :         let (startup_completion, startup_complete) = utils::completion::channel();
    1874              : 
     1875              :         // This channel is continuously consumed by process_results, so it doesn't need to be very large.
    1876            0 :         let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
    1877            0 :             tokio::sync::mpsc::channel(512);
    1878              : 
    1879            0 :         let (delayed_reconcile_tx, delayed_reconcile_rx) =
    1880            0 :             tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
    1881              : 
    1882            0 :         let cancel = CancellationToken::new();
    1883            0 :         let reconcilers_cancel = cancel.child_token();
    1884              : 
    1885            0 :         let mut http_client = reqwest::Client::builder();
    1886              :         // We intentionally disable the connection pool, so every request will create its own TCP connection.
    1887              :         // It's especially important for heartbeaters to notice more network problems.
    1888              :         //
    1889              :         // TODO: It makes sense to use this client only in heartbeaters and create a second one with
    1890              :         // connection pooling for everything else. But reqwest::Client may create a connection without
    1891              :         // ever using it (it uses hyper's Client under the hood):
    1892              :         // https://github.com/hyperium/hyper-util/blob/d51318df3461d40e5f5e5ca163cb3905ac960209/src/client/legacy/client.rs#L415
    1893              :         //
     1894              :         // Because of a bug in hyper0::Connection::graceful_shutdown, such connections hang during
    1895              :         // graceful server shutdown: https://github.com/hyperium/hyper/issues/2730
    1896              :         //
     1897              :         // The bug has been fixed in hyper v1, so keep-alive may be enabled only after we migrate to hyper1.
    1898            0 :         http_client = http_client.pool_max_idle_per_host(0);
    1899            0 :         for ssl_ca_cert in &config.ssl_ca_certs {
    1900            0 :             http_client = http_client.add_root_certificate(ssl_ca_cert.clone());
    1901            0 :         }
    1902            0 :         let http_client = http_client.build()?;
    1903              : 
    1904            0 :         let heartbeater_ps = Heartbeater::new(
    1905            0 :             http_client.clone(),
    1906            0 :             config.pageserver_jwt_token.clone(),
    1907            0 :             config.max_offline_interval,
    1908            0 :             config.max_warming_up_interval,
    1909            0 :             cancel.clone(),
    1910              :         );
    1911              : 
    1912            0 :         let heartbeater_sk = Heartbeater::new(
    1913            0 :             http_client.clone(),
    1914            0 :             config.safekeeper_jwt_token.clone(),
    1915            0 :             config.max_offline_interval,
    1916            0 :             config.max_warming_up_interval,
    1917            0 :             cancel.clone(),
    1918              :         );
    1919              : 
    1920            0 :         let initial_leadership_status = if config.start_as_candidate {
    1921            0 :             LeadershipStatus::Candidate
    1922              :         } else {
    1923            0 :             LeadershipStatus::Leader
    1924              :         };
    1925              : 
    1926            0 :         let this = Arc::new(Self {
    1927            0 :             inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
    1928            0 :                 nodes,
    1929            0 :                 safekeepers,
    1930            0 :                 tenants,
    1931            0 :                 scheduler,
    1932            0 :                 delayed_reconcile_rx,
    1933            0 :                 initial_leadership_status,
    1934            0 :                 reconcilers_cancel.clone(),
    1935              :             ))),
    1936            0 :             config: config.clone(),
    1937            0 :             persistence,
    1938            0 :             token_generator,
    1939            0 :             compute_hook: Arc::new(ComputeHook::new(config.clone())?),
    1940            0 :             result_tx,
    1941            0 :             heartbeater_ps,
    1942            0 :             heartbeater_sk,
    1943            0 :             reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1944            0 :                 config.reconciler_concurrency,
    1945              :             )),
    1946            0 :             priority_reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1947            0 :                 config.priority_reconciler_concurrency,
    1948              :             )),
    1949            0 :             delayed_reconcile_tx,
    1950            0 :             abort_tx,
    1951            0 :             startup_complete: startup_complete.clone(),
    1952            0 :             cancel,
    1953            0 :             reconcilers_cancel,
    1954            0 :             gate: Gate::default(),
    1955            0 :             reconcilers_gate: Gate::default(),
    1956            0 :             tenant_op_locks: Default::default(),
    1957            0 :             node_op_locks: Default::default(),
    1958            0 :             http_client,
    1959            0 :             step_down_barrier: Default::default(),
    1960              :         });
    1961              : 
    1962            0 :         let result_task_this = this.clone();
    1963            0 :         tokio::task::spawn(async move {
    1964              :             // Block shutdown until we're done (we must respect self.cancel)
    1965            0 :             if let Ok(_gate) = result_task_this.gate.enter() {
    1966            0 :                 result_task_this
    1967            0 :                     .process_results(result_rx, bg_compute_notify_result_rx)
    1968            0 :                     .await
    1969            0 :             }
    1970            0 :         });
    1971              : 
    1972            0 :         tokio::task::spawn({
    1973            0 :             let this = this.clone();
    1974            0 :             async move {
    1975              :                 // Block shutdown until we're done (we must respect self.cancel)
    1976            0 :                 if let Ok(_gate) = this.gate.enter() {
    1977            0 :                     this.process_aborts(abort_rx).await
    1978            0 :                 }
    1979            0 :             }
    1980              :         });
    1981              : 
    1982            0 :         tokio::task::spawn({
    1983            0 :             let this = this.clone();
    1984            0 :             async move {
    1985            0 :                 if let Ok(_gate) = this.gate.enter() {
    1986              :                     loop {
    1987            0 :                         tokio::select! {
    1988            0 :                             _ = this.cancel.cancelled() => {
    1989            0 :                                 break;
    1990              :                             },
    1991            0 :                             _ = tokio::time::sleep(Duration::from_secs(60)) => {}
    1992              :                         };
    1993            0 :                         this.tenant_op_locks.housekeeping();
    1994              :                     }
    1995            0 :                 }
    1996            0 :             }
    1997              :         });
    1998              : 
    1999            0 :         tokio::task::spawn({
    2000            0 :             let this = this.clone();
    2001              :             // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
    2002              :             // is done.
    2003            0 :             let startup_completion = startup_completion.clone();
    2004            0 :             async move {
    2005              :                 // Block shutdown until we're done (we must respect self.cancel)
    2006            0 :                 let Ok(_gate) = this.gate.enter() else {
    2007            0 :                     return;
    2008              :                 };
    2009              : 
    2010            0 :                 this.startup_reconcile(leader, leader_step_down_state, bg_compute_notify_result_tx)
    2011            0 :                     .await;
    2012              : 
    2013            0 :                 drop(startup_completion);
    2014            0 :             }
    2015              :         });
    2016              : 
    2017            0 :         tokio::task::spawn({
    2018            0 :             let this = this.clone();
    2019            0 :             let startup_complete = startup_complete.clone();
    2020            0 :             async move {
    2021            0 :                 startup_complete.wait().await;
    2022            0 :                 this.background_reconcile().await;
    2023            0 :             }
    2024              :         });
    2025              : 
    2026            0 :         tokio::task::spawn({
    2027            0 :             let this = this.clone();
    2028            0 :             let startup_complete = startup_complete.clone();
    2029            0 :             async move {
    2030            0 :                 startup_complete.wait().await;
    2031            0 :                 this.spawn_heartbeat_driver().await;
    2032            0 :             }
    2033              :         });
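                       : 
                       :         // Note that both background_reconcile and the heartbeat driver wait on
                       :         // `startup_complete` above, so neither runs until startup_reconcile has
                       :         // finished.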
    2034              : 
     2035              :         // Check that there are enough safekeepers configured that we can create new timelines
    2036            0 :         let test_sk_res_str = match this.safekeepers_for_new_timeline().await {
    2037            0 :             Ok(v) => format!("Ok({v:?})"),
    2038            0 :             Err(v) => format!("Err({v:})"),
    2039              :         };
    2040            0 :         tracing::info!(
    2041              :             timeline_safekeeper_count = config.timeline_safekeeper_count,
    2042              :             timelines_onto_safekeepers = config.timelines_onto_safekeepers,
    2043            0 :             "viability test result (test timeline creation on safekeepers): {test_sk_res_str}",
    2044              :         );
    2045              : 
    2046            0 :         Ok(this)
    2047            0 :     }
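                       : 
                       :     // A minimal usage sketch for `spawn` (hypothetical caller: constructing
                       :     // `Config` and `Persistence` is assumed to happen in the binary's setup):
                       :     //
                       :     //     let service = Service::spawn(config, persistence, None).await?;
                       :     //     // Background tasks are now running; `service` can serve API requests
                       :     //     // once the startup_complete barrier is released.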
    2048              : 
    2049            0 :     pub(crate) async fn attach_hook(
    2050            0 :         &self,
    2051            0 :         attach_req: AttachHookRequest,
    2052            0 :     ) -> anyhow::Result<AttachHookResponse> {
    2053            0 :         let _tenant_lock = trace_exclusive_lock(
    2054            0 :             &self.tenant_op_locks,
    2055            0 :             attach_req.tenant_shard_id.tenant_id,
    2056            0 :             TenantOperations::AttachHook,
    2057            0 :         )
    2058            0 :         .await;
    2059              : 
    2060              :         // This is a test hook.  To enable using it on tenants that were created directly with
    2061              :         // the pageserver API (not via this service), we will auto-create any missing tenant
    2062              :         // shards with default state.
    2063            0 :         let insert = {
    2064            0 :             match self
    2065            0 :                 .maybe_load_tenant(attach_req.tenant_shard_id.tenant_id, &_tenant_lock)
    2066            0 :                 .await
    2067              :             {
    2068            0 :                 Ok(_) => false,
    2069            0 :                 Err(ApiError::NotFound(_)) => true,
    2070            0 :                 Err(e) => return Err(e.into()),
    2071              :             }
    2072              :         };
    2073              : 
    2074            0 :         if insert {
    2075            0 :             let config = attach_req.config.clone().unwrap_or_default();
    2076            0 :             let tsp = TenantShardPersistence {
    2077            0 :                 tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
    2078            0 :                 shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
    2079            0 :                 shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
    2080            0 :                 shard_stripe_size: 0,
    2081            0 :                 generation: attach_req.generation_override.or(Some(0)),
    2082            0 :                 generation_pageserver: None,
    2083            0 :                 placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
    2084            0 :                 config: serde_json::to_string(&config).unwrap(),
    2085            0 :                 splitting: SplitState::default(),
    2086            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2087            0 :                     .unwrap(),
    2088            0 :                 preferred_az_id: None,
    2089            0 :             };
    2090              : 
    2091            0 :             match self.persistence.insert_tenant_shards(vec![tsp]).await {
    2092            0 :                 Err(e) => match e {
    2093              :                     DatabaseError::Query(diesel::result::Error::DatabaseError(
    2094              :                         DatabaseErrorKind::UniqueViolation,
    2095              :                         _,
    2096              :                     )) => {
    2097            0 :                         tracing::info!(
    2098            0 :                             "Raced with another request to insert tenant {}",
    2099              :                             attach_req.tenant_shard_id
    2100              :                         )
    2101              :                     }
    2102            0 :                     _ => return Err(e.into()),
    2103              :                 },
    2104              :                 Ok(()) => {
    2105            0 :                     tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
    2106              : 
    2107            0 :                     let mut shard = TenantShard::new(
    2108            0 :                         attach_req.tenant_shard_id,
    2109            0 :                         ShardIdentity::unsharded(),
    2110            0 :                         PlacementPolicy::Attached(0),
    2111            0 :                         None,
    2112              :                     );
    2113            0 :                     shard.config = config;
    2114              : 
    2115            0 :                     let mut locked = self.inner.write().unwrap();
    2116            0 :                     locked.tenants.insert(attach_req.tenant_shard_id, shard);
    2117            0 :                     tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
    2118              :                 }
    2119              :             }
    2120            0 :         }
    2121              : 
    2122            0 :         let new_generation = if let Some(req_node_id) = attach_req.node_id {
    2123            0 :             let maybe_tenant_conf = {
    2124            0 :                 let locked = self.inner.write().unwrap();
    2125            0 :                 locked
    2126            0 :                     .tenants
    2127            0 :                     .get(&attach_req.tenant_shard_id)
    2128            0 :                     .map(|t| t.config.clone())
    2129              :             };
    2130              : 
    2131            0 :             match maybe_tenant_conf {
    2132            0 :                 Some(conf) => {
    2133            0 :                     let new_generation = self
    2134            0 :                         .persistence
    2135            0 :                         .increment_generation(attach_req.tenant_shard_id, req_node_id)
    2136            0 :                         .await?;
    2137              : 
    2138              :                     // Persist the placement policy update. This is required
     2139              :                     // when re-attaching a detached tenant.
    2140            0 :                     self.persistence
    2141            0 :                         .update_tenant_shard(
    2142            0 :                             TenantFilter::Shard(attach_req.tenant_shard_id),
    2143            0 :                             Some(PlacementPolicy::Attached(0)),
    2144            0 :                             Some(conf),
    2145            0 :                             None,
    2146            0 :                             None,
    2147            0 :                         )
    2148            0 :                         .await?;
    2149            0 :                     Some(new_generation)
    2150              :                 }
    2151              :                 None => {
    2152            0 :                     anyhow::bail!("Attach hook handling raced with tenant removal")
    2153              :                 }
    2154              :             }
    2155              :         } else {
    2156            0 :             self.persistence.detach(attach_req.tenant_shard_id).await?;
    2157            0 :             None
    2158              :         };
    2159              : 
    2160            0 :         let mut locked = self.inner.write().unwrap();
    2161            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2162              : 
    2163            0 :         let tenant_shard = tenants
    2164            0 :             .get_mut(&attach_req.tenant_shard_id)
    2165            0 :             .expect("Checked for existence above");
    2166              : 
    2167            0 :         if let Some(new_generation) = new_generation {
    2168            0 :             tenant_shard.generation = Some(new_generation);
    2169            0 :             tenant_shard.policy = PlacementPolicy::Attached(0);
    2170            0 :         } else {
    2171              :             // This is a detach notification.  We must update placement policy to avoid re-attaching
    2172              :             // during background scheduling/reconciliation, or during storage controller restart.
    2173            0 :             assert!(attach_req.node_id.is_none());
    2174            0 :             tenant_shard.policy = PlacementPolicy::Detached;
    2175              :         }
    2176              : 
    2177            0 :         if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
    2178            0 :             tracing::info!(
    2179              :                 tenant_id = %attach_req.tenant_shard_id,
    2180              :                 ps_id = %attaching_pageserver,
    2181              :                 generation = ?tenant_shard.generation,
    2182            0 :                 "issuing",
    2183              :             );
    2184            0 :         } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
    2185            0 :             tracing::info!(
    2186              :                 tenant_id = %attach_req.tenant_shard_id,
    2187              :                 %ps_id,
    2188              :                 generation = ?tenant_shard.generation,
    2189            0 :                 "dropping",
    2190              :             );
    2191              :         } else {
    2192            0 :             tracing::info!(
    2193              :             tenant_id = %attach_req.tenant_shard_id,
    2194            0 :             "no-op: tenant already has no pageserver");
    2195              :         }
    2196            0 :         tenant_shard
    2197            0 :             .intent
    2198            0 :             .set_attached(scheduler, attach_req.node_id);
    2199              : 
    2200            0 :         tracing::info!(
    2201            0 :             "attach_hook: tenant {} set generation {:?}, pageserver {}, config {:?}",
    2202              :             attach_req.tenant_shard_id,
    2203              :             tenant_shard.generation,
    2204              :             // TODO: this is an odd number of 0xf's
    2205            0 :             attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff)),
    2206              :             attach_req.config,
    2207              :         );
    2208              : 
    2209              :         // Trick the reconciler into not doing anything for this tenant: this helps
     2210              :         // tests that manually configure a tenant on the pageserver, and then call this
    2211              :         // attach hook: they don't want background reconciliation to modify what they
    2212              :         // did to the pageserver.
    2213              :         #[cfg(feature = "testing")]
    2214              :         {
    2215            0 :             if let Some(node_id) = attach_req.node_id {
    2216            0 :                 tenant_shard.observed.locations = HashMap::from([(
    2217            0 :                     node_id,
    2218            0 :                     ObservedStateLocation {
    2219            0 :                         conf: Some(attached_location_conf(
    2220            0 :                             tenant_shard.generation.unwrap(),
    2221            0 :                             &tenant_shard.shard,
    2222            0 :                             &tenant_shard.config,
    2223            0 :                             &PlacementPolicy::Attached(0),
    2224            0 :                             tenant_shard.intent.get_secondary().len(),
    2225            0 :                         )),
    2226            0 :                     },
    2227            0 :                 )]);
    2228            0 :             } else {
    2229            0 :                 tenant_shard.observed.locations.clear();
    2230            0 :             }
    2231              :         }
    2232              : 
    2233              :         Ok(AttachHookResponse {
    2234            0 :             generation: attach_req
    2235            0 :                 .node_id
    2236            0 :                 .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
    2237              :         })
    2238            0 :     }
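                       : 
                       :     // Sketch of how a test might drive this hook (field set inferred from the
                       :     // usages above; the real request type may carry additional fields):
                       :     //
                       :     //     let resp = service.attach_hook(AttachHookRequest {
                       :     //         tenant_shard_id,
                       :     //         node_id: Some(pageserver_id), // Some(_) attaches, None detaches
                       :     //         generation_override: None,
                       :     //         config: None,
                       :     //     }).await?;
                       :     //     assert!(resp.generation.is_some());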
    2239              : 
    2240            0 :     pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
    2241            0 :         let locked = self.inner.read().unwrap();
    2242              : 
    2243            0 :         let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
    2244              : 
    2245              :         InspectResponse {
    2246            0 :             attachment: tenant_shard.and_then(|s| {
    2247            0 :                 s.intent
    2248            0 :                     .get_attached()
    2249            0 :                     .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
    2250            0 :             }),
    2251              :         }
    2252            0 :     }
    2253              : 
    2254              :     // When the availability state of a node transitions to active, we must do a full reconciliation
    2255              :     // of LocationConfigs on that node.  This is because while a node was offline:
    2256              :     // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
    2257              :     // - aborting a tenant shard split might have left rogue child shards behind on this node.
    2258              :     //
    2259              :     // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
    2260              :     // Reconcilers might communicate with the node, and these must not overlap with the work we do in
    2261              :     // this function.
    2262              :     //
    2263              :     // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
     2264              :     // written for a single node rather than as a batch job for all nodes.
    2265              :     #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
    2266              :     async fn node_activate_reconcile(
    2267              :         &self,
    2268              :         mut node: Node,
    2269              :         _lock: &TracingExclusiveGuard<NodeOperations>,
    2270              :     ) -> Result<(), ApiError> {
    2271              :         // This Node is a mutable local copy: we will set it active so that we can use its
    2272              :         // API client to reconcile with the node.  The Node in [`Self::nodes`] will get updated
    2273              :         // later.
    2274              :         node.set_availability(NodeAvailability::Active(PageserverUtilization::full()));
    2275              : 
    2276              :         let configs = match node
    2277              :             .with_client_retries(
    2278            0 :                 |client| async move { client.list_location_config().await },
    2279              :                 &self.http_client,
    2280              :                 &self.config.pageserver_jwt_token,
    2281              :                 1,
    2282              :                 5,
    2283              :                 SHORT_RECONCILE_TIMEOUT,
    2284              :                 &self.cancel,
    2285              :             )
    2286              :             .await
    2287              :         {
    2288              :             None => {
    2289              :                 // We're shutting down (the Node's cancellation token can't have fired, because
    2290              :                 // we're the only scope that has a reference to it, and we didn't fire it).
    2291              :                 return Err(ApiError::ShuttingDown);
    2292              :             }
    2293              :             Some(Err(e)) => {
    2294              :                 // This node didn't succeed listing its locations: it may not proceed to active state
    2295              :                 // as it is apparently unavailable.
    2296              :                 return Err(ApiError::PreconditionFailed(
    2297              :                     format!("Failed to query node location configs, cannot activate ({e})").into(),
    2298              :                 ));
    2299              :             }
    2300              :             Some(Ok(configs)) => configs,
    2301              :         };
    2302              :         tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
    2303              : 
    2304              :         let mut cleanup = Vec::new();
    2305              :         let mut mismatched_locations = 0;
    2306              :         {
    2307              :             let mut locked = self.inner.write().unwrap();
    2308              : 
    2309              :             for (tenant_shard_id, reported) in configs.tenant_shards {
    2310              :                 let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
    2311              :                     cleanup.push(tenant_shard_id);
    2312              :                     continue;
    2313              :                 };
    2314              : 
    2315              :                 let on_record = &mut tenant_shard
    2316              :                     .observed
    2317              :                     .locations
    2318              :                     .entry(node.get_id())
    2319            0 :                     .or_insert_with(|| ObservedStateLocation { conf: None })
    2320              :                     .conf;
    2321              : 
    2322              :                 // If the location reported by the node does not match our observed state,
    2323              :                 // then we mark it as uncertain and let the background reconciliation loop
    2324              :                 // deal with it.
    2325              :                 //
    2326              :                 // Note that this also covers net new locations reported by the node.
    2327              :                 if *on_record != reported {
    2328              :                     mismatched_locations += 1;
    2329              :                     *on_record = None;
    2330              :                 }
    2331              :             }
    2332              :         }
    2333              : 
    2334              :         if mismatched_locations > 0 {
    2335              :             tracing::info!(
    2336              :                 "Set observed state to None for {mismatched_locations} mismatched locations"
    2337              :             );
    2338              :         }
    2339              : 
    2340              :         for tenant_shard_id in cleanup {
    2341              :             tracing::info!("Detaching {tenant_shard_id}");
    2342              :             match node
    2343              :                 .with_client_retries(
    2344            0 :                     |client| async move {
    2345            0 :                         let config = LocationConfig {
    2346            0 :                             mode: LocationConfigMode::Detached,
    2347            0 :                             generation: None,
    2348            0 :                             secondary_conf: None,
    2349            0 :                             shard_number: tenant_shard_id.shard_number.0,
    2350            0 :                             shard_count: tenant_shard_id.shard_count.literal(),
    2351            0 :                             shard_stripe_size: 0,
    2352            0 :                             tenant_conf: models::TenantConfig::default(),
    2353            0 :                         };
    2354            0 :                         client
    2355            0 :                             .location_config(tenant_shard_id, config, None, false)
    2356            0 :                             .await
    2357            0 :                     },
    2358              :                     &self.http_client,
    2359              :                     &self.config.pageserver_jwt_token,
    2360              :                     1,
    2361              :                     5,
    2362              :                     SHORT_RECONCILE_TIMEOUT,
    2363              :                     &self.cancel,
    2364              :                 )
    2365              :                 .await
    2366              :             {
    2367              :                 None => {
    2368              :                     // We're shutting down (the Node's cancellation token can't have fired, because
    2369              :                     // we're the only scope that has a reference to it, and we didn't fire it).
    2370              :                     return Err(ApiError::ShuttingDown);
    2371              :                 }
    2372              :                 Some(Err(e)) => {
    2373              :                     // Do not let the node proceed to Active state if it is not responsive to requests
    2374              :                     // to detach.  This could happen if e.g. a shutdown bug in the pageserver is preventing
     2375              :                     // detach from completing: we should not let this node back into the set of nodes considered
    2376              :                     // okay for scheduling.
    2377              :                     return Err(ApiError::Conflict(format!(
    2378              :                         "Node {node} failed to detach {tenant_shard_id}: {e}"
    2379              :                     )));
    2380              :                 }
    2381              :                 Some(Ok(_)) => {}
    2382              :             };
    2383              :         }
    2384              : 
    2385              :         Ok(())
    2386              :     }
    2387              : 
    2388            0 :     pub(crate) async fn re_attach(
    2389            0 :         &self,
    2390            0 :         reattach_req: ReAttachRequest,
    2391            0 :     ) -> Result<ReAttachResponse, ApiError> {
    2392            0 :         if let Some(register_req) = reattach_req.register {
    2393            0 :             self.node_register(register_req).await?;
    2394            0 :         }
    2395              : 
    2396              :         // Ordering: we must persist generation number updates before making them visible in the in-memory state
    2397            0 :         let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
    2398              : 
    2399            0 :         tracing::info!(
    2400              :             node_id=%reattach_req.node_id,
    2401            0 :             "Incremented {} tenant shards' generations",
    2402            0 :             incremented_generations.len()
    2403              :         );
    2404              : 
    2405              :         // Apply the updated generation to our in-memory state, and
     2406              :         // identify secondary locations.
    2407            0 :         let mut locked = self.inner.write().unwrap();
    2408            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    2409              : 
    2410            0 :         let mut response = ReAttachResponse {
    2411            0 :             tenants: Vec::new(),
    2412            0 :         };
    2413              : 
    2414              :         // [Hadron] If the pageserver reports in the reattach message that it has an empty disk, it's possible that it just
    2415              :         // recovered from a local disk failure. The response of the reattach request will contain a list of tenants but it
    2416              :         // will not be honored by the pageserver in this case (disk failure). We should make sure we clear any observed
    2417              :         // locations of tenants attached to the node so that the reconciler will discover the discrpancy and reconfigure the
     2418              :         // locations of tenants attached to the node so that the reconciler will discover the discrepancy and reconfigure the
    2419            0 :         if self.config.handle_ps_local_disk_loss && reattach_req.empty_local_disk.unwrap_or(false) {
    2420            0 :             tracing::info!(
    2421            0 :                 "Pageserver {node_id} reports empty local disk, clearing observed locations referencing the pageserver for all tenants",
    2422              :                 node_id = reattach_req.node_id
    2423              :             );
    2424            0 :             let mut num_tenant_shards_affected = 0;
    2425            0 :             for (tenant_shard_id, shard) in tenants.iter_mut() {
    2426            0 :                 if shard
    2427            0 :                     .observed
    2428            0 :                     .locations
    2429            0 :                     .remove(&reattach_req.node_id)
    2430            0 :                     .is_some()
    2431              :                 {
    2432            0 :                     tracing::info!("Cleared observed location for tenant shard {tenant_shard_id}");
    2433            0 :                     num_tenant_shards_affected += 1;
    2434            0 :                 }
    2435              :             }
    2436            0 :             tracing::info!(
    2437            0 :                 "Cleared observed locations for {num_tenant_shards_affected} tenant shards"
    2438              :             );
    2439            0 :         }
    2440              : 
    2441              :         // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
    2442              :         // to call location_conf API with an old generation.  Wait for cancellation to complete
    2443              :         // before responding to this request.  Requires well implemented CancellationToken logic
    2444              :         // all the way to where we call location_conf.  Even then, there can still be a location_conf
    2445              :         // request in flight over the network: TODO handle that by making location_conf API refuse
    2446              :         // to go backward in generations.
    2447              : 
    2448              :         // Scan through all shards, applying updates for ones where we updated generation
    2449              :         // and identifying shards that intend to have a secondary location on this node.
    2450            0 :         for (tenant_shard_id, shard) in tenants {
    2451            0 :             if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
    2452            0 :                 let new_gen = *new_gen;
    2453            0 :                 response.tenants.push(ReAttachResponseTenant {
    2454            0 :                     id: *tenant_shard_id,
    2455            0 :                     r#gen: Some(new_gen.into().unwrap()),
    2456            0 :                     // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
    2457            0 :                     // execution.  If a pageserver is restarted during that process, then the reconcile pass will
    2458            0 :                     // fail, and start from scratch, so it doesn't make sense for us to try and preserve
    2459            0 :                     // the stale/multi states at this point.
    2460            0 :                     mode: LocationConfigMode::AttachedSingle,
    2461            0 :                     stripe_size: shard.shard.stripe_size,
    2462            0 :                 });
    2463              : 
    2464            0 :                 shard.generation = std::cmp::max(shard.generation, Some(new_gen));
    2465            0 :                 if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
    2466              :                     // Why can we update `observed` even though we're not sure our response will be received
    2467              :                     // by the pageserver?  Because the pageserver will not proceed with startup until
     2468              :                     // it has processed the response: if the response is lost, we'll see another request and
     2469              :                     // increment the generation again, avoiding any uncertainty about the dirtiness of the tenant's state.
    2470            0 :                     if let Some(conf) = observed.conf.as_mut() {
    2471            0 :                         conf.generation = new_gen.into();
    2472            0 :                     }
    2473            0 :                 } else {
    2474            0 :                     // This node has no observed state for the shard: perhaps it was offline
    2475            0 :                     // when the pageserver restarted.  Insert a None, so that the Reconciler
    2476            0 :                     // will be prompted to learn the location's state before it makes changes.
    2477            0 :                     shard
    2478            0 :                         .observed
    2479            0 :                         .locations
    2480            0 :                         .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
    2481            0 :                 }
    2482            0 :             } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
    2483            0 :                 // Ordering: pageserver will not accept /location_config requests until it has
    2484            0 :                 // finished processing the response from re-attach.  So we can update our in-memory state
    2485            0 :                 // now, and be confident that we are not stamping on the result of some later location config.
    2486            0 :                 // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
    2487            0 :                 // so we might update observed state here, and then get over-written by some racing
    2488            0 :                 // ReconcileResult.  The impact is low however, since we have set state on pageserver something
    2489            0 :                 // that matches intent, so worst case if we race then we end up doing a spurious reconcile.
    2490            0 : 
    2491            0 :                 response.tenants.push(ReAttachResponseTenant {
    2492            0 :                     id: *tenant_shard_id,
    2493            0 :                     r#gen: None,
    2494            0 :                     mode: LocationConfigMode::Secondary,
    2495            0 :                     stripe_size: shard.shard.stripe_size,
    2496            0 :                 });
    2497            0 : 
    2498            0 :                 // We must not update observed, because we have no guarantee that our
    2499            0 :                 // response will be received by the pageserver. This could leave it
    2500            0 :                 // falsely dirty, but the resulting reconcile should be idempotent.
    2501            0 :             }
    2502              :         }
    2503              : 
    2504              :         // We consider a node Active once we have composed a re-attach response, but we
    2505              :         // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
    2506              :         // implicitly synchronizes the LocationConfigs on the node.
    2507              :         //
    2508              :         // Setting a node active unblocks any Reconcilers that might write to the location config API,
    2509              :         // but those requests will not be accepted by the node until it has finished processing
    2510              :         // the re-attach response.
    2511              :         //
     2512              :         // Additionally, reset the node's scheduling policy to match the conditional update done
    2513              :         // in [`Persistence::re_attach`].
    2514            0 :         if let Some(node) = nodes.get(&reattach_req.node_id) {
    2515            0 :             let reset_scheduling = matches!(
    2516            0 :                 node.get_scheduling(),
    2517              :                 NodeSchedulingPolicy::PauseForRestart
    2518              :                     | NodeSchedulingPolicy::Draining
    2519              :                     | NodeSchedulingPolicy::Filling
    2520              :                     | NodeSchedulingPolicy::Deleting
    2521              :             );
    2522              : 
    2523            0 :             let mut new_nodes = (**nodes).clone();
    2524            0 :             if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
    2525            0 :                 if reset_scheduling {
    2526            0 :                     node.set_scheduling(NodeSchedulingPolicy::Active);
    2527            0 :                 }
    2528              : 
    2529            0 :                 tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
    2530            0 :                 node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
    2531              : 
    2532            0 :                 scheduler.node_upsert(node);
    2533            0 :                 let new_nodes = Arc::new(new_nodes);
    2534            0 :                 *nodes = new_nodes;
    2535              :             } else {
    2536            0 :                 tracing::error!(
    2537            0 :                     "Reattaching node {} was removed while processing the request",
    2538              :                     reattach_req.node_id
    2539              :                 );
    2540              :             }
    2541            0 :         }
    2542              : 
    2543            0 :         Ok(response)
    2544            0 :     }
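                       : 
                       :     // Worked example of the ordering above (illustrative numbers): a shard was
                       :     // last attached to node A with generation 5. A restarts and calls
                       :     // re-attach; we durably persist generation 6 *before* updating in-memory
                       :     // state or responding. If the response is lost, A retries and we persist
                       :     // 7: stale deletion-queue validations quoting 5 (or 6) can then never pass.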
    2545              : 
    2546            0 :     pub(crate) async fn validate(
    2547            0 :         &self,
    2548            0 :         validate_req: ValidateRequest,
    2549            0 :     ) -> Result<ValidateResponse, DatabaseError> {
    2550              :         // Fast in-memory check: we may reject validation on anything that doesn't match our
    2551              :         // in-memory generation for a shard
    2552            0 :         let in_memory_result = {
    2553            0 :             let mut in_memory_result = Vec::new();
    2554            0 :             let locked = self.inner.read().unwrap();
    2555            0 :             for req_tenant in validate_req.tenants {
    2556            0 :                 if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
    2557            0 :                     let valid = tenant_shard.generation == Some(Generation::new(req_tenant.r#gen));
    2558            0 :                     tracing::info!(
    2559            0 :                         "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
    2560              :                         req_tenant.id,
    2561              :                         req_tenant.r#gen,
    2562              :                         tenant_shard.generation
    2563              :                     );
    2564              : 
    2565            0 :                     in_memory_result.push((
    2566            0 :                         req_tenant.id,
    2567            0 :                         Generation::new(req_tenant.r#gen),
    2568            0 :                         valid,
    2569            0 :                     ));
    2570              :                 } else {
    2571              :                     // This is legal: for example, during a shard split the pageserver may still
    2572              :                     // have deletions in its queue from the old pre-split shard, or the tenant may
    2573              :                     // have been deleted while it was still busy with compaction/GC.
    2574            0 :                     tracing::info!(
    2575            0 :                         "Refusing deletion validation for missing shard {}",
    2576              :                         req_tenant.id
    2577              :                     );
    2578              :                 }
    2579              :             }
    2580              : 
    2581            0 :             in_memory_result
    2582              :         };
    2583              : 
    2584              :         // Database calls to confirm validity for anything that passed the in-memory check.  We must do this
    2585              :         // in case of controller split-brain, where some other controller process might have incremented the generation.
    2586            0 :         let db_generations = self
    2587            0 :             .persistence
    2588            0 :             .shard_generations(
    2589            0 :                 in_memory_result
    2590            0 :                     .iter()
    2591            0 :                     .filter_map(|i| if i.2 { Some(&i.0) } else { None }),
    2592              :             )
    2593            0 :             .await?;
    2594            0 :         let db_generations = db_generations.into_iter().collect::<HashMap<_, _>>();
    2595              : 
    2596            0 :         let mut response = ValidateResponse {
    2597            0 :             tenants: Vec::new(),
    2598            0 :         };
    2599            0 :         for (tenant_shard_id, validate_generation, valid) in in_memory_result.into_iter() {
    2600            0 :             let valid = if valid {
    2601            0 :                 let db_generation = db_generations.get(&tenant_shard_id);
    2602            0 :                 db_generation == Some(&Some(validate_generation))
    2603              :             } else {
    2604              :                 // If in-memory state says it's invalid, trust that.  It's always safe to fail a validation; at worst,
    2605              :                 // this prevents a pageserver from cleaning up an object in S3.
    2606            0 :                 false
    2607              :             };
    2608              : 
    2609            0 :             response.tenants.push(ValidateResponseTenant {
    2610            0 :                 id: tenant_shard_id,
    2611            0 :                 valid,
    2612            0 :             })
    2613              :         }
    2614              : 
    2615            0 :         Ok(response)
    2616            0 :     }
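                      : 
                      :     // A minimal caller-side sketch (names like `svc` and the request/tenant types
                      :     // are assumed for illustration): `valid == true` in the response requires both
                      :     // the in-memory check and the database check above to agree on the generation.
                      :     //
                      :     //     let req = ValidateRequest {
                      :     //         tenants: vec![ValidateRequestTenant { id: tenant_shard_id, r#gen: 3 }],
                      :     //     };
                      :     //     for t in svc.validate(req).await?.tenants {
                      :     //         println!("{}: deletion allowed = {}", t.id, t.valid);
                      :     //     }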
    2617              : 
    2618            0 :     pub(crate) async fn tenant_create(
    2619            0 :         &self,
    2620            0 :         create_req: TenantCreateRequest,
    2621            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    2622            0 :         let tenant_id = create_req.new_tenant_id.tenant_id;
    2623              : 
    2624              :         // Exclude any concurrent attempts to create/access the same tenant ID
    2625            0 :         let _tenant_lock = trace_exclusive_lock(
    2626            0 :             &self.tenant_op_locks,
    2627            0 :             create_req.new_tenant_id.tenant_id,
    2628            0 :             TenantOperations::Create,
    2629            0 :         )
    2630            0 :         .await;
    2631            0 :         let (response, waiters) = self.do_tenant_create(create_req).await?;
    2632              : 
    2633            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    2634              :             // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
    2635              :             // accept compute notifications while the tenant is still being created.  Reconciliation will
    2636              :             // be retried in the background.
    2637            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
    2638            0 :         }
    2639            0 :         Ok(response)
    2640            0 :     }
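                      : 
                      :     // Caller-side sketch (names assumed for illustration): omitting `placement_policy`
                      :     // falls back to `PlacementPolicy::Attached(0)`, and omitting `generation` starts
                      :     // the tenant from INITIAL_GENERATION, per `do_tenant_create` below.
                      :     //
                      :     //     let response = svc
                      :     //         .tenant_create(TenantCreateRequest {
                      :     //             new_tenant_id: TenantShardId::unsharded(tenant_id),
                      :     //             generation: None,
                      :     //             shard_parameters: ShardParameters::default(),
                      :     //             placement_policy: None,
                      :     //             config: TenantConfig::default(),
                      :     //         })
                      :     //         .await?;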
    2641              : 
    2642            0 :     pub(crate) async fn do_tenant_create(
    2643            0 :         &self,
    2644            0 :         create_req: TenantCreateRequest,
    2645            0 :     ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
    2646            0 :         let placement_policy = create_req
    2647            0 :             .placement_policy
    2648            0 :             .clone()
    2649              :             // As a default, zero secondaries is convenient for tests that don't choose a policy.
    2650            0 :             .unwrap_or(PlacementPolicy::Attached(0));
    2651              : 
    2652              :         // This service expects to handle sharding itself: it is an error to try to create
    2653              :         // a particular shard directly here.
    2654            0 :         let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
    2655            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2656            0 :                 "Attempted to create a specific shard, this API is for creating the whole tenant"
    2657            0 :             )));
    2658              :         } else {
    2659            0 :             create_req.new_tenant_id.tenant_id
    2660              :         };
    2661              : 
    2662            0 :         tracing::info!(
    2663            0 :             "Creating tenant {}, shard_count={:?}",
    2664              :             create_req.new_tenant_id,
    2665              :             create_req.shard_parameters.count,
    2666              :         );
    2667              : 
    2668            0 :         let create_ids = (0..create_req.shard_parameters.count.count())
    2669            0 :             .map(|i| TenantShardId {
    2670            0 :                 tenant_id,
    2671            0 :                 shard_number: ShardNumber(i),
    2672            0 :                 shard_count: create_req.shard_parameters.count,
    2673            0 :             })
    2674            0 :             .collect::<Vec<_>>();
    2675              : 
    2676              :         // If the caller specifies a None generation, it means "start from default".  This is different
    2677              :         // to [`Self::tenant_location_config`], where a None generation is used to represent
    2678              :         // an incompletely-onboarded tenant.
    2679            0 :         let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
    2680            0 :             tracing::info!(
    2681            0 :                 "tenant_create: secondary mode, generation is_some={}",
    2682            0 :                 create_req.generation.is_some()
    2683              :             );
    2684            0 :             create_req.generation.map(Generation::new)
    2685              :         } else {
    2686            0 :             tracing::info!(
    2687            0 :                 "tenant_create: not secondary mode, generation is_some={}",
    2688            0 :                 create_req.generation.is_some()
    2689              :             );
    2690            0 :             Some(
    2691            0 :                 create_req
    2692            0 :                     .generation
    2693            0 :                     .map(Generation::new)
    2694            0 :                     .unwrap_or(INITIAL_GENERATION),
    2695            0 :             )
    2696              :         };
    2697              : 
    2698            0 :         let preferred_az_id = {
    2699            0 :             let locked = self.inner.read().unwrap();
    2700              :             // Idempotency: take the existing value if the tenant already exists
    2701            0 :             if let Some(shard) = locked.tenants.get(create_ids.first().unwrap()) {
    2702            0 :                 shard.preferred_az().cloned()
    2703              :             } else {
    2704            0 :                 locked.scheduler.get_az_for_new_tenant()
    2705              :             }
    2706              :         };
    2707              : 
    2708              :         // Ordering: we persist tenant shards before creating them on the pageserver.  This enables a caller
    2709              :         // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
    2710              :         // during the creation, rather than risking leaving orphan objects in S3.
    2711            0 :         let persist_tenant_shards = create_ids
    2712            0 :             .iter()
    2713            0 :             .map(|tenant_shard_id| TenantShardPersistence {
    2714            0 :                 tenant_id: tenant_shard_id.tenant_id.to_string(),
    2715            0 :                 shard_number: tenant_shard_id.shard_number.0 as i32,
    2716            0 :                 shard_count: tenant_shard_id.shard_count.literal() as i32,
    2717            0 :                 shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
    2718            0 :                 generation: initial_generation.map(|g| g.into().unwrap() as i32),
    2719              :                 // The pageserver is not known until scheduling happens: we will set this column when
    2720              :                 // incrementing the generation the first time we attach to a pageserver.
    2721            0 :                 generation_pageserver: None,
    2722            0 :                 placement_policy: serde_json::to_string(&placement_policy).unwrap(),
    2723            0 :                 config: serde_json::to_string(&create_req.config).unwrap(),
    2724            0 :                 splitting: SplitState::default(),
    2725            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2726            0 :                     .unwrap(),
    2727            0 :                 preferred_az_id: preferred_az_id.as_ref().map(|az| az.to_string()),
    2728            0 :             })
    2729            0 :             .collect();
    2730              : 
    2731            0 :         match self
    2732            0 :             .persistence
    2733            0 :             .insert_tenant_shards(persist_tenant_shards)
    2734            0 :             .await
    2735              :         {
    2736            0 :             Ok(_) => {}
    2737              :             Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
    2738              :                 DatabaseErrorKind::UniqueViolation,
    2739              :                 _,
    2740              :             ))) => {
    2741              :                 // Unique key violation: this is probably a retry.  Because the shard count is part of the unique key,
    2742              :                 // if we see a unique key violation it means that the creation request's shard count matches the previous
    2743              :                 // creation's shard count.
    2744            0 :                 tracing::info!(
    2745            0 :                     "Tenant shards already present in database, proceeding with idempotent creation..."
    2746              :                 );
    2747              :             }
    2748              :             // Any other database error is unexpected and a bug.
    2749            0 :             Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
    2750              :         };
    2751              : 
    2752            0 :         let mut schedule_context = ScheduleContext::default();
    2753            0 :         let mut schedule_error = None;
    2754            0 :         let mut response_shards = Vec::new();
    2755            0 :         for tenant_shard_id in create_ids {
    2756            0 :             tracing::info!("Creating shard {tenant_shard_id}...");
    2757              : 
    2758            0 :             let outcome = self
    2759            0 :                 .do_initial_shard_scheduling(
    2760            0 :                     tenant_shard_id,
    2761            0 :                     initial_generation,
    2762            0 :                     create_req.shard_parameters,
    2763            0 :                     create_req.config.clone(),
    2764            0 :                     placement_policy.clone(),
    2765            0 :                     preferred_az_id.as_ref(),
    2766            0 :                     &mut schedule_context,
    2767            0 :                 )
    2768            0 :                 .await;
    2769              : 
    2770            0 :             match outcome {
    2771            0 :                 InitialShardScheduleOutcome::Scheduled(resp) => response_shards.push(resp),
    2772            0 :                 InitialShardScheduleOutcome::NotScheduled => {}
    2773            0 :                 InitialShardScheduleOutcome::ShardScheduleError(err) => {
    2774            0 :                     schedule_error = Some(err);
    2775            0 :                 }
    2776              :             }
    2777              :         }
    2778              : 
    2779              :         // If we failed to schedule shards, then they are still created in the controller,
    2780              :         // but we return an error to the requester to avoid a silent failure when someone
    2781              :         // tries to e.g. create a tenant whose placement policy requires more nodes than
    2782              :         // are present in the system.  We do this here rather than in the above loop, to
    2783              :         // avoid situations where we only create a subset of shards in the tenant.
    2784            0 :         if let Some(e) = schedule_error {
    2785            0 :             return Err(ApiError::Conflict(format!(
    2786            0 :                 "Failed to schedule shard(s): {e}"
    2787            0 :             )));
    2788            0 :         }
    2789              : 
    2790            0 :         let waiters = {
    2791            0 :             let mut locked = self.inner.write().unwrap();
    2792            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    2793            0 :             let config = ReconcilerConfigBuilder::new(ReconcilerPriority::High)
    2794            0 :                 .tenant_creation_hint(true)
    2795            0 :                 .build();
    2796            0 :             tenants
    2797            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    2798            0 :                 .filter_map(|(_shard_id, shard)| {
    2799            0 :                     self.maybe_configured_reconcile_shard(shard, nodes, config)
    2800            0 :                 })
    2801            0 :                 .collect::<Vec<_>>()
    2802              :         };
    2803              : 
    2804            0 :         Ok((
    2805            0 :             TenantCreateResponse {
    2806            0 :                 shards: response_shards,
    2807            0 :             },
    2808            0 :             waiters,
    2809            0 :         ))
    2810            0 :     }
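                      : 
                      :     // The unique-violation arm above is a generic idempotency pattern: because the
                      :     // shard count is part of the unique key, an identical retry collides and can
                      :     // safely fall through. A stripped-down sketch of the same match:
                      :     //
                      :     //     match self.persistence.insert_tenant_shards(persist_tenant_shards).await {
                      :     //         Ok(_) => {}
                      :     //         Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
                      :     //             DatabaseErrorKind::UniqueViolation,
                      :     //             _,
                      :     //         ))) => { /* retry of a previous creation: proceed idempotently */ }
                      :     //         Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
                      :     //     }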
    2811              : 
    2812              :     /// Helper for tenant creation that does the scheduling for an individual shard. Covers both the
    2813              :     /// case of a new tenant and a pre-existing one.
    2814              :     #[allow(clippy::too_many_arguments)]
    2815            0 :     async fn do_initial_shard_scheduling(
    2816            0 :         &self,
    2817            0 :         tenant_shard_id: TenantShardId,
    2818            0 :         initial_generation: Option<Generation>,
    2819            0 :         shard_params: ShardParameters,
    2820            0 :         config: TenantConfig,
    2821            0 :         placement_policy: PlacementPolicy,
    2822            0 :         preferred_az_id: Option<&AvailabilityZone>,
    2823            0 :         schedule_context: &mut ScheduleContext,
    2824            0 :     ) -> InitialShardScheduleOutcome {
    2825            0 :         let mut locked = self.inner.write().unwrap();
    2826            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2827              : 
    2828              :         use std::collections::btree_map::Entry;
    2829            0 :         match tenants.entry(tenant_shard_id) {
    2830            0 :             Entry::Occupied(mut entry) => {
    2831            0 :                 tracing::info!("Tenant shard {tenant_shard_id} already exists while creating");
    2832              : 
    2833            0 :                 if let Err(err) = entry.get_mut().schedule(scheduler, schedule_context) {
    2834            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(err);
    2835            0 :                 }
    2836              : 
    2837            0 :                 if let Some(node_id) = entry.get().intent.get_attached() {
    2838            0 :                     let generation = entry
    2839            0 :                         .get()
    2840            0 :                         .generation
    2841            0 :                         .expect("Generation is set when in attached mode");
    2842            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2843            0 :                         shard_id: tenant_shard_id,
    2844            0 :                         node_id: *node_id,
    2845            0 :                         generation: generation.into().unwrap(),
    2846            0 :                     })
    2847              :                 } else {
    2848            0 :                     InitialShardScheduleOutcome::NotScheduled
    2849              :                 }
    2850              :             }
    2851            0 :             Entry::Vacant(entry) => {
    2852            0 :                 let state = entry.insert(TenantShard::new(
    2853            0 :                     tenant_shard_id,
    2854            0 :                     ShardIdentity::from_params(tenant_shard_id.shard_number, shard_params),
    2855            0 :                     placement_policy,
    2856            0 :                     preferred_az_id.cloned(),
    2857              :                 ));
    2858              : 
    2859            0 :                 state.generation = initial_generation;
    2860            0 :                 state.config = config;
    2861            0 :                 if let Err(e) = state.schedule(scheduler, schedule_context) {
    2862            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(e);
    2863            0 :                 }
    2864              : 
    2865              :                 // Only include shards in the result if we are attaching: the purpose
    2866              :                 // of the response is to tell the caller where the shards are attached.
    2867            0 :                 if let Some(node_id) = state.intent.get_attached() {
    2868            0 :                     let generation = state
    2869            0 :                         .generation
    2870            0 :                         .expect("Generation is set when in attached mode");
    2871            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2872            0 :                         shard_id: tenant_shard_id,
    2873            0 :                         node_id: *node_id,
    2874            0 :                         generation: generation.into().unwrap(),
    2875            0 :                     })
    2876              :                 } else {
    2877            0 :                     InitialShardScheduleOutcome::NotScheduled
    2878              :                 }
    2879              :             }
    2880              :         }
    2881            0 :     }
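                      : 
                      :     // The match above is the standard `BTreeMap` entry-API upsert; generically:
                      :     //
                      :     //     use std::collections::btree_map::Entry;
                      :     //     match map.entry(key) {
                      :     //         Entry::Occupied(mut e) => { /* already created: reschedule e.get_mut() */ }
                      :     //         Entry::Vacant(e) => { let state = e.insert(new_state); /* first creation */ }
                      :     //     }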
    2882              : 
    2883              :     /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
    2884              :     /// wait for reconciliation to complete before responding.
    2885            0 :     async fn await_waiters(
    2886            0 :         &self,
    2887            0 :         waiters: Vec<ReconcilerWaiter>,
    2888            0 :         timeout: Duration,
    2889            0 :     ) -> Result<(), ReconcileWaitError> {
    2890            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2891            0 :         for waiter in waiters {
    2892            0 :             let timeout = deadline.duration_since(Instant::now());
    2893            0 :             waiter.wait_timeout(timeout).await?;
    2894              :         }
    2895              : 
    2896            0 :         Ok(())
    2897            0 :     }
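                      : 
                      :     // Note the deadline idiom above: a single deadline is shared across all waiters,
                      :     // so the batch is bounded by `timeout`, not `timeout * waiters.len()`. A standalone
                      :     // sketch (since Rust 1.60, `duration_since` saturates to zero past the deadline):
                      :     //
                      :     //     let deadline = Instant::now() + timeout;
                      :     //     for waiter in waiters {
                      :     //         waiter.wait_timeout(deadline.duration_since(Instant::now())).await?;
                      :     //     }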
    2898              : 
    2899              :     /// Same as [`Service::await_waiters`], but returns the waiters that are still
    2900              :     /// in progress.
    2901            0 :     async fn await_waiters_remainder(
    2902            0 :         &self,
    2903            0 :         waiters: Vec<ReconcilerWaiter>,
    2904            0 :         timeout: Duration,
    2905            0 :     ) -> Vec<ReconcilerWaiter> {
    2906            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2907            0 :         for waiter in waiters.iter() {
    2908            0 :             let timeout = deadline.duration_since(Instant::now());
    2909            0 :             let _ = waiter.wait_timeout(timeout).await;
    2910              :         }
    2911              : 
    2912            0 :         waiters
    2913            0 :             .into_iter()
    2914            0 :             .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
    2915            0 :             .collect::<Vec<_>>()
    2916            0 :     }
    2917              : 
    2918              :     /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
    2919              :     /// and transform it into either a tenant creation or a series of shard updates.
    2920              :     ///
    2921              :     /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
    2922              :     /// still be returned.
    2923            0 :     fn tenant_location_config_prepare(
    2924            0 :         &self,
    2925            0 :         tenant_id: TenantId,
    2926            0 :         req: TenantLocationConfigRequest,
    2927            0 :     ) -> TenantCreateOrUpdate {
    2928            0 :         let mut updates = Vec::new();
    2929            0 :         let mut locked = self.inner.write().unwrap();
    2930            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    2931            0 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2932              : 
    2933              :         // Use location config mode as an indicator of policy.
    2934            0 :         let placement_policy = match req.config.mode {
    2935            0 :             LocationConfigMode::Detached => PlacementPolicy::Detached,
    2936            0 :             LocationConfigMode::Secondary => PlacementPolicy::Secondary,
    2937              :             LocationConfigMode::AttachedMulti
    2938              :             | LocationConfigMode::AttachedSingle
    2939              :             | LocationConfigMode::AttachedStale => {
    2940            0 :                 if nodes.len() > 1 {
    2941            0 :                     PlacementPolicy::Attached(1)
    2942              :                 } else {
    2943              :                     // Convenience for dev/test: if we just have one pageserver, import
    2944              :                     // tenants into non-HA mode so that scheduling will succeed.
    2945            0 :                     PlacementPolicy::Attached(0)
    2946              :                 }
    2947              :             }
    2948              :         };
    2949              : 
    2950              :         // Ordinarily we do not update scheduling policy, but when making major changes
    2951              :         // like detaching or demoting to secondary-only, we need to force the scheduling
    2952              :         // mode to Active, or the caller's expected outcome (detach it) will not happen.
    2953            0 :         let scheduling_policy = match req.config.mode {
    2954              :             LocationConfigMode::Detached | LocationConfigMode::Secondary => {
    2955              :                 // See the comment above: force the scheduling mode to Active so the
    2956              :                 // detach/demotion actually takes effect.
    2957            0 :                 Some(ShardSchedulingPolicy::Active)
    2958              :             }
    2959              :             LocationConfigMode::AttachedMulti
    2960              :             | LocationConfigMode::AttachedSingle
    2961              :             | LocationConfigMode::AttachedStale => {
    2962              :                 // While attached, continue to respect whatever the existing scheduling mode is.
    2963            0 :                 None
    2964              :             }
    2965              :         };
    2966              : 
    2967            0 :         let mut create = true;
    2968            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2969              :             // Saw an existing shard: this is not a creation
    2970            0 :             create = false;
    2971              : 
    2972              :             // Shards may have initially been created by a Secondary request, where we
    2973              :             // would have left generation as None.
    2974              :             //
    2975              :             // We only update generation the first time we see an attached-mode request,
    2976              :             // and if there is no existing generation set. The caller is responsible for
    2977              :             // ensuring that no non-storage-controller pageserver ever uses a higher
    2978              :             // generation than they passed in here.
    2979              :             use LocationConfigMode::*;
    2980            0 :             let set_generation = match req.config.mode {
    2981            0 :                 AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
    2982            0 :                     req.config.generation.map(Generation::new)
    2983              :                 }
    2984            0 :                 _ => None,
    2985              :             };
    2986              : 
    2987            0 :             updates.push(ShardUpdate {
    2988            0 :                 tenant_shard_id: *shard_id,
    2989            0 :                 placement_policy: placement_policy.clone(),
    2990            0 :                 tenant_config: req.config.tenant_conf.clone(),
    2991            0 :                 generation: set_generation,
    2992            0 :                 scheduling_policy,
    2993            0 :             });
    2994              :         }
    2995              : 
    2996            0 :         if create {
    2997              :             use LocationConfigMode::*;
    2998            0 :             let generation = match req.config.mode {
    2999            0 :                 AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
    3000              :                 // If a caller provided a generation in a non-attached request, ignore it
    3001              :                 // and leave our generation as None: this enables a subsequent update to set
    3002              :                 // the generation when setting an attached mode for the first time.
    3003            0 :                 _ => None,
    3004              :             };
    3005              : 
    3006            0 :             TenantCreateOrUpdate::Create(
    3007            0 :                 // Synthesize a creation request
    3008            0 :                 TenantCreateRequest {
    3009            0 :                     new_tenant_id: tenant_shard_id,
    3010            0 :                     generation,
    3011            0 :                     shard_parameters: ShardParameters {
    3012            0 :                         count: tenant_shard_id.shard_count,
    3013            0 :                         // We only import unsharded or single-sharded tenants, so stripe
    3014            0 :                         // size can be made up arbitrarily here.
    3015            0 :                         stripe_size: DEFAULT_STRIPE_SIZE,
    3016            0 :                     },
    3017            0 :                     placement_policy: Some(placement_policy),
    3018            0 :                     config: req.config.tenant_conf,
    3019            0 :                 },
    3020            0 :             )
    3021              :         } else {
    3022            0 :             assert!(!updates.is_empty());
    3023            0 :             TenantCreateOrUpdate::Update(updates)
    3024              :         }
    3025            0 :     }
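                      : 
                      :     // Summary of the mode-to-policy mapping derived above (a sketch of the match arms,
                      :     // not additional logic):
                      :     //
                      :     //     LocationConfigMode::Detached   -> PlacementPolicy::Detached
                      :     //     LocationConfigMode::Secondary  -> PlacementPolicy::Secondary
                      :     //     AttachedMulti | AttachedSingle | AttachedStale
                      :     //         -> PlacementPolicy::Attached(1)   // or Attached(0) with a single node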
    3026              : 
    3027              :     /// For APIs that might act on tenants with [`PlacementPolicy::Detached`], first check if
    3028              :     /// the tenant is present in memory. If not, load it from the database.  If it is found
    3029              :     /// in neither location, return a NotFound error.
    3030              :     ///
    3031              :     /// Caller must demonstrate they hold a lock guard, as otherwise two callers might try to load
    3032              :     /// it at the same time, or we might race with [`Self::maybe_drop_tenant`].
    3033            0 :     async fn maybe_load_tenant(
    3034            0 :         &self,
    3035            0 :         tenant_id: TenantId,
    3036            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    3037            0 :     ) -> Result<(), ApiError> {
    3038              :         // Check if the tenant is present in memory, and select an AZ to use when loading
    3039              :         // if we will load it.
    3040            0 :         let load_in_az = {
    3041            0 :             let locked = self.inner.read().unwrap();
    3042            0 :             let existing = locked
    3043            0 :                 .tenants
    3044            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3045            0 :                 .next();
    3046              : 
    3047              :             // If the tenant is not present in memory, we expect to load it from the database,
    3048              :             // so let's figure out what AZ to load it into while we have self.inner locked.
    3049            0 :             if existing.is_none() {
    3050            0 :                 locked
    3051            0 :                     .scheduler
    3052            0 :                     .get_az_for_new_tenant()
    3053            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    3054            0 :                         "No AZ with nodes found to load tenant"
    3055            0 :                     )))?
    3056              :             } else {
    3057              :                 // We already have this tenant in memory
    3058            0 :                 return Ok(());
    3059              :             }
    3060              :         };
    3061              : 
    3062            0 :         let tenant_shards = self.persistence.load_tenant(tenant_id).await?;
    3063            0 :         if tenant_shards.is_empty() {
    3064            0 :             return Err(ApiError::NotFound(
    3065            0 :                 anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    3066            0 :             ));
    3067            0 :         }
    3068              : 
    3069              :         // Update the persistent shards with the AZ that we are about to apply to in-memory state
    3070            0 :         self.persistence
    3071            0 :             .set_tenant_shard_preferred_azs(
    3072            0 :                 tenant_shards
    3073            0 :                     .iter()
    3074            0 :                     .map(|t| {
    3075            0 :                         (
    3076            0 :                             t.get_tenant_shard_id().expect("Corrupt shard in database"),
    3077            0 :                             Some(load_in_az.clone()),
    3078            0 :                         )
    3079            0 :                     })
    3080            0 :                     .collect(),
    3081              :             )
    3082            0 :             .await?;
    3083              : 
    3084            0 :         let mut locked = self.inner.write().unwrap();
    3085            0 :         tracing::info!(
    3086            0 :             "Loaded {} shards for tenant {}",
    3087            0 :             tenant_shards.len(),
    3088              :             tenant_id
    3089              :         );
    3090              : 
    3091            0 :         locked.tenants.extend(tenant_shards.into_iter().map(|p| {
    3092            0 :             let intent = IntentState::new(Some(load_in_az.clone()));
    3093            0 :             let shard =
    3094            0 :                 TenantShard::from_persistent(p, intent).expect("Corrupt shard row in database");
    3095              : 
    3096              :             // Sanity check: when loading on-demand, we should only ever be loading something Detached
    3097            0 :             debug_assert!(shard.policy == PlacementPolicy::Detached);
    3098            0 :             if shard.policy != PlacementPolicy::Detached {
    3099            0 :                 tracing::error!(
    3100            0 :                     "Tenant shard {} loaded on-demand, but has non-Detached policy {:?}",
    3101              :                     shard.tenant_shard_id,
    3102              :                     shard.policy
    3103              :                 );
    3104            0 :             }
    3105              : 
    3106            0 :             (shard.tenant_shard_id, shard)
    3107            0 :         }));
    3108              : 
    3109            0 :         Ok(())
    3110            0 :     }
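                      : 
                      :     // Typical call pattern (as in [`Self::tenant_location_config`] below): the guard
                      :     // proves the caller holds the per-tenant exclusive lock required by the doc
                      :     // comment above.
                      :     //
                      :     //     let _tenant_lock =
                      :     //         trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::LocationConfig).await;
                      :     //     self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;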
    3111              : 
    3112              :     /// If all shards for a tenant are detached, and in a fully quiescent state (no observed locations on pageservers),
    3113              :     /// and have no reconciler running, then we can drop the tenant from memory.  It will be reloaded on-demand
    3114              :     /// if we are asked to attach it again (see [`Self::maybe_load_tenant`]).
    3115              :     ///
    3116              :     /// Caller must demonstrate they hold a lock guard, as otherwise it is unsafe to drop a tenant from
    3117              :     /// memory while some other function might assume it continues to exist while not holding the lock on Self::inner.
    3118            0 :     fn maybe_drop_tenant(
    3119            0 :         &self,
    3120            0 :         tenant_id: TenantId,
    3121            0 :         locked: &mut std::sync::RwLockWriteGuard<ServiceState>,
    3122            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    3123            0 :     ) {
    3124            0 :         let mut tenant_shards = locked.tenants.range(TenantShardId::tenant_range(tenant_id));
    3125            0 :         if tenant_shards.all(|(_id, shard)| {
    3126            0 :             shard.policy == PlacementPolicy::Detached
    3127            0 :                 && shard.reconciler.is_none()
    3128            0 :                 && shard.observed.is_empty()
    3129            0 :         }) {
    3130            0 :             let keys = locked
    3131            0 :                 .tenants
    3132            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3133            0 :                 .map(|(id, _)| id)
    3134            0 :                 .copied()
    3135            0 :                 .collect::<Vec<_>>();
    3136            0 :             for key in keys {
    3137            0 :                 tracing::info!("Dropping detached tenant shard {} from memory", key);
    3138            0 :                 locked.tenants.remove(&key);
    3139              :             }
    3140            0 :         }
    3141            0 :     }
    3142              : 
    3143              :     /// This API is used by the cloud control plane to migrate unsharded tenants that it created
    3144              :     /// directly with pageservers into this service.
    3145              :     ///
    3146              :     /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
    3147              :     /// has attempted to call this API. Failure to abide by this rule may lead to S3 corruption.
    3148              :     /// Think of the first attempt to call this API as a transfer of absolute authority over the
    3149              :     /// tenant's source of generation numbers.
    3150              :     ///
    3151              :     /// The mode in this request gives coarse-grained control of tenants:
    3152              :     /// - Call with mode Attached* to upsert the tenant.
    3153              :     /// - Call with mode Secondary to either onboard a tenant without attaching it, or
    3154              :     ///   to set an existing tenant to PolicyMode::Secondary
    3155              :     /// - Call with mode Detached to switch to PolicyMode::Detached
    3156            0 :     pub(crate) async fn tenant_location_config(
    3157            0 :         &self,
    3158            0 :         tenant_shard_id: TenantShardId,
    3159            0 :         req: TenantLocationConfigRequest,
    3160            0 :     ) -> Result<TenantLocationConfigResponse, ApiError> {
    3161              :         // We require an exclusive lock, because we are updating both persistent and in-memory state
    3162            0 :         let _tenant_lock = trace_exclusive_lock(
    3163            0 :             &self.tenant_op_locks,
    3164            0 :             tenant_shard_id.tenant_id,
    3165            0 :             TenantOperations::LocationConfig,
    3166            0 :         )
    3167            0 :         .await;
    3168              : 
    3169            0 :         let tenant_id = if !tenant_shard_id.is_unsharded() {
    3170            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    3171            0 :                 "This API is for importing single-sharded or unsharded tenants"
    3172            0 :             )));
    3173              :         } else {
    3174            0 :             tenant_shard_id.tenant_id
    3175              :         };
    3176              : 
    3177              :         // In case we are waking up a Detached tenant
    3178            0 :         match self.maybe_load_tenant(tenant_id, &_tenant_lock).await {
    3179            0 :             Ok(()) | Err(ApiError::NotFound(_)) => {
    3180            0 :                 // This is a creation or an update
    3181            0 :             }
    3182            0 :             Err(e) => {
    3183            0 :                 return Err(e);
    3184              :             }
    3185              :         };
    3186              : 
    3187              :         // First check if this is a creation or an update
    3188            0 :         let create_or_update = self.tenant_location_config_prepare(tenant_id, req);
    3189              : 
    3190            0 :         let mut result = TenantLocationConfigResponse {
    3191            0 :             shards: Vec::new(),
    3192            0 :             stripe_size: None,
    3193            0 :         };
    3194            0 :         let waiters = match create_or_update {
    3195            0 :             TenantCreateOrUpdate::Create(create_req) => {
    3196            0 :                 let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
    3197            0 :                 result.shards = create_resp
    3198            0 :                     .shards
    3199            0 :                     .into_iter()
    3200            0 :                     .map(|s| TenantShardLocation {
    3201            0 :                         node_id: s.node_id,
    3202            0 :                         shard_id: s.shard_id,
    3203            0 :                     })
    3204            0 :                     .collect();
    3205            0 :                 waiters
    3206              :             }
    3207            0 :             TenantCreateOrUpdate::Update(updates) => {
    3208              :                 // Persist updates
    3209              :                 // Ordering: write to the database before applying changes in-memory, so that
    3210              :                 // we will not appear time-travel backwards on a restart.
    3211              : 
    3212            0 :                 let mut schedule_context = ScheduleContext::default();
    3213              :                 for ShardUpdate {
    3214            0 :                     tenant_shard_id,
    3215            0 :                     placement_policy,
    3216            0 :                     tenant_config,
    3217            0 :                     generation,
    3218            0 :                     scheduling_policy,
    3219            0 :                 } in &updates
    3220              :                 {
    3221            0 :                     self.persistence
    3222            0 :                         .update_tenant_shard(
    3223            0 :                             TenantFilter::Shard(*tenant_shard_id),
    3224            0 :                             Some(placement_policy.clone()),
    3225            0 :                             Some(tenant_config.clone()),
    3226            0 :                             *generation,
    3227            0 :                             *scheduling_policy,
    3228            0 :                         )
    3229            0 :                         .await?;
    3230              :                 }
    3231              : 
    3232              :                 // Apply updates in-memory
    3233            0 :                 let mut waiters = Vec::new();
    3234              :                 {
    3235            0 :                     let mut locked = self.inner.write().unwrap();
    3236            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    3237              : 
    3238              :                     for ShardUpdate {
    3239            0 :                         tenant_shard_id,
    3240            0 :                         placement_policy,
    3241            0 :                         tenant_config,
    3242            0 :                         generation: update_generation,
    3243            0 :                         scheduling_policy,
    3244            0 :                     } in updates
    3245              :                     {
    3246            0 :                         let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    3247            0 :                             tracing::warn!("Shard {tenant_shard_id} removed while updating");
    3248            0 :                             continue;
    3249              :                         };
    3250              : 
    3251              :                         // Update stripe size
    3252            0 :                         if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
    3253            0 :                             result.stripe_size = Some(shard.shard.stripe_size);
    3254            0 :                         }
    3255              : 
    3256            0 :                         shard.policy = placement_policy;
    3257            0 :                         shard.config = tenant_config;
    3258            0 :                         if let Some(generation) = update_generation {
    3259            0 :                             shard.generation = Some(generation);
    3260            0 :                         }
    3261              : 
    3262            0 :                         if let Some(scheduling_policy) = scheduling_policy {
    3263            0 :                             shard.set_scheduling_policy(scheduling_policy);
    3264            0 :                         }
    3265              : 
    3266            0 :                         shard.schedule(scheduler, &mut schedule_context)?;
    3267              : 
    3268            0 :                         let maybe_waiter =
    3269            0 :                             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3270            0 :                         if let Some(waiter) = maybe_waiter {
    3271            0 :                             waiters.push(waiter);
    3272            0 :                         }
    3273              : 
    3274            0 :                         if let Some(node_id) = shard.intent.get_attached() {
    3275            0 :                             result.shards.push(TenantShardLocation {
    3276            0 :                                 shard_id: tenant_shard_id,
    3277            0 :                                 node_id: *node_id,
    3278            0 :                             })
    3279            0 :                         }
    3280              :                     }
    3281              :                 }
    3282            0 :                 waiters
    3283              :             }
    3284              :         };
    3285              : 
    3286            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3287              :             // Do not treat a reconcile error as fatal: we have already applied any requested
    3288              :             // Intent changes, and the reconcile can fail for external reasons like unavailable
    3289              :             // compute notification API.  In these cases, it is important that we do not
    3290              :             // cause the cloud control plane to retry forever on this API.
    3291            0 :             tracing::warn!(
    3292            0 :                 "Failed to reconcile after /location_config: {e}, returning success anyway"
    3293              :             );
    3294            0 :         }
    3295              : 
    3296              :         // Logging the full result is useful because it lets us cross-check what the cloud control
    3297              :         // plane's tenant_shards table should contain.
    3298            0 :         tracing::info!("Complete, returning {result:?}");
    3299              : 
    3300            0 :         Ok(result)
    3301            0 :     }
    3302              : 
    3303            0 :     pub(crate) async fn tenant_config_patch(
    3304            0 :         &self,
    3305            0 :         req: TenantConfigPatchRequest,
    3306            0 :     ) -> Result<(), ApiError> {
    3307            0 :         let _tenant_lock = trace_exclusive_lock(
    3308            0 :             &self.tenant_op_locks,
    3309            0 :             req.tenant_id,
    3310            0 :             TenantOperations::ConfigPatch,
    3311            0 :         )
    3312            0 :         .await;
    3313              : 
    3314            0 :         let tenant_id = req.tenant_id;
    3315            0 :         let patch = req.config;
    3316              : 
    3317            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3318              : 
    3319            0 :         let base = {
    3320            0 :             let locked = self.inner.read().unwrap();
    3321            0 :             let shards = locked
    3322            0 :                 .tenants
    3323            0 :                 .range(TenantShardId::tenant_range(req.tenant_id));
    3324              : 
    3325            0 :             let mut configs = shards.map(|(_sid, shard)| &shard.config).peekable();
    3326              : 
    3327            0 :             let first = match configs.peek() {
    3328            0 :                 Some(first) => (*first).clone(),
    3329              :                 None => {
    3330            0 :                     return Err(ApiError::NotFound(
    3331            0 :                         anyhow::anyhow!("Tenant {} not found", req.tenant_id).into(),
    3332            0 :                     ));
    3333              :                 }
    3334              :             };
    3335              : 
    3336            0 :             if !configs.all_equal() {
    3337            0 :                 tracing::error!("Tenant configs for {} are mismatched", req.tenant_id);
    3338              :                 // This can't happen because we atomically update the database records
    3339              :                 // of all shards to the new value in [`Self::set_tenant_config_and_reconcile`].
    3340            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3341            0 :                     "Tenant configs for {} are mismatched",
    3342            0 :                     req.tenant_id
    3343            0 :                 )));
    3344            0 :             }
    3345              : 
    3346            0 :             first
    3347              :         };
    3348              : 
    3349            0 :         let updated_config = base
    3350            0 :             .apply_patch(patch)
    3351            0 :             .map_err(|err| ApiError::BadRequest(anyhow::anyhow!(err)))?;
    3352            0 :         self.set_tenant_config_and_reconcile(tenant_id, updated_config)
    3353            0 :             .await
    3354            0 :     }
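                      : 
                      :     // Caller-side sketch (names from the handler above): the patch is applied to the
                      :     // current config via `apply_patch`, then persisted and reconciled for all shards.
                      :     //
                      :     //     let req = TenantConfigPatchRequest { tenant_id, config: patch };
                      :     //     svc.tenant_config_patch(req).await?;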
    3355              : 
    3356            0 :     pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
    3357              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3358            0 :         let _tenant_lock = trace_exclusive_lock(
    3359            0 :             &self.tenant_op_locks,
    3360            0 :             req.tenant_id,
    3361            0 :             TenantOperations::ConfigSet,
    3362            0 :         )
    3363            0 :         .await;
    3364              : 
    3365            0 :         self.maybe_load_tenant(req.tenant_id, &_tenant_lock).await?;
    3366              : 
    3367            0 :         self.set_tenant_config_and_reconcile(req.tenant_id, req.config)
    3368            0 :             .await
    3369            0 :     }
    3370              : 
    3371            0 :     async fn set_tenant_config_and_reconcile(
    3372            0 :         &self,
    3373            0 :         tenant_id: TenantId,
    3374            0 :         config: TenantConfig,
    3375            0 :     ) -> Result<(), ApiError> {
    3376            0 :         self.persistence
    3377            0 :             .update_tenant_shard(
    3378            0 :                 TenantFilter::Tenant(tenant_id),
    3379            0 :                 None,
    3380            0 :                 Some(config.clone()),
    3381            0 :                 None,
    3382            0 :                 None,
    3383            0 :             )
    3384            0 :             .await?;
    3385              : 
    3386            0 :         let waiters = {
    3387            0 :             let mut waiters = Vec::new();
    3388            0 :             let mut locked = self.inner.write().unwrap();
    3389            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    3390            0 :             for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3391            0 :                 shard.config = config.clone();
    3392            0 :                 if let Some(waiter) =
    3393            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3394            0 :                 {
    3395            0 :                     waiters.push(waiter);
    3396            0 :                 }
    3397              :             }
    3398            0 :             waiters
    3399              :         };
    3400              : 
    3401            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3402              :             // Treat this as success because we have stored the configuration.  If e.g.
    3403              :             // a node was unavailable at this time, it should not stop us accepting a
    3404              :             // configuration change.
    3405            0 :             tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
    3406            0 :         }
    3407              : 
    3408            0 :         Ok(())
    3409            0 :     }
    3410              : 
    3411            0 :     pub(crate) fn tenant_config_get(
    3412            0 :         &self,
    3413            0 :         tenant_id: TenantId,
    3414            0 :     ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
    3415            0 :         let config = {
    3416            0 :             let locked = self.inner.read().unwrap();
    3417              : 
    3418            0 :             match locked
    3419            0 :                 .tenants
    3420            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3421            0 :                 .next()
    3422              :             {
    3423            0 :                 Some((_tenant_shard_id, shard)) => shard.config.clone(),
    3424              :                 None => {
    3425            0 :                     return Err(ApiError::NotFound(
    3426            0 :                         anyhow::anyhow!("Tenant not found").into(),
    3427            0 :                     ));
    3428              :                 }
    3429              :             }
    3430              :         };
    3431              : 
    3432              :         // Unlike the pageserver, we do not have a set of global defaults: the config is
    3433              :         // entirely per-tenant.  Therefore the distinction between `tenant_specific_overrides`
    3434              :         // and `effective_config` in the response is meaningless, but we retain that syntax
    3435              :         // in order to remain compatible with the pageserver API.
    3436              : 
    3437            0 :         let response = HashMap::from([
    3438              :             (
    3439              :                 "tenant_specific_overrides",
    3440            0 :                 serde_json::to_value(&config)
    3441            0 :                     .context("serializing tenant specific overrides")
    3442            0 :                     .map_err(ApiError::InternalServerError)?,
    3443              :             ),
    3444              :             (
    3445            0 :                 "effective_config",
    3446            0 :                 serde_json::to_value(&config)
    3447            0 :                     .context("serializing effective config")
    3448            0 :                     .map_err(ApiError::InternalServerError)?,
    3449              :             ),
    3450              :         ]);
    3451              : 
    3452            0 :         Ok(response)
    3453            0 :     }
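                      : 
                      :     // Editor's note: a hypothetical illustration (not part of this file) of the
                      :     // response built above.  Since there are no controller-side global defaults,
                      :     // both keys carry the identical per-tenant config, so a caller may read either:
                      :     //
                      :     //     let response = service.tenant_config_get(tenant_id)?;
                      :     //     assert_eq!(
                      :     //         response["tenant_specific_overrides"],
                      :     //         response["effective_config"],
                      :     //     );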
    3454              : 
    3455            0 :     pub(crate) async fn tenant_time_travel_remote_storage(
    3456            0 :         &self,
    3457            0 :         time_travel_req: &TenantTimeTravelRequest,
    3458            0 :         tenant_id: TenantId,
    3459            0 :         timestamp: Cow<'_, str>,
    3460            0 :         done_if_after: Cow<'_, str>,
    3461            0 :     ) -> Result<(), ApiError> {
    3462            0 :         let _tenant_lock = trace_exclusive_lock(
    3463            0 :             &self.tenant_op_locks,
    3464            0 :             tenant_id,
    3465            0 :             TenantOperations::TimeTravelRemoteStorage,
    3466            0 :         )
    3467            0 :         .await;
    3468              : 
    3469            0 :         let node = {
    3470            0 :             let mut locked = self.inner.write().unwrap();
    3471              :             // Just a sanity check to prevent misuse: the API expects that the tenant is fully
    3472              :             // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
    3473              :             // but only at the start of the process, so it's really just to prevent operator
    3474              :             // mistakes.
    3475            0 :             for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
    3476            0 :                 if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
    3477              :                 {
    3478            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3479            0 :                         "We want tenant to be detached in shard with tenant_shard_id={shard_id}"
    3480            0 :                     )));
    3481            0 :                 }
    3482            0 :                 let maybe_attached = shard
    3483            0 :                     .observed
    3484            0 :                     .locations
    3485            0 :                     .iter()
    3486            0 :                     .filter_map(|(node_id, observed_location)| {
    3487            0 :                         observed_location
    3488            0 :                             .conf
    3489            0 :                             .as_ref()
    3490            0 :                             .map(|loc| (node_id, observed_location, loc.mode))
    3491            0 :                     })
    3492            0 :                     .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
    3493            0 :                 if let Some((node_id, _observed_location, mode)) = maybe_attached {
    3494            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3495            0 :                         "We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}"
    3496            0 :                     )));
    3497            0 :                 }
    3498              :             }
    3499            0 :             let scheduler = &mut locked.scheduler;
    3500              :             // Right now we only perform the operation on a single node without parallelization
    3501              :             // TODO fan out the operation to multiple nodes for better performance
    3502            0 :             let node_id = scheduler.any_available_node()?;
    3503            0 :             let node = locked
    3504            0 :                 .nodes
    3505            0 :                 .get(&node_id)
    3506            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3507            0 :             node.clone()
    3508              :         };
    3509              : 
    3510              :         // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
    3511            0 :         let mut counts = time_travel_req
    3512            0 :             .shard_counts
    3513            0 :             .iter()
    3514            0 :             .copied()
    3515            0 :             .collect::<HashSet<_>>()
    3516            0 :             .into_iter()
    3517            0 :             .collect::<Vec<_>>();
    3518            0 :         counts.sort_unstable();
    3519              : 
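                      :         // Editor's note: hypothetical values illustrating the dedup + sort above,
                      :         // e.g. for a tenant split from 1 shard to 4 whose request repeats a count:
                      :         //
                      :         //     shard_counts = [ShardCount(4), ShardCount(1), ShardCount(4)]
                      :         //     counts       = [ShardCount(1), ShardCount(4)]
                      :         //
                      :         // Every (shard_number, shard_count) pair then gets its own recovery call below.
                      : 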
    3520            0 :         for count in counts {
    3521            0 :             let shard_ids = (0..count.count())
    3522            0 :                 .map(|i| TenantShardId {
    3523            0 :                     tenant_id,
    3524            0 :                     shard_number: ShardNumber(i),
    3525            0 :                     shard_count: count,
    3526            0 :                 })
    3527            0 :                 .collect::<Vec<_>>();
    3528            0 :             for tenant_shard_id in shard_ids {
    3529            0 :                 let client = PageserverClient::new(
    3530            0 :                     node.get_id(),
    3531            0 :                     self.http_client.clone(),
    3532            0 :                     node.base_url(),
    3533            0 :                     self.config.pageserver_jwt_token.as_deref(),
    3534              :                 );
    3535              : 
    3536            0 :                 tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
    3537              : 
    3538            0 :                 client
    3539            0 :                     .tenant_time_travel_remote_storage(
    3540            0 :                         tenant_shard_id,
    3541            0 :                         &timestamp,
    3542            0 :                         &done_if_after,
    3543            0 :                     )
    3544            0 :                     .await
    3545            0 :                     .map_err(|e| {
    3546            0 :                         ApiError::InternalServerError(anyhow::anyhow!(
    3547            0 :                             "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
    3548            0 :                             node
    3549            0 :                         ))
    3550            0 :                     })?;
    3551              :             }
    3552              :         }
    3553            0 :         Ok(())
    3554            0 :     }
    3555              : 
    3556            0 :     pub(crate) async fn tenant_secondary_download(
    3557            0 :         &self,
    3558            0 :         tenant_id: TenantId,
    3559            0 :         wait: Option<Duration>,
    3560            0 :     ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
    3561            0 :         let _tenant_lock = trace_shared_lock(
    3562            0 :             &self.tenant_op_locks,
    3563            0 :             tenant_id,
    3564            0 :             TenantOperations::SecondaryDownload,
    3565            0 :         )
    3566            0 :         .await;
    3567              : 
    3568              :         // Acquire the lock and collect the (shard, node) pairs to which we will send requests
    3569            0 :         let targets = {
    3570            0 :             let locked = self.inner.read().unwrap();
    3571            0 :             let mut targets = Vec::new();
    3572              : 
    3573            0 :             for (tenant_shard_id, shard) in
    3574            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3575              :             {
    3576            0 :                 for node_id in shard.intent.get_secondary() {
    3577            0 :                     let node = locked
    3578            0 :                         .nodes
    3579            0 :                         .get(node_id)
    3580            0 :                         .expect("Pageservers may not be deleted while referenced");
    3581            0 : 
    3582            0 :                     targets.push((*tenant_shard_id, node.clone()));
    3583            0 :                 }
    3584              :             }
    3585            0 :             targets
    3586              :         };
    3587              : 
    3588              :         // Issue concurrent requests to all shards' locations
    3589            0 :         let mut futs = FuturesUnordered::new();
    3590            0 :         for (tenant_shard_id, node) in targets {
    3591            0 :             let client = PageserverClient::new(
    3592            0 :                 node.get_id(),
    3593            0 :                 self.http_client.clone(),
    3594            0 :                 node.base_url(),
    3595            0 :                 self.config.pageserver_jwt_token.as_deref(),
    3596              :             );
    3597            0 :             futs.push(async move {
    3598            0 :                 let result = client
    3599            0 :                     .tenant_secondary_download(tenant_shard_id, wait)
    3600            0 :                     .await;
    3601            0 :                 (result, node, tenant_shard_id)
    3602            0 :             })
    3603              :         }
    3604              : 
    3605              :         // Handle any errors returned by pageservers.  These include general failures (503s,
    3606              :         // 500s, timeouts) as well as this request racing with a scheduling operation, such that
    3607              :         // the tenant shard we're calling no longer exists on that pageserver.
    3608            0 :         let mut aggregate_progress = SecondaryProgress::default();
    3609            0 :         let mut aggregate_status: Option<StatusCode> = None;
    3610            0 :         let mut error: Option<mgmt_api::Error> = None;
    3611            0 :         while let Some((result, node, tenant_shard_id)) = futs.next().await {
    3612            0 :             match result {
    3613            0 :                 Err(e) => {
    3614              :                     // Secondary downloads are advisory: if one location fails but another succeeds,
    3615              :                     // we still report success, so that whoever is calling us can proceed with their
    3616              :                     // migration, albeit with a slightly less warm cache than they had hoped for.
    3617            0 :                     tracing::warn!("Secondary download error from pageserver {node}: {e}",);
    3618            0 :                     error = Some(e)
    3619              :                 }
    3620            0 :                 Ok((status_code, progress)) => {
    3621            0 :                     tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
    3622            0 :                     aggregate_progress.layers_downloaded += progress.layers_downloaded;
    3623            0 :                     aggregate_progress.layers_total += progress.layers_total;
    3624            0 :                     aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
    3625            0 :                     aggregate_progress.bytes_total += progress.bytes_total;
    3626            0 :                     aggregate_progress.heatmap_mtime =
    3627            0 :                         std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
    3628            0 :                     aggregate_status = match aggregate_status {
    3629            0 :                         None => Some(status_code),
    3630            0 :                         Some(StatusCode::OK) => Some(status_code),
    3631            0 :                         Some(cur) => {
    3632              :                             // Other status codes (e.g. 202) -- do not overwrite.
    3633            0 :                             Some(cur)
    3634              :                         }
    3635              :                     };
    3636              :                 }
    3637              :             }
    3638              :         }
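                      : 
                      :         // Editor's sketch (hypothetical helper, mirroring the match above): the
                      :         // aggregation rule is "200 may be overwritten; any other status is sticky":
                      :         //
                      :         //     fn fold_status(cur: Option<StatusCode>, next: StatusCode) -> StatusCode {
                      :         //         match cur {
                      :         //             None | Some(StatusCode::OK) => next,
                      :         //             Some(other) => other,
                      :         //         }
                      :         //     }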
    3639              : 
    3640              :         // If any shard returned a non-OK status (e.g. 202), surface that as our result instead of 200.
    3641            0 :         match aggregate_status {
    3642              :             None => {
    3643            0 :                 match error {
    3644            0 :                     Some(e) => {
    3645              :                         // No successes, and an error: surface it
    3646            0 :                         Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
    3647              :                     }
    3648              :                     None => {
    3649              :                         // No shards found
    3650            0 :                         Err(ApiError::NotFound(
    3651            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    3652            0 :                         ))
    3653              :                     }
    3654              :                 }
    3655              :             }
    3656            0 :             Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
    3657              :         }
    3658            0 :     }
    3659              : 
    3660            0 :     pub(crate) async fn tenant_delete(
    3661            0 :         self: &Arc<Self>,
    3662            0 :         tenant_id: TenantId,
    3663            0 :     ) -> Result<StatusCode, ApiError> {
    3664            0 :         let _tenant_lock =
    3665            0 :             trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
    3666              : 
    3667            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3668              : 
    3669              :         // Detach all shards. This also deletes local pageserver shard data.
    3670            0 :         let (detach_waiters, node) = {
    3671            0 :             let mut detach_waiters = Vec::new();
    3672            0 :             let mut locked = self.inner.write().unwrap();
    3673            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3674            0 :             for (_, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3675              :                 // Update the tenant's intent to remove all attachments
    3676            0 :                 shard.policy = PlacementPolicy::Detached;
    3677            0 :                 shard
    3678            0 :                     .schedule(scheduler, &mut ScheduleContext::default())
    3679            0 :                     .expect("De-scheduling is infallible");
    3680            0 :                 debug_assert!(shard.intent.get_attached().is_none());
    3681            0 :                 debug_assert!(shard.intent.get_secondary().is_empty());
    3682              : 
    3683            0 :                 if let Some(waiter) =
    3684            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3685            0 :                 {
    3686            0 :                     detach_waiters.push(waiter);
    3687            0 :                 }
    3688              :             }
    3689              : 
    3690              :             // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
    3691              :             // was attached, just has to be able to see the S3 content)
    3692            0 :             let node_id = scheduler.any_available_node()?;
    3693            0 :             let node = nodes
    3694            0 :                 .get(&node_id)
    3695            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3696            0 :             (detach_waiters, node.clone())
    3697              :         };
    3698              : 
    3699              :         // This reconcile wait can fail in a few ways:
    3700              :         //  A) there is a very long queue for the reconciler semaphore
    3701              :         //  B) some pageserver is failing to handle a detach promptly
    3702              :         //  C) some pageserver goes offline right at the moment we send it a request.
    3703              :         //
    3704              :         // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
    3705              :         // the next attempt to reconcile will silently skip detaches for an offline node and succeed.  If B happens,
    3706              :         // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
    3707              :         // deleting the underlying data).
    3708            0 :         self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
    3709            0 :             .await?;
    3710              : 
    3711              :         // Delete the entire tenant (all shards) from remote storage via a random pageserver.
    3712              :         // Passing an unsharded tenant ID will cause the pageserver to remove all remote paths with
    3713              :         // the tenant ID prefix, including all shards (even possibly stale ones).
    3714            0 :         match node
    3715            0 :             .with_client_retries(
    3716            0 :                 |client| async move {
    3717            0 :                     client
    3718            0 :                         .tenant_delete(TenantShardId::unsharded(tenant_id))
    3719            0 :                         .await
    3720            0 :                 },
    3721            0 :                 &self.http_client,
    3722            0 :                 &self.config.pageserver_jwt_token,
    3723              :                 1,
    3724              :                 3,
    3725              :                 RECONCILE_TIMEOUT,
    3726            0 :                 &self.cancel,
    3727              :             )
    3728            0 :             .await
    3729            0 :             .unwrap_or(Err(mgmt_api::Error::Cancelled))
    3730              :         {
    3731            0 :             Ok(_) => {}
    3732              :             Err(mgmt_api::Error::Cancelled) => {
    3733            0 :                 return Err(ApiError::ShuttingDown);
    3734              :             }
    3735            0 :             Err(e) => {
    3736              :                 // This is unexpected: remote deletion should be infallible, unless the object store
    3737              :                 // at large is unavailable.
    3738            0 :                 tracing::error!("Error deleting via node {node}: {e}");
    3739            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    3740              :             }
    3741              :         }
    3742              : 
    3743              :         // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
    3744              :         // our in-memory state and database state.
    3745              : 
    3746              :         // Ordering: we delete persistent state first: if we then
    3747              :         // crash, we will drop the in-memory state.
    3748              : 
    3749              :         // Drop persistent state.
    3750            0 :         self.persistence.delete_tenant(tenant_id).await?;
    3751              : 
    3752              :         // Drop in-memory state
    3753              :         {
    3754            0 :             let mut locked = self.inner.write().unwrap();
    3755            0 :             let (_nodes, tenants, scheduler) = locked.parts_mut();
    3756              : 
    3757              :             // Dereference Scheduler from shards before dropping them
    3758            0 :             for (_tenant_shard_id, shard) in
    3759            0 :                 tenants.range_mut(TenantShardId::tenant_range(tenant_id))
    3760            0 :             {
    3761            0 :                 shard.intent.clear(scheduler);
    3762            0 :             }
    3763              : 
    3764            0 :             tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
    3765            0 :             tracing::info!(
    3766            0 :                 "Deleted tenant {tenant_id}, now have {} tenants",
    3767            0 :                 locked.tenants.len()
    3768              :             );
    3769              :         };
    3770              : 
    3771              :         // Delete the tenant from safekeepers (if needed)
    3772            0 :         self.tenant_delete_safekeepers(tenant_id)
    3773            0 :             .instrument(tracing::info_span!("tenant_delete_safekeepers", %tenant_id))
    3774            0 :             .await?;
    3775              : 
    3776              :         // Success is represented as 404, to imitate the existing pageserver deletion API
    3777            0 :         Ok(StatusCode::NOT_FOUND)
    3778            0 :     }
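                      : 
                      :     // Editor's note: since success is reported as 404 (imitating the pageserver
                      :     // deletion API), a hypothetical caller would treat NOT_FOUND as completion:
                      :     //
                      :     //     match service.tenant_delete(tenant_id).await? {
                      :     //         StatusCode::NOT_FOUND => {} // deleted (or never existed): done
                      :     //         status => tracing::warn!("unexpected tenant deletion status {status}"),
                      :     //     }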
    3779              : 
    3780              :     /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] sets the TenantConfig
    3781              :     /// for a tenant.  The TenantConfig is passed through to pageservers, whereas this function modifies
    3782              :     /// the tenant's policies (placement and scheduling) within the storage controller.
    3783            0 :     pub(crate) async fn tenant_update_policy(
    3784            0 :         &self,
    3785            0 :         tenant_id: TenantId,
    3786            0 :         req: TenantPolicyRequest,
    3787            0 :     ) -> Result<(), ApiError> {
    3788              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3789            0 :         let _tenant_lock = trace_exclusive_lock(
    3790            0 :             &self.tenant_op_locks,
    3791            0 :             tenant_id,
    3792            0 :             TenantOperations::UpdatePolicy,
    3793            0 :         )
    3794            0 :         .await;
    3795              : 
    3796            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3797              : 
    3798            0 :         failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
    3799              : 
    3800              :         let TenantPolicyRequest {
    3801            0 :             placement,
    3802            0 :             mut scheduling,
    3803            0 :         } = req;
    3804              : 
    3805            0 :         if let Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) = placement {
    3806              :             // When someone configures a tenant to detach, we force the scheduling policy to enable
    3807              :             // this to take effect.
    3808            0 :             if scheduling.is_none() {
    3809            0 :                 scheduling = Some(ShardSchedulingPolicy::Active);
    3810            0 :             }
    3811            0 :         }
    3812              : 
    3813            0 :         self.persistence
    3814            0 :             .update_tenant_shard(
    3815            0 :                 TenantFilter::Tenant(tenant_id),
    3816            0 :                 placement.clone(),
    3817            0 :                 None,
    3818            0 :                 None,
    3819            0 :                 scheduling,
    3820            0 :             )
    3821            0 :             .await?;
    3822              : 
    3823            0 :         let mut schedule_context = ScheduleContext::default();
    3824            0 :         let mut locked = self.inner.write().unwrap();
    3825            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    3826            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3827            0 :             if let Some(placement) = &placement {
    3828            0 :                 shard.policy = placement.clone();
    3829              : 
    3830            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3831            0 :                                "Updated placement policy to {placement:?}");
    3832            0 :             }
    3833              : 
    3834            0 :             if let Some(scheduling) = &scheduling {
    3835            0 :                 shard.set_scheduling_policy(*scheduling);
    3836              : 
    3837            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3838            0 :                                "Updated scheduling policy to {scheduling:?}");
    3839            0 :             }
    3840              : 
    3841              :             // In case scheduling is being switched back on, try it now.
    3842            0 :             shard.schedule(scheduler, &mut schedule_context).ok();
    3843            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3844              :         }
    3845              : 
    3846            0 :         Ok(())
    3847            0 :     }
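                      : 
                      :     // Editor's sketch: a hypothetical request that detaches a tenant.  Per the
                      :     // logic above, an unset scheduling policy is upgraded to Active so the detach
                      :     // can actually be scheduled:
                      :     //
                      :     //     let req = TenantPolicyRequest {
                      :     //         placement: Some(PlacementPolicy::Detached),
                      :     //         scheduling: None, // becomes Some(ShardSchedulingPolicy::Active)
                      :     //     };
                      :     //     service.tenant_update_policy(tenant_id, req).await?;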
    3848              : 
    3849            0 :     pub(crate) async fn tenant_timeline_create_pageservers(
    3850            0 :         &self,
    3851            0 :         tenant_id: TenantId,
    3852            0 :         mut create_req: TimelineCreateRequest,
    3853            0 :     ) -> Result<TimelineInfo, ApiError> {
    3854            0 :         tracing::info!(
    3855            0 :             "Creating timeline {}/{}",
    3856              :             tenant_id,
    3857              :             create_req.new_timeline_id,
    3858              :         );
    3859              : 
    3860            0 :         self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    3861            0 :             if targets.0.is_empty() {
    3862            0 :                 return Err(ApiError::NotFound(
    3863            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3864            0 :                 ));
    3865            0 :             };
    3866              : 
    3867            0 :             let (shard_zero_tid, shard_zero_locations) =
    3868            0 :                 targets.0.pop_first().expect("Must have at least one shard");
    3869            0 :             assert!(shard_zero_tid.is_shard_zero());
    3870              : 
    3871            0 :             async fn create_one(
    3872            0 :                 tenant_shard_id: TenantShardId,
    3873            0 :                 locations: ShardMutationLocations,
    3874            0 :                 http_client: reqwest::Client,
    3875            0 :                 jwt: Option<String>,
    3876            0 :                 mut create_req: TimelineCreateRequest,
    3877            0 :             ) -> Result<TimelineInfo, ApiError> {
    3878            0 :                 let latest = locations.latest.node;
    3879              : 
    3880            0 :                 tracing::info!(
    3881            0 :                     "Creating timeline on shard {}/{}, attached to node {latest} in generation {:?}",
    3882              :                     tenant_shard_id,
    3883              :                     create_req.new_timeline_id,
    3884              :                     locations.latest.generation
    3885              :                 );
    3886              : 
    3887            0 :                 let client =
    3888            0 :                     PageserverClient::new(latest.get_id(), http_client.clone(), latest.base_url(), jwt.as_deref());
    3889              : 
    3890            0 :                 let timeline_info = client
    3891            0 :                     .timeline_create(tenant_shard_id, &create_req)
    3892            0 :                     .await
    3893            0 :                     .map_err(|e| passthrough_api_error(&latest, e))?;
    3894              : 
    3895              :                 // If we are going to create the timeline on some stale locations for shard 0, then ask them to re-use
    3896              :                 // the initdb generated by the latest location, rather than generating their own.  This avoids racing uploads
    3897              :                 // of initdb to S3 which might not be binary-identical if different pageservers have different postgres binaries.
    3898            0 :                 if tenant_shard_id.is_shard_zero() {
    3899            0 :                     if let models::TimelineCreateRequestMode::Bootstrap { existing_initdb_timeline_id, .. } = &mut create_req.mode {
    3900            0 :                         *existing_initdb_timeline_id = Some(create_req.new_timeline_id);
    3901            0 :                     }
    3902            0 :                 }
    3903              : 
    3904              :                 // We propagate timeline creations to all attached locations such that a compute
    3905              :                 // for the new timeline is able to start regardless of the current state of the
    3906              :                 // tenant shard reconciliation.
    3907            0 :                 for location in locations.other {
    3908            0 :                     tracing::info!(
    3909            0 :                         "Creating timeline on shard {}/{}, stale attached to node {} in generation {:?}",
    3910              :                         tenant_shard_id,
    3911              :                         create_req.new_timeline_id,
    3912              :                         location.node,
    3913              :                         location.generation
    3914              :                     );
    3915              : 
    3916            0 :                     let client = PageserverClient::new(
    3917            0 :                         location.node.get_id(),
    3918            0 :                         http_client.clone(),
    3919            0 :                         location.node.base_url(),
    3920            0 :                         jwt.as_deref(),
    3921              :                     );
    3922              : 
    3923            0 :                     let res = client
    3924            0 :                         .timeline_create(tenant_shard_id, &create_req)
    3925            0 :                         .await;
    3926              : 
    3927            0 :                     if let Err(e) = res {
    3928            0 :                         match e {
    3929            0 :                             mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _) => {
    3930            0 :                                 // Tenant might have been detached from the stale location,
    3931            0 :                                 // so ignore 404s.
    3932            0 :                             },
    3933              :                             _ => {
    3934            0 :                                 return Err(passthrough_api_error(&location.node, e));
    3935              :                             }
    3936              :                         }
    3937            0 :                     }
    3938              :                 }
    3939              : 
    3940            0 :                 Ok(timeline_info)
    3941            0 :             }
    3942              : 
    3943              :             // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
    3944              :             // use whatever LSN that shard picked when creating on subsequent shards.  We arbitrarily use shard zero as the shard
    3945              :             // that will get the first creation request, and propagate the LSN to all the >0 shards.
    3946              :             //
    3947              :             // This also enables non-zero shards to use the initdb that shard 0 generated and uploaded to S3, rather than
    3948              :             // independently generating their own initdb.  This guarantees that shards cannot end up with different initial
    3949              :             // states if e.g. they have different postgres binary versions.
    3950            0 :             let timeline_info = create_one(
    3951            0 :                 shard_zero_tid,
    3952            0 :                 shard_zero_locations,
    3953            0 :                 self.http_client.clone(),
    3954            0 :                 self.config.pageserver_jwt_token.clone(),
    3955            0 :                 create_req.clone(),
    3956            0 :             )
    3957            0 :             .await?;
    3958              : 
    3959              :             // Update the create request for shards > 0
    3960            0 :             match &mut create_req.mode {
    3961            0 :                 models::TimelineCreateRequestMode::Branch { ancestor_start_lsn, .. } if ancestor_start_lsn.is_none() => {
    3962            0 :                     // Propagate the LSN that shard zero picked, if caller didn't provide one
    3963            0 :                     *ancestor_start_lsn = timeline_info.ancestor_lsn;
    3964            0 :                 },
    3965            0 :                 models::TimelineCreateRequestMode::Bootstrap { existing_initdb_timeline_id, .. } => {
    3966              :                     // For shards > 0, do not run initdb: use the one that shard 0 uploaded to S3
    3967            0 :                     *existing_initdb_timeline_id = Some(create_req.new_timeline_id)
    3968              :                 }
    3969            0 :                 _ => {}
    3970              :             }
    3971              : 
    3972              :             // Create timeline on remaining shards with number >0
    3973            0 :             if !targets.0.is_empty() {
    3974              :                 // If we had multiple shards, issue requests for the remainder now.
    3975            0 :                 let jwt = &self.config.pageserver_jwt_token;
    3976            0 :                 self.tenant_for_shards(
    3977            0 :                     targets
    3978            0 :                         .0
    3979            0 :                         .iter()
    3980            0 :                         .map(|t| (*t.0, t.1.latest.node.clone()))
    3981            0 :                         .collect(),
    3982            0 :                     |tenant_shard_id: TenantShardId, _node: Node| {
    3983            0 :                         let create_req = create_req.clone();
    3984            0 :                         let mutation_locations = targets.0.remove(&tenant_shard_id).unwrap();
    3985            0 :                         Box::pin(create_one(
    3986            0 :                             tenant_shard_id,
    3987            0 :                             mutation_locations,
    3988            0 :                             self.http_client.clone(),
    3989            0 :                             jwt.clone(),
    3990            0 :                             create_req,
    3991            0 :                         ))
    3992            0 :                     },
    3993              :                 )
    3994            0 :                 .await?;
    3995            0 :             }
    3996              : 
    3997            0 :             Ok(timeline_info)
    3998            0 :         })
    3999            0 :         .await?
    4000            0 :     }
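                      : 
                      :     // Editor's note: creation is two-phase -- shard zero first, then the remaining
                      :     // shards with shard zero's choices pinned.  A condensed sketch of the pinning
                      :     // rule applied above (hypothetical helper, not part of this file):
                      :     //
                      :     //     fn pin_to_shard_zero(mode: &mut models::TimelineCreateRequestMode, shard_zero: &TimelineInfo) {
                      :     //         use models::TimelineCreateRequestMode::*;
                      :     //         match mode {
                      :     //             Branch { ancestor_start_lsn, .. } if ancestor_start_lsn.is_none() => {
                      :     //                 *ancestor_start_lsn = shard_zero.ancestor_lsn; // reuse shard zero's branch point
                      :     //             }
                      :     //             Bootstrap { existing_initdb_timeline_id, .. } => {
                      :     //                 *existing_initdb_timeline_id = Some(shard_zero.timeline_id); // reuse shard zero's initdb
                      :     //             }
                      :     //             _ => {}
                      :     //         }
                      :     //     }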
    4001              : 
    4002            0 :     pub(crate) async fn tenant_timeline_create(
    4003            0 :         self: &Arc<Self>,
    4004            0 :         tenant_id: TenantId,
    4005            0 :         create_req: TimelineCreateRequest,
    4006            0 :     ) -> Result<TimelineCreateResponseStorcon, ApiError> {
    4007            0 :         let safekeepers = self.config.timelines_onto_safekeepers;
    4008            0 :         let timeline_id = create_req.new_timeline_id;
    4009              : 
    4010            0 :         tracing::info!(
    4011            0 :             mode=%create_req.mode_tag(),
    4012              :             %safekeepers,
    4013            0 :             "Creating timeline {}/{}",
    4014              :             tenant_id,
    4015              :             timeline_id,
    4016              :         );
    4017              : 
    4018            0 :         let _tenant_lock = trace_shared_lock(
    4019            0 :             &self.tenant_op_locks,
    4020            0 :             tenant_id,
    4021            0 :             TenantOperations::TimelineCreate,
    4022            0 :         )
    4023            0 :         .await;
    4024            0 :         failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
    4025            0 :         let is_import = create_req.is_import();
    4026            0 :         let read_only = matches!(
    4027            0 :             create_req.mode,
    4028              :             models::TimelineCreateRequestMode::Branch {
    4029              :                 read_only: true,
    4030              :                 ..
    4031              :             }
    4032              :         );
    4033              : 
    4034            0 :         if is_import {
    4035              :             // Ensure that there is no ongoing split.
    4036              :             // [`Self::tenant_shard_split`] holds the exclusive tenant lock
    4037              :             // for the duration of the split, but here we handle the case
    4038              :             // where we restarted and the split is being aborted.
    4039            0 :             let locked = self.inner.read().unwrap();
    4040            0 :             let splitting = locked
    4041            0 :                 .tenants
    4042            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4043            0 :                 .any(|(_id, shard)| shard.splitting != SplitState::Idle);
    4044              : 
    4045            0 :             if splitting {
    4046            0 :                 return Err(ApiError::Conflict("Tenant is splitting shard".to_string()));
    4047            0 :             }
    4048            0 :         }
    4049              : 
    4050            0 :         let timeline_info = self
    4051            0 :             .tenant_timeline_create_pageservers(tenant_id, create_req)
    4052            0 :             .await?;
    4053              : 
    4054            0 :         let selected_safekeepers = if is_import {
    4055            0 :             let shards = {
    4056            0 :                 let locked = self.inner.read().unwrap();
    4057            0 :                 locked
    4058            0 :                     .tenants
    4059            0 :                     .range(TenantShardId::tenant_range(tenant_id))
    4060            0 :                     .map(|(ts_id, _)| ts_id.to_index())
    4061            0 :                     .collect::<Vec<_>>()
    4062              :             };
    4063              : 
    4064            0 :             if !shards
    4065            0 :                 .iter()
    4066            0 :                 .map(|shard_index| shard_index.shard_count)
    4067            0 :                 .all_equal()
    4068              :             {
    4069            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4070            0 :                     "Inconsistent shard count"
    4071            0 :                 )));
    4072            0 :             }
    4073              : 
    4074            0 :             let import = TimelineImport {
    4075            0 :                 tenant_id,
    4076            0 :                 timeline_id,
    4077            0 :                 shard_statuses: ShardImportStatuses::new(shards),
    4078            0 :             };
    4079              : 
    4080            0 :             let inserted = self
    4081            0 :                 .persistence
    4082            0 :                 .insert_timeline_import(import.to_persistent())
    4083            0 :                 .await
    4084            0 :                 .context("timeline import insert")
    4085            0 :                 .map_err(ApiError::InternalServerError)?;
    4086              : 
    4087              :             // Set the importing flag on the tenant shards
    4088            0 :             self.inner
    4089            0 :                 .write()
    4090            0 :                 .unwrap()
    4091            0 :                 .tenants
    4092            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    4093            0 :                 .for_each(|(_id, shard)| shard.importing = TimelineImportState::Importing);
    4094              : 
    4095            0 :             match inserted {
    4096              :                 true => {
    4097            0 :                     tracing::info!(%tenant_id, %timeline_id, "Inserted timeline import");
    4098              :                 }
    4099              :                 false => {
    4100            0 :                     tracing::info!(%tenant_id, %timeline_id, "Timeline import entry already present");
    4101              :                 }
    4102              :             }
    4103              : 
    4104            0 :             None
    4105            0 :         } else if safekeepers || read_only {
    4106              :             // Note that for imported timelines, we do not create the timeline on the safekeepers
    4107              :             // straight away. Instead, we do it once the import is finalized, such that we know what
    4108              :             // start LSN to provide for the safekeepers. This is done in
    4109              :             // [`Self::finalize_timeline_import`].
    4110            0 :             let res = self
    4111            0 :                 .tenant_timeline_create_safekeepers(tenant_id, &timeline_info, read_only)
    4112            0 :                 .instrument(tracing::info_span!("timeline_create_safekeepers", %tenant_id, timeline_id=%timeline_info.timeline_id))
    4113            0 :                 .await?;
    4114            0 :             Some(res)
    4115              :         } else {
    4116            0 :             None
    4117              :         };
    4118              : 
    4119            0 :         Ok(TimelineCreateResponseStorcon {
    4120            0 :             timeline_info,
    4121            0 :             safekeepers: selected_safekeepers,
    4122            0 :         })
    4123            0 :     }
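                      : 
                      :     // Editor's note: safekeeper handling above has three outcomes -- imports defer
                      :     // safekeeper creation until finalization (see [`Self::finalize_timeline_import`]),
                      :     // regular creations do it now when `timelines_onto_safekeepers` is set (or the
                      :     // branch is read-only), and otherwise `safekeepers: None` is returned.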
    4124              : 
    4125              :     #[instrument(skip_all, fields(
    4126              :         tenant_id=%req.tenant_shard_id.tenant_id,
    4127              :         shard_id=%req.tenant_shard_id.shard_slug(),
    4128              :         timeline_id=%req.timeline_id,
    4129              :     ))]
    4130              :     pub(crate) async fn handle_timeline_shard_import_progress(
    4131              :         self: &Arc<Self>,
    4132              :         req: TimelineImportStatusRequest,
    4133              :     ) -> Result<ShardImportStatus, ApiError> {
    4134              :         let validity = self
    4135              :             .validate_shard_generation(req.tenant_shard_id, req.generation)
    4136              :             .await?;
    4137              :         match validity {
    4138              :             ShardGenerationValidity::Valid => {
    4139              :                 // fallthrough
    4140              :             }
    4141              :             ShardGenerationValidity::Mismatched { claimed, actual } => {
    4142              :                 tracing::info!(
    4143              :                     claimed=?claimed.into(),
    4144            0 :                     actual=?actual.and_then(|g| g.into()),
    4145              :                     "Rejecting import progress fetch from stale generation"
    4146              :                 );
    4147              : 
    4148              :                 return Err(ApiError::BadRequest(anyhow::anyhow!("Invalid generation")));
    4149              :             }
    4150              :         }
    4151              : 
    4152              :         let maybe_import = self
    4153              :             .persistence
    4154              :             .get_timeline_import(req.tenant_shard_id.tenant_id, req.timeline_id)
    4155              :             .await?;
    4156              : 
    4157            0 :         let import = maybe_import.ok_or_else(|| {
    4158            0 :             ApiError::NotFound(
    4159            0 :                 format!(
    4160            0 :                     "import for {}/{} not found",
    4161            0 :                     req.tenant_shard_id.tenant_id, req.timeline_id
    4162            0 :                 )
    4163            0 :                 .into(),
    4164            0 :             )
    4165            0 :         })?;
    4166              : 
    4167              :         import
    4168              :             .shard_statuses
    4169              :             .0
    4170              :             .get(&req.tenant_shard_id.to_index())
    4171              :             .cloned()
    4172            0 :             .ok_or_else(|| {
    4173            0 :                 ApiError::NotFound(
    4174            0 :                     format!("shard {} not found", req.tenant_shard_id.shard_slug()).into(),
    4175            0 :                 )
    4176            0 :             })
    4177              :     }
    4178              : 
    4179              :     #[instrument(skip_all, fields(
    4180              :         tenant_id=%req.tenant_shard_id.tenant_id,
    4181              :         shard_id=%req.tenant_shard_id.shard_slug(),
    4182              :         timeline_id=%req.timeline_id,
    4183              :     ))]
    4184              :     pub(crate) async fn handle_timeline_shard_import_progress_upcall(
    4185              :         self: &Arc<Self>,
    4186              :         req: PutTimelineImportStatusRequest,
    4187              :     ) -> Result<(), ApiError> {
    4188              :         let validity = self
    4189              :             .validate_shard_generation(req.tenant_shard_id, req.generation)
    4190              :             .await?;
    4191              :         match validity {
    4192              :             ShardGenerationValidity::Valid => {
    4193              :                 // fallthrough
    4194              :             }
    4195              :             ShardGenerationValidity::Mismatched { claimed, actual } => {
    4196              :                 tracing::info!(
    4197              :                     claimed=?claimed.into(),
    4198            0 :                     actual=?actual.and_then(|g| g.into()),
    4199              :                     "Rejecting import progress update from stale generation"
    4200              :                 );
    4201              : 
    4202              :                 return Err(ApiError::PreconditionFailed("Invalid generation".into()));
    4203              :             }
    4204              :         }
    4205              : 
    4206              :         let res = self
    4207              :             .persistence
    4208              :             .update_timeline_import(req.tenant_shard_id, req.timeline_id, req.status)
    4209              :             .await;
    4210              :         let timeline_import = match res {
    4211              :             Ok(Ok(Some(timeline_import))) => timeline_import,
    4212              :             Ok(Ok(None)) => {
    4213              :                 // Idempotency: we've already seen and handled this update.
    4214              :                 return Ok(());
    4215              :             }
    4216              :             Ok(Err(logical_err)) => {
    4217              :                 return Err(logical_err.into());
    4218              :             }
    4219              :             Err(db_err) => {
    4220              :                 return Err(db_err.into());
    4221              :             }
    4222              :         };
    4223              : 
    4224              :         tracing::info!(
    4225              :             tenant_id=%req.tenant_shard_id.tenant_id,
    4226              :             timeline_id=%req.timeline_id,
    4227              :             shard_id=%req.tenant_shard_id.shard_slug(),
    4228              :             "Updated timeline import status to: {timeline_import:?}");
    4229              : 
    4230              :         if timeline_import.is_complete() {
    4231              :             tokio::task::spawn({
    4232              :                 let this = self.clone();
    4233            0 :                 async move { this.finalize_timeline_import(timeline_import).await }
    4234              :             });
    4235              :         }
    4236              : 
    4237              :         Ok(())
    4238              :     }
    4239              : 
    4240              :     /// Check that a provided generation for some tenant shard is the most recent one.
    4241              :     ///
    4242              :     /// Validate with the in-mem state first, and, if that passes, validate with the
    4243              :     /// database state which is authoritative.
    4244            0 :     async fn validate_shard_generation(
    4245            0 :         self: &Arc<Self>,
    4246            0 :         tenant_shard_id: TenantShardId,
    4247            0 :         generation: Generation,
    4248            0 :     ) -> Result<ShardGenerationValidity, ApiError> {
    4249              :         {
    4250            0 :             let locked = self.inner.read().unwrap();
    4251            0 :             let tenant_shard =
    4252            0 :                 locked
    4253            0 :                     .tenants
    4254            0 :                     .get(&tenant_shard_id)
    4255            0 :                     .ok_or(ApiError::InternalServerError(anyhow::anyhow!(
    4256            0 :                         "{} shard not found",
    4257            0 :                         tenant_shard_id
    4258            0 :                     )))?;
    4259              : 
    4260            0 :             if tenant_shard.generation != Some(generation) {
    4261            0 :                 return Ok(ShardGenerationValidity::Mismatched {
    4262            0 :                     claimed: generation,
    4263            0 :                     actual: tenant_shard.generation,
    4264            0 :                 });
    4265            0 :             }
    4266              :         }
    4267              : 
    4268            0 :         let mut db_generations = self
    4269            0 :             .persistence
    4270            0 :             .shard_generations(std::iter::once(&tenant_shard_id))
    4271            0 :             .await?;
    4272            0 :         let (_tid, db_generation) =
    4273            0 :             db_generations
    4274            0 :                 .pop()
    4275            0 :                 .ok_or(ApiError::InternalServerError(anyhow::anyhow!(
    4276            0 :                     "{} shard not found",
    4277            0 :                     tenant_shard_id
    4278            0 :                 )))?;
    4279              : 
    4280            0 :         if db_generation != Some(generation) {
    4281            0 :             return Ok(ShardGenerationValidity::Mismatched {
    4282            0 :                 claimed: generation,
    4283            0 :                 actual: db_generation,
    4284            0 :             });
    4285            0 :         }
    4286              : 
    4287            0 :         Ok(ShardGenerationValidity::Valid)
    4288            0 :     }
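                      : 
                      :     // Editor's note: the in-memory comparison is only a fast path; the database
                      :     // check decides.  A caller holding a stale generation sees, e.g. (values
                      :     // hypothetical):
                      :     //
                      :     //     ShardGenerationValidity::Mismatched { claimed: gen(3), actual: Some(gen(5)) }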
    4289              : 
    4290              :     /// Finalize the import of a timeline
    4291              :     ///
    4292              :     /// This method should be called once all shards have reported that the import is complete.
    4293              :     /// First, it polls the post-import timeline activation endpoint exposed by the pageserver.
    4294              :     /// Once the timeline is active on all shards, the timeline also gets created on the
    4295              :     /// safekeepers. Finally, it notifies cplane of the import completion (whether failed or
    4296              :     /// successful) and removes the import from the database and from in-memory state.
    4297              :     ///
    4298              :     /// If this method gets pre-empted by shut down, it will be called again at start-up (on-going
    4299              :     /// imports are stored in the database).
    4300              :     ///
    4301              :     /// # Cancel-Safety
    4302              :     /// Not cancel-safe.
    4303              :     /// If the caller stops polling, the import will not be removed from
    4304              :     /// [`ServiceState::imports_finalizing`].
    4305              :     #[instrument(skip_all, fields(
    4306              :         tenant_id=%import.tenant_id,
    4307              :         timeline_id=%import.timeline_id,
    4308              :     ))]
    4310              :     async fn finalize_timeline_import(
    4311              :         self: &Arc<Self>,
    4312              :         import: TimelineImport,
    4313              :     ) -> Result<(), TimelineImportFinalizeError> {
    4314              :         let tenant_timeline = (import.tenant_id, import.timeline_id);
    4315              : 
    4316              :         let (_finalize_import_guard, cancel) = {
    4317              :             let mut locked = self.inner.write().unwrap();
    4318              :             let gate = Gate::default();
    4319              :             let cancel = CancellationToken::default();
    4320              : 
    4321              :             let guard = gate.enter().unwrap();
    4322              : 
    4323              :             locked.imports_finalizing.insert(
    4324              :                 tenant_timeline,
    4325              :                 FinalizingImport {
    4326              :                     gate,
    4327              :                     cancel: cancel.clone(),
    4328              :                 },
    4329              :             );
    4330              : 
    4331              :             (guard, cancel)
    4332              :         };
    4333              : 
    4334              :         let res = tokio::select! {
    4335              :             res = self.finalize_timeline_import_impl(import) => {
    4336              :                 res
    4337              :             },
    4338              :             _ = cancel.cancelled() => {
    4339              :                 Err(TimelineImportFinalizeError::Cancelled)
    4340              :             }
    4341              :         };
    4342              : 
    4343              :         let mut locked = self.inner.write().unwrap();
    4344              :         locked.imports_finalizing.remove(&tenant_timeline);
    4345              : 
    4346              :         res
    4347              :     }
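
Because the method is not cancel-safe, the callers in this file drive it from a spawned task instead of `select!`-ing over it. A minimal sketch of that pattern, with a hypothetical wrapper function:

    fn example_spawn_finalization(service: Arc<Service>, import: TimelineImport) {
        // The spawned task runs to completion (or until the cancellation token
        // registered inside finalize_timeline_import fires), so the entry in
        // imports_finalizing is always cleaned up.
        tokio::task::spawn(async move {
            let _ = service.finalize_timeline_import(import).await;
        });
    }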
    4348              : 
    4349            0 :     async fn finalize_timeline_import_impl(
    4350            0 :         self: &Arc<Self>,
    4351            0 :         import: TimelineImport,
    4352            0 :     ) -> Result<(), TimelineImportFinalizeError> {
    4353            0 :         tracing::info!("Finalizing timeline import");
    4354              : 
    4355            0 :         pausable_failpoint!("timeline-import-pre-cplane-notification");
    4356              : 
    4357            0 :         let tenant_id = import.tenant_id;
    4358            0 :         let timeline_id = import.timeline_id;
    4359              : 
    4360            0 :         let import_error = import.completion_error();
    4361            0 :         match import_error {
    4362            0 :             Some(err) => {
    4363            0 :                 self.notify_cplane_and_delete_import(tenant_id, timeline_id, Err(err))
    4364            0 :                     .await?;
    4365            0 :                 tracing::warn!("Timeline import completed with shard errors");
    4366            0 :                 Ok(())
    4367              :             }
    4368            0 :             None => match self.activate_timeline_post_import(&import).await {
    4369            0 :                 Ok(timeline_info) => {
    4370            0 :                     tracing::info!("Post import timeline activation complete");
    4371              : 
    4372            0 :                     if self.config.timelines_onto_safekeepers {
    4373              :                         // Now that we know the start LSN of this timeline, create it on the
    4374              :                         // safekeepers.
    4375            0 :                         self.tenant_timeline_create_safekeepers_until_success(
    4376            0 :                             import.tenant_id,
    4377            0 :                             timeline_info,
    4378            0 :                         )
    4379            0 :                         .await?;
    4380            0 :                     }
    4381              : 
    4382            0 :                     self.notify_cplane_and_delete_import(tenant_id, timeline_id, Ok(()))
    4383            0 :                         .await?;
    4384              : 
    4385            0 :                     tracing::info!("Timeline import completed successfully");
    4386            0 :                     Ok(())
    4387              :                 }
    4388              :                 Err(TimelineImportFinalizeError::ShuttingDown) => {
    4389              :                     // We were preempted by shutdown and will resume after the restart.
    4390            0 :                     Err(TimelineImportFinalizeError::ShuttingDown)
    4391              :                 }
    4392            0 :                 Err(err) => {
    4393              :                     // Any finalize error apart from shutdown is permanent and requires us to notify
    4394              :                     // cplane so that it can clean up.
    4395            0 :                     tracing::error!("Import finalize failed with permanent error: {err}");
    4396            0 :                     self.notify_cplane_and_delete_import(
    4397            0 :                         tenant_id,
    4398            0 :                         timeline_id,
    4399            0 :                         Err(err.to_string()),
    4400            0 :                     )
    4401            0 :                     .await?;
    4402            0 :                     Err(err)
    4403              :                 }
    4404              :             },
    4405              :         }
    4406            0 :     }
    4407              : 
    4408            0 :     async fn notify_cplane_and_delete_import(
    4409            0 :         self: &Arc<Self>,
    4410            0 :         tenant_id: TenantId,
    4411            0 :         timeline_id: TimelineId,
    4412            0 :         import_result: ImportResult,
    4413            0 :     ) -> Result<(), TimelineImportFinalizeError> {
    4414            0 :         let import_failed = import_result.is_err();
    4415            0 :         tracing::info!(%import_failed, "Notifying cplane of import completion");
    4416              : 
    4417            0 :         let client = UpcallClient::new(self.get_config(), self.cancel.child_token());
    4418            0 :         client
    4419            0 :             .notify_import_complete(tenant_id, timeline_id, import_result)
    4420            0 :             .await
    4421            0 :             .map_err(|_err| TimelineImportFinalizeError::ShuttingDown)?;
    4422              : 
    4423            0 :         if let Err(err) = self
    4424            0 :             .persistence
    4425            0 :             .delete_timeline_import(tenant_id, timeline_id)
    4426            0 :             .await
    4427              :         {
    4428            0 :             tracing::warn!("Failed to delete timeline import entry from database: {err}");
    4429            0 :         }
    4430              : 
    4431            0 :         self.inner
    4432            0 :             .write()
    4433            0 :             .unwrap()
    4434            0 :             .tenants
    4435            0 :             .range_mut(TenantShardId::tenant_range(tenant_id))
    4436            0 :             .for_each(|(_id, shard)| shard.importing = TimelineImportState::Idle);
    4437              : 
    4438            0 :         Ok(())
    4439            0 :     }
    4440              : 
    4441              :     /// Activate an imported timeline on all shards once the import is complete.
    4442              :     /// Returns the [`TimelineInfo`] reported by shard zero.
    4443            0 :     async fn activate_timeline_post_import(
    4444            0 :         self: &Arc<Self>,
    4445            0 :         import: &TimelineImport,
    4446            0 :     ) -> Result<TimelineInfo, TimelineImportFinalizeError> {
    4447              :         const TIMELINE_ACTIVATE_TIMEOUT: Duration = Duration::from_millis(128);
    4448              : 
    4449            0 :         let mut shards_to_activate: HashSet<ShardIndex> =
    4450            0 :             import.shard_statuses.0.keys().cloned().collect();
    4451            0 :         let mut shard_zero_timeline_info = None;
    4452              : 
    4453            0 :         while !shards_to_activate.is_empty() {
    4454            0 :             if self.cancel.is_cancelled() {
    4455            0 :                 return Err(TimelineImportFinalizeError::ShuttingDown);
    4456            0 :             }
    4457              : 
    4458            0 :             let targets = {
    4459            0 :                 let locked = self.inner.read().unwrap();
    4460            0 :                 let mut targets = Vec::new();
    4461              : 
    4462            0 :                 for (tenant_shard_id, shard) in locked
    4463            0 :                     .tenants
    4464            0 :                     .range(TenantShardId::tenant_range(import.tenant_id))
    4465              :                 {
    4466            0 :                     if !import
    4467            0 :                         .shard_statuses
    4468            0 :                         .0
    4469            0 :                         .contains_key(&tenant_shard_id.to_index())
    4470              :                     {
    4471            0 :                         return Err(TimelineImportFinalizeError::MismatchedShards(
    4472            0 :                             tenant_shard_id.to_index(),
    4473            0 :                         ));
    4474            0 :                     }
    4475              : 
    4476            0 :                     if let Some(node_id) = shard.intent.get_attached() {
    4477            0 :                         let node = locked
    4478            0 :                             .nodes
    4479            0 :                             .get(node_id)
    4480            0 :                             .expect("Pageservers may not be deleted while referenced");
    4481            0 :                         targets.push((*tenant_shard_id, node.clone()));
    4482            0 :                     }
    4483              :                 }
    4484              : 
    4485            0 :                 targets
    4486              :             };
    4487              : 
    4488            0 :             let targeted_tenant_shards: Vec<_> = targets.iter().map(|(tid, _node)| *tid).collect();
    4489              : 
    4490            0 :             let results = self
    4491            0 :                 .tenant_for_shards_api(
    4492            0 :                     targets,
    4493            0 :                     |tenant_shard_id, client| async move {
    4494            0 :                         client
    4495            0 :                             .activate_post_import(
    4496            0 :                                 tenant_shard_id,
    4497            0 :                                 import.timeline_id,
    4498            0 :                                 TIMELINE_ACTIVATE_TIMEOUT,
    4499            0 :                             )
    4500            0 :                             .await
    4501            0 :                     },
    4502              :                     1,
    4503              :                     1,
    4504              :                     SHORT_RECONCILE_TIMEOUT,
    4505            0 :                     &self.cancel,
    4506              :                 )
    4507            0 :                 .await;
    4508              : 
    4509            0 :             let mut failed = 0;
    4510            0 :             for (tid, (_, result)) in targeted_tenant_shards.iter().zip(results.into_iter()) {
    4511            0 :                 match result {
    4512            0 :                     Ok(ok) => {
    4513            0 :                         if tid.is_shard_zero() {
    4514            0 :                             shard_zero_timeline_info = Some(ok);
    4515            0 :                         }
    4516              : 
    4517            0 :                         shards_to_activate.remove(&tid.to_index());
    4518              :                     }
    4519            0 :                     Err(_err) => {
    4520            0 :                         failed += 1;
    4521            0 :                     }
    4522              :                 }
    4523              :             }
    4524              : 
    4525            0 :             if failed > 0 {
    4526            0 :                 tracing::info!(
    4527            0 :                     "Failed to activate timeline on {failed} shards post import. Will retry"
    4528              :                 );
    4529            0 :             }
    4530              : 
    4531            0 :             tokio::select! {
    4532            0 :                 _ = tokio::time::sleep(Duration::from_millis(250)) => {},
    4533            0 :                 _ = self.cancel.cancelled() => {
    4534            0 :                     return Err(TimelineImportFinalizeError::ShuttingDown);
    4535              :                 }
    4536              :             }
    4537              :         }
    4538              : 
    4539            0 :         Ok(shard_zero_timeline_info.expect("All shards replied"))
    4540            0 :     }
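
The activation loop is an instance of a generic drain-the-pending-set retry pattern. A self-contained sketch with illustrative names (the per-item check is synchronous here for brevity):

    use std::collections::HashSet;
    use std::time::Duration;

    async fn example_retry_until_drained<T, F>(mut pending: HashSet<T>, mut attempt: F)
    where
        T: Eq + std::hash::Hash + Copy,
        F: FnMut(T) -> bool, // returns true once the item has succeeded
    {
        while !pending.is_empty() {
            // Keep only the items that still failed this sweep.
            pending.retain(|item| !attempt(*item));
            // Back off briefly before the next sweep, as the loop above does.
            tokio::time::sleep(Duration::from_millis(250)).await;
        }
    }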
    4541              : 
    4542            0 :     async fn finalize_timeline_imports(self: &Arc<Self>, imports: Vec<TimelineImport>) {
    4543            0 :         futures::future::join_all(
    4544            0 :             imports
    4545            0 :                 .into_iter()
    4546            0 :                 .map(|import| self.finalize_timeline_import(import)),
    4547              :         )
    4548            0 :         .await;
    4549            0 :     }
    4550              : 
    4551              :     /// Delete a timeline import if it exists
    4552              :     ///
    4553              :     /// Firstly, delete the entry from the database. Any updates
    4554              :     /// from pageservers after the deletion will fail with a 404, so the
    4555              :     /// import cannot progress into the finalizing state if it isn't there already.
    4556              :     /// Secondly, cancel the finalization if one is in progress.
    4557            0 :     pub(crate) async fn maybe_delete_timeline_import(
    4558            0 :         self: &Arc<Self>,
    4559            0 :         tenant_id: TenantId,
    4560            0 :         timeline_id: TimelineId,
    4561            0 :     ) -> Result<(), DatabaseError> {
    4562            0 :         let tenant_has_ongoing_import = {
    4563            0 :             let locked = self.inner.read().unwrap();
    4564            0 :             locked
    4565            0 :                 .tenants
    4566            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4567            0 :                 .any(|(_tid, shard)| shard.importing == TimelineImportState::Importing)
    4568              :         };
    4569              : 
    4570            0 :         if !tenant_has_ongoing_import {
    4571            0 :             return Ok(());
    4572            0 :         }
    4573              : 
    4574            0 :         self.persistence
    4575            0 :             .delete_timeline_import(tenant_id, timeline_id)
    4576            0 :             .await?;
    4577              : 
    4578            0 :         let maybe_finalizing = {
    4579            0 :             let mut locked = self.inner.write().unwrap();
    4580            0 :             locked.imports_finalizing.remove(&(tenant_id, timeline_id))
    4581              :         };
    4582              : 
    4583            0 :         if let Some(finalizing) = maybe_finalizing {
    4584            0 :             finalizing.cancel.cancel();
    4585            0 :             finalizing.gate.close().await;
    4586            0 :         }
    4587              : 
    4588            0 :         Ok(())
    4589            0 :     }
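
The delete path ends with a cancel-then-drain handshake. A sketch of that idiom using the same `FinalizingImport` shape (its definition lives elsewhere in this file):

    async fn example_cancel_and_drain(finalizing: FinalizingImport) {
        // Signal the in-flight finalization to stop...
        finalizing.cancel.cancel();
        // ...then wait for it to drop its gate guard, proving it has exited.
        finalizing.gate.close().await;
    }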
    4590              : 
    4591            0 :     pub(crate) async fn tenant_timeline_archival_config(
    4592            0 :         &self,
    4593            0 :         tenant_id: TenantId,
    4594            0 :         timeline_id: TimelineId,
    4595            0 :         req: TimelineArchivalConfigRequest,
    4596            0 :     ) -> Result<(), ApiError> {
    4597            0 :         tracing::info!(
    4598            0 :             "Setting archival config of timeline {tenant_id}/{timeline_id} to '{:?}'",
    4599              :             req.state
    4600              :         );
    4601              : 
    4602            0 :         let _tenant_lock = trace_shared_lock(
    4603            0 :             &self.tenant_op_locks,
    4604            0 :             tenant_id,
    4605            0 :             TenantOperations::TimelineArchivalConfig,
    4606            0 :         )
    4607            0 :         .await;
    4608              : 
    4609            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4610            0 :             if targets.0.is_empty() {
    4611            0 :                 return Err(ApiError::NotFound(
    4612            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4613            0 :                 ));
    4614            0 :             }
    4615            0 :             async fn config_one(
    4616            0 :                 tenant_shard_id: TenantShardId,
    4617            0 :                 timeline_id: TimelineId,
    4618            0 :                 node: Node,
    4619            0 :                 http_client: reqwest::Client,
    4620            0 :                 jwt: Option<String>,
    4621            0 :                 req: TimelineArchivalConfigRequest,
    4622            0 :             ) -> Result<(), ApiError> {
    4623            0 :                 tracing::info!(
    4624            0 :                     "Setting archival config of timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4625              :                 );
    4626              : 
    4627            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    4628              : 
    4629            0 :                 client
    4630            0 :                     .timeline_archival_config(tenant_shard_id, timeline_id, &req)
    4631            0 :                     .await
    4632            0 :                     .map_err(|e| match e {
    4633            0 :                         mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg) => {
    4634            0 :                             ApiError::PreconditionFailed(msg.into_boxed_str())
    4635              :                         }
    4636            0 :                         _ => passthrough_api_error(&node, e),
    4637            0 :                     })
    4638            0 :             }
    4639              : 
    4640              :             // no shard needs to go first/last; the operation should be idempotent
    4641              :             // TODO: it would be great to ensure that all shards return the same error
    4642            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4643            0 :             let results = self
    4644            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    4645            0 :                     futures::FutureExt::boxed(config_one(
    4646            0 :                         tenant_shard_id,
    4647            0 :                         timeline_id,
    4648            0 :                         node,
    4649            0 :                         self.http_client.clone(),
    4650            0 :                         self.config.pageserver_jwt_token.clone(),
    4651            0 :                         req.clone(),
    4652            0 :                     ))
    4653            0 :                 })
    4654            0 :                 .await?;
    4655            0 :             assert!(!results.is_empty(), "must have at least one result");
    4656              : 
    4657            0 :             Ok(())
    4658            0 :         }).await?
    4659            0 :     }
    4660              : 
    4661            0 :     pub(crate) async fn tenant_timeline_detach_ancestor(
    4662            0 :         &self,
    4663            0 :         tenant_id: TenantId,
    4664            0 :         timeline_id: TimelineId,
    4665            0 :         behavior: Option<DetachBehavior>,
    4666            0 :     ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
    4667            0 :         tracing::info!("Detaching timeline {tenant_id}/{timeline_id}",);
    4668              : 
    4669            0 :         let _tenant_lock = trace_shared_lock(
    4670            0 :             &self.tenant_op_locks,
    4671            0 :             tenant_id,
    4672            0 :             TenantOperations::TimelineDetachAncestor,
    4673            0 :         )
    4674            0 :         .await;
    4675              : 
    4676            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4677            0 :             if targets.0.is_empty() {
    4678            0 :                 return Err(ApiError::NotFound(
    4679            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4680            0 :                 ));
    4681            0 :             }
    4682              : 
    4683            0 :             async fn detach_one(
    4684            0 :                 tenant_shard_id: TenantShardId,
    4685            0 :                 timeline_id: TimelineId,
    4686            0 :                 node: Node,
    4687            0 :                 http_client: reqwest::Client,
    4688            0 :                 jwt: Option<String>,
    4689            0 :                 behavior: Option<DetachBehavior>,
    4690            0 :             ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
    4691            0 :                 tracing::info!(
    4692            0 :                     "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4693              :                 );
    4694              : 
    4695            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    4696              : 
    4697            0 :                 client
    4698            0 :                     .timeline_detach_ancestor(tenant_shard_id, timeline_id, behavior)
    4699            0 :                     .await
    4700            0 :                     .map_err(|e| {
    4701              :                         use mgmt_api::Error;
    4702              : 
    4703            0 :                         match e {
    4704              :                             // no ancestor (ever)
    4705            0 :                             Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
    4706            0 :                                 "{node}: {}",
    4707            0 :                                 msg.strip_prefix("Conflict: ").unwrap_or(&msg)
    4708            0 :                             )),
    4709              :                             // too many ancestors
    4710            0 :                             Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
    4711            0 :                                 ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
    4712              :                             }
    4713            0 :                             Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, msg) => {
    4714              :                                 // Avoid turning these into conflicts, to remain compatible with
    4715              :                                 // pageservers: 500 errors are, unfortunately, retryable for timeline
    4716              :                                 // ancestor detach.
    4717            0 :                                 ApiError::InternalServerError(anyhow::anyhow!("{node}: {msg}"))
    4718              :                             }
    4719              :                             // rest can be mapped as usual
    4720            0 :                             other => passthrough_api_error(&node, other),
    4721              :                         }
    4722            0 :                     })
    4723            0 :                     .map(|res| (tenant_shard_id.shard_number, res))
    4724            0 :             }
    4725              : 
    4726              :             // no shard needs to go first/last; the operation should be idempotent
    4727            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4728            0 :             let mut results = self
    4729            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    4730            0 :                     futures::FutureExt::boxed(detach_one(
    4731            0 :                         tenant_shard_id,
    4732            0 :                         timeline_id,
    4733            0 :                         node,
    4734            0 :                         self.http_client.clone(),
    4735            0 :                         self.config.pageserver_jwt_token.clone(),
    4736            0 :                         behavior,
    4737            0 :                     ))
    4738            0 :                 })
    4739            0 :                 .await?;
    4740              : 
    4741            0 :             let any = results.pop().expect("we must have at least one response");
    4742              : 
    4743            0 :             let mismatching = results
    4744            0 :                 .iter()
    4745            0 :                 .filter(|(_, res)| res != &any.1)
    4746            0 :                 .collect::<Vec<_>>();
    4747            0 :             if !mismatching.is_empty() {
    4748              :                 // This can be hit by races that should not happen, because cplane holds an operation lock.
    4749            0 :                 let matching = results.len() - mismatching.len();
    4750            0 :                 tracing::error!(
    4751              :                     matching,
    4752              :                     compared_against=?any,
    4753              :                     ?mismatching,
    4754            0 :                     "shards returned different results"
    4755              :                 );
    4756              : 
    4757            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!("pageservers returned mixed results for ancestor detach; manual intervention is required.")));
    4758            0 :             }
    4759              : 
    4760            0 :             Ok(any.1)
    4761            0 :         }).await?
    4762            0 :     }
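
The mismatch check generalizes to a small all-results-identical helper. A self-contained sketch (hypothetical function, not part of this file):

    fn example_require_identical<T: PartialEq>(mut results: Vec<T>) -> Result<T, &'static str> {
        // Use the last response as the reference and demand all others match it.
        let reference = results.pop().ok_or("no responses")?;
        if results.iter().all(|r| *r == reference) {
            Ok(reference)
        } else {
            Err("shards returned different results")
        }
    }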
    4763              : 
    4764            0 :     pub(crate) async fn tenant_timeline_block_unblock_gc(
    4765            0 :         &self,
    4766            0 :         tenant_id: TenantId,
    4767            0 :         timeline_id: TimelineId,
    4768            0 :         dir: BlockUnblock,
    4769            0 :     ) -> Result<(), ApiError> {
    4770            0 :         let _tenant_lock = trace_shared_lock(
    4771            0 :             &self.tenant_op_locks,
    4772            0 :             tenant_id,
    4773            0 :             TenantOperations::TimelineGcBlockUnblock,
    4774            0 :         )
    4775            0 :         .await;
    4776              : 
    4777            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4778            0 :             if targets.0.is_empty() {
    4779            0 :                 return Err(ApiError::NotFound(
    4780            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4781            0 :                 ));
    4782            0 :             }
    4783              : 
    4784            0 :             async fn do_one(
    4785            0 :                 tenant_shard_id: TenantShardId,
    4786            0 :                 timeline_id: TimelineId,
    4787            0 :                 node: Node,
    4788            0 :                 http_client: reqwest::Client,
    4789            0 :                 jwt: Option<String>,
    4790            0 :                 dir: BlockUnblock,
    4791            0 :             ) -> Result<(), ApiError> {
    4792            0 :                 let client = PageserverClient::new(
    4793            0 :                     node.get_id(),
    4794            0 :                     http_client,
    4795            0 :                     node.base_url(),
    4796            0 :                     jwt.as_deref(),
    4797              :                 );
    4798              : 
    4799            0 :                 client
    4800            0 :                     .timeline_block_unblock_gc(tenant_shard_id, timeline_id, dir)
    4801            0 :                     .await
    4802            0 :                     .map_err(|e| passthrough_api_error(&node, e))
    4803            0 :             }
    4804              : 
    4805              :             // no shard needs to go first/last; the operation should be idempotent
    4806            0 :             let locations = targets
    4807            0 :                 .0
    4808            0 :                 .iter()
    4809            0 :                 .map(|t| (*t.0, t.1.latest.node.clone()))
    4810            0 :                 .collect();
    4811            0 :             self.tenant_for_shards(locations, |tenant_shard_id, node| {
    4812            0 :                 futures::FutureExt::boxed(do_one(
    4813            0 :                     tenant_shard_id,
    4814            0 :                     timeline_id,
    4815            0 :                     node,
    4816            0 :                     self.http_client.clone(),
    4817            0 :                     self.config.pageserver_jwt_token.clone(),
    4818            0 :                     dir,
    4819            0 :                 ))
    4820            0 :             })
    4821            0 :             .await
    4822            0 :         })
    4823            0 :         .await??;
    4824            0 :         Ok(())
    4825            0 :     }
    4826              : 
    4827            0 :     pub(crate) fn is_tenant_not_found_error(body: &str, tenant_id: TenantId) -> bool {
    4828            0 :         body.contains(&format!("tenant {tenant_id}"))
    4829            0 :     }
    4830              : 
    4831            0 :     fn process_result_and_passthrough_errors<T>(
    4832            0 :         &self,
    4833            0 :         tenant_id: TenantId,
    4834            0 :         results: Vec<(Node, Result<T, mgmt_api::Error>)>,
    4835            0 :     ) -> Result<Vec<(Node, T)>, ApiError> {
    4836            0 :         let mut processed_results: Vec<(Node, T)> = Vec::with_capacity(results.len());
    4837            0 :         for (node, res) in results {
    4838            0 :             match res {
    4839            0 :                 Ok(res) => processed_results.push((node, res)),
    4840            0 :                 Err(mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, body))
    4841            0 :                     if Self::is_tenant_not_found_error(&body, tenant_id) =>
    4842              :                 {
    4843              :                     // If the tenant was not found, we are still in the process of attaching it.
    4844              :                     // Return 503 so that the client can retry.
    4845            0 :                     return Err(ApiError::ResourceUnavailable(
    4846            0 :                         format!(
    4847            0 :                             "Timeline is not attached to the pageserver {} yet, please retry",
    4848            0 :                             node.get_id()
    4849            0 :                         )
    4850            0 :                         .into(),
    4851            0 :                     ));
    4852              :                 }
    4853            0 :                 Err(e) => return Err(passthrough_api_error(&node, e)),
    4854              :             }
    4855              :         }
    4856            0 :         Ok(processed_results)
    4857            0 :     }
    4858              : 
    4859            0 :     pub(crate) async fn tenant_timeline_lsn_lease(
    4860            0 :         &self,
    4861            0 :         tenant_id: TenantId,
    4862            0 :         timeline_id: TimelineId,
    4863            0 :         lsn: Lsn,
    4864            0 :     ) -> Result<LsnLease, ApiError> {
    4865            0 :         let _tenant_lock = trace_shared_lock(
    4866            0 :             &self.tenant_op_locks,
    4867            0 :             tenant_id,
    4868            0 :             TenantOperations::TimelineLsnLease,
    4869            0 :         )
    4870            0 :         .await;
    4871              : 
    4872            0 :         self.tenant_remote_mutation(tenant_id, |locations| async move {
    4873            0 :             if locations.0.is_empty() {
    4874            0 :                 return Err(ApiError::NotFound(
    4875            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4876            0 :                 ));
    4877            0 :             }
    4878              : 
    4879            0 :             let results = self
    4880            0 :                 .tenant_for_shards_api(
    4881            0 :                     locations
    4882            0 :                         .0
    4883            0 :                         .iter()
    4884            0 :                         .map(|(tenant_shard_id, ShardMutationLocations { latest, .. })| {
    4885            0 :                             (*tenant_shard_id, latest.node.clone())
    4886            0 :                         })
    4887            0 :                         .collect(),
    4888            0 :                     |tenant_shard_id, client| async move {
    4889            0 :                         client
    4890            0 :                             .timeline_lease_lsn(tenant_shard_id, timeline_id, lsn)
    4891            0 :                             .await
    4892            0 :                     },
    4893              :                     1,
    4894              :                     1,
    4895              :                     SHORT_RECONCILE_TIMEOUT,
    4896            0 :                     &self.cancel,
    4897              :                 )
    4898            0 :                 .await;
    4899              : 
    4900            0 :             let leases = self.process_result_and_passthrough_errors(tenant_id, results)?;
    4901            0 :             let mut valid_until = None;
    4902            0 :             for (_, lease) in leases {
    4903            0 :                 if let Some(ref mut valid_until) = valid_until {
    4904            0 :                     *valid_until = std::cmp::min(*valid_until, lease.valid_until);
    4905            0 :                 } else {
    4906            0 :                     valid_until = Some(lease.valid_until);
    4907            0 :                 }
    4908              :             }
    4909            0 :             Ok(LsnLease {
    4910            0 :                 valid_until: valid_until.unwrap_or_else(SystemTime::now),
    4911            0 :             })
    4912            0 :         })
    4913            0 :         .await?
    4914            0 :     }
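
The fold over leases computes the most conservative expiry: a lease covering all shards is only valid until the first per-shard lease runs out. The same fold in isolation (hypothetical helper):

    use std::time::SystemTime;

    fn example_combined_lease_expiry(per_shard: &[SystemTime]) -> Option<SystemTime> {
        // The combined lease expires when the earliest per-shard lease does.
        per_shard.iter().copied().min()
    }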
    4915              : 
    4916            0 :     pub(crate) async fn tenant_timeline_download_heatmap_layers(
    4917            0 :         &self,
    4918            0 :         tenant_shard_id: TenantShardId,
    4919            0 :         timeline_id: TimelineId,
    4920            0 :         concurrency: Option<usize>,
    4921            0 :         recurse: bool,
    4922            0 :     ) -> Result<(), ApiError> {
    4923            0 :         let _tenant_lock = trace_shared_lock(
    4924            0 :             &self.tenant_op_locks,
    4925            0 :             tenant_shard_id.tenant_id,
    4926            0 :             TenantOperations::DownloadHeatmapLayers,
    4927            0 :         )
    4928            0 :         .await;
    4929              : 
    4930            0 :         let targets = {
    4931            0 :             let locked = self.inner.read().unwrap();
    4932            0 :             let mut targets = Vec::new();
    4933              : 
    4934              :             // If the request carries an unsharded tenant id, apply
    4935              :             // the operation to all shards. Otherwise, apply it to the specified shard.
    4936            0 :             let shards_range = if tenant_shard_id.is_unsharded() {
    4937            0 :                 TenantShardId::tenant_range(tenant_shard_id.tenant_id)
    4938              :             } else {
    4939            0 :                 tenant_shard_id.range()
    4940              :             };
    4941              : 
    4942            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4943            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4944            0 :                     let node = locked
    4945            0 :                         .nodes
    4946            0 :                         .get(node_id)
    4947            0 :                         .expect("Pageservers may not be deleted while referenced");
    4948            0 : 
    4949            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4950            0 :                 }
    4951              :             }
    4952            0 :             targets
    4953              :         };
    4954              : 
    4955            0 :         self.tenant_for_shards_api(
    4956            0 :             targets,
    4957            0 :             |tenant_shard_id, client| async move {
    4958            0 :                 client
    4959            0 :                     .timeline_download_heatmap_layers(
    4960            0 :                         tenant_shard_id,
    4961            0 :                         timeline_id,
    4962            0 :                         concurrency,
    4963            0 :                         recurse,
    4964            0 :                     )
    4965            0 :                     .await
    4966            0 :             },
    4967              :             1,
    4968              :             1,
    4969              :             SHORT_RECONCILE_TIMEOUT,
    4970            0 :             &self.cancel,
    4971              :         )
    4972            0 :         .await;
    4973              : 
    4974            0 :         Ok(())
    4975            0 :     }
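
The range selection is a small reusable trick. A sketch, assuming (as the single assignment above implies) that both helpers return the same `std::ops::Range<TenantShardId>`:

    fn example_shards_range(id: TenantShardId) -> std::ops::Range<TenantShardId> {
        if id.is_unsharded() {
            // An unsharded id fans out to every shard of the tenant...
            TenantShardId::tenant_range(id.tenant_id)
        } else {
            // ...while a concrete shard id targets only itself.
            id.range()
        }
    }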
    4976              : 
    4977              :     /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
    4978              :     ///
    4979              :     /// On success, the returned vector contains exactly the same number of elements as the input `locations`,
    4980              :     /// and the element at index `i` is the result of `req_fn(locations[i])`.
    4981            0 :     async fn tenant_for_shards<F, R>(
    4982            0 :         &self,
    4983            0 :         locations: Vec<(TenantShardId, Node)>,
    4984            0 :         mut req_fn: F,
    4985            0 :     ) -> Result<Vec<R>, ApiError>
    4986            0 :     where
    4987            0 :         F: FnMut(
    4988            0 :             TenantShardId,
    4989            0 :             Node,
    4990            0 :         )
    4991            0 :             -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
    4992            0 :     {
    4993            0 :         let mut futs = FuturesUnordered::new();
    4994            0 :         let mut results = Vec::with_capacity(locations.len());
    4995              : 
    4996            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4997            0 :             let fut = req_fn(tenant_shard_id, node);
    4998            0 :             futs.push(async move { (idx, fut.await) });
    4999              :         }
    5000              : 
    5001            0 :         while let Some((idx, r)) = futs.next().await {
    5002            0 :             results.push((idx, r?));
    5003              :         }
    5004              : 
    5005            0 :         results.sort_by_key(|(idx, _)| *idx);
    5006            0 :         Ok(results.into_iter().map(|(_, r)| r).collect())
    5007            0 :     }
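
The index-tagging in `tenant_for_shards` is what restores input order after `FuturesUnordered` yields completions in arbitrary order. A self-contained demonstration:

    use futures::StreamExt;
    use futures::stream::FuturesUnordered;
    use std::time::Duration;

    async fn example_ordered_fanout() {
        let delays_ms = vec![30u64, 10, 20];
        let mut futs = FuturesUnordered::new();
        for (idx, ms) in delays_ms.into_iter().enumerate() {
            // Tag each future with its input index before racing them.
            futs.push(async move {
                tokio::time::sleep(Duration::from_millis(ms)).await;
                (idx, ms)
            });
        }
        let mut results = Vec::new();
        while let Some(tagged) = futs.next().await {
            results.push(tagged); // completion order: 10, 20, 30
        }
        // Sort by the tag to recover input order.
        results.sort_by_key(|(idx, _)| *idx);
        assert_eq!(results, vec![(0, 30), (1, 10), (2, 20)]);
    }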
    5008              : 
    5009              :     /// Concurrently invoke a pageserver API call on many shards.
    5010              :     ///
    5011              :     /// The returned Vec has the same length as the `locations` Vec,
    5012              :     /// and the element at index `i` is the result of `op(locations[i])`.
    5013            0 :     pub(crate) async fn tenant_for_shards_api<T, O, F>(
    5014            0 :         &self,
    5015            0 :         locations: Vec<(TenantShardId, Node)>,
    5016            0 :         op: O,
    5017            0 :         warn_threshold: u32,
    5018            0 :         max_retries: u32,
    5019            0 :         timeout: Duration,
    5020            0 :         cancel: &CancellationToken,
    5021            0 :     ) -> Vec<(Node, mgmt_api::Result<T>)>
    5022            0 :     where
    5023            0 :         O: Fn(TenantShardId, PageserverClient) -> F + Copy,
    5024            0 :         F: std::future::Future<Output = mgmt_api::Result<T>>,
    5025            0 :     {
    5026            0 :         let mut futs = FuturesUnordered::new();
    5027            0 :         let mut results = Vec::with_capacity(locations.len());
    5028              : 
    5029            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    5030            0 :             futs.push(async move {
    5031            0 :                 let r = node
    5032            0 :                     .with_client_retries(
    5033            0 :                         |client| op(tenant_shard_id, client),
    5034            0 :                         &self.http_client,
    5035            0 :                         &self.config.pageserver_jwt_token,
    5036            0 :                         warn_threshold,
    5037            0 :                         max_retries,
    5038            0 :                         timeout,
    5039            0 :                         cancel,
    5040              :                     )
    5041            0 :                     .await;
    5042            0 :                 (idx, node, r)
    5043            0 :             });
    5044              :         }
    5045              : 
    5046            0 :         while let Some((idx, node, r)) = futs.next().await {
    5047            0 :             results.push((idx, node, r.unwrap_or(Err(mgmt_api::Error::Cancelled))));
    5048            0 :         }
    5049              : 
    5050            0 :         results.sort_by_key(|(idx, _, _)| *idx);
    5051            0 :         results.into_iter().map(|(_, node, r)| (node, r)).collect()
    5052            0 :     }
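
Unlike `tenant_for_shards`, this variant never short-circuits: every shard's outcome comes back. A sketch of digesting those results, assuming `mgmt_api::Result<T>` is the usual alias for `Result<T, mgmt_api::Error>` (the helper is hypothetical):

    fn example_split_results<T>(
        results: Vec<(Node, mgmt_api::Result<T>)>,
    ) -> (Vec<(Node, T)>, Vec<(Node, mgmt_api::Error)>) {
        let mut ok = Vec::new();
        let mut err = Vec::new();
        for (node, res) in results {
            match res {
                Ok(v) => ok.push((node, v)),
                Err(e) => err.push((node, e)),
            }
        }
        (ok, err)
    }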
    5053              : 
    5054              :     /// Helper for safely working with the shards in a tenant remotely on pageservers, for example
    5055              :     /// when creating and deleting timelines:
    5056              :     /// - Makes sure shards are attached somewhere if they weren't already
    5057              :     /// - Looks up the shards and the nodes where they were most recently attached
    5058              :     /// - Guarantees that after the inner function returns, the shards' generations haven't moved on: this
    5059              :     ///   ensures that the remote operation acted on the most recent generation, and is therefore durable.
    5060            0 :     pub(crate) async fn tenant_remote_mutation<R, O, F>(
    5061            0 :         &self,
    5062            0 :         tenant_id: TenantId,
    5063            0 :         op: O,
    5064            0 :     ) -> Result<R, ApiError>
    5065            0 :     where
    5066            0 :         O: FnOnce(TenantMutationLocations) -> F,
    5067            0 :         F: std::future::Future<Output = R>,
    5068            0 :     {
    5069            0 :         self.tenant_remote_mutation_inner(TenantIdOrShardId::TenantId(tenant_id), op)
    5070            0 :             .await
    5071            0 :     }
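
A call-site sketch mirroring the uses earlier in this file (hypothetical handler; the per-shard pageserver call is elided). Note the trailing `?`: the wrapper's own errors (generation re-check, database) are unwrapped first, and the closure's `Result` becomes the handler's result:

    async fn example_mutation(service: &Service, tenant_id: TenantId) -> Result<(), ApiError> {
        service
            .tenant_remote_mutation(tenant_id, move |targets| async move {
                if targets.0.is_empty() {
                    return Err(ApiError::NotFound(
                        anyhow::anyhow!("Tenant not found").into(),
                    ));
                }
                // ... perform the idempotent per-shard operation against `targets` ...
                Ok(())
            })
            .await?
    }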
    5072              : 
    5073            0 :     pub(crate) async fn tenant_shard_remote_mutation<R, O, F>(
    5074            0 :         &self,
    5075            0 :         tenant_shard_id: TenantShardId,
    5076            0 :         op: O,
    5077            0 :     ) -> Result<R, ApiError>
    5078            0 :     where
    5079            0 :         O: FnOnce(TenantMutationLocations) -> F,
    5080            0 :         F: std::future::Future<Output = R>,
    5081            0 :     {
    5082            0 :         self.tenant_remote_mutation_inner(TenantIdOrShardId::TenantShardId(tenant_shard_id), op)
    5083            0 :             .await
    5084            0 :     }
    5085              : 
    5086            0 :     async fn tenant_remote_mutation_inner<R, O, F>(
    5087            0 :         &self,
    5088            0 :         tenant_id_or_shard_id: TenantIdOrShardId,
    5089            0 :         op: O,
    5090            0 :     ) -> Result<R, ApiError>
    5091            0 :     where
    5092            0 :         O: FnOnce(TenantMutationLocations) -> F,
    5093            0 :         F: std::future::Future<Output = R>,
    5094            0 :     {
    5095            0 :         let mutation_locations = {
    5096            0 :             let mut locations = TenantMutationLocations::default();
    5097              : 
    5098              :             // Load the currently attached pageservers for the latest generation of each shard.  This can
    5099              :             // run concurrently with reconciliations, and it is not guaranteed that the node we find here
    5100              :             // will still be the latest when we're done: we will check generations again at the end of
    5101              :             // this function to handle that.
    5102            0 :             let generations = self
    5103            0 :                 .persistence
    5104            0 :                 .tenant_generations(tenant_id_or_shard_id.tenant_id())
    5105            0 :                 .await?
    5106            0 :                 .into_iter()
    5107            0 :                 .filter(|i| tenant_id_or_shard_id.matches(&i.tenant_shard_id))
    5108            0 :                 .collect::<Vec<_>>();
    5109              : 
    5110            0 :             if generations
    5111            0 :                 .iter()
    5112            0 :                 .any(|i| i.generation.is_none() || i.generation_pageserver.is_none())
    5113              :             {
    5114            0 :                 let shard_generations = generations
    5115            0 :                     .into_iter()
    5116            0 :                     .map(|i| (i.tenant_shard_id, (i.generation, i.generation_pageserver)))
    5117            0 :                     .collect::<HashMap<_, _>>();
    5118              : 
    5119              :                 // One or more shards have not been attached to a pageserver.  Check whether this is because they are
    5120              :                 // configured to be detached (409: caller should give up), or because they are meant to be attached but aren't yet (503: caller should retry).
    5121            0 :                 let locked = self.inner.read().unwrap();
    5122            0 :                 let tenant_shards = locked
    5123            0 :                     .tenants
    5124            0 :                     .range(TenantShardId::tenant_range(
    5125            0 :                         tenant_id_or_shard_id.tenant_id(),
    5126              :                     ))
    5127            0 :                     .filter(|(shard_id, _)| tenant_id_or_shard_id.matches(shard_id))
    5128            0 :                     .collect::<Vec<_>>();
    5129            0 :                 for (shard_id, shard) in tenant_shards {
    5130            0 :                     match shard.policy {
    5131              :                         PlacementPolicy::Attached(_) => {
    5132              :                             // This shard is meant to be attached: the caller is not wrong to try and
    5133              :                             // use this function, but we can't service the request right now.
    5134            0 :                             let Some(generation) = shard_generations.get(shard_id) else {
                                // This can only happen if there is a split-brain controller modifying the database.  This should
    5136              :                                 // never happen when testing, and if it happens in production we can only log the issue.
    5137            0 :                                 debug_assert!(false);
    5138            0 :                                 tracing::error!(
    5139            0 :                                     "Shard {shard_id} not found in generation state!  Is another rogue controller running?"
    5140              :                                 );
    5141            0 :                                 continue;
    5142              :                             };
    5143            0 :                             let (generation, generation_pageserver) = generation;
    5144            0 :                             if let Some(generation) = generation {
    5145            0 :                                 if generation_pageserver.is_none() {
    5146              :                                     // This is legitimate only in a very narrow window: the shard was just configured into
    5147              :                                     // Attached mode after being created in Secondary or Detached mode, and its generation has
    5148              :                                     // been set but no Reconciler has run yet (the reconciler is the only thing that sets generation_pageserver).
    5149            0 :                                     tracing::warn!(
    5150            0 :                                         "Shard {shard_id} generation is set ({generation:?}) but generation_pageserver is None, reconciler not run yet?"
    5151              :                                     );
    5152            0 :                                 }
    5153              :                             } else {
    5154              :                                 // This should never happen: a shard with no generation is only permitted when it was created in some state
    5155              :                                 // other than PlacementPolicy::Attached (and generation is always written to DB before setting Attached in memory)
    5156            0 :                                 debug_assert!(false);
    5157            0 :                                 tracing::error!(
    5158            0 :                                     "Shard {shard_id} generation is None, but it is in PlacementPolicy::Attached mode!"
    5159              :                                 );
    5160            0 :                                 continue;
    5161              :                             }
    5162              :                         }
    5163              :                         PlacementPolicy::Secondary | PlacementPolicy::Detached => {
    5164            0 :                             return Err(ApiError::Conflict(format!(
    5165            0 :                                 "Shard {shard_id} tenant has policy {:?}",
    5166            0 :                                 shard.policy
    5167            0 :                             )));
    5168              :                         }
    5169              :                     }
    5170              :                 }
    5171              : 
    5172            0 :                 return Err(ApiError::ResourceUnavailable(
    5173            0 :                 "One or more shards in the tenant are not yet attached".into(),
    5174            0 :                 ));
    5175            0 :             }
    5176              : 
    5177            0 :             let locked = self.inner.read().unwrap();
    5178              :             for ShardGenerationState {
    5179            0 :                 tenant_shard_id,
    5180            0 :                 generation,
    5181            0 :                 generation_pageserver,
    5182            0 :             } in generations
    5183              :             {
    5184            0 :                 let node_id = generation_pageserver.expect("We checked for None above");
    5185            0 :                 let node = locked
    5186            0 :                     .nodes
    5187            0 :                     .get(&node_id)
    5188            0 :                     .ok_or(ApiError::Conflict(format!(
    5189            0 :                         "Raced with removal of node {node_id}"
    5190            0 :                     )))?;
    5191            0 :                 let generation = generation.expect("Checked above");
    5192              : 
    5193            0 :                 let tenant = locked.tenants.get(&tenant_shard_id);
    5194              : 
    5195              :                 // TODO(vlad): Abstract the logic that finds stale attached locations
    5196              :                 // from observed state into a [`Service`] method.
    5197            0 :                 let other_locations = match tenant {
    5198            0 :                     Some(tenant) => {
    5199            0 :                         let mut other = tenant.attached_locations();
    5200            0 :                         let latest_location_index =
    5201            0 :                             other.iter().position(|&l| l == (node.get_id(), generation));
    5202            0 :                         if let Some(idx) = latest_location_index {
    5203            0 :                             other.remove(idx);
    5204            0 :                         }
    5205              : 
    5206            0 :                         other
    5207              :                     }
    5208            0 :                     None => Vec::default(),
    5209              :                 };
    5210              : 
    5211            0 :                 let location = ShardMutationLocations {
    5212            0 :                     latest: MutationLocation {
    5213            0 :                         node: node.clone(),
    5214            0 :                         generation,
    5215            0 :                     },
    5216            0 :                     other: other_locations
    5217            0 :                         .into_iter()
    5218            0 :                         .filter_map(|(node_id, generation)| {
    5219            0 :                             let node = locked.nodes.get(&node_id)?;
    5220              : 
    5221            0 :                             Some(MutationLocation {
    5222            0 :                                 node: node.clone(),
    5223            0 :                                 generation,
    5224            0 :                             })
    5225            0 :                         })
    5226            0 :                         .collect(),
    5227              :                 };
    5228            0 :                 locations.0.insert(tenant_shard_id, location);
    5229              :             }
    5230              : 
    5231            0 :             locations
    5232              :         };
    5233              : 
    5234            0 :         let result = op(mutation_locations.clone()).await;
    5235              : 
    5236              :         // Post-check: are all the generations of all the shards the same as they were initially?  This proves that
    5237              :         // our remote operation executed on the latest generation and is therefore persistent.
    5238              :         {
    5239            0 :             let latest_generations = self
    5240            0 :                 .persistence
    5241            0 :                 .tenant_generations(tenant_id_or_shard_id.tenant_id())
    5242            0 :                 .await?
    5243            0 :                 .into_iter()
    5244            0 :                 .filter(|i| tenant_id_or_shard_id.matches(&i.tenant_shard_id))
    5245            0 :                 .collect::<Vec<_>>();
    5246              : 
    5247            0 :             if latest_generations
    5248            0 :                 .into_iter()
    5249            0 :                 .map(
    5250              :                     |ShardGenerationState {
    5251              :                          tenant_shard_id,
    5252              :                          generation,
    5253              :                          generation_pageserver: _,
    5254            0 :                      }| (tenant_shard_id, generation),
    5255              :                 )
    5256            0 :                 .collect::<Vec<_>>()
    5257            0 :                 != mutation_locations
    5258            0 :                     .0
    5259            0 :                     .into_iter()
    5260            0 :                     .map(|i| (i.0, Some(i.1.latest.generation)))
    5261            0 :                     .collect::<Vec<_>>()
    5262              :             {
    5263              :                 // We raced with something that incremented the generation, and therefore cannot be
    5264              :                 // confident that our actions are persistent (they might have hit an old generation).
    5265              :                 //
    5266              :                 // This is safe but requires a retry: ask the client to do that by giving them a 503 response.
    5267            0 :                 return Err(ApiError::ResourceUnavailable(
    5268            0 :                     "Tenant attachment changed, please retry".into(),
    5269            0 :                 ));
    5270            0 :             }
    5271              :         }
    5272              : 
    5273            0 :         Ok(result)
    5274            0 :     }
    5275              : 
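The post-check above is the heart of `tenant_remote_mutation`'s safety argument: generations only ever increase, so if the set of (shard, generation) pairs read from the database after the operation matches the snapshot taken before it, the operation must have run against the newest attachments. A minimal sketch of that compare-after pattern, using hypothetical `ShardId`/`Generation` aliases rather than the real `ShardGenerationState`:

```rust
use std::collections::BTreeMap;

type ShardId = u32;
type Generation = u32;

/// Sketch: succeed only if no shard's generation moved while `op_result`
/// was being produced; otherwise ask the caller to retry (the real code
/// maps this to a 503 ResourceUnavailable response).
fn check_generations_stable<T>(
    before: &BTreeMap<ShardId, Generation>,
    after: &BTreeMap<ShardId, Generation>,
    op_result: T,
) -> Result<T, &'static str> {
    if before == after {
        Ok(op_result)
    } else {
        Err("Tenant attachment changed, please retry")
    }
}

fn main() {
    let before = BTreeMap::from([(0, 1), (1, 1)]);
    let mut after = before.clone();
    assert!(check_generations_stable(&before, &after, ()).is_ok());
    after.insert(1, 2); // a concurrent migration bumped shard 1's generation
    assert!(check_generations_stable(&before, &after, ()).is_err());
}
```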
    5276            0 :     pub(crate) async fn tenant_timeline_delete(
    5277            0 :         self: &Arc<Self>,
    5278            0 :         tenant_id: TenantId,
    5279            0 :         timeline_id: TimelineId,
    5280            0 :     ) -> Result<StatusCode, ApiError> {
    5281            0 :         tracing::info!("Deleting timeline {tenant_id}/{timeline_id}");
    5282            0 :         let _tenant_lock = trace_shared_lock(
    5283            0 :             &self.tenant_op_locks,
    5284            0 :             tenant_id,
    5285            0 :             TenantOperations::TimelineDelete,
    5286            0 :         )
    5287            0 :         .await;
    5288              : 
    5289            0 :         let status_code = self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    5290            0 :             if targets.0.is_empty() {
    5291            0 :                 return Err(ApiError::NotFound(
    5292            0 :                     anyhow::anyhow!("Tenant not found").into(),
    5293            0 :                 ));
    5294            0 :             }
    5295              : 
    5296            0 :             let (shard_zero_tid, shard_zero_locations) = targets.0.pop_first().expect("Must have at least one shard");
    5297            0 :             assert!(shard_zero_tid.is_shard_zero());
    5298              : 
    5299            0 :             async fn delete_one(
    5300            0 :                 tenant_shard_id: TenantShardId,
    5301            0 :                 timeline_id: TimelineId,
    5302            0 :                 node: Node,
    5303            0 :                 http_client: reqwest::Client,
    5304            0 :                 jwt: Option<String>,
    5305            0 :             ) -> Result<StatusCode, ApiError> {
    5306            0 :                 tracing::info!(
    5307            0 :                     "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    5308              :                 );
    5309              : 
    5310            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    5311            0 :                 let res = client
    5312            0 :                     .timeline_delete(tenant_shard_id, timeline_id)
    5313            0 :                     .await;
    5314              : 
    5315            0 :                 match res {
    5316            0 :                     Ok(ok) => Ok(ok),
    5317            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::CONFLICT, _)) => Ok(StatusCode::CONFLICT),
    5318            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg)) if msg.contains("Requested tenant is missing") => {
    5319            0 :                         Err(ApiError::ResourceUnavailable("Tenant migration in progress".into()))
    5320              :                     },
    5321            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg)) => Err(ApiError::ResourceUnavailable(msg.into())),
    5322            0 :                     Err(e) => {
    5323            0 :                         Err(
    5324            0 :                             ApiError::InternalServerError(anyhow::anyhow!(
    5325            0 :                                 "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
    5326            0 :                             ))
    5327            0 :                         )
    5328              :                     }
    5329              :                 }
    5330            0 :             }
    5331              : 
    5332            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    5333            0 :             let statuses = self
    5334            0 :                 .tenant_for_shards(locations, |tenant_shard_id: TenantShardId, node: Node| {
    5335            0 :                     Box::pin(delete_one(
    5336            0 :                         tenant_shard_id,
    5337            0 :                         timeline_id,
    5338            0 :                         node,
    5339            0 :                         self.http_client.clone(),
    5340            0 :                         self.config.pageserver_jwt_token.clone(),
    5341            0 :                     ))
    5342            0 :                 })
    5343            0 :                 .await?;
    5344              : 
    5345              :             // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero.
    5346              :             // We return 409 (Conflict) if deletion was already in progress on any of the shards,
    5347              :             // and 202 (Accepted) otherwise.
    5348            0 :             if statuses.iter().any(|s| s == &StatusCode::CONFLICT) {
    5349            0 :                 return Ok(StatusCode::CONFLICT);
    5350            0 :             }
    5351              : 
    5352            0 :             if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
    5353            0 :                 return Ok(StatusCode::ACCEPTED);
    5354            0 :             }
    5355              : 
    5356              :             // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
    5357              :             // to shard zero, it gives the more intuitive behavior that a GET only starts returning 404 once the deletion is fully done.
    5358            0 :             let shard_zero_status = delete_one(
    5359            0 :                 shard_zero_tid,
    5360            0 :                 timeline_id,
    5361            0 :                 shard_zero_locations.latest.node,
    5362            0 :                 self.http_client.clone(),
    5363            0 :                 self.config.pageserver_jwt_token.clone(),
    5364            0 :             )
    5365            0 :             .await?;
    5366            0 :             Ok(shard_zero_status)
    5367            0 :         }).await?;
    5368              : 
    5369            0 :         self.tenant_timeline_delete_safekeepers(tenant_id, timeline_id)
    5370            0 :             .await?;
    5371              : 
    5372            0 :         status_code
    5373            0 :     }
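The aggregation of per-shard statuses in tenant_timeline_delete follows a strict precedence worth spelling out: a 409 from any shard greater than zero wins outright, any shard that has not yet reached 404 keeps the response at 202, and only when every other shard is fully gone does shard zero's own status become the answer. A standalone sketch of that precedence (using the `http` crate's `StatusCode`; this is not the service's actual helper):

```rust
use http::StatusCode;

/// Sketch of the precedence rule for shards > 0: CONFLICT anywhere => 409,
/// anything not yet NOT_FOUND => 202, and None means all other shards are
/// deleted so the caller proceeds to delete shard zero last.
fn aggregate_delete_statuses(statuses: &[StatusCode]) -> Option<StatusCode> {
    if statuses.contains(&StatusCode::CONFLICT) {
        Some(StatusCode::CONFLICT)
    } else if statuses.iter().any(|s| *s != StatusCode::NOT_FOUND) {
        Some(StatusCode::ACCEPTED)
    } else {
        None
    }
}

fn main() {
    use http::StatusCode as S;
    assert_eq!(aggregate_delete_statuses(&[S::NOT_FOUND, S::CONFLICT]), Some(S::CONFLICT));
    assert_eq!(aggregate_delete_statuses(&[S::NOT_FOUND, S::ACCEPTED]), Some(S::ACCEPTED));
    assert_eq!(aggregate_delete_statuses(&[S::NOT_FOUND, S::NOT_FOUND]), None);
}
```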
    5374              :     /// Look up the node holding shard 0, for when you know the TenantId but not a specific shard.
    5375              :     ///
    5376              :     /// Returns that node together with the TenantShardId of shard 0.
    5377            0 :     pub(crate) async fn tenant_shard0_node(
    5378            0 :         &self,
    5379            0 :         tenant_id: TenantId,
    5380            0 :     ) -> Result<(Node, TenantShardId), ApiError> {
    5381            0 :         let tenant_shard_id = {
    5382            0 :             let locked = self.inner.read().unwrap();
    5383            0 :             let Some((tenant_shard_id, _shard)) = locked
    5384            0 :                 .tenants
    5385            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5386            0 :                 .next()
    5387              :             else {
    5388            0 :                 return Err(ApiError::NotFound(
    5389            0 :                     anyhow::anyhow!("Tenant {tenant_id} not found").into(),
    5390            0 :                 ));
    5391              :             };
    5392              : 
    5393            0 :             *tenant_shard_id
    5394              :         };
    5395              : 
    5396            0 :         self.tenant_shard_node(tenant_shard_id)
    5397            0 :             .await
    5398            0 :             .map(|node| (node, tenant_shard_id))
    5399            0 :     }
    5400              : 
    5401              :     /// When you need to send an HTTP request to the pageserver that holds a shard of a tenant, this
    5402              :     /// function looks up and returns the node. If the shard isn't found, it returns Err(ApiError::NotFound).
    5403              :     ///
    5404              :     /// Returns the intent node while no reconciliation is in flight, otherwise the latest attached location from the database.
    5405            0 :     pub(crate) async fn tenant_shard_node(
    5406            0 :         &self,
    5407            0 :         tenant_shard_id: TenantShardId,
    5408            0 :     ) -> Result<Node, ApiError> {
    5409              :         // Look up in-memory state and maybe use the node from there.
    5410              :         {
    5411            0 :             let locked = self.inner.read().unwrap();
    5412            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    5413            0 :                 return Err(ApiError::NotFound(
    5414            0 :                     anyhow::anyhow!("Tenant shard {tenant_shard_id} not found").into(),
    5415            0 :                 ));
    5416              :             };
    5417              : 
    5418            0 :             let Some(intent_node_id) = shard.intent.get_attached() else {
    5419            0 :                 tracing::warn!(
    5420            0 :                     tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
    5421            0 :                     "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
    5422              :                     shard.policy
    5423              :                 );
    5424            0 :                 return Err(ApiError::Conflict(
    5425            0 :                     "Cannot call timeline API on non-attached tenant".to_string(),
    5426            0 :                 ));
    5427              :             };
    5428              : 
    5429            0 :             if shard.reconciler.is_none() {
    5430              :                 // Optimization: while no reconcile is in flight, we may trust our in-memory state
    5431              :                 // to tell us which pageserver to use. Otherwise we will fall through and hit the database.
    5432            0 :                 let Some(node) = locked.nodes.get(intent_node_id) else {
    5433              :                     // This should never happen
    5434            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5435            0 :                         "Shard refers to nonexistent node"
    5436            0 :                     )));
    5437              :                 };
    5438            0 :                 return Ok(node.clone());
    5439            0 :             }
    5440              :         };
    5441              : 
    5442              :         // Look up the latest attached pageserver location from the database
    5443              :         // generation state: this will reflect the progress of any ongoing migration.
    5444              :         // Note that it is not guaranteed to _stay_ here; our caller must still handle
    5445              :         // the case where they call through to the pageserver and get a 404.
    5446            0 :         let db_result = self
    5447            0 :             .persistence
    5448            0 :             .tenant_generations(tenant_shard_id.tenant_id)
    5449            0 :             .await?;
    5450              :         let Some(ShardGenerationState {
    5451              :             tenant_shard_id: _,
    5452              :             generation: _,
    5453            0 :             generation_pageserver: Some(node_id),
    5454            0 :         }) = db_result
    5455            0 :             .into_iter()
    5456            0 :             .find(|s| s.tenant_shard_id == tenant_shard_id)
    5457              :         else {
    5458              :             // This can happen if we raced with a tenant deletion or a shard split.  On a retry
    5459              :             // the caller will either succeed (shard split case), get a proper 404 (deletion case),
    5460              :             // or a conflict response (case where tenant was detached in background)
    5461            0 :             return Err(ApiError::ResourceUnavailable(
    5462            0 :                 format!("Shard {tenant_shard_id} not found in database, or is not attached").into(),
    5463            0 :             ));
    5464              :         };
    5465            0 :         let locked = self.inner.read().unwrap();
    5466            0 :         let Some(node) = locked.nodes.get(&node_id) else {
    5467              :             // This should never happen
    5468            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5469            0 :                 "Shard refers to nonexistent node"
    5470            0 :             )));
    5471              :         };
    5472              :         // A reconciliation is in flight, so we cannot trust the in-memory observed state: return the latest attached location from the database.
    5473            0 :         Ok(node.clone())
    5474            0 :     }
    5475              : 
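tenant_shard_node above implements a two-tier read, and the branch structure is compact enough to be easy to misread: the in-memory intent is authoritative only while `shard.reconciler` is `None`; the moment a reconcile is in flight, only the database's `generation_pageserver` (updated in the same transaction that increments the generation) can say where the newest attachment lives. A condensed sketch of the decision, with hypothetical types:

```rust
/// Condensed decision logic from tenant_shard_node (hypothetical types;
/// `db_lookup` stands in for the Persistence::tenant_generations query).
struct ShardView {
    intent_node: Option<u64>,
    reconciler_active: bool,
}

fn resolve_node(shard: &ShardView, db_lookup: impl Fn() -> Option<u64>) -> Option<u64> {
    match (shard.intent_node, shard.reconciler_active) {
        // No reconcile in flight: the in-memory intent is trustworthy.
        (Some(node), false) => Some(node),
        // Reconcile in flight: only the database knows which node the
        // latest generation was issued to.
        (Some(_), true) => db_lookup(),
        // Not attached at all: the caller gets a conflict upstream.
        (None, _) => None,
    }
}

fn main() {
    let quiescent = ShardView { intent_node: Some(7), reconciler_active: false };
    assert_eq!(resolve_node(&quiescent, || Some(9)), Some(7));
    let migrating = ShardView { intent_node: Some(7), reconciler_active: true };
    assert_eq!(resolve_node(&migrating, || Some(9)), Some(9));
}
```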
    5476            0 :     pub(crate) fn tenant_locate(
    5477            0 :         &self,
    5478            0 :         tenant_id: TenantId,
    5479            0 :     ) -> Result<TenantLocateResponse, ApiError> {
    5480            0 :         let locked = self.inner.read().unwrap();
    5481            0 :         tracing::info!("Locating shards for tenant {tenant_id}");
    5482              : 
    5483            0 :         let mut result = Vec::new();
    5484            0 :         let mut shard_params: Option<ShardParameters> = None;
    5485              : 
    5486            0 :         for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5487              :         {
    5488            0 :             let node_id =
    5489            0 :                 shard
    5490            0 :                     .intent
    5491            0 :                     .get_attached()
    5492            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    5493            0 :                         "Cannot locate a tenant that is not attached"
    5494            0 :                     )))?;
    5495              : 
    5496            0 :             let node = locked
    5497            0 :                 .nodes
    5498            0 :                 .get(&node_id)
    5499            0 :                 .expect("Pageservers may not be deleted while referenced");
    5500              : 
    5501            0 :             result.push(node.shard_location(*tenant_shard_id));
    5502              : 
    5503            0 :             match &shard_params {
    5504            0 :                 None => {
    5505            0 :                     shard_params = Some(ShardParameters {
    5506            0 :                         stripe_size: shard.shard.stripe_size,
    5507            0 :                         count: shard.shard.count,
    5508            0 :                     });
    5509            0 :                 }
    5510            0 :                 Some(params) => {
    5511            0 :                     if params.stripe_size != shard.shard.stripe_size {
    5512              :                         // This should never happen.  We enforce at runtime because it's simpler than
    5513              :                         // adding an extra per-tenant data structure to store the things that should be the same.
    5514            0 :                         return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5515            0 :                             "Inconsistent shard stripe size parameters!"
    5516            0 :                         )));
    5517            0 :                     }
    5518              :                 }
    5519              :             }
    5520              :         }
    5521              : 
    5522            0 :         if result.is_empty() {
    5523            0 :             return Err(ApiError::NotFound(
    5524            0 :                 anyhow::anyhow!("No shards for this tenant ID found").into(),
    5525            0 :             ));
    5526            0 :         }
    5527            0 :         let shard_params = shard_params.expect("result is non-empty, therefore this is set");
    5528            0 :         tracing::info!(
    5529            0 :             "Located tenant {} with params {:?} on shards {}",
    5530              :             tenant_id,
    5531              :             shard_params,
    5532            0 :             result
    5533            0 :                 .iter()
    5534            0 :                 .map(|s| format!("{s:?}"))
    5535            0 :                 .collect::<Vec<_>>()
    5536            0 :                 .join(",")
    5537              :         );
    5538              : 
    5539            0 :         Ok(TenantLocateResponse {
    5540            0 :             shards: result,
    5541            0 :             shard_params,
    5542            0 :         })
    5543            0 :     }
    5544              : 
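tenant_locate also doubles as a runtime consistency check: every shard of a tenant must report identical ShardParameters (stripe size in particular), and rather than maintain a per-tenant side table, the code compares each shard against the first one seen. The same first-element-wins check in isolation (a generic sketch, not the service's code):

```rust
/// Returns Ok(first value) if every item matches the first, mirroring how
/// tenant_locate validates that all shards agree on their stripe size.
fn require_uniform<T: PartialEq + Copy>(
    mut it: impl Iterator<Item = T>,
) -> Result<Option<T>, &'static str> {
    let Some(first) = it.next() else { return Ok(None) };
    if it.all(|x| x == first) {
        Ok(Some(first))
    } else {
        Err("Inconsistent shard stripe size parameters!")
    }
}

fn main() {
    assert_eq!(require_uniform([32768u32, 32768, 32768].into_iter()), Ok(Some(32768)));
    assert!(require_uniform([32768u32, 65536].into_iter()).is_err());
}
```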
    5545              :     /// Returns None if the input iterator of shards does not include a shard with number=0
    5546            0 :     fn tenant_describe_impl<'a>(
    5547            0 :         &self,
    5548            0 :         shards: impl Iterator<Item = &'a TenantShard>,
    5549            0 :     ) -> Option<TenantDescribeResponse> {
    5550            0 :         let mut shard_zero = None;
    5551            0 :         let mut describe_shards = Vec::new();
    5552              : 
    5553            0 :         for shard in shards {
    5554            0 :             if shard.tenant_shard_id.is_shard_zero() {
    5555            0 :                 shard_zero = Some(shard);
    5556            0 :             }
    5557              : 
    5558            0 :             describe_shards.push(TenantDescribeResponseShard {
    5559            0 :                 tenant_shard_id: shard.tenant_shard_id,
    5560            0 :                 node_attached: *shard.intent.get_attached(),
    5561            0 :                 node_secondary: shard.intent.get_secondary().to_vec(),
    5562            0 :                 last_error: shard
    5563            0 :                     .last_error
    5564            0 :                     .lock()
    5565            0 :                     .unwrap()
    5566            0 :                     .as_ref()
    5567            0 :                     .map(|e| format!("{e}"))
    5568            0 :                     .unwrap_or("".to_string())
    5569            0 :                     .clone(),
    5570            0 :                 is_reconciling: shard.reconciler.is_some(),
    5571            0 :                 is_pending_compute_notification: shard.pending_compute_notification,
    5572            0 :                 is_splitting: matches!(shard.splitting, SplitState::Splitting),
    5573            0 :                 is_importing: shard.importing == TimelineImportState::Importing,
    5574            0 :                 scheduling_policy: shard.get_scheduling_policy(),
    5575            0 :                 preferred_az_id: shard.preferred_az().map(ToString::to_string),
    5576              :             })
    5577              :         }
    5578              : 
    5579            0 :         let shard_zero = shard_zero?;
    5580              : 
    5581            0 :         Some(TenantDescribeResponse {
    5582            0 :             tenant_id: shard_zero.tenant_shard_id.tenant_id,
    5583            0 :             shards: describe_shards,
    5584            0 :             stripe_size: shard_zero.shard.stripe_size,
    5585            0 :             policy: shard_zero.policy.clone(),
    5586            0 :             config: shard_zero.config.clone(),
    5587            0 :         })
    5588            0 :     }
    5589              : 
    5590            0 :     pub(crate) fn tenant_describe(
    5591            0 :         &self,
    5592            0 :         tenant_id: TenantId,
    5593            0 :     ) -> Result<TenantDescribeResponse, ApiError> {
    5594            0 :         let locked = self.inner.read().unwrap();
    5595              : 
    5596            0 :         self.tenant_describe_impl(
    5597            0 :             locked
    5598            0 :                 .tenants
    5599            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5600            0 :                 .map(|(_k, v)| v),
    5601              :         )
    5602            0 :         .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
    5603            0 :     }
    5604              : 
    5605              :     /* BEGIN_HADRON */
    5606            0 :     pub(crate) async fn tenant_timeline_describe(
    5607            0 :         &self,
    5608            0 :         tenant_id: TenantId,
    5609            0 :         timeline_id: TimelineId,
    5610            0 :     ) -> Result<TenantTimelineDescribeResponse, ApiError> {
    5611            0 :         self.tenant_remote_mutation(tenant_id, |locations| async move {
    5612            0 :             if locations.0.is_empty() {
    5613            0 :                 return Err(ApiError::NotFound(
    5614            0 :                     anyhow::anyhow!("Tenant not found").into(),
    5615            0 :                 ));
    5616            0 :             };
    5617              : 
    5618            0 :             let locations: Vec<(TenantShardId, Node)> = locations
    5619            0 :                 .0
    5620            0 :                 .iter()
    5621            0 :                 .map(|t| (*t.0, t.1.latest.node.clone()))
    5622            0 :                 .collect();
    5623            0 :             let mut futs = FuturesUnordered::new();
    5624              : 
    5625            0 :             for (shard_id, node) in locations {
    5626            0 :                 futs.push({
    5627            0 :                     async move {
    5628            0 :                         let result = node
    5629            0 :                             .with_client_retries(
    5630            0 :                                 |client| async move {
    5631            0 :                                     client
    5632            0 :                                         .tenant_timeline_describe(&shard_id, &timeline_id)
    5633            0 :                                         .await
    5634            0 :                                 },
    5635            0 :                                 &self.http_client,
    5636            0 :                                 &self.config.pageserver_jwt_token,
    5637              :                                 3,
    5638              :                                 3,
    5639            0 :                                 Duration::from_secs(30),
    5640            0 :                                 &self.cancel,
    5641              :                             )
    5642            0 :                             .await;
    5643            0 :                         (result, shard_id, node.get_id())
    5644            0 :                     }
    5645              :                 });
    5646              :             }
    5647              : 
    5648            0 :             let mut results: Vec<TimelineInfo> = Vec::new();
    5649            0 :             while let Some((result, tenant_shard_id, node_id)) = futs.next().await {
    5650            0 :                 match result {
    5651            0 :                     Some(Ok(timeline_info)) => results.push(timeline_info),
    5652            0 :                     Some(Err(e)) => {
    5653            0 :                         tracing::warn!(
    5654            0 :                             "Failed to describe tenant {} timeline {} for pageserver {}: {e}",
    5655              :                             tenant_shard_id,
    5656              :                             timeline_id,
    5657              :                             node_id,
    5658              :                         );
    5659            0 :                         return Err(ApiError::ResourceUnavailable(format!("{e}").into()));
    5660              :                     }
    5661            0 :                     None => return Err(ApiError::Cancelled),
    5662              :                 }
    5663              :             }
    5664            0 :             let mut image_consistent_lsn: Option<Lsn> = Some(Lsn::MAX);
    5665            0 :             for timeline_info in &results {
    5666            0 :                 if let Some(tline_image_consistent_lsn) = timeline_info.image_consistent_lsn {
    5667            0 :                     image_consistent_lsn = Some(std::cmp::min(
    5668            0 :                         image_consistent_lsn.unwrap(),
    5669            0 :                         tline_image_consistent_lsn,
    5670            0 :                     ));
    5671            0 :                 } else {
    5672            0 :                     tracing::warn!(
    5673            0 :                         "Timeline {} on shard {} does not have image consistent lsn",
    5674              :                         timeline_info.timeline_id,
    5675              :                         timeline_info.tenant_id
    5676              :                     );
    5677            0 :                     image_consistent_lsn = None;
    5678            0 :                     break;
    5679              :                 }
    5680              :             }
    5681              : 
    5682            0 :             Ok(TenantTimelineDescribeResponse {
    5683            0 :                 shards: results,
    5684            0 :                 image_consistent_lsn,
    5685            0 :             })
    5686            0 :         })
    5687            0 :         .await?
    5688            0 :     }
    5689              :     /* END_HADRON */
    5690              : 
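The image_consistent_lsn fold in tenant_timeline_describe starts from Lsn::MAX, takes the minimum across shards, and collapses to None as soon as any shard lacks a value, since a tenant-wide "image consistent" LSN is only meaningful if every shard can vouch for one. The same computation can be expressed as a short iterator chain (a sketch with u64 standing in for Lsn):

```rust
/// Minimum across all shards, or None if any shard is missing a value.
/// (u64 stands in for Lsn in this sketch.)
fn image_consistent_lsn(per_shard: &[Option<u64>]) -> Option<u64> {
    per_shard
        .iter()
        .copied()
        // collecting Options yields None if any element is None
        .collect::<Option<Vec<u64>>>()
        .and_then(|lsns| lsns.into_iter().min())
}

fn main() {
    assert_eq!(image_consistent_lsn(&[Some(10), Some(7)]), Some(7));
    assert_eq!(image_consistent_lsn(&[Some(10), None]), None);
}
```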
    5691              :     /// `limit` & `start_after` are pagination parameters. Since we are walking an in-memory map, `start_after`
    5692              :     /// does not avoid traversing data, it just avoids returning it. This is suitable for our purposes, since our
    5693              :     /// in-memory maps are small enough to traverse fast; the pagination exists only to avoid serializing huge
    5694              :     /// JSON responses in our external API.
    5695            0 :     pub(crate) fn tenant_list(
    5696            0 :         &self,
    5697            0 :         limit: Option<usize>,
    5698            0 :         start_after: Option<TenantId>,
    5699            0 :     ) -> Vec<TenantDescribeResponse> {
    5700            0 :         let locked = self.inner.read().unwrap();
    5701              : 
    5702              :         // Apply the start_after parameter
    5703            0 :         let shard_range = match start_after {
    5704            0 :             None => locked.tenants.range(..),
    5705            0 :             Some(tenant_id) => locked.tenants.range(
    5706            0 :                 TenantShardId {
    5707            0 :                     tenant_id,
    5708            0 :                     shard_number: ShardNumber(u8::MAX),
    5709            0 :                     shard_count: ShardCount(u8::MAX),
    5710            0 :                 }..,
    5711              :             ),
    5712              :         };
    5713              : 
    5714            0 :         let mut result = Vec::new();
    5715            0 :         for (_tenant_id, tenant_shards) in &shard_range.group_by(|(id, _shard)| id.tenant_id) {
    5716            0 :             result.push(
    5717            0 :                 self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
    5718            0 :                     .expect("Groups are always non-empty"),
    5719              :             );
    5720              : 
    5721              :             // Enforce `limit` parameter
    5722            0 :             if let Some(limit) = limit {
    5723            0 :                 if result.len() >= limit {
    5724            0 :                     break;
    5725            0 :                 }
    5726            0 :             }
    5727              :         }
    5728              : 
    5729            0 :         result
    5730            0 :     }
    5731              : 
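Because the tenant map is ordered by TenantShardId, `start_after` works as a keyset cursor rather than a numeric offset: ranging from a sentinel key with the maximal shard number and count for the last-seen tenant skips all of that tenant's shards without enumerating them. The trick in miniature, with (tenant, shard) tuples standing in for TenantShardId:

```rust
use std::collections::BTreeMap;
use std::ops::Bound;

/// Keyset pagination sketch: resume strictly after every entry of a tenant
/// by ranging from that tenant's greatest possible key.
fn page_after(
    map: &BTreeMap<(u32, u8), &str>,
    start_after: Option<u32>,
    limit: usize,
) -> Vec<(u32, u8)> {
    let range = match start_after {
        None => map.range(..),
        // (tenant, u8::MAX) sorts at or above every real key of `tenant`,
        // so an Excluded lower bound skips all of its entries.
        Some(tenant) => map.range((Bound::Excluded((tenant, u8::MAX)), Bound::Unbounded)),
    };
    range.take(limit).map(|(k, _)| *k).collect()
}

fn main() {
    let map = BTreeMap::from([((1, 0), "a"), ((1, 1), "a"), ((2, 0), "b")]);
    assert_eq!(page_after(&map, None, 10), vec![(1, 0), (1, 1), (2, 0)]);
    assert_eq!(page_after(&map, Some(1), 10), vec![(2, 0)]);
}
```

Note this sketch paginates individual entries, whereas the real tenant_list applies `limit` to whole tenants after grouping shards with group_by.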
    5732              :     #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
    5733              :     async fn abort_tenant_shard_split(
    5734              :         &self,
    5735              :         op: &TenantShardSplitAbort,
    5736              :     ) -> Result<(), TenantShardSplitAbortError> {
    5737              :         // Cleaning up a split:
    5738              :         // - Parent shards are not destroyed during a split, just detached.
    5739              :         // - Failed pageserver split API calls can leave the remote node with just the parent attached,
    5740              :         //   just the children attached, or both.
    5741              :         //
    5742              :         // Therefore our work to do is to:
    5743              :         // 1. Clean up storage controller's internal state to just refer to parents, no children
    5744              :         // 2. Call out to pageservers to ensure that children are detached
    5745              :         // 3. Call out to pageservers to ensure that parents are attached.
    5746              :         //
    5747              :         // Crash safety:
    5748              :         // - If the storage controller stops running during this cleanup *after* clearing the splitting state
    5749              :         //   from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
    5750              :         //   and detach them.
    5751              :         // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
    5752              :         //   from our database, then we will re-enter this cleanup routine on startup.
    5753              : 
    5754              :         let TenantShardSplitAbort {
    5755              :             tenant_id,
    5756              :             new_shard_count,
    5757              :             new_stripe_size,
    5758              :             ..
    5759              :         } = op;
    5760              : 
    5761              :         // First abort persistent state, if any exists.
    5762              :         match self
    5763              :             .persistence
    5764              :             .abort_shard_split(*tenant_id, *new_shard_count)
    5765              :             .await?
    5766              :         {
    5767              :             AbortShardSplitStatus::Aborted => {
    5768              :                 // Proceed to roll back any child shards created on pageservers
    5769              :             }
    5770              :             AbortShardSplitStatus::Complete => {
    5771              :                 // The split completed (we might hit that path if e.g. our database transaction
    5772              :                 // to write the completion landed in the database, but we dropped connection
    5773              :                 // before seeing the result).
    5774              :                 //
    5775              :                 // We must update in-memory state to reflect the successful split.
    5776              :                 self.tenant_shard_split_commit_inmem(
    5777              :                     *tenant_id,
    5778              :                     *new_shard_count,
    5779              :                     *new_stripe_size,
    5780              :                 );
    5781              :                 return Ok(());
    5782              :             }
    5783              :         }
    5784              : 
    5785              :         // Clean up in-memory state, and accumulate the list of child locations that need detaching
    5786              :         let detach_locations: Vec<(Node, TenantShardId)> = {
    5787              :             let mut detach_locations = Vec::new();
    5788              :             let mut locked = self.inner.write().unwrap();
    5789              :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5790              : 
    5791              :             for (tenant_shard_id, shard) in
    5792              :                 tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
    5793              :             {
    5794              :                 if shard.shard.count == op.new_shard_count {
    5795              :                     // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
    5796              :                     // is infallible, so if we got an error we shouldn't have got that far.
    5797              :                     tracing::warn!(
    5798              :                         "During split abort, child shard {tenant_shard_id} found in-memory"
    5799              :                     );
    5800              :                     continue;
    5801              :                 }
    5802              : 
    5803              :                 // Add the children of this shard to this list of things to detach
    5804              :                 if let Some(node_id) = shard.intent.get_attached() {
    5805              :                     for child_id in tenant_shard_id.split(*new_shard_count) {
    5806              :                         detach_locations.push((
    5807              :                             nodes
    5808              :                                 .get(node_id)
    5809              :                                 .expect("Intent references nonexistent node")
    5810              :                                 .clone(),
    5811              :                             child_id,
    5812              :                         ));
    5813              :                     }
    5814              :                 } else {
    5815              :                     tracing::warn!(
    5816              :                         "During split abort, shard {tenant_shard_id} has no attached location"
    5817              :                     );
    5818              :                 }
    5819              : 
    5820              :                 tracing::info!("Restoring parent shard {tenant_shard_id}");
    5821              : 
    5822              :                 // Drop any intents that refer to unavailable nodes, to enable this abort to proceed even
    5823              :                 // if the original attachment location is offline.
    5824              :                 if let Some(node_id) = shard.intent.get_attached() {
    5825              :                     if !nodes.get(node_id).unwrap().is_available() {
    5826              :                         tracing::info!(
    5827              :                             "Demoting attached intent for {tenant_shard_id} on unavailable node {node_id}"
    5828              :                         );
    5829              :                         shard.intent.demote_attached(scheduler, *node_id);
    5830              :                     }
    5831              :                 }
    5832              :                 for node_id in shard.intent.get_secondary().clone() {
    5833              :                     if !nodes.get(&node_id).unwrap().is_available() {
    5834              :                         tracing::info!(
    5835              :                             "Dropping secondary intent for {tenant_shard_id} on unavailable node {node_id}"
    5836              :                         );
    5837              :                         shard.intent.remove_secondary(scheduler, node_id);
    5838              :                     }
    5839              :                 }
    5840              : 
    5841              :                 shard.splitting = SplitState::Idle;
    5842              :                 if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
    5843              :                     // If this shard can't be scheduled now (perhaps due to offline nodes or
    5844              :                     // capacity issues), that must not prevent us rolling back a split.  In this
    5845              :                     // case it should be eventually scheduled in the background.
    5846              :                     tracing::warn!("Failed to schedule {tenant_shard_id} during split abort: {e}")
    5847              :                 }
    5848              : 
    5849              :                 self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    5850              :             }
    5851              : 
    5852              :             // We don't expect any new_shard_count shards to exist here, but drop them just in case
    5853              :             tenants
    5854            0 :                 .retain(|id, s| !(id.tenant_id == *tenant_id && s.shard.count == *new_shard_count));
    5855              : 
    5856              :             detach_locations
    5857              :         };
    5858              : 
    5859              :         for (node, child_id) in detach_locations {
    5860              :             if !node.is_available() {
    5861              :                 // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
    5862              :                 // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
    5863              :                 // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
    5864              :                 // them from the node.
    5865              :                 tracing::warn!(
    5866              :                     "Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated."
    5867              :                 );
    5868              :                 continue;
    5869              :             }
    5870              : 
    5871              :             // Detach the remote child.  If the pageserver split API call is still in progress, this call will get
    5872              :             // a 503 and retry, up to our limit.
    5873              :             tracing::info!("Detaching {child_id} on {node}...");
    5874              :             match node
    5875              :                 .with_client_retries(
    5876            0 :                     |client| async move {
    5877            0 :                         let config = LocationConfig {
    5878            0 :                             mode: LocationConfigMode::Detached,
    5879            0 :                             generation: None,
    5880            0 :                             secondary_conf: None,
    5881            0 :                             shard_number: child_id.shard_number.0,
    5882            0 :                             shard_count: child_id.shard_count.literal(),
    5883            0 :                             // Stripe size and tenant config don't matter when detaching
    5884            0 :                             shard_stripe_size: 0,
    5885            0 :                             tenant_conf: TenantConfig::default(),
    5886            0 :                         };
    5887              : 
    5888            0 :                         client.location_config(child_id, config, None, false).await
    5889            0 :                     },
    5890              :                     &self.http_client,
    5891              :                     &self.config.pageserver_jwt_token,
    5892              :                     1,
    5893              :                     10,
    5894              :                     Duration::from_secs(5),
    5895              :                     &self.reconcilers_cancel,
    5896              :                 )
    5897              :                 .await
    5898              :             {
    5899              :                 Some(Ok(_)) => {}
    5900              :                 Some(Err(e)) => {
    5901              :                     // We failed to communicate with the remote node.  This is problematic: we may be
    5902              :                     // leaving it with a rogue child shard.
    5903              :                     tracing::warn!(
    5904              :                         "Failed to detach child {child_id} from node {node} during abort"
    5905              :                     );
    5906              :                     return Err(e.into());
    5907              :                 }
    5908              :                 None => {
    5909              :                     // Cancellation: we were shutdown or the node went offline. Shutdown is fine, we'll
    5910              :                     // clean up on restart. The node going offline requires a retry.
    5911              :                     return Err(TenantShardSplitAbortError::Unavailable);
    5912              :                 }
    5913              :             };
    5914              :         }
    5915              : 
    5916              :         tracing::info!("Successfully aborted split");
    5917              :         Ok(())
    5918              :     }
    5919              : 
    5920              :     /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
    5921              :     /// of the tenant map to reflect the child shards that exist after the split.
    5922            0 :     fn tenant_shard_split_commit_inmem(
    5923            0 :         &self,
    5924            0 :         tenant_id: TenantId,
    5925            0 :         new_shard_count: ShardCount,
    5926            0 :         new_stripe_size: Option<ShardStripeSize>,
    5927            0 :     ) -> (
    5928            0 :         TenantShardSplitResponse,
    5929            0 :         Vec<(TenantShardId, NodeId, ShardStripeSize)>,
    5930            0 :         Vec<ReconcilerWaiter>,
    5931            0 :     ) {
    5932            0 :         let mut response = TenantShardSplitResponse {
    5933            0 :             new_shards: Vec::new(),
    5934            0 :         };
    5935            0 :         let mut child_locations = Vec::new();
    5936            0 :         let mut waiters = Vec::new();
    5937              : 
    5938              :         {
    5939            0 :             let mut locked = self.inner.write().unwrap();
    5940              : 
    5941            0 :             let parent_ids = locked
    5942            0 :                 .tenants
    5943            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5944            0 :                 .map(|(shard_id, _)| *shard_id)
    5945            0 :                 .collect::<Vec<_>>();
    5946              : 
    5947            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5948            0 :             for parent_id in parent_ids {
    5949            0 :                 let child_ids = parent_id.split(new_shard_count);
    5950              : 
    5951              :                 let (
    5952            0 :                     pageserver,
    5953            0 :                     generation,
    5954            0 :                     policy,
    5955            0 :                     parent_ident,
    5956            0 :                     config,
    5957            0 :                     preferred_az,
    5958            0 :                     secondary_count,
    5959              :                 ) = {
    5960            0 :                     let mut old_state = tenants
    5961            0 :                         .remove(&parent_id)
    5962            0 :                         .expect("It was present, we just split it");
    5963              : 
    5964              :                     // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
    5965              :                     // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
    5966              :                     // nothing else can clear this.
    5967            0 :                     assert!(matches!(old_state.splitting, SplitState::Splitting));
    5968              : 
    5969            0 :                     let old_attached = old_state.intent.get_attached().unwrap();
    5970            0 :                     old_state.intent.clear(scheduler);
    5971            0 :                     let generation = old_state.generation.expect("Shard must have been attached");
    5972            0 :                     (
    5973            0 :                         old_attached,
    5974            0 :                         generation,
    5975            0 :                         old_state.policy.clone(),
    5976            0 :                         old_state.shard,
    5977            0 :                         old_state.config.clone(),
    5978            0 :                         old_state.preferred_az().cloned(),
    5979            0 :                         old_state.intent.get_secondary().len(),
    5980            0 :                     )
    5981              :                 };
    5982              : 
    5983            0 :                 let mut schedule_context = ScheduleContext::default();
    5984            0 :                 for child in child_ids {
    5985            0 :                     let mut child_shard = parent_ident;
    5986            0 :                     child_shard.number = child.shard_number;
    5987            0 :                     child_shard.count = child.shard_count;
    5988            0 :                     if let Some(stripe_size) = new_stripe_size {
    5989            0 :                         child_shard.stripe_size = stripe_size;
    5990            0 :                     }
    5991              : 
    5992            0 :                     let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
    5993            0 :                     child_observed.insert(
    5994            0 :                         pageserver,
    5995            0 :                         ObservedStateLocation {
    5996            0 :                             conf: Some(attached_location_conf(
    5997            0 :                                 generation,
    5998            0 :                                 &child_shard,
    5999            0 :                                 &config,
    6000            0 :                                 &policy,
    6001            0 :                                 secondary_count,
    6002            0 :                             )),
    6003            0 :                         },
    6004              :                     );
    6005              : 
    6006            0 :                     let mut child_state =
    6007            0 :                         TenantShard::new(child, child_shard, policy.clone(), preferred_az.clone());
    6008            0 :                     child_state.intent =
    6009            0 :                         IntentState::single(scheduler, Some(pageserver), preferred_az.clone());
    6010            0 :                     child_state.observed = ObservedState {
    6011            0 :                         locations: child_observed,
    6012            0 :                     };
    6013            0 :                     child_state.generation = Some(generation);
    6014            0 :                     child_state.config = config.clone();
    6015              : 
    6016              :                     // The child's TenantShard::splitting is intentionally left at the default value of Idle,
    6017              :                     // as at this point in the split process we have succeeded and this part is infallible:
    6018              :                     // we will never need to do any special recovery from this state.
    6019              : 
    6020            0 :                     child_locations.push((child, pageserver, child_shard.stripe_size));
    6021              : 
    6022            0 :                     if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
    6023              :                         // This is not fatal, because we've implicitly already got an attached
    6024              :                         // location for the child shard.  Failure here just means we couldn't
    6025              :                         // find a secondary (e.g. because the cluster is overloaded).
    6026            0 :                         tracing::warn!("Failed to schedule child shard {child}: {e}");
    6027            0 :                     }
    6028              :                     // In the background, attach secondary locations for the new shards
    6029            0 :                     if let Some(waiter) = self.maybe_reconcile_shard(
    6030            0 :                         &mut child_state,
    6031            0 :                         nodes,
    6032            0 :                         ReconcilerPriority::High,
    6033            0 :                     ) {
    6034            0 :                         waiters.push(waiter);
    6035            0 :                     }
    6036              : 
    6037            0 :                     tenants.insert(child, child_state);
    6038            0 :                     response.new_shards.push(child);
    6039              :                 }
    6040              :             }
    6041            0 :             (response, child_locations, waiters)
    6042              :         }
    6043            0 :     }
    6044              : 
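The child ids iterated over in tenant_shard_split_commit_inmem come from TenantShardId::split. The stride rule generally used for power-of-two shard splits (stated here as an assumption, not verified against this file) is that parent shard i of count n maps to children {i, i+n, i+2n, ...} under the new count, which keeps each key's shard assignment stable across the split. A sketch of that rule:

```rust
// Assumed stride rule for shard splitting (hedged: based on the usual design
// of power-of-two splits, not taken from TenantShardId::split itself):
// parent shard `i` of count `n` yields children {i, i+n, i+2n, ...} in count `m`.
fn split_children(parent: u8, old_count: u8, new_count: u8) -> Vec<u8> {
    assert!(old_count > 0 && new_count % old_count == 0);
    (parent..new_count).step_by(old_count as usize).collect()
}

fn main() {
    // Splitting shard 1 of 2 to a count of 8 would yield shards 1, 3, 5, 7.
    assert_eq!(split_children(1, 2, 8), vec![1, 3, 5, 7]);
}
```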
    6045            0 :     async fn tenant_shard_split_start_secondaries(
    6046            0 :         &self,
    6047            0 :         tenant_id: TenantId,
    6048            0 :         waiters: Vec<ReconcilerWaiter>,
    6049            0 :     ) {
    6050              :         // Wait for the initial reconcile of child shards; this creates the secondary locations
    6051            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    6052              :             // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
    6053              :             // their secondaries couldn't be attached.
    6054            0 :             tracing::warn!("Failed to reconcile after split: {e}");
    6055            0 :             return;
    6056            0 :         }
    6057              : 
    6058              :         // Take the state lock to discover the attached & secondary intents for all shards
    6059            0 :         let (attached, secondary) = {
    6060            0 :             let locked = self.inner.read().unwrap();
    6061            0 :             let mut attached = Vec::new();
    6062            0 :             let mut secondary = Vec::new();
    6063              : 
    6064            0 :             for (tenant_shard_id, shard) in
    6065            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    6066              :             {
    6067            0 :                 let Some(node_id) = shard.intent.get_attached() else {
    6068              :                     // Unexpected.  Race with a PlacementPolicy change?
    6069            0 :                     tracing::warn!(
    6070            0 :                         "No attached node on {tenant_shard_id} immediately after shard split!"
    6071              :                     );
    6072            0 :                     continue;
    6073              :                 };
    6074              : 
    6075            0 :                 let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
    6076              :                     // No secondary location.  Nothing for us to do.
    6077            0 :                     continue;
    6078              :                 };
    6079              : 
    6080            0 :                 let attached_node = locked
    6081            0 :                     .nodes
    6082            0 :                     .get(node_id)
    6083            0 :                     .expect("Pageservers may not be deleted while referenced");
    6084              : 
    6085            0 :                 let secondary_node = locked
    6086            0 :                     .nodes
    6087            0 :                     .get(secondary_node_id)
    6088            0 :                     .expect("Pageservers may not be deleted while referenced");
    6089              : 
    6090            0 :                 attached.push((*tenant_shard_id, attached_node.clone()));
    6091            0 :                 secondary.push((*tenant_shard_id, secondary_node.clone()));
    6092              :             }
    6093            0 :             (attached, secondary)
    6094              :         };
    6095              : 
    6096            0 :         if secondary.is_empty() {
    6097              :             // No secondary locations; nothing for us to do
    6098            0 :             return;
    6099            0 :         }
    6100              : 
    6101            0 :         for (_, result) in self
    6102            0 :             .tenant_for_shards_api(
    6103            0 :                 attached,
    6104            0 :                 |tenant_shard_id, client| async move {
    6105            0 :                     client.tenant_heatmap_upload(tenant_shard_id).await
    6106            0 :                 },
    6107              :                 1,
    6108              :                 1,
    6109              :                 SHORT_RECONCILE_TIMEOUT,
    6110            0 :                 &self.cancel,
    6111              :             )
    6112            0 :             .await
    6113              :         {
    6114            0 :             if let Err(e) = result {
    6115            0 :                 tracing::warn!("Error calling heatmap upload after shard split: {e}");
    6116            0 :                 return;
    6117            0 :             }
    6118              :         }
    6119              : 
    6120            0 :         for (_, result) in self
    6121            0 :             .tenant_for_shards_api(
    6122            0 :                 secondary,
    6123            0 :                 |tenant_shard_id, client| async move {
    6124            0 :                     client
    6125            0 :                         .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
    6126            0 :                         .await
    6127            0 :                 },
    6128              :                 1,
    6129              :                 1,
    6130              :                 SHORT_RECONCILE_TIMEOUT,
    6131            0 :                 &self.cancel,
    6132              :             )
    6133            0 :             .await
    6134              :         {
    6135            0 :             if let Err(e) = result {
    6136            0 :                 tracing::warn!("Error calling secondary download after shard split: {e}");
    6137            0 :                 return;
    6138            0 :             }
    6139              :         }
    6140            0 :     }
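                      : 
                      :     // Editor's note (illustrative): the warm-up above is a two-step, best-effort
                      :     // sequence:
                      :     //   1. `tenant_heatmap_upload` on each child's attached location, publishing fresh
                      :     //      layer metadata for the secondaries to read;
                      :     //   2. `tenant_secondary_download` on each secondary with a `Some(Duration::ZERO)`
                      :     //      wait, which (judging by the zero wait) starts the download without blocking
                      :     //      on its completion.
                      :     // Failures are only logged: the regular background heatmap/download interval will
                      :     // eventually warm the secondaries anyway.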
    6141              : 
    6142            0 :     pub(crate) async fn tenant_shard_split(
    6143            0 :         &self,
    6144            0 :         tenant_id: TenantId,
    6145            0 :         split_req: TenantShardSplitRequest,
    6146            0 :     ) -> Result<TenantShardSplitResponse, ApiError> {
    6147              :         // TODO: return 503 if we get stuck waiting for this lock
    6148              :         // (issue https://github.com/neondatabase/neon/issues/7108)
    6149            0 :         let _tenant_lock = trace_exclusive_lock(
    6150            0 :             &self.tenant_op_locks,
    6151            0 :             tenant_id,
    6152            0 :             TenantOperations::ShardSplit,
    6153            0 :         )
    6154            0 :         .await;
    6155              : 
    6156            0 :         let _gate = self
    6157            0 :             .reconcilers_gate
    6158            0 :             .enter()
    6159            0 :             .map_err(|_| ApiError::ShuttingDown)?;
    6160              : 
    6161              :         // Timeline imports on the pageserver side can't handle shard splits.
    6162              :         // If the tenant is importing a timeline, don't shard split it.
    6163            0 :         match self
    6164            0 :             .persistence
    6165            0 :             .is_tenant_importing_timeline(tenant_id)
    6166            0 :             .await
    6167              :         {
    6168            0 :             Ok(importing) => {
    6169            0 :                 if importing {
    6170            0 :                     return Err(ApiError::Conflict(
    6171            0 :                         "Cannot shard split during timeline import".to_string(),
    6172            0 :                     ));
    6173            0 :                 }
    6174              :             }
    6175            0 :             Err(err) => {
    6176            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6177            0 :                     "Failed to check for running imports: {err}"
    6178            0 :                 )));
    6179              :             }
    6180              :         }
    6181              : 
    6182            0 :         let new_shard_count = ShardCount::new(split_req.new_shard_count);
    6183            0 :         let new_stripe_size = split_req.new_stripe_size;
    6184              : 
    6185              :         // Validate the request and construct parameters.  This phase is fallible, but does not require
    6186              :         // rollback on errors, as it does no I/O and mutates no state.
    6187            0 :         let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
    6188            0 :             ShardSplitAction::NoOp(resp) => return Ok(resp),
    6189            0 :             ShardSplitAction::Split(params) => params,
    6190              :         };
    6191              : 
    6192              :         // Execute this split: this phase mutates state and does remote I/O on pageservers.  If it fails,
    6193              :         // we must roll back.
    6194            0 :         let r = self
    6195            0 :             .do_tenant_shard_split(tenant_id, shard_split_params)
    6196            0 :             .await;
    6197              : 
    6198            0 :         let (response, waiters) = match r {
    6199            0 :             Ok(r) => r,
    6200            0 :             Err(e) => {
    6201              :                 // Split might be part-done, we must do work to abort it.
    6202            0 :                 tracing::warn!("Enqueuing background abort of split on {tenant_id}");
    6203            0 :                 self.abort_tx
    6204            0 :                     .send(TenantShardSplitAbort {
    6205            0 :                         tenant_id,
    6206            0 :                         new_shard_count,
    6207            0 :                         new_stripe_size,
    6208            0 :                         _tenant_lock,
    6209            0 :                         _gate,
    6210            0 :                     })
    6211              :                     // Ignore send errors: they just mean we're shutting down; aborts are ephemeral, so it's fine to drop it.
    6212            0 :                     .ok();
    6213            0 :                 return Err(e);
    6214              :             }
    6215              :         };
    6216              : 
    6217              :         // The split is now complete.  As an optimization, we will trigger all the child shards to upload
    6218              :         // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
    6219              :         // for the background heatmap/download interval before secondaries get warm enough to migrate shards
    6220              :         // in [`Self::optimize_all`].
    6221            0 :         self.tenant_shard_split_start_secondaries(tenant_id, waiters)
    6222            0 :             .await;
    6223            0 :         Ok(response)
    6224            0 :     }
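                      : 
                      :     // Editor's summary sketch (not part of the original source): control flow of a
                      :     // shard split --
                      :     //
                      :     //     prepare_tenant_shard_split          validate only; no I/O, no state changes
                      :     //       -> ShardSplitAction::NoOp(resp)   retried split; return existing children
                      :     //       -> ShardSplitAction::Split(..)    proceed
                      :     //     do_tenant_shard_split               persist children, call pageservers
                      :     //       -> Err(..)                        enqueue TenantShardSplitAbort to roll back
                      :     //     tenant_shard_split_start_secondaries    best-effort secondary warm-up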
    6225              : 
    6226            0 :     fn prepare_tenant_shard_split(
    6227            0 :         &self,
    6228            0 :         tenant_id: TenantId,
    6229            0 :         split_req: TenantShardSplitRequest,
    6230            0 :     ) -> Result<ShardSplitAction, ApiError> {
    6231            0 :         fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
    6232            0 :             anyhow::anyhow!("failpoint")
    6233            0 :         )));
    6234              : 
    6235            0 :         let mut policy = None;
    6236            0 :         let mut config = None;
    6237            0 :         let mut shard_ident = None;
    6238            0 :         let mut preferred_az_id = None;
    6239              :         // Validate input, and calculate which shards we will create
    6240            0 :         let (old_shard_count, targets) =
    6241              :             {
    6242            0 :                 let locked = self.inner.read().unwrap();
    6243              : 
    6244            0 :                 let pageservers = locked.nodes.clone();
    6245              : 
    6246            0 :                 let mut targets = Vec::new();
    6247              : 
    6248              :                 // In case this is a retry, count how many already-split shards we found
    6249            0 :                 let mut children_found = Vec::new();
    6250            0 :                 let mut old_shard_count = None;
    6251              : 
    6252            0 :                 for (tenant_shard_id, shard) in
    6253            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    6254              :                 {
    6255            0 :                     match shard.shard.count.count().cmp(&split_req.new_shard_count) {
    6256              :                         Ordering::Equal => {
    6257              :                             // Already split this shard
    6258            0 :                             children_found.push(*tenant_shard_id);
    6259            0 :                             continue;
    6260              :                         }
    6261              :                         Ordering::Greater => {
    6262            0 :                             return Err(ApiError::BadRequest(anyhow::anyhow!(
    6263            0 :                             // Fall through: this shard has a lower count than requested,
    6264            0 :                             // so it is a candidate for splitting.
    6265            0 :                                 shard.shard.count.count()
    6266            0 :                             )));
    6267              :                         }
    6268            0 :                         Ordering::Less => {
    6269            0 :                             // Fall through: this shard has lower count than requested,
    6270            0 :                             // is a candidate for splitting.
    6271            0 :                         }
    6272              :                     }
    6273              : 
    6274            0 :                     match old_shard_count {
    6275            0 :                         None => old_shard_count = Some(shard.shard.count),
    6276            0 :                         Some(old_shard_count) => {
    6277            0 :                             if old_shard_count != shard.shard.count {
    6278              :                                 // We may hit this case if a caller asked for two splits to
    6279              :                                 // different sizes before the first one completed,
    6280              :                                 // e.g. 1->2 then 2->4, where the 2->4 call arrives while we
    6281              :                                 // still have a mixture of shard_count=1 and shard_count=2 shards in the map.
    6282            0 :                                 return Err(ApiError::Conflict(
    6283            0 :                                     "Cannot split, currently mid-split".to_string(),
    6284            0 :                                 ));
    6285            0 :                             }
    6286              :                         }
    6287              :                     }
    6288            0 :                     if policy.is_none() {
    6289            0 :                         policy = Some(shard.policy.clone());
    6290            0 :                     }
    6291            0 :                     if shard_ident.is_none() {
    6292            0 :                         shard_ident = Some(shard.shard);
    6293            0 :                     }
    6294            0 :                     if config.is_none() {
    6295            0 :                         config = Some(shard.config.clone());
    6296            0 :                     }
    6297            0 :                     if preferred_az_id.is_none() {
    6298            0 :                         preferred_az_id = shard.preferred_az().cloned();
    6299            0 :                     }
    6300              : 
    6301            0 :                     if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
    6302            0 :                         tracing::info!(
    6303            0 :                             "Tenant shard {} already has shard count {}",
    6304              :                             tenant_shard_id,
    6305              :                             split_req.new_shard_count
    6306              :                         );
    6307            0 :                         continue;
    6308            0 :                     }
    6309              : 
    6310            0 :                     let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
    6311            0 :                         anyhow::anyhow!("Cannot split a tenant that is not attached"),
    6312            0 :                     ))?;
    6313              : 
    6314            0 :                     let node = pageservers
    6315            0 :                         .get(&node_id)
    6316            0 :                         .expect("Pageservers may not be deleted while referenced");
    6317              : 
    6318            0 :                     targets.push(ShardSplitTarget {
    6319            0 :                         parent_id: *tenant_shard_id,
    6320            0 :                         node: node.clone(),
    6321            0 :                         child_ids: tenant_shard_id
    6322            0 :                             .split(ShardCount::new(split_req.new_shard_count)),
    6323            0 :                     });
    6324              :                 }
    6325              : 
    6326            0 :                 if targets.is_empty() {
    6327            0 :                     if children_found.len() == split_req.new_shard_count as usize {
    6328            0 :                         return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
    6329            0 :                             new_shards: children_found,
    6330            0 :                         }));
    6331              :                     } else {
    6332              :                         // No shards found to split, and no existing children found: the
    6333              :                         // tenant doesn't exist at all.
    6334            0 :                         return Err(ApiError::NotFound(
    6335            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    6336            0 :                         ));
    6337              :                     }
    6338            0 :                 }
    6339              : 
    6340            0 :                 (old_shard_count, targets)
    6341              :             };
    6342              : 
    6343              :         // unwrap safety: we would have returned above if we didn't find at least one shard to split
    6344            0 :         let old_shard_count = old_shard_count.unwrap();
    6345            0 :         let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
    6346              :             // This ShardIdentity will be used as the template for all children, so this implicitly
    6347              :             // applies the new stripe size to the children.
    6348            0 :             let mut shard_ident = shard_ident.unwrap();
    6349            0 :             if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
    6350            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6351            0 :                     "Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards",
    6352            0 :                     shard_ident.stripe_size
    6353            0 :                 )));
    6354            0 :             }
    6355              : 
    6356            0 :             shard_ident.stripe_size = new_stripe_size;
    6357            0 :             tracing::info!("Applied stripe size {}", shard_ident.stripe_size.0);
    6358            0 :             shard_ident
    6359              :         } else {
    6360            0 :             shard_ident.unwrap()
    6361              :         };
    6362            0 :         let policy = policy.unwrap();
    6363            0 :         let config = config.unwrap();
    6364              : 
    6365            0 :         Ok(ShardSplitAction::Split(Box::new(ShardSplitParams {
    6366            0 :             old_shard_count,
    6367            0 :             new_shard_count: ShardCount::new(split_req.new_shard_count),
    6368            0 :             new_stripe_size: split_req.new_stripe_size,
    6369            0 :             targets,
    6370            0 :             policy,
    6371            0 :             config,
    6372            0 :             shard_ident,
    6373            0 :             preferred_az_id,
    6374            0 :         })))
    6375            0 :     }
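                      : 
                      :     // Editor's note (illustrative): a stripe-size change is only accepted when the
                      :     // tenant currently has a single shard, because a multi-shard tenant's data is
                      :     // already laid out at the old stripe size.  Hedged examples (field values are
                      :     // hypothetical):
                      :     //
                      :     //     // Accepted for a 1-shard tenant: all children adopt the new stripe size
                      :     //     TenantShardSplitRequest { new_shard_count: 4, new_stripe_size: Some(ShardStripeSize(2048)) }
                      :     //     // Rejected (400) for a tenant already at count >= 2 with a different stripe size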
    6376              : 
    6377            0 :     async fn do_tenant_shard_split(
    6378            0 :         &self,
    6379            0 :         tenant_id: TenantId,
    6380            0 :         params: Box<ShardSplitParams>,
    6381            0 :     ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
    6382              :         // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
    6383              :         // request could occur here, deleting or mutating the tenant.  begin_shard_split checks that the
    6384              :         // parent shards exist as expected, but it would be neater to do the above pre-checks within the
    6385              :         // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
    6386              :         // (https://github.com/neondatabase/neon/issues/6676)
    6387              : 
    6388              :         let ShardSplitParams {
    6389            0 :             old_shard_count,
    6390            0 :             new_shard_count,
    6391            0 :             new_stripe_size,
    6392            0 :             mut targets,
    6393            0 :             policy,
    6394            0 :             config,
    6395            0 :             shard_ident,
    6396            0 :             preferred_az_id,
    6397            0 :         } = *params;
    6398              : 
    6399              :         // Drop any secondary locations: pageservers do not support splitting these, and in any case the
    6400              :         // end-state for a split tenant will usually be to have secondary locations on different nodes.
    6401              :         // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
    6402              :         // at the time of split.
    6403            0 :         let waiters = {
    6404            0 :             let mut locked = self.inner.write().unwrap();
    6405            0 :             let mut waiters = Vec::new();
    6406            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6407            0 :             for target in &mut targets {
    6408            0 :                 let Some(shard) = tenants.get_mut(&target.parent_id) else {
    6409              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    6410            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6411            0 :                         "Shard {} not found",
    6412            0 :                         target.parent_id
    6413            0 :                     )));
    6414              :                 };
    6415              : 
    6416            0 :                 if shard.intent.get_attached() != &Some(target.node.get_id()) {
    6417              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    6418            0 :                     return Err(ApiError::Conflict(format!(
    6419            0 :                         "Shard {} unexpectedly rescheduled during split",
    6420            0 :                         target.parent_id
    6421            0 :                     )));
    6422            0 :                 }
    6423              : 
    6424              :                 // Irrespective of PlacementPolicy, clear secondary locations from intent
    6425            0 :                 shard.intent.clear_secondary(scheduler);
    6426              : 
    6427              :                 // Run the reconciler to execute detach of secondary locations.
    6428            0 :                 if let Some(waiter) =
    6429            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    6430            0 :                 {
    6431            0 :                     waiters.push(waiter);
    6432            0 :                 }
    6433              :             }
    6434            0 :             waiters
    6435              :         };
    6436            0 :         self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
    6437              : 
    6438              :         // Before creating any new child shards in memory or on the pageservers, persist them: this
    6439              :         // enables us to ensure that we will always be able to clean up if something goes wrong.  This also
    6440              :         // acts as the protection against two concurrent attempts to split: one of them will get a database
    6441              :         // error trying to insert the child shards.
    6442            0 :         let mut child_tsps = Vec::new();
    6443            0 :         for target in &targets {
    6444            0 :             let mut this_child_tsps = Vec::new();
    6445            0 :             for child in &target.child_ids {
    6446            0 :                 let mut child_shard = shard_ident;
    6447            0 :                 child_shard.number = child.shard_number;
    6448            0 :                 child_shard.count = child.shard_count;
    6449              : 
    6450            0 :                 tracing::info!(
    6451            0 :                     "Create child shard persistence with stripe size {}",
    6452              :                     shard_ident.stripe_size.0
    6453              :                 );
    6454              : 
    6455            0 :                 this_child_tsps.push(TenantShardPersistence {
    6456            0 :                     tenant_id: child.tenant_id.to_string(),
    6457            0 :                     shard_number: child.shard_number.0 as i32,
    6458            0 :                     shard_count: child.shard_count.literal() as i32,
    6459            0 :                     shard_stripe_size: shard_ident.stripe_size.0 as i32,
    6460              :                     // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
    6461              :                     // populate the correct generation as part of its transaction, to protect us
    6462              :                     // against racing with changes in the state of the parent.
    6463            0 :                     generation: None,
    6464            0 :                     generation_pageserver: Some(target.node.get_id().0 as i64),
    6465            0 :                     placement_policy: serde_json::to_string(&policy).unwrap(),
    6466            0 :                     config: serde_json::to_string(&config).unwrap(),
    6467            0 :                     splitting: SplitState::Splitting,
    6468              : 
    6469              :                     // Scheduling policies and preferred AZ do not carry through to children
    6470            0 :                     scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    6471            0 :                         .unwrap(),
    6472            0 :                     preferred_az_id: preferred_az_id.as_ref().map(|az| az.0.clone()),
    6473              :                 });
    6474              :             }
    6475              : 
    6476            0 :             child_tsps.push((target.parent_id, this_child_tsps));
    6477              :         }
    6478              : 
    6479            0 :         if let Err(e) = self
    6480            0 :             .persistence
    6481            0 :             .begin_shard_split(old_shard_count, tenant_id, child_tsps)
    6482            0 :             .await
    6483              :         {
    6484            0 :             match e {
    6485              :                 DatabaseError::Query(diesel::result::Error::DatabaseError(
    6486              :                     DatabaseErrorKind::UniqueViolation,
    6487              :                     _,
    6488              :                 )) => {
    6489              :                     // Inserting a child shard violated a unique constraint: we raced with another call to
    6490              :                     // this function
    6491            0 :                     tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
    6492            0 :                     return Err(ApiError::Conflict("Tenant is already splitting".into()));
    6493              :                 }
    6494            0 :                 _ => return Err(ApiError::InternalServerError(e.into())),
    6495              :             }
    6496            0 :         }
    6497            0 :         fail::fail_point!("shard-split-post-begin", |_| Err(
    6498            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    6499              :         ));
    6500              : 
    6501              :         // Now that I have persisted the splitting state, apply it in-memory.  This is infallible, so
    6502              :         // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
    6503              :         // is not set in memory, then it was not persisted.
    6504              :         {
    6505            0 :             let mut locked = self.inner.write().unwrap();
    6506            0 :             for target in &targets {
    6507            0 :                 if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
    6508            0 :                     parent_shard.splitting = SplitState::Splitting;
    6509            0 :                     // Set the observed state to None to reflect that it is indeterminate once we start the
    6510            0 :                     // split operation.
    6511            0 :                     parent_shard
    6512            0 :                         .observed
    6513            0 :                         .locations
    6514            0 :                         .insert(target.node.get_id(), ObservedStateLocation { conf: None });
    6515            0 :                 }
    6516              :             }
    6517              :         }
    6518              : 
    6519              :         // TODO: issue split calls concurrently (this only matters once we're splitting
    6520              :         // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
    6521              : 
    6522              :         // HADRON: set a timeout for splitting individual shards on pageservers.
    6523              :         // Currently we do not retry, because it's not clear whether the pageserver can handle
    6524              :         // partially split shards correctly.
    6525            0 :         let shard_split_timeout =
    6526            0 :             if let Some(env::DeploymentMode::Local) = env::get_deployment_mode() {
    6527            0 :                 Duration::from_secs(30)
    6528              :             } else {
    6529            0 :                 self.config.shard_split_request_timeout
    6530              :             };
    6531            0 :         let mut http_client_builder = reqwest::ClientBuilder::new()
    6532            0 :             .pool_max_idle_per_host(0)
    6533            0 :             .timeout(shard_split_timeout);
    6534              : 
    6535            0 :         for ssl_ca_cert in &self.config.ssl_ca_certs {
    6536            0 :             http_client_builder = http_client_builder.add_root_certificate(ssl_ca_cert.clone());
    6537            0 :         }
    6538            0 :         let http_client = http_client_builder
    6539            0 :             .build()
    6540            0 :             .expect("Failed to construct HTTP client");
    6541            0 :         for target in &targets {
    6542              :             let ShardSplitTarget {
    6543            0 :                 parent_id,
    6544            0 :                 node,
    6545            0 :                 child_ids,
    6546            0 :             } = target;
    6547              : 
    6548            0 :             let client = PageserverClient::new(
    6549            0 :                 node.get_id(),
    6550            0 :                 http_client.clone(),
    6551            0 :                 node.base_url(),
    6552            0 :                 self.config.pageserver_jwt_token.as_deref(),
    6553              :             );
    6554              : 
    6555            0 :             let response = client
    6556            0 :                 .tenant_shard_split(
    6557            0 :                     *parent_id,
    6558            0 :                     TenantShardSplitRequest {
    6559            0 :                         new_shard_count: new_shard_count.literal(),
    6560            0 :                         new_stripe_size,
    6561            0 :                     },
    6562            0 :                 )
    6563            0 :                 .await
    6564            0 :                 .map_err(|e| ApiError::Conflict(format!("Failed to split {parent_id}: {e}")))?;
    6565              : 
    6566            0 :             fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
    6567            0 :                 "failpoint".to_string()
    6568            0 :             )));
    6569              : 
    6570            0 :             failpoint_support::sleep_millis_async!(
    6571              :                 "shard-split-post-remote-sleep",
    6572            0 :                 &self.reconcilers_cancel
    6573              :             );
    6574              : 
    6575            0 :             tracing::info!(
    6576            0 :                 "Split {} into {}",
    6577              :                 parent_id,
    6578            0 :                 response
    6579            0 :                     .new_shards
    6580            0 :                     .iter()
    6581            0 :                     .map(|s| format!("{s:?}"))
    6582            0 :                     .collect::<Vec<_>>()
    6583            0 :                     .join(",")
    6584              :             );
    6585              : 
    6586            0 :             if &response.new_shards != child_ids {
    6587              :                 // This should never happen: the pageserver should agree with us on how shard splits work.
    6588            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6589            0 :                     "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
    6590            0 :                     parent_id,
    6591            0 :                     response.new_shards,
    6592            0 :                     child_ids
    6593            0 :                 )));
    6594            0 :             }
    6595              :         }
    6596              : 
    6597            0 :         fail::fail_point!("shard-split-pre-complete", |_| Err(ApiError::Conflict(
    6598            0 :             "failpoint".to_string()
    6599            0 :         )));
    6600              : 
    6601            0 :         pausable_failpoint!("shard-split-pre-complete-pause");
    6602              : 
    6603              :         // TODO: if the pageserver restarted concurrently with our split API call,
    6604              :         // the actual generation of the child shard might differ from the generation
    6605              :         // we expect it to have.  In order for our in-database generation to end up
    6606              :         // correct, we should carry the child generation back in the response and apply it here
    6607              :         // in complete_shard_split (and apply the correct generation in memory)
    6608              :         // (or, we can carry generation in the request and reject the request if
    6609              :         //  it doesn't match, but that requires more retry logic on this side)
    6610              : 
    6611            0 :         self.persistence
    6612            0 :             .complete_shard_split(tenant_id, old_shard_count, new_shard_count)
    6613            0 :             .await?;
    6614              : 
    6615            0 :         fail::fail_point!("shard-split-post-complete", |_| Err(
    6616            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    6617              :         ));
    6618              : 
    6619              :         // Replace all the shards we just split with their children: this phase is infallible.
    6620            0 :         let (response, child_locations, waiters) =
    6621            0 :             self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
    6622              : 
    6623              :         // Notify all pageservers to detach and clean up the old shards, because they will no longer
    6624              :         // be needed. This is best-effort: if it fails, cleanup happens on a subsequent
    6625              :         // pageserver re-attach/startup.
    6626            0 :         let shards_to_cleanup = targets
    6627            0 :             .iter()
    6628            0 :             .map(|target| (target.parent_id, target.node.get_id()))
    6629            0 :             .collect();
    6630            0 :         self.cleanup_locations(shards_to_cleanup).await;
    6631              : 
    6632              :         // Send compute notifications for all the new shards
    6633            0 :         let mut failed_notifications = Vec::new();
    6634            0 :         for (child_id, child_ps, stripe_size) in child_locations {
    6635            0 :             if let Err(e) = self
    6636            0 :                 .compute_hook
    6637            0 :                 .notify_attach(
    6638            0 :                     compute_hook::ShardUpdate {
    6639            0 :                         tenant_shard_id: child_id,
    6640            0 :                         node_id: child_ps,
    6641            0 :                         stripe_size,
    6642            0 :                         preferred_az: preferred_az_id.as_ref().map(Cow::Borrowed),
    6643            0 :                     },
    6644            0 :                     &self.reconcilers_cancel,
    6645            0 :                 )
    6646            0 :                 .await
    6647              :             {
    6648            0 :                 tracing::warn!(
    6649            0 :                     "Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
    6650              :                     child_id,
    6651              :                     child_ps
    6652              :                 );
    6653            0 :                 failed_notifications.push(child_id);
    6654            0 :             }
    6655              :         }
    6656              : 
    6657              :         // If we failed any compute notifications, make a note to retry later.
    6658            0 :         if !failed_notifications.is_empty() {
    6659            0 :             let mut locked = self.inner.write().unwrap();
    6660            0 :             for failed in failed_notifications {
    6661            0 :                 if let Some(shard) = locked.tenants.get_mut(&failed) {
    6662            0 :                     shard.pending_compute_notification = true;
    6663            0 :                 }
    6664              :             }
    6665            0 :         }
    6666              : 
    6667            0 :         Ok((response, waiters))
    6668            0 :     }
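                      : 
                      :     // Editor's summary sketch (not part of the original source): the persistence
                      :     // protocol used by `do_tenant_shard_split` --
                      :     //   begin_shard_split       insert child rows marked SplitState::Splitting; a
                      :     //                           unique violation means a concurrent split (409)
                      :     //   pageserver split calls  any failure from here on is undone via the abort path
                      :     //   complete_shard_split    commit the database to the child shards
                      :     // The `shard-split-*` failpoints above let tests exercise each failure window.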
    6669              : 
    6670              :     /// A graceful migration: update the preferred node and let optimisation handle the migration
    6671              :     /// in the background (may take a long time, as it will fully warm up a location before cutting over).
    6672              :     ///
    6673              :     /// Our external API calls this a 'prewarm=true' migration, but internally it isn't a special prewarm step: it's
    6674              :     /// just a migration that uses the same graceful procedure as our background scheduling optimisations would use.
    6675            0 :     fn tenant_shard_migrate_with_prewarm(
    6676            0 :         &self,
    6677            0 :         migrate_req: &TenantShardMigrateRequest,
    6678            0 :         shard: &mut TenantShard,
    6679            0 :         scheduler: &mut Scheduler,
    6680            0 :         schedule_context: ScheduleContext,
    6681            0 :     ) -> Result<Option<ScheduleOptimization>, ApiError> {
    6682            0 :         shard.set_preferred_node(Some(migrate_req.node_id));
    6683              : 
    6684              :         // Generate whatever the initial change to the intent is: this could be creation of a secondary, or
    6685              :         // cutting over to an existing secondary.  Caller is responsible for validating this before applying it,
    6686              :         // e.g. by checking that the secondary is warm enough.
    6687            0 :         Ok(shard.optimize_attachment(scheduler, &schedule_context))
    6688            0 :     }
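                      : 
                      :     // Editor's note (illustrative): the graceful path deliberately does very little
                      :     // here: it records the preferred node and returns the first optimisation step, if
                      :     // any.  The optimiser then drives the rest (create a secondary, warm it up, cut
                      :     // over), and `tenant_shard_migrate` validates that first step via
                      :     // `optimize_all_validate` before applying it.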
    6689              : 
    6690              :     /// Immediate migration: directly update the intent state and kick off a reconciler
    6691            0 :     fn tenant_shard_migrate_immediate(
    6692            0 :         &self,
    6693            0 :         migrate_req: &TenantShardMigrateRequest,
    6694            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    6695            0 :         shard: &mut TenantShard,
    6696            0 :         scheduler: &mut Scheduler,
    6697            0 :     ) -> Result<Option<ReconcilerWaiter>, ApiError> {
    6698              :         // Non-graceful migration: update the intent state immediately
    6699            0 :         let old_attached = *shard.intent.get_attached();
    6700            0 :         match shard.policy {
    6701            0 :             PlacementPolicy::Attached(n) => {
    6702              :                 // If our new attached node was a secondary, it no longer should be.
    6703            0 :                 shard
    6704            0 :                     .intent
    6705            0 :                     .remove_secondary(scheduler, migrate_req.node_id);
    6706              : 
    6707            0 :                 shard
    6708            0 :                     .intent
    6709            0 :                     .set_attached(scheduler, Some(migrate_req.node_id));
    6710              : 
    6711              :                 // If we were already attached to something, demote that to a secondary
    6712            0 :                 if let Some(old_attached) = old_attached {
    6713            0 :                     if n > 0 {
    6714              :                         // Remove other secondaries to make room for the location we'll demote
    6715            0 :                         while shard.intent.get_secondary().len() >= n {
    6716            0 :                             shard.intent.pop_secondary(scheduler);
    6717            0 :                         }
    6718              : 
    6719            0 :                         shard.intent.push_secondary(scheduler, old_attached);
    6720            0 :                     }
    6721            0 :                 }
    6722              :             }
    6723            0 :             PlacementPolicy::Secondary => {
    6724            0 :                 shard.intent.clear(scheduler);
    6725            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    6726            0 :             }
    6727              :             PlacementPolicy::Detached => {
    6728            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6729            0 :                     "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
    6730            0 :                 )));
    6731              :             }
    6732              :         }
    6733              : 
    6734            0 :         tracing::info!("Migrating: new intent {:?}", shard.intent);
    6735            0 :         shard.sequence = shard.sequence.next();
    6736            0 :         shard.set_preferred_node(None); // Abort any in-flight graceful migration
    6737            0 :         Ok(self.maybe_configured_reconcile_shard(
    6738            0 :             shard,
    6739            0 :             nodes,
    6740            0 :             (&migrate_req.migration_config).into(),
    6741            0 :         ))
    6742            0 :     }
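                      : 
                      :     // Editor's worked example (hedged): immediate migration under
                      :     // PlacementPolicy::Attached(1), starting from attached=A, secondary=[B] and
                      :     // migrating to B:
                      :     //   - B is removed from the secondaries (it is about to become the attachment);
                      :     //   - the attachment is set to B;
                      :     //   - A, the old attachment, is demoted into the single secondary slot;
                      :     // leaving attached=B, secondary=[A].  A reconcile is then kicked off using the
                      :     // request's migration config.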
    6743              : 
    6744            0 :     pub(crate) async fn tenant_shard_migrate(
    6745            0 :         &self,
    6746            0 :         tenant_shard_id: TenantShardId,
    6747            0 :         migrate_req: TenantShardMigrateRequest,
    6748            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    6749              :         // Depending on whether the migration actually changes anything, and whether it's graceful or
    6750              :         // immediate, we might get a different outcome to handle.
    6751              :         enum MigrationOutcome {
    6752              :             Optimization(Option<ScheduleOptimization>),
    6753              :             Reconcile(Option<ReconcilerWaiter>),
    6754              :         }
    6755              : 
    6756            0 :         let outcome = {
    6757            0 :             let mut locked = self.inner.write().unwrap();
    6758            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6759              : 
    6760            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    6761            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6762            0 :                     "Node {} not found",
    6763            0 :                     migrate_req.node_id
    6764            0 :                 )));
    6765              :             };
    6766              : 
    6767              :             // Migration to an unavailable node requires the force flag.
    6768            0 :             if !node.is_available() {
    6769            0 :                 if migrate_req.migration_config.override_scheduler {
    6770              :                     // Warn but proceed: the caller may intend to manually adjust the placement of
    6771              :                     // a shard even if the node is down, e.g. if intervening during an incident.
    6772            0 :                     tracing::warn!("Forcibly migrating to unavailable node {node}");
    6773              :                 } else {
    6774            0 :                     tracing::warn!("Node {node} is unavailable, refusing migration");
    6775            0 :                     return Err(ApiError::PreconditionFailed(
    6776            0 :                         format!("Node {node} is unavailable").into_boxed_str(),
    6777            0 :                     ));
    6778              :                 }
    6779            0 :             }
    6780              : 
    6781              :             // Calculate the ScheduleContext for this tenant
    6782            0 :             let mut schedule_context = ScheduleContext::default();
    6783            0 :             for (_shard_id, shard) in
    6784            0 :                 tenants.range(TenantShardId::tenant_range(tenant_shard_id.tenant_id))
    6785            0 :             {
    6786            0 :                 schedule_context.avoid(&shard.intent.all_pageservers());
    6787            0 :             }
    6788              : 
    6789              :             // Look up the specific shard we will migrate
    6790            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6791            0 :                 return Err(ApiError::NotFound(
    6792            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6793            0 :                 ));
    6794              :             };
    6795              : 
    6796              :             // Migration to a node with an unfavorable scheduling score requires the force flag, because
    6797              :             // the shard might just be migrated back by the optimiser.
    6798            0 :             if let Some(better_node) = shard.find_better_location::<AttachedShardTag>(
    6799            0 :                 scheduler,
    6800            0 :                 &schedule_context,
    6801            0 :                 migrate_req.node_id,
    6802            0 :                 &[],
    6803            0 :             ) {
    6804            0 :                 if !migrate_req.migration_config.override_scheduler {
    6805            0 :                     return Err(ApiError::PreconditionFailed(
    6806            0 :                         "Migration to a worse-scoring node".into(),
    6807            0 :                     ));
    6808              :                 } else {
    6809            0 :                     tracing::info!(
    6810            0 :                         "Migrating to a worse-scoring node {} (optimiser would prefer {better_node})",
    6811              :                         migrate_req.node_id
    6812              :                     );
    6813              :                 }
    6814            0 :             }
    6815              : 
    6816            0 :             if let Some(origin_node_id) = migrate_req.origin_node_id {
    6817            0 :                 if shard.intent.get_attached() != &Some(origin_node_id) {
    6818            0 :                     return Err(ApiError::PreconditionFailed(
    6819            0 :                         format!(
    6820            0 :                             "Migration expected to originate from {} but shard is on {:?}",
    6821            0 :                             origin_node_id,
    6822            0 :                             shard.intent.get_attached()
    6823            0 :                         )
    6824            0 :                         .into(),
    6825            0 :                     ));
    6826            0 :                 }
    6827            0 :             }
    6828              : 
    6829            0 :             if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    6830              :                 // No-op case: we will still proceed to wait for reconciliation in case it is
    6831              :                 // incomplete from an earlier update to the intent.
    6832            0 :                 tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
    6833              : 
    6834              :                 // An instruction to migrate to the currently attached node should
    6835              :                 // cancel any pending graceful migration.
    6836            0 :                 shard.set_preferred_node(None);
    6837              : 
    6838            0 :                 MigrationOutcome::Reconcile(self.maybe_configured_reconcile_shard(
    6839            0 :                     shard,
    6840            0 :                     nodes,
    6841            0 :                     (&migrate_req.migration_config).into(),
    6842            0 :                 ))
    6843            0 :             } else if migrate_req.migration_config.prewarm {
    6844            0 :                 MigrationOutcome::Optimization(self.tenant_shard_migrate_with_prewarm(
    6845            0 :                     &migrate_req,
    6846            0 :                     shard,
    6847            0 :                     scheduler,
    6848            0 :                     schedule_context,
    6849            0 :                 )?)
    6850              :             } else {
    6851            0 :                 MigrationOutcome::Reconcile(self.tenant_shard_migrate_immediate(
    6852            0 :                     &migrate_req,
    6853            0 :                     nodes,
    6854            0 :                     shard,
    6855            0 :                     scheduler,
    6856            0 :                 )?)
    6857              :             }
    6858              :         };
    6859              : 
    6860              :         // We may need to validate and apply an optimisation, or we may just need to retrieve a reconcile waiter.
    6861            0 :         let waiter = match outcome {
    6862            0 :             MigrationOutcome::Optimization(Some(optimization)) => {
    6863              :                 // Validate and apply the optimization -- this would happen anyway in the background reconcile
    6864              :                 // loop, but we might as well do it more promptly, as this is a direct external request.
    6865            0 :                 let mut validated = self
    6866            0 :                     .optimize_all_validate(vec![(tenant_shard_id, optimization)])
    6867            0 :                     .await;
    6868            0 :                 if let Some((_shard_id, optimization)) = validated.pop() {
    6869            0 :                     let mut locked = self.inner.write().unwrap();
    6870            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    6871            0 :                     let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6872              :                         // Rare but possible: tenant is removed between generating optimisation and validating it.
    6873            0 :                         return Err(ApiError::NotFound(
    6874            0 :                             anyhow::anyhow!("Tenant shard not found").into(),
    6875            0 :                         ));
    6876              :                     };
    6877              : 
    6878            0 :                     if !shard.apply_optimization(scheduler, optimization) {
    6879              :                         // This can happen but is unusual enough to warn on: something else changed in the shard that made the optimisation stale
    6880              :                         // and therefore not applied.
    6881            0 :                         tracing::warn!(
    6882            0 :                             "Schedule optimisation generated during graceful migration was not applied, shard changed?"
    6883              :                         );
    6884            0 :                     }
    6885            0 :                     self.maybe_configured_reconcile_shard(
    6886            0 :                         shard,
    6887            0 :                         nodes,
    6888            0 :                         (&migrate_req.migration_config).into(),
    6889              :                     )
    6890              :                 } else {
    6891            0 :                     None
    6892              :                 }
    6893              :             }
    6894            0 :             MigrationOutcome::Optimization(None) => None,
    6895            0 :             MigrationOutcome::Reconcile(waiter) => waiter,
    6896              :         };
    6897              : 
    6898              :         // Finally, wait for any reconcile we started to complete.  In the case of immediate-mode migrations to cold
    6899              :         // locations, this has a good chance of timing out.
    6900            0 :         if let Some(waiter) = waiter {
    6901            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    6902              :         } else {
    6903            0 :             tracing::info!("Migration is a no-op");
    6904              :         }
    6905              : 
    6906            0 :         Ok(TenantShardMigrateResponse {})
    6907            0 :     }
    6908              : 
    6909            0 :     pub(crate) async fn tenant_shard_migrate_secondary(
    6910            0 :         &self,
    6911            0 :         tenant_shard_id: TenantShardId,
    6912            0 :         migrate_req: TenantShardMigrateRequest,
    6913            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    6914            0 :         let waiter = {
    6915            0 :             let mut locked = self.inner.write().unwrap();
    6916            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6917              : 
    6918            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    6919            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6920            0 :                     "Node {} not found",
    6921            0 :                     migrate_req.node_id
    6922            0 :                 )));
    6923              :             };
    6924              : 
    6925            0 :             if !node.is_available() {
    6926              :                 // Warn but proceed: the caller may intend to manually adjust the placement of
    6927              :                 // a shard even if the node is down, e.g. if intervening during an incident.
    6928            0 :                 tracing::warn!("Migrating to unavailable node {node}");
    6929            0 :             }
    6930              : 
    6931            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6932            0 :                 return Err(ApiError::NotFound(
    6933            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6934            0 :                 ));
    6935              :             };
    6936              : 
    6937            0 :             if shard.intent.get_secondary().len() == 1
    6938            0 :                 && shard.intent.get_secondary()[0] == migrate_req.node_id
    6939              :             {
    6940            0 :                 tracing::info!(
    6941            0 :                     "Migrating secondary to {node}: intent is unchanged {:?}",
    6942              :                     shard.intent
    6943              :                 );
    6944            0 :             } else if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    6945            0 :                 tracing::info!(
    6946            0 :                     "Migrating secondary to {node}: already attached where we were asked to create a secondary"
    6947              :                 );
    6948              :             } else {
    6949            0 :                 let old_secondaries = shard.intent.get_secondary().clone();
    6950            0 :                 for secondary in old_secondaries {
    6951            0 :                     shard.intent.remove_secondary(scheduler, secondary);
    6952            0 :                 }
    6953              : 
    6954            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    6955            0 :                 shard.sequence = shard.sequence.next();
    6956            0 :                 tracing::info!(
    6957            0 :                     "Migrating secondary to {node}: new intent {:?}",
    6958              :                     shard.intent
    6959              :                 );
    6960              :             }
    6961              : 
    6962            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    6963              :         };
    6964              : 
    6965            0 :         if let Some(waiter) = waiter {
    6966            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    6967              :         } else {
    6968            0 :             tracing::info!("Migration is a no-op");
    6969              :         }
    6970              : 
    6971            0 :         Ok(TenantShardMigrateResponse {})
    6972            0 :     }
    6973              : 
    6974              :     /// 'cancel' in this context means cancel any ongoing reconcile
    6975            0 :     pub(crate) async fn tenant_shard_cancel_reconcile(
    6976            0 :         &self,
    6977            0 :         tenant_shard_id: TenantShardId,
    6978            0 :     ) -> Result<(), ApiError> {
    6979              :         // Take state lock and fire the cancellation token, after which we drop lock and wait for any ongoing reconcile to complete
    6980            0 :         let waiter = {
    6981            0 :             let locked = self.inner.write().unwrap();
    6982            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    6983            0 :                 return Err(ApiError::NotFound(
    6984            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6985            0 :                 ));
    6986              :             };
    6987              : 
    6988            0 :             let waiter = shard.get_waiter();
    6989            0 :             match waiter {
    6990              :                 None => {
    6991            0 :                     tracing::info!("Shard does not have an ongoing Reconciler");
    6992            0 :                     return Ok(());
    6993              :                 }
    6994            0 :                 Some(waiter) => {
    6995            0 :                     tracing::info!("Cancelling Reconciler");
    6996            0 :                     shard.cancel_reconciler();
    6997            0 :                     waiter
    6998              :                 }
    6999              :             }
    7000              :         };
    7001              : 
    7002              :         // Cancellation should be prompt.  If this fails we have still done our job of firing the
    7003              :         // cancellation token, but by returning an ApiError we will indicate to the caller that
    7004              :         // the Reconciler is misbehaving and not respecting the cancellation token.
    7005            0 :         self.await_waiters(vec![waiter], SHORT_RECONCILE_TIMEOUT)
    7006            0 :             .await?;
    7007              : 
    7008            0 :         Ok(())
    7009            0 :     }
    7010              : 
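A generic sketch of the cancel-then-wait shape used by tenant_shard_cancel_reconcile above, assuming tokio and tokio-util; cancel_and_wait, the oneshot channel, and the 5s bound are illustrative stand-ins for the shard's cancellation token, the reconcile waiter, and SHORT_RECONCILE_TIMEOUT:

    use std::time::Duration;
    use tokio_util::sync::CancellationToken;

    async fn cancel_and_wait(
        token: CancellationToken,
        done: tokio::sync::oneshot::Receiver<()>,
    ) -> Result<(), &'static str> {
        // Fire the token first: that part of the job is done even if the wait times out.
        token.cancel();
        // Bound the wait so a task that ignores cancellation surfaces as an error.
        tokio::time::timeout(Duration::from_secs(5), done)
            .await
            .map_err(|_| "task did not stop promptly after cancellation")?
            .map_err(|_| "task dropped its completion channel")
    }
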
    7011              :     /// This is for debug/support only: we simply drop all state for a tenant, without
    7012              :     /// detaching or deleting it on pageservers.
    7013            0 :     pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
    7014            0 :         self.persistence.delete_tenant(tenant_id).await?;
    7015              : 
    7016            0 :         let mut locked = self.inner.write().unwrap();
    7017            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    7018            0 :         let mut shards = Vec::new();
    7019            0 :         for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
    7020            0 :             shards.push(*tenant_shard_id);
    7021            0 :         }
    7022              : 
    7023            0 :         for shard_id in shards {
    7024            0 :             if let Some(mut shard) = tenants.remove(&shard_id) {
    7025            0 :                 shard.intent.clear(scheduler);
    7026            0 :             }
    7027              :         }
    7028              : 
    7029            0 :         Ok(())
    7030            0 :     }
    7031              : 
    7032              :     /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
    7033              :     /// tenant with a very high generation number so that it will see the existing data.
    7034              :     /// It does not create timelines on safekeepers, because they might already exist on some
    7035              :     /// safekeeper set. So, the timelines are not storcon-managed after the import.
    7036            0 :     pub(crate) async fn tenant_import(
    7037            0 :         &self,
    7038            0 :         tenant_id: TenantId,
    7039            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    7040              :         // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
    7041            0 :         let maybe_node = {
    7042            0 :             self.inner
    7043            0 :                 .read()
    7044            0 :                 .unwrap()
    7045            0 :                 .nodes
    7046            0 :                 .values()
    7047            0 :                 .find(|n| n.is_available())
    7048            0 :                 .cloned()
    7049              :         };
    7050            0 :         let Some(node) = maybe_node else {
    7051            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
    7052              :         };
    7053              : 
    7054            0 :         let client = PageserverClient::new(
    7055            0 :             node.get_id(),
    7056            0 :             self.http_client.clone(),
    7057            0 :             node.base_url(),
    7058            0 :             self.config.pageserver_jwt_token.as_deref(),
    7059              :         );
    7060              : 
    7061            0 :         let scan_result = client
    7062            0 :             .tenant_scan_remote_storage(tenant_id)
    7063            0 :             .await
    7064            0 :             .map_err(|e| passthrough_api_error(&node, e))?;
    7065              : 
    7066              :         // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
    7067            0 :         let Some(shard_count) = scan_result
    7068            0 :             .shards
    7069            0 :             .iter()
    7070            0 :             .map(|s| s.tenant_shard_id.shard_count)
    7071            0 :             .max()
    7072              :         else {
    7073            0 :             return Err(ApiError::NotFound(
    7074            0 :                 anyhow::anyhow!("No shards found").into(),
    7075            0 :             ));
    7076              :         };
    7077              : 
    7078              :         // Ideally we would set each newly imported shard's generation independently, but for correctness it is
    7079              :         // sufficient to use the highest generation found across the scanned shards.
    7080            0 :         let generation = scan_result
    7081            0 :             .shards
    7082            0 :             .iter()
    7083            0 :             .map(|s| s.generation)
    7084            0 :             .max()
    7085            0 :             .expect("We already validated >0 shards");
    7086              : 
    7087              :         // Find the tenant's stripe size. This wasn't always persisted in the tenant manifest, so
    7088              :         // fall back to the original default stripe size of 32768 (256 MB) if it's not specified.
    7089              :         const ORIGINAL_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(32768);
    7090            0 :         let stripe_size = scan_result
    7091            0 :             .shards
    7092            0 :             .iter()
    7093            0 :             .find(|s| s.tenant_shard_id.shard_count == shard_count && s.generation == generation)
    7094            0 :             .expect("we validated >0 shards above")
    7095              :             .stripe_size
    7096            0 :             .unwrap_or_else(|| {
    7097            0 :                 if shard_count.count() > 1 {
    7098            0 :                     warn!("unknown stripe size, assuming {ORIGINAL_STRIPE_SIZE}");
    7099            0 :                 }
    7100            0 :                 ORIGINAL_STRIPE_SIZE
    7101            0 :             });
    7102              : 
    7103            0 :         let (response, waiters) = self
    7104            0 :             .do_tenant_create(TenantCreateRequest {
    7105            0 :                 new_tenant_id: TenantShardId::unsharded(tenant_id),
    7106            0 :                 generation,
    7107            0 : 
    7108            0 :                 shard_parameters: ShardParameters {
    7109            0 :                     count: shard_count,
    7110            0 :                     stripe_size,
    7111            0 :                 },
    7112            0 :                 placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
    7113            0 :                 config: TenantConfig::default(),
    7114            0 :             })
    7115            0 :             .await?;
    7116              : 
    7117            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    7118              :             // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
    7119              :             // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
    7120              :             // reconcile, as reconciliation includes notifying compute.
    7121            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
    7122            0 :         }
    7123              : 
    7124            0 :         Ok(response)
    7125            0 :     }
    7126              : 
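A minimal sketch of the parameter selection in tenant_import above, with a hypothetical ScannedShard standing in for the remote-storage scan entries: a post-split tenant can leave several shard counts behind, so the import keys off the highest shard count and the highest generation seen.

    struct ScannedShard {
        shard_count: u8,
        generation: u32,
    }

    fn pick_import_params(shards: &[ScannedShard]) -> Option<(u8, u32)> {
        // `?` handles the empty-scan case, which the real code turns into NotFound.
        let shard_count = shards.iter().map(|s| s.shard_count).max()?;
        let generation = shards.iter().map(|s| s.generation).max()?;
        Some((shard_count, generation))
    }
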
    7127              :     /// For debug/support: a full JSON dump of TenantShards.  Returns a response so that
    7128              :     /// we don't have to make TenantShard clonable in the return path.
    7129            0 :     pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    7130            0 :         let serialized = {
    7131            0 :             let locked = self.inner.read().unwrap();
    7132            0 :             let result = locked.tenants.values().collect::<Vec<_>>();
    7133            0 :             serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
    7134              :         };
    7135              : 
    7136            0 :         hyper::Response::builder()
    7137            0 :             .status(hyper::StatusCode::OK)
    7138            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    7139            0 :             .body(hyper::Body::from(serialized))
    7140            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    7141            0 :     }
    7142              : 
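The response construction in tenants_dump (and in scheduler_dump below) follows one pattern; a small helper capturing it, assuming a hyper 0.14-style Body — json_response itself is not part of this codebase:

    fn json_response<T: serde::Serialize>(
        value: &T,
    ) -> Result<hyper::Response<hyper::Body>, serde_json::Error> {
        // Serialize outside the builder so a serde failure is reportable on its own.
        let body = serde_json::to_string(value)?;
        Ok(hyper::Response::builder()
            .status(hyper::StatusCode::OK)
            .header(hyper::header::CONTENT_TYPE, "application/json")
            .body(hyper::Body::from(body))
            .expect("building a response from static parts cannot fail"))
    }
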
    7143              :     /// Check the consistency of in-memory state vs. persistent state, and check that the
    7144              :     /// scheduler's statistics are up to date.
    7145              :     ///
    7146              :     /// These consistency checks expect an **idle** system.  If changes are going on while
    7147              :     /// we run, then we can falsely indicate a consistency issue.  This is sufficient for end-of-test
    7148              :     /// checks, but not suitable for running continuously in the background in the field.
    7149            0 :     pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
    7150            0 :         let (mut expect_nodes, mut expect_shards) = {
    7151            0 :             let locked = self.inner.read().unwrap();
    7152              : 
    7153            0 :             locked
    7154            0 :                 .scheduler
    7155            0 :                 .consistency_check(locked.nodes.values(), locked.tenants.values())
    7156            0 :                 .context("Scheduler checks")
    7157            0 :                 .map_err(ApiError::InternalServerError)?;
    7158              : 
    7159            0 :             let expect_nodes = locked
    7160            0 :                 .nodes
    7161            0 :                 .values()
    7162            0 :                 .map(|n| n.to_persistent())
    7163            0 :                 .collect::<Vec<_>>();
    7164              : 
    7165            0 :             let expect_shards = locked
    7166            0 :                 .tenants
    7167            0 :                 .values()
    7168            0 :                 .map(|t| t.to_persistent())
    7169            0 :                 .collect::<Vec<_>>();
    7170              : 
    7171              :             // This method can only validate the state of an idle system: if a reconcile is in
    7172              :             // progress, fail out early to avoid giving false errors on state that won't match
    7173              :             // between database and memory until a ReconcileResult is processed.
    7174            0 :             for t in locked.tenants.values() {
    7175            0 :                 if t.reconciler.is_some() {
    7176            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7177            0 :                         "Shard {} reconciliation in progress",
    7178            0 :                         t.tenant_shard_id
    7179            0 :                     )));
    7180            0 :                 }
    7181              :             }
    7182              : 
    7183            0 :             (expect_nodes, expect_shards)
    7184              :         };
    7185              : 
    7186            0 :         let mut nodes = self.persistence.list_nodes().await?;
    7187            0 :         expect_nodes.sort_by_key(|n| n.node_id);
    7188            0 :         nodes.sort_by_key(|n| n.node_id);
    7189              : 
    7190              :         // Errors relating to nodes are deferred so that we don't skip the shard checks below if we have a node error
    7191            0 :         let node_result = if nodes != expect_nodes {
    7192            0 :             tracing::error!("Consistency check failed on nodes.");
    7193            0 :             tracing::error!(
    7194            0 :                 "Nodes in memory: {}",
    7195            0 :                 serde_json::to_string(&expect_nodes)
    7196            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7197              :             );
    7198            0 :             tracing::error!(
    7199            0 :                 "Nodes in database: {}",
    7200            0 :                 serde_json::to_string(&nodes)
    7201            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7202              :             );
    7203            0 :             Err(ApiError::InternalServerError(anyhow::anyhow!(
    7204            0 :                 "Node consistency failure"
    7205            0 :             )))
    7206              :         } else {
    7207            0 :             Ok(())
    7208              :         };
    7209              : 
    7210            0 :         let mut persistent_shards = self.persistence.load_active_tenant_shards().await?;
    7211            0 :         persistent_shards
    7212            0 :             .sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    7213              : 
    7214            0 :         expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    7215              : 
    7216              :         // Because the JSON contents of persistent tenants might disagree with the fields in the current `TenantConfig`
    7217              :         // definition, we will do an encode/decode cycle to ensure any legacy fields are dropped and any new
    7218              :         // fields are added, before doing a comparison.
    7219            0 :         for tsp in &mut persistent_shards {
    7220            0 :             let config: TenantConfig = serde_json::from_str(&tsp.config)
    7221            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?;
    7222            0 :             tsp.config = serde_json::to_string(&config).expect("Encoding config is infallible");
    7223              :         }
    7224              : 
    7225            0 :         if persistent_shards != expect_shards {
    7226            0 :             tracing::error!("Consistency check failed on shards.");
    7227              : 
    7228            0 :             tracing::error!(
    7229            0 :                 "Shards in memory: {}",
    7230            0 :                 serde_json::to_string(&expect_shards)
    7231            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7232              :             );
    7233            0 :             tracing::error!(
    7234            0 :                 "Shards in database: {}",
    7235            0 :                 serde_json::to_string(&persistent_shards)
    7236            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7237              :             );
    7238              : 
    7239              :             // The full dump log lines above are useful in testing, but in the field Grafana will
    7240              :             // usually just drop them because they're so large. So we also do some explicit logging
    7241              :             // of just the diffs.
    7242            0 :             let persistent_shards = persistent_shards
    7243            0 :                 .into_iter()
    7244            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    7245            0 :                 .collect::<HashMap<_, _>>();
    7246            0 :             let expect_shards = expect_shards
    7247            0 :                 .into_iter()
    7248            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    7249            0 :                 .collect::<HashMap<_, _>>();
    7250            0 :             for (tenant_shard_id, persistent_tsp) in &persistent_shards {
    7251            0 :                 match expect_shards.get(tenant_shard_id) {
    7252              :                     None => {
    7253            0 :                         tracing::error!(
    7254            0 :                             "Shard {} found in database but not in memory",
    7255              :                             tenant_shard_id
    7256              :                         );
    7257              :                     }
    7258            0 :                     Some(expect_tsp) => {
    7259            0 :                         if expect_tsp != persistent_tsp {
    7260            0 :                             tracing::error!(
    7261            0 :                                 "Shard {} is inconsistent.  In memory: {}, database has: {}",
    7262              :                                 tenant_shard_id,
    7263            0 :                                 serde_json::to_string(expect_tsp).unwrap(),
    7264            0 :                                 serde_json::to_string(&persistent_tsp).unwrap()
    7265              :                             );
    7266            0 :                         }
    7267              :                     }
    7268              :                 }
    7269              :             }
    7270              : 
    7271              :             // Having already logged any differences, log any shards that simply aren't present in the database
    7272            0 :             for (tenant_shard_id, memory_tsp) in &expect_shards {
    7273            0 :                 if !persistent_shards.contains_key(tenant_shard_id) {
    7274            0 :                     tracing::error!(
    7275            0 :                         "Shard {} found in memory but not in database: {}",
    7276              :                         tenant_shard_id,
    7277            0 :                         serde_json::to_string(memory_tsp)
    7278            0 :                             .map_err(|e| ApiError::InternalServerError(e.into()))?
    7279              :                     );
    7280            0 :                 }
    7281              :             }
    7282              : 
    7283            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7284            0 :                 "Shard consistency failure"
    7285            0 :             )));
    7286            0 :         }
    7287              : 
    7288            0 :         node_result
    7289            0 :     }
    7290              : 
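A minimal sketch of the encode/decode normalization used in consistency_check above, with a hypothetical CurrentConfig standing in for TenantConfig: round-tripping through serde drops legacy fields (serde_json ignores unknown fields by default) and materializes defaults for newly added ones, so both sides compare under one schema.

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    #[serde(default)]
    struct CurrentConfig {
        // Hypothetical newly added field: absent in old rows, defaulted on decode.
        compaction_threshold: u32,
    }

    impl Default for CurrentConfig {
        fn default() -> Self {
            Self { compaction_threshold: 10 }
        }
    }

    fn normalize(raw: &str) -> serde_json::Result<String> {
        let parsed: CurrentConfig = serde_json::from_str(raw)?;
        serde_json::to_string(&parsed)
    }
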
    7291              :     /// For debug/support: a JSON dump of the [`Scheduler`].  Returns a response so that
    7292              :     /// we don't have to make the scheduler clonable in the return path.
    7293            0 :     pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    7294            0 :         let serialized = {
    7295            0 :             let locked = self.inner.read().unwrap();
    7296            0 :             serde_json::to_string(&locked.scheduler)
    7297            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?
    7298              :         };
    7299              : 
    7300            0 :         hyper::Response::builder()
    7301            0 :             .status(hyper::StatusCode::OK)
    7302            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    7303            0 :             .body(hyper::Body::from(serialized))
    7304            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    7305            0 :     }
    7306              : 
    7307              :     /// This is for debug/support only: we simply drop all state for a node, without
    7308              :     /// detaching or deleting its tenants on pageservers.  We do not try to re-schedule any
    7309              :     /// tenants that were on this node.
    7310            0 :     pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
    7311            0 :         self.persistence.set_tombstone(node_id).await?;
    7312              : 
    7313            0 :         let mut locked = self.inner.write().unwrap();
    7314              : 
    7315            0 :         for shard in locked.tenants.values_mut() {
    7316            0 :             shard.deref_node(node_id);
    7317            0 :             shard.observed.locations.remove(&node_id);
    7318            0 :         }
    7319              : 
    7320            0 :         let mut nodes = (*locked.nodes).clone();
    7321            0 :         nodes.remove(&node_id);
    7322            0 :         locked.nodes = Arc::new(nodes);
    7323            0 :         metrics::METRICS_REGISTRY
    7324            0 :             .metrics_group
    7325            0 :             .storage_controller_pageserver_nodes
    7326            0 :             .set(locked.nodes.len() as i64);
    7327            0 :         metrics::METRICS_REGISTRY
    7328            0 :             .metrics_group
    7329            0 :             .storage_controller_https_pageserver_nodes
    7330            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7331              : 
    7332            0 :         locked.scheduler.node_remove(node_id);
    7333              : 
    7334            0 :         Ok(())
    7335            0 :     }
    7336              : 
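node_drop updates the shared node map copy-on-write; a reduced sketch of that shape, with u64/String as stand-in key and value types:

    use std::collections::HashMap;
    use std::sync::Arc;

    // Readers clone the Arc and keep a consistent snapshot; the writer clones
    // the map itself, mutates the clone, and publishes it behind a fresh Arc.
    fn remove_entry(nodes: &mut Arc<HashMap<u64, String>>, node_id: u64) {
        let mut next = (**nodes).clone();
        next.remove(&node_id);
        *nodes = Arc::new(next);
    }
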
    7337              :     /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense
    7338              :     /// that we don't leave any bad state behind in the storage controller, but unclean
    7339              :     /// in the sense that we are not carefully draining the node.
    7340            0 :     pub(crate) async fn node_delete_old(&self, node_id: NodeId) -> Result<(), ApiError> {
    7341            0 :         let _node_lock =
    7342            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
    7343              : 
    7344              :         // 1. Atomically update in-memory state:
    7345              :         //    - set the scheduling state to Pause to make subsequent scheduling ops skip it
    7346              :         //    - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
    7347              :         //    - drop the node from the main nodes map, so that when running reconciles complete they do not
    7348              :         //      re-insert references to this node into the ObservedState of shards
    7349              :         //    - drop the node from the scheduler
    7350              :         {
    7351            0 :             let mut locked = self.inner.write().unwrap();
    7352            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    7353              : 
    7354              :             {
    7355            0 :                 let mut nodes_mut = (*nodes).deref().clone();
    7356            0 :                 match nodes_mut.get_mut(&node_id) {
    7357            0 :                     Some(node) => {
    7358            0 :                         // We do not bother setting this in the database, because we're about to delete the row anyway, and
    7359            0 :                         // if we crash it would not be desirable to leave the node paused after a restart.
    7360            0 :                         node.set_scheduling(NodeSchedulingPolicy::Pause);
    7361            0 :                     }
    7362              :                     None => {
    7363            0 :                         tracing::info!(
    7364            0 :                             "Node not found: presuming this is a retry and returning success"
    7365              :                         );
    7366            0 :                         return Ok(());
    7367              :                     }
    7368              :                 }
    7369              : 
    7370            0 :                 *nodes = Arc::new(nodes_mut);
    7371              :             }
    7372              : 
    7373            0 :             for (_tenant_id, mut schedule_context, shards) in
    7374            0 :                 TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal)
    7375              :             {
    7376            0 :                 for shard in shards {
    7377            0 :                     if shard.deref_node(node_id) {
    7378            0 :                         if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
    7379              :                             // TODO: implement force flag to remove a node even if we can't reschedule
    7380              :                             // a tenant
    7381            0 :                             tracing::error!(
    7382            0 :                                 "Refusing to delete node, shard {} can't be rescheduled: {e}",
    7383              :                                 shard.tenant_shard_id
    7384              :                             );
    7385            0 :                             return Err(e.into());
    7386              :                         } else {
    7387            0 :                             tracing::info!(
    7388            0 :                                 "Rescheduled shard {} away from node during deletion",
    7389              :                                 shard.tenant_shard_id
    7390              :                             )
    7391              :                         }
    7392              : 
    7393            0 :                         self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    7394            0 :                     }
    7395              : 
    7396              :                     // Here we remove an existing observed location for the node we're removing, and it will
    7397              :                     // not be re-added by a reconciler's completion because we filter out removed nodes in
    7398              :                     // process_result.
    7399              :                     //
    7400              :                     // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
    7401              :                     // means any reconciles we spawned will know about the node we're deleting, enabling them
    7402              :                     // to do live migrations if it's still online.
    7403            0 :                     shard.observed.locations.remove(&node_id);
    7404              :                 }
    7405              :             }
    7406              : 
    7407            0 :             scheduler.node_remove(node_id);
    7408              : 
    7409              :             {
    7410            0 :                 let mut nodes_mut = (**nodes).clone();
    7411            0 :                 if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    7412            0 :                     // Ensure that any reconciler holding an Arc<> to this node will
    7413            0 :                     // drop out when trying to RPC to it (setting Offline state sets the
    7414            0 :                     // cancellation token on the Node object).
    7415            0 :                     removed_node.set_availability(NodeAvailability::Offline);
    7416            0 :                 }
    7417            0 :                 *nodes = Arc::new(nodes_mut);
    7418            0 :                 metrics::METRICS_REGISTRY
    7419            0 :                     .metrics_group
    7420            0 :                     .storage_controller_pageserver_nodes
    7421            0 :                     .set(nodes.len() as i64);
    7422            0 :                 metrics::METRICS_REGISTRY
    7423            0 :                     .metrics_group
    7424            0 :                     .storage_controller_https_pageserver_nodes
    7425            0 :                     .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7426              :             }
    7427              :         }
    7428              : 
    7429              :         // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
    7430              :         // the removed node, as this column means "The pageserver to which this generation was issued", and
    7431              :         // their generations won't get updated until the reconcilers moving them away from this node complete.
    7432              :         // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
    7433              :         // that exists.
    7434              : 
    7435              :         // 2. Set a tombstone for the node in the database, preventing it from
    7436              :         //    registering again.
    7437            0 :         tracing::info!("Deleting node from database");
    7438            0 :         self.persistence.set_tombstone(node_id).await?;
    7439              : 
    7440            0 :         Ok(())
    7441            0 :     }
    7442              : 
    7443            0 :     pub(crate) async fn delete_node(
    7444            0 :         self: &Arc<Self>,
    7445            0 :         node_id: NodeId,
    7446            0 :         policy_on_start: NodeSchedulingPolicy,
    7447            0 :         force: bool,
    7448            0 :         cancel: CancellationToken,
    7449            0 :     ) -> Result<(), OperationError> {
    7450            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal).build();
    7451              : 
    7452            0 :         let mut waiters: Vec<ReconcilerWaiter> = Vec::new();
    7453            0 :         let mut tid_iter = create_shared_shard_iterator(self.clone());
    7454              : 
    7455            0 :         let reset_node_policy_on_cancel = || async {
    7456            0 :             match self
    7457            0 :                 .node_configure(node_id, None, Some(policy_on_start))
    7458            0 :                 .await
    7459              :             {
    7460            0 :                 Ok(()) => OperationError::Cancelled,
    7461            0 :                 Err(err) => {
    7462            0 :                     OperationError::FinalizeError(
    7463            0 :                         format!(
    7464            0 :                             "Failed to finalise delete cancel of {} by setting scheduling policy to {}: {}",
    7465            0 :                             node_id, String::from(policy_on_start), err
    7466            0 :                         )
    7467            0 :                         .into(),
    7468            0 :                     )
    7469              :                 }
    7470              :             }
    7471            0 :         };
    7472              : 
    7473            0 :         while !tid_iter.finished() {
    7474            0 :             if cancel.is_cancelled() {
    7475            0 :                 return Err(reset_node_policy_on_cancel().await);
    7476            0 :             }
    7477              : 
    7478            0 :             operation_utils::validate_node_state(
    7479            0 :                 &node_id,
    7480            0 :                 self.inner.read().unwrap().nodes.clone(),
    7481            0 :                 NodeSchedulingPolicy::Deleting,
    7482            0 :             )?;
    7483              : 
    7484            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    7485            0 :                 let tid = match tid_iter.next() {
    7486            0 :                     Some(tid) => tid,
    7487              :                     None => {
    7488            0 :                         break;
    7489              :                     }
    7490              :                 };
    7491              : 
    7492            0 :                 let mut locked = self.inner.write().unwrap();
    7493            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    7494              : 
    7495              :                 // Calculate a schedule context here to avoid borrow checker issues.
    7496            0 :                 let mut schedule_context = ScheduleContext::default();
    7497            0 :                 for (_, shard) in tenants.range(TenantShardId::tenant_range(tid.tenant_id)) {
    7498            0 :                     schedule_context.avoid(&shard.intent.all_pageservers());
    7499            0 :                 }
    7500              : 
    7501            0 :                 let tenant_shard = match tenants.get_mut(&tid) {
    7502            0 :                     Some(tenant_shard) => tenant_shard,
    7503              :                     None => {
    7504              :                         // Tenant shard was deleted by another operation. Skip it.
    7505            0 :                         continue;
    7506              :                     }
    7507              :                 };
    7508              : 
    7509            0 :                 match tenant_shard.get_scheduling_policy() {
    7510            0 :                     ShardSchedulingPolicy::Active | ShardSchedulingPolicy::Essential => {
    7511            0 :                         // A migration during delete is classed as 'essential' because it is required to
    7512            0 :                         // uphold our availability goals for the tenant: this shard is elegible for migration.
    7513            0 :                         // uphold our availability goals for the tenant: this shard is eligible for migration.
    7514              :                     ShardSchedulingPolicy::Pause | ShardSchedulingPolicy::Stop => {
    7515              :                         // If we have been asked to avoid rescheduling this shard, then do not migrate it during a deletion
    7516            0 :                         tracing::warn!(
    7517            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    7518            0 :                             "Skip migration during deletion because shard scheduling policy {:?} disallows it",
    7519            0 :                             tenant_shard.get_scheduling_policy(),
    7520              :                         );
    7521            0 :                         continue;
    7522              :                     }
    7523              :                 }
    7524              : 
    7525            0 :                 if tenant_shard.deref_node(node_id) {
    7526            0 :                     if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
    7527            0 :                         tracing::error!(
    7528            0 :                             "Refusing to delete node, shard {} can't be rescheduled: {e}",
    7529              :                             tenant_shard.tenant_shard_id
    7530              :                         );
    7531            0 :                         return Err(OperationError::ImpossibleConstraint(e.to_string().into()));
    7532              :                     } else {
    7533            0 :                         tracing::info!(
    7534            0 :                             "Rescheduled shard {} away from node during deletion",
    7535              :                             tenant_shard.tenant_shard_id
    7536              :                         )
    7537              :                     }
    7538              : 
    7539            0 :                     let waiter = self.maybe_configured_reconcile_shard(
    7540            0 :                         tenant_shard,
    7541            0 :                         nodes,
    7542            0 :                         reconciler_config,
    7543            0 :                     );
    7544              : 
    7545            0 :                     if force {
    7546            0 :                         // Here we remove an existing observed location for the node we're removing, and it will
    7547            0 :                         // not be re-added by a reconciler's completion because we filter out removed nodes in
    7548            0 :                         // process_result.
    7549            0 :                         //
    7550            0 :                         // Note that we update the shard's observed state _after_ calling maybe_configured_reconcile_shard:
    7551            0 :                         // that means any reconciles we spawned will know about the node we're deleting,
    7552            0 :                         // enabling them to do live migrations if it's still online.
    7553            0 :                         tenant_shard.observed.locations.remove(&node_id);
    7554            0 :                     } else if let Some(waiter) = waiter {
    7555            0 :                         waiters.push(waiter);
    7556            0 :                     }
    7557            0 :                 }
    7558              :             }
    7559              : 
    7560            0 :             waiters = self
    7561            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    7562            0 :                 .await;
    7563              : 
    7564            0 :             failpoint_support::sleep_millis_async!("sleepy-delete-loop", &cancel);
    7565              :         }
    7566              : 
    7567            0 :         while !waiters.is_empty() {
    7568            0 :             if cancel.is_cancelled() {
    7569            0 :                 return Err(reset_node_policy_on_cancel().await);
    7570            0 :             }
    7571              : 
    7572            0 :             tracing::info!("Awaiting {} pending delete reconciliations", waiters.len());
    7573              : 
    7574            0 :             waiters = self
    7575            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    7576            0 :                 .await;
    7577              :         }
    7578              : 
    7579            0 :         let pf = pausable_failpoint!("delete-node-after-reconciles-spawned", &cancel);
    7580            0 :         if pf.is_err() {
    7581              :             // An error from pausable_failpoint indicates the cancel token was triggered.
    7582            0 :             return Err(reset_node_policy_on_cancel().await);
    7583            0 :         }
    7584              : 
    7585            0 :         self.persistence
    7586            0 :             .set_tombstone(node_id)
    7587            0 :             .await
    7588            0 :             .map_err(|e| OperationError::FinalizeError(e.to_string().into()))?;
    7589              : 
    7590              :         {
    7591            0 :             let mut locked = self.inner.write().unwrap();
    7592            0 :             let (nodes, _, scheduler) = locked.parts_mut();
    7593              : 
    7594            0 :             scheduler.node_remove(node_id);
    7595              : 
    7596            0 :             let mut nodes_mut = (**nodes).clone();
    7597            0 :             if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    7598            0 :                 // Ensure that any reconciler holding an Arc<> to this node will
    7599            0 :                 // drop out when trying to RPC to it (setting Offline state sets the
    7600            0 :                 // cancellation token on the Node object).
    7601            0 :                 removed_node.set_availability(NodeAvailability::Offline);
    7602            0 :             }
    7603            0 :             *nodes = Arc::new(nodes_mut);
    7604              : 
    7605            0 :             metrics::METRICS_REGISTRY
    7606            0 :                 .metrics_group
    7607            0 :                 .storage_controller_pageserver_nodes
    7608            0 :                 .set(nodes.len() as i64);
    7609            0 :             metrics::METRICS_REGISTRY
    7610            0 :                 .metrics_group
    7611            0 :                 .storage_controller_https_pageserver_nodes
    7612            0 :                 .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7613              :         }
    7614              : 
    7615            0 :         Ok(())
    7616            0 :     }
    7617              : 
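delete_node above caps concurrent reconciles at MAX_RECONCILES_PER_OPERATION and refills as waiters drain; a schematic of that bounded fan-out using FuturesUnordered, where the constant, the id type, and the empty future body are all stand-ins:

    use futures::stream::{FuturesUnordered, StreamExt};

    const MAX_IN_FLIGHT: usize = 32; // stand-in for MAX_RECONCILES_PER_OPERATION

    async fn drain_shards(shard_ids: Vec<u64>) {
        let mut ids = shard_ids.into_iter();
        let mut in_flight = FuturesUnordered::new();
        loop {
            // Refill up to the cap so one slow reconcile doesn't serialize the rest.
            while in_flight.len() < MAX_IN_FLIGHT {
                match ids.next() {
                    Some(id) => in_flight.push(async move {
                        // A real implementation would reconcile the shard here.
                        let _ = id;
                    }),
                    None => break,
                }
            }
            // None means both the iterator and the in-flight set are exhausted.
            if in_flight.next().await.is_none() {
                break;
            }
        }
    }
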
    7618            0 :     pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
    7619            0 :         let nodes = {
    7620            0 :             self.inner
    7621            0 :                 .read()
    7622            0 :                 .unwrap()
    7623            0 :                 .nodes
    7624            0 :                 .values()
    7625            0 :                 .cloned()
    7626            0 :                 .collect::<Vec<_>>()
    7627              :         };
    7628              : 
    7629            0 :         Ok(nodes)
    7630            0 :     }
    7631              : 
    7632            0 :     pub(crate) async fn tombstone_list(&self) -> Result<Vec<Node>, ApiError> {
    7633            0 :         self.persistence
    7634            0 :             .list_tombstones()
    7635            0 :             .await?
    7636            0 :             .into_iter()
    7637            0 :             .map(|np| Node::from_persistent(np, false))
    7638            0 :             .collect::<Result<Vec<_>, _>>()
    7639            0 :             .map_err(ApiError::InternalServerError)
    7640            0 :     }
    7641              : 
    7642            0 :     pub(crate) async fn tombstone_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
    7643            0 :         let _node_lock = trace_exclusive_lock(
    7644            0 :             &self.node_op_locks,
    7645            0 :             node_id,
    7646            0 :             NodeOperations::DeleteTombstone,
    7647            0 :         )
    7648            0 :         .await;
    7649              : 
    7650            0 :         if matches!(self.get_node(node_id).await, Err(ApiError::NotFound(_))) {
    7651            0 :             self.persistence.delete_node(node_id).await?;
    7652            0 :             Ok(())
    7653              :         } else {
    7654            0 :             Err(ApiError::Conflict(format!(
    7655            0 :                 "Node {node_id} is in use, consider using tombstone API first"
    7656            0 :             )))
    7657              :         }
    7658            0 :     }
    7659              : 
    7660            0 :     pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
    7661            0 :         self.inner
    7662            0 :             .read()
    7663            0 :             .unwrap()
    7664            0 :             .nodes
    7665            0 :             .get(&node_id)
    7666            0 :             .cloned()
    7667            0 :             .ok_or(ApiError::NotFound(
    7668            0 :                 format!("Node {node_id} not registered").into(),
    7669            0 :             ))
    7670            0 :     }
    7671              : 
    7672            0 :     pub(crate) async fn get_node_shards(
    7673            0 :         &self,
    7674            0 :         node_id: NodeId,
    7675            0 :     ) -> Result<NodeShardResponse, ApiError> {
    7676            0 :         let locked = self.inner.read().unwrap();
    7677            0 :         let mut shards = Vec::new();
    7678            0 :         for (tid, tenant) in locked.tenants.iter() {
    7679            0 :             let is_intended_secondary = match (
    7680            0 :                 tenant.intent.get_attached() == &Some(node_id),
    7681            0 :                 tenant.intent.get_secondary().contains(&node_id),
    7682            0 :             ) {
    7683              :                 (true, true) => {
    7684            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7685            0 :                         "{} attached as primary+secondary on the same node",
    7686            0 :                         tid
    7687            0 :                     )));
    7688              :                 }
    7689            0 :                 (true, false) => Some(false),
    7690            0 :                 (false, true) => Some(true),
    7691            0 :                 (false, false) => None,
    7692              :             };
    7693            0 :             let is_observed_secondary = if let Some(ObservedStateLocation { conf: Some(conf) }) =
    7694            0 :                 tenant.observed.locations.get(&node_id)
    7695              :             {
    7696            0 :                 Some(conf.secondary_conf.is_some())
    7697              :             } else {
    7698            0 :                 None
    7699              :             };
    7700            0 :             if is_intended_secondary.is_some() || is_observed_secondary.is_some() {
    7701            0 :                 shards.push(NodeShard {
    7702            0 :                     tenant_shard_id: *tid,
    7703            0 :                     is_intended_secondary,
    7704            0 :                     is_observed_secondary,
    7705            0 :                 });
    7706            0 :             }
    7707              :         }
    7708            0 :         Ok(NodeShardResponse { node_id, shards })
    7709            0 :     }
    7710              : 
    7711            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    7712            0 :         self.persistence.get_leader().await
    7713            0 :     }
    7714              : 
    7715            0 :     pub(crate) async fn node_register(
    7716            0 :         &self,
    7717            0 :         register_req: NodeRegisterRequest,
    7718            0 :     ) -> Result<(), ApiError> {
    7719            0 :         let _node_lock = trace_exclusive_lock(
    7720            0 :             &self.node_op_locks,
    7721            0 :             register_req.node_id,
    7722            0 :             NodeOperations::Register,
    7723            0 :         )
    7724            0 :         .await;
    7725              : 
    7726              :         #[derive(PartialEq)]
    7727              :         enum RegistrationStatus {
    7728              :             UpToDate,
    7729              :             NeedUpdate,
    7730              :             Mismatched,
    7731              :             New,
    7732              :         }
    7733              : 
    7734            0 :         let registration_status = {
    7735            0 :             let locked = self.inner.read().unwrap();
    7736            0 :             if let Some(node) = locked.nodes.get(&register_req.node_id) {
    7737            0 :                 if node.registration_match(&register_req) {
    7738            0 :                     if node.need_update(&register_req) {
    7739            0 :                         RegistrationStatus::NeedUpdate
    7740              :                     } else {
    7741            0 :                         RegistrationStatus::UpToDate
    7742              :                     }
    7743              :                 } else {
    7744            0 :                     RegistrationStatus::Mismatched
    7745              :                 }
    7746              :             } else {
    7747            0 :                 RegistrationStatus::New
    7748              :             }
    7749              :         };
    7750              : 
    7751            0 :         match registration_status {
    7752              :             RegistrationStatus::UpToDate => {
    7753            0 :                 tracing::info!(
    7754            0 :                     "Node {} re-registered with matching address and is up to date",
    7755              :                     register_req.node_id
    7756              :                 );
    7757              : 
    7758            0 :                 return Ok(());
    7759              :             }
    7760              :             RegistrationStatus::Mismatched => {
    7761              :                 // TODO: decide if we want to allow modifying node addresses without removing and re-adding
    7762              :                 // the node.  Safest/simplest thing is to refuse it, and usually we deploy with
    7763              :                 // a fixed address through the lifetime of a node.
    7764            0 :                 tracing::warn!(
    7765            0 :                     "Node {} tried to register with different address",
    7766              :                     register_req.node_id
    7767              :                 );
    7768            0 :                 return Err(ApiError::Conflict(
    7769            0 :                     "Node is already registered with different address".to_string(),
    7770            0 :                 ));
    7771              :             }
    7772            0 :             RegistrationStatus::New | RegistrationStatus::NeedUpdate => {
    7773            0 :                 // fallthrough
    7774            0 :             }
    7775              :         }
    7776              : 
    7777              :         // We do not require that a node is actually online when registered (it will start life
    7778              :         // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
    7779              :         // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
    7780              :         // that register themselves with a broken DNS config.  We check only the HTTP hostname, because
    7781              :         // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
    7782            0 :         if tokio::net::lookup_host(format!(
    7783            0 :             "{}:{}",
    7784              :             register_req.listen_http_addr, register_req.listen_http_port
    7785              :         ))
    7786            0 :         .await
    7787            0 :         .is_err()
    7788              :         {
    7789              :             // If we have a transient DNS issue, it's up to the caller to retry their registration.  Because
    7790              :             // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
    7791              :             // we return a soft 503 error, to encourage callers to retry past transient issues.
    7792            0 :             return Err(ApiError::ResourceUnavailable(
    7793            0 :                 format!(
    7794            0 :                     "Node {} tried to register with unknown DNS name '{}'",
    7795            0 :                     register_req.node_id, register_req.listen_http_addr
    7796            0 :                 )
    7797            0 :                 .into(),
    7798            0 :             ));
    7799            0 :         }
    7800              : 
    7801            0 :         if self.config.use_https_pageserver_api && register_req.listen_https_port.is_none() {
    7802            0 :             return Err(ApiError::PreconditionFailed(
    7803            0 :                 format!(
    7804            0 :                     "Node {} has no https port, but use_https is enabled",
    7805            0 :                     register_req.node_id
    7806            0 :                 )
    7807            0 :                 .into(),
    7808            0 :             ));
    7809            0 :         }
    7810              : 
    7811            0 :         if register_req.listen_grpc_addr.is_some() != register_req.listen_grpc_port.is_some() {
    7812            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    7813            0 :                 "must specify both gRPC address and port"
    7814            0 :             )));
    7815            0 :         }
    7816              : 
    7817              :         // Ordering: we must persist the new node _before_ adding it to in-memory state.
    7818              :         // This ensures that before we use it for anything or expose it via any external
    7819              :         // API, it is guaranteed to be available after a restart.
    7820            0 :         let new_node = Node::new(
    7821            0 :             register_req.node_id,
    7822            0 :             register_req.listen_http_addr,
    7823            0 :             register_req.listen_http_port,
    7824            0 :             register_req.listen_https_port,
    7825            0 :             register_req.listen_pg_addr,
    7826            0 :             register_req.listen_pg_port,
    7827            0 :             register_req.listen_grpc_addr.clone(),
    7828            0 :             register_req.listen_grpc_port,
    7829            0 :             register_req.availability_zone_id.clone(),
    7830            0 :             self.config.use_https_pageserver_api,
    7831              :         );
    7832            0 :         let new_node = match new_node {
    7833            0 :             Ok(new_node) => new_node,
    7834            0 :             Err(error) => return Err(ApiError::InternalServerError(error)),
    7835              :         };
    7836              : 
    7837            0 :         match registration_status {
    7838              :             RegistrationStatus::New => {
    7839            0 :                 self.persistence.insert_node(&new_node).await.map_err(|e| {
    7840            0 :                     if matches!(
    7841            0 :                         e,
    7842              :                         crate::persistence::DatabaseError::Query(
    7843              :                             diesel::result::Error::DatabaseError(
    7844              :                                 diesel::result::DatabaseErrorKind::UniqueViolation,
    7845              :                                 _,
    7846              :                             )
    7847              :                         )
    7848              :                     ) {
    7849              :                         // The node may have been deleted via the tombstone API, in which case it does
    7850              :                         // not show up in the list of nodes. If you see this error, check tombstones first.
    7851            0 :                         ApiError::Conflict(format!("Node {} already exists", new_node.get_id()))
    7852              :                     } else {
    7853            0 :                         ApiError::from(e)
    7854              :                     }
    7855            0 :                 })?;
    7856              :             }
    7857              :             RegistrationStatus::NeedUpdate => {
    7858            0 :                 self.persistence
    7859            0 :                     .update_node_on_registration(
    7860            0 :                         register_req.node_id,
    7861            0 :                         register_req.listen_https_port,
    7862            0 :                         register_req.listen_grpc_addr,
    7863            0 :                         register_req.listen_grpc_port,
    7864            0 :                     )
    7865            0 :                     .await?
    7866              :             }
    7867            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    7868              :         }
    7869              : 
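                      :         // Copy-on-write update of the shared node map: clone the map, upsert the node into
                      :         // the scheduler, insert it into the clone, then publish by swapping the Arc.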
    7870            0 :         let mut locked = self.inner.write().unwrap();
    7871            0 :         let mut new_nodes = (*locked.nodes).clone();
    7872              : 
    7873            0 :         locked.scheduler.node_upsert(&new_node);
    7874            0 :         new_nodes.insert(register_req.node_id, new_node);
    7875              : 
    7876            0 :         locked.nodes = Arc::new(new_nodes);
    7877              : 
    7878            0 :         metrics::METRICS_REGISTRY
    7879            0 :             .metrics_group
    7880            0 :             .storage_controller_pageserver_nodes
    7881            0 :             .set(locked.nodes.len() as i64);
    7882            0 :         metrics::METRICS_REGISTRY
    7883            0 :             .metrics_group
    7884            0 :             .storage_controller_https_pageserver_nodes
    7885            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7886              : 
    7887            0 :         match registration_status {
    7888              :             RegistrationStatus::New => {
    7889            0 :                 tracing::info!(
    7890            0 :                     "Registered pageserver {} ({}), now have {} pageservers",
    7891              :                     register_req.node_id,
    7892              :                     register_req.availability_zone_id,
    7893            0 :                     locked.nodes.len()
    7894              :                 );
    7895              :             }
    7896              :             RegistrationStatus::NeedUpdate => {
    7897            0 :                 tracing::info!(
    7898            0 :                     "Re-registered and updated node {} ({})",
    7899              :                     register_req.node_id,
    7900              :                     register_req.availability_zone_id,
    7901              :                 );
    7902              :             }
    7903            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    7904              :         }
    7905            0 :         Ok(())
    7906            0 :     }
    7907              : 
    7908              :     /// Configure in-memory and persistent state of a node as requested
    7909              :     ///
    7910              :     /// Note that this function does not trigger any immediate side effects in response
    7911              :     /// to the changes. That part is handled by [`Self::handle_node_availability_transition`].
    7912            0 :     async fn node_state_configure(
    7913            0 :         &self,
    7914            0 :         node_id: NodeId,
    7915            0 :         availability: Option<NodeAvailability>,
    7916            0 :         scheduling: Option<NodeSchedulingPolicy>,
    7917            0 :         node_lock: &TracingExclusiveGuard<NodeOperations>,
    7918            0 :     ) -> Result<AvailabilityTransition, ApiError> {
    7919            0 :         if let Some(scheduling) = scheduling {
    7920              :             // Scheduling is a persistent part of Node: we must write updates to the database before
    7921              :             // applying them in memory
    7922            0 :             self.persistence
    7923            0 :                 .update_node_scheduling_policy(node_id, scheduling)
    7924            0 :                 .await?;
    7925            0 :         }
    7926              : 
    7927              :         // If we're activating a node, then before setting it active we must reconcile any shard locations
    7928              :         // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
    7929              :         // by calling [`Self::node_activate_reconcile`]
    7930              :         //
    7931              :         // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
    7932              :         // nothing else can mutate its availability while we run.
    7933            0 :         let availability_transition = if let Some(input_availability) = availability.as_ref() {
    7934            0 :             let (activate_node, availability_transition) = {
    7935            0 :                 let locked = self.inner.read().unwrap();
    7936            0 :                 let Some(node) = locked.nodes.get(&node_id) else {
    7937            0 :                     return Err(ApiError::NotFound(
    7938            0 :                         anyhow::anyhow!("Node {} not registered", node_id).into(),
    7939            0 :                     ));
    7940              :                 };
    7941              : 
    7942            0 :                 (
    7943            0 :                     node.clone(),
    7944            0 :                     node.get_availability_transition(input_availability),
    7945            0 :                 )
    7946              :             };
    7947              : 
    7948            0 :             if matches!(availability_transition, AvailabilityTransition::ToActive) {
    7949            0 :                 self.node_activate_reconcile(activate_node, node_lock)
    7950            0 :                     .await?;
    7951            0 :             }
    7952            0 :             availability_transition
    7953              :         } else {
    7954            0 :             AvailabilityTransition::Unchanged
    7955              :         };
    7956              : 
    7957              :         // Apply changes from the request to our in-memory state for the Node
    7958            0 :         let mut locked = self.inner.write().unwrap();
    7959            0 :         let (nodes, _tenants, scheduler) = locked.parts_mut();
    7960              : 
    7961            0 :         let mut new_nodes = (**nodes).clone();
    7962              : 
    7963            0 :         let Some(node) = new_nodes.get_mut(&node_id) else {
    7964            0 :             return Err(ApiError::NotFound(
    7965            0 :                 anyhow::anyhow!("Node not registered").into(),
    7966            0 :             ));
    7967              :         };
    7968              : 
    7969            0 :         if let Some(availability) = availability {
    7970            0 :             node.set_availability(availability);
    7971            0 :         }
    7972              : 
    7973            0 :         if let Some(scheduling) = scheduling {
    7974            0 :             node.set_scheduling(scheduling);
    7975            0 :         }
    7976              : 
    7977              :         // Update the scheduler, in case the eligibility of the node for new shards has changed
    7978            0 :         scheduler.node_upsert(node);
    7979              : 
    7980            0 :         let new_nodes = Arc::new(new_nodes);
    7981            0 :         locked.nodes = new_nodes;
    7982              : 
    7983            0 :         Ok(availability_transition)
    7984            0 :     }
    7985              : 
    7986              :     /// Handle availability transition of one node
    7987              :     ///
    7988              :     /// Note that you should first call [`Self::node_state_configure`] to update
    7989              :     /// the in-memory state referencing that node. If you need to handle more than one transition
    7990              :     /// consider using [`Self::handle_node_availability_transitions`].
    7991            0 :     async fn handle_node_availability_transition(
    7992            0 :         &self,
    7993            0 :         node_id: NodeId,
    7994            0 :         transition: AvailabilityTransition,
    7995            0 :         _node_lock: &TracingExclusiveGuard<NodeOperations>,
    7996            0 :     ) -> Result<(), ApiError> {
    7997              :         // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
    7998            0 :         match transition {
    7999              :             AvailabilityTransition::ToOffline => {
    8000            0 :                 tracing::info!("Node {} transition to offline", node_id);
    8001              : 
    8002            0 :                 let mut locked = self.inner.write().unwrap();
    8003            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    8004              : 
    8005            0 :                 let mut tenants_affected: usize = 0;
    8006              : 
    8007            0 :                 for (_tenant_id, mut schedule_context, shards) in
    8008            0 :                     TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal)
    8009              :                 {
    8010            0 :                     for tenant_shard in shards {
    8011            0 :                         let tenant_shard_id = tenant_shard.tenant_shard_id;
    8012            0 :                         if let Some(observed_loc) =
    8013            0 :                             tenant_shard.observed.locations.get_mut(&node_id)
    8014            0 :                         {
    8015            0 :                             // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
    8016            0 :                             // not assume our knowledge of the node's configuration is accurate until it comes back online
    8017            0 :                             observed_loc.conf = None;
    8018            0 :                         }
    8019              : 
    8020            0 :                         if nodes.len() == 1 {
    8021              :                             // Special case for a single-node cluster: there is no point trying to reschedule
    8022              :                             // any tenant shards, so skip it to avoid spewing warnings about
    8023              :                             // failures to schedule them.
    8024            0 :                             continue;
    8025            0 :                         }
    8026              : 
    8027            0 :                         if !nodes
    8028            0 :                             .values()
    8029            0 :                             .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    8030              :                         {
    8031              :                             // Special case for when all nodes are unavailable and/or unschedulable: there is no point
    8032              :                             // trying to reschedule since there's nowhere else to go. Without this
    8033              :                             // branch we incorrectly detach tenants in response to node unavailability.
    8034            0 :                             continue;
    8035            0 :                         }
    8036              : 
    8037            0 :                         if tenant_shard.intent.demote_attached(scheduler, node_id) {
    8038            0 :                             tenant_shard.sequence = tenant_shard.sequence.next();
    8039              : 
    8040            0 :                             match tenant_shard.schedule(scheduler, &mut schedule_context) {
    8041            0 :                                 Err(e) => {
    8042              :                                     // It is possible that some tenants will become unschedulable when too many pageservers
    8043              :                                     // go offline: in this case there isn't much we can do other than make the issue observable.
    8044              :                                     // TODO: give TenantShard a scheduling error attribute to be queried later.
    8045            0 :                                     tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
    8046              :                                 }
    8047              :                                 Ok(()) => {
    8048            0 :                                     if self
    8049            0 :                                         .maybe_reconcile_shard(
    8050            0 :                                             tenant_shard,
    8051            0 :                                             nodes,
    8052            0 :                                             ReconcilerPriority::Normal,
    8053            0 :                                         )
    8054            0 :                                         .is_some()
    8055            0 :                                     {
    8056            0 :                                         tenants_affected += 1;
    8057            0 :                                     };
    8058              :                                 }
    8059              :                             }
    8060            0 :                         }
    8061              :                     }
    8062              :                 }
    8063            0 :                 tracing::info!(
    8064            0 :                     "Launched {} reconciler tasks for tenants affected by node {} going offline",
    8065              :                     tenants_affected,
    8066              :                     node_id
    8067              :                 )
    8068              :             }
    8069              :             AvailabilityTransition::ToActive => {
    8070            0 :                 tracing::info!("Node {} transition to active", node_id);
    8071              : 
    8072            0 :                 let mut locked = self.inner.write().unwrap();
    8073            0 :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    8074              : 
    8075              :                 // When a node comes back online, we must reconcile any tenant that has a None observed
    8076              :                 // location on the node.
    8077            0 :                 for tenant_shard in tenants.values_mut() {
    8078              :                     // If a reconciliation is already in progress, rely on the previous scheduling
    8079              :                     // decision and skip triggering a new reconciliation.
    8080            0 :                     if tenant_shard.reconciler.is_some() {
    8081            0 :                         continue;
    8082            0 :                     }
    8083              : 
    8084            0 :                     if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
    8085            0 :                         if observed_loc.conf.is_none() {
    8086            0 :                             self.maybe_reconcile_shard(
    8087            0 :                                 tenant_shard,
    8088            0 :                                 nodes,
    8089            0 :                                 ReconcilerPriority::Normal,
    8090            0 :                             );
    8091            0 :                         }
    8092            0 :                     }
    8093              :                 }
    8094              : 
    8095              :                 // TODO: in the background, we should balance work back onto this pageserver
    8096              :             }
    8097              :             // No action required for the intermediate unavailable state.
    8098              :             // When we transition into active or offline from the unavailable state,
    8099              :             // the correct handling above will kick in.
    8100              :             AvailabilityTransition::ToWarmingUpFromActive => {
    8101            0 :                 tracing::info!("Node {} transition to unavailable from active", node_id);
    8102              :             }
    8103              :             AvailabilityTransition::ToWarmingUpFromOffline => {
    8104            0 :                 tracing::info!("Node {} transition to unavailable from offline", node_id);
    8105              :             }
    8106              :             AvailabilityTransition::Unchanged => {
    8107            0 :                 tracing::debug!("Node {} no availability change during config", node_id);
    8108              :             }
    8109              :         }
    8110              : 
    8111            0 :         Ok(())
    8112            0 :     }
    8113              : 
    8114              :     /// Handle availability transition for multiple nodes
    8115              :     ///
    8116              :     /// Note that you should first call [`Self::node_state_configure`] for
    8117              :     /// all nodes being handled here for the handling to use fresh in-memory state.
    8118            0 :     async fn handle_node_availability_transitions(
    8119            0 :         &self,
    8120            0 :         transitions: Vec<(
    8121            0 :             NodeId,
    8122            0 :             TracingExclusiveGuard<NodeOperations>,
    8123            0 :             AvailabilityTransition,
    8124            0 :         )>,
    8125            0 :     ) -> Result<(), Vec<(NodeId, ApiError)>> {
    8126            0 :         let mut errors = Vec::default();
    8127            0 :         for (node_id, node_lock, transition) in transitions {
    8128            0 :             let res = self
    8129            0 :                 .handle_node_availability_transition(node_id, transition, &node_lock)
    8130            0 :                 .await;
    8131            0 :             if let Err(err) = res {
    8132            0 :                 errors.push((node_id, err));
    8133            0 :             }
    8134              :         }
    8135              : 
    8136            0 :         if errors.is_empty() {
    8137            0 :             Ok(())
    8138              :         } else {
    8139            0 :             Err(errors)
    8140              :         }
    8141            0 :     }
    8142              : 
    8143            0 :     pub(crate) async fn node_configure(
    8144            0 :         &self,
    8145            0 :         node_id: NodeId,
    8146            0 :         availability: Option<NodeAvailability>,
    8147            0 :         scheduling: Option<NodeSchedulingPolicy>,
    8148            0 :     ) -> Result<(), ApiError> {
    8149            0 :         let node_lock =
    8150            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
    8151              : 
    8152            0 :         let transition = self
    8153            0 :             .node_state_configure(node_id, availability, scheduling, &node_lock)
    8154            0 :             .await?;
    8155            0 :         self.handle_node_availability_transition(node_id, transition, &node_lock)
    8156            0 :             .await
    8157            0 :     }
    8158              : 
    8159              :     /// Wrapper around [`Self::node_configure`] which only allows changes while there is no
    8160              :     /// ongoing background operation. Intended for use by the HTTP API.
    8161            0 :     pub(crate) async fn external_node_configure(
    8162            0 :         &self,
    8163            0 :         node_id: NodeId,
    8164            0 :         availability: Option<NodeAvailability>,
    8165            0 :         scheduling: Option<NodeSchedulingPolicy>,
    8166            0 :     ) -> Result<(), ApiError> {
    8167              :         {
    8168            0 :             let locked = self.inner.read().unwrap();
    8169            0 :             if let Some(op) = locked.ongoing_operation.as_ref().map(|op| op.operation) {
    8170            0 :                 return Err(ApiError::PreconditionFailed(
    8171            0 :                     format!("Ongoing background operation forbids configuring: {op}").into(),
    8172            0 :                 ));
    8173            0 :             }
    8174              :         }
    8175              : 
    8176            0 :         self.node_configure(node_id, availability, scheduling).await
    8177            0 :     }
    8178              : 
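                      :     /// Start a background delete of a node: flip its scheduling policy to `Deleting` and
                      :     /// spawn a task that migrates its shards elsewhere. Refused if another background
                      :     /// operation is ongoing or there are no other schedulable nodes to move shards to.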
    8179            0 :     pub(crate) async fn start_node_delete(
    8180            0 :         self: &Arc<Self>,
    8181            0 :         node_id: NodeId,
    8182            0 :         force: bool,
    8183            0 :     ) -> Result<(), ApiError> {
    8184            0 :         let (ongoing_op, node_policy, schedulable_nodes_count) = {
    8185            0 :             let locked = self.inner.read().unwrap();
    8186            0 :             let nodes = &locked.nodes;
    8187            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8188            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8189            0 :             ))?;
    8190            0 :             let schedulable_nodes_count = nodes
    8191            0 :                 .iter()
    8192            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    8193            0 :                 .count();
    8194              : 
    8195              :             (
    8196            0 :                 locked
    8197            0 :                     .ongoing_operation
    8198            0 :                     .as_ref()
    8199            0 :                     .map(|ongoing| ongoing.operation),
    8200            0 :                 node.get_scheduling(),
    8201            0 :                 schedulable_nodes_count,
    8202              :             )
    8203              :         };
    8204              : 
    8205            0 :         if let Some(ongoing) = ongoing_op {
    8206            0 :             return Err(ApiError::PreconditionFailed(
    8207            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8208            0 :             ));
    8209            0 :         }
    8210              : 
    8211            0 :         if schedulable_nodes_count == 0 {
    8212            0 :             return Err(ApiError::PreconditionFailed(
    8213            0 :                 "No other schedulable nodes to move shards".into(),
    8214            0 :             ));
    8215            0 :         }
    8216              : 
    8217            0 :         match node_policy {
    8218              :             NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => {
    8219            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Deleting))
    8220            0 :                     .await?;
    8221              : 
    8222            0 :                 let cancel = self.cancel.child_token();
    8223            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8224            0 :                 let policy_on_start = node_policy;
    8225              : 
    8226            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8227            0 :                     operation: Operation::Delete(Delete { node_id }),
    8228            0 :                     cancel: cancel.clone(),
    8229            0 :                 });
    8230              : 
    8231            0 :                 let span = tracing::info_span!(parent: None, "delete_node", %node_id);
    8232              : 
    8233            0 :                 tokio::task::spawn(
    8234              :                     {
    8235            0 :                         let service = self.clone();
    8236            0 :                         let cancel = cancel.clone();
    8237            0 :                         async move {
    8238            0 :                             let _gate_guard = gate_guard;
    8239              : 
    8240            0 :                             scopeguard::defer! {
    8241              :                                 let prev = service.inner.write().unwrap().ongoing_operation.take();
    8242              : 
    8243              :                                 if let Some(Operation::Delete(removed_delete)) = prev.map(|h| h.operation) {
    8244              :                                     assert_eq!(removed_delete.node_id, node_id, "We always take the same operation");
    8245              :                                 } else {
    8246              :                                     panic!("We always remove the same operation")
    8247              :                                 }
    8248              :                             }
    8249              : 
    8250            0 :                             tracing::info!("Delete background operation starting");
    8251            0 :                             let res = service
    8252            0 :                                 .delete_node(node_id, policy_on_start, force, cancel)
    8253            0 :                                 .await;
    8254            0 :                             match res {
    8255              :                                 Ok(()) => {
    8256            0 :                                     tracing::info!(
    8257            0 :                                         "Delete background operation completed successfully"
    8258              :                                     );
    8259              :                                 }
    8260              :                                 Err(OperationError::Cancelled) => {
    8261            0 :                                     tracing::info!("Delete background operation was cancelled");
    8262              :                                 }
    8263            0 :                                 Err(err) => {
    8264            0 :                                     tracing::error!(
    8265            0 :                                         "Delete background operation encountered: {err}"
    8266              :                                     )
    8267              :                                 }
    8268              :                             }
    8269            0 :                         }
    8270              :                     }
    8271            0 :                     .instrument(span),
    8272              :                 );
    8273              :             }
    8274              :             NodeSchedulingPolicy::Deleting => {
    8275            0 :                 return Err(ApiError::Conflict(format!(
    8276            0 :                     "Node {node_id} has delete in progress"
    8277            0 :                 )));
    8278              :             }
    8279            0 :             policy => {
    8280            0 :                 return Err(ApiError::PreconditionFailed(
    8281            0 :                     format!("Node {node_id} cannot be deleted due to {policy:?} policy").into(),
    8282            0 :                 ));
    8283              :             }
    8284              :         }
    8285              : 
    8286            0 :         Ok(())
    8287            0 :     }
    8288              : 
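                      :     /// Cancel an in-progress background delete of a node by signalling its cancellation token.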
    8289            0 :     pub(crate) async fn cancel_node_delete(
    8290            0 :         self: &Arc<Self>,
    8291            0 :         node_id: NodeId,
    8292            0 :     ) -> Result<(), ApiError> {
    8293              :         {
    8294            0 :             let locked = self.inner.read().unwrap();
    8295            0 :             let nodes = &locked.nodes;
    8296            0 :             nodes.get(&node_id).ok_or(ApiError::NotFound(
    8297            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8298            0 :             ))?;
    8299              :         }
    8300              : 
    8301            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8302            0 :             if let Operation::Delete(delete) = op_handler.operation {
    8303            0 :                 if delete.node_id == node_id {
    8304            0 :                     tracing::info!("Cancelling background delete operation for node {node_id}");
    8305            0 :                     op_handler.cancel.cancel();
    8306            0 :                     return Ok(());
    8307            0 :                 }
    8308            0 :             }
    8309            0 :         }
    8310              : 
    8311            0 :         Err(ApiError::PreconditionFailed(
    8312            0 :             format!("Node {node_id} has no delete in progress").into(),
    8313            0 :         ))
    8314            0 :     }
    8315              : 
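                      :     /// Start a background drain of a node: flip its scheduling policy to `Draining` and
                      :     /// spawn a task that moves its shards to other nodes. The node must be available, and
                      :     /// there must be at least one other schedulable node to drain to.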
    8316            0 :     pub(crate) async fn start_node_drain(
    8317            0 :         self: &Arc<Self>,
    8318            0 :         node_id: NodeId,
    8319            0 :     ) -> Result<(), ApiError> {
    8320            0 :         let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
    8321            0 :             let locked = self.inner.read().unwrap();
    8322            0 :             let nodes = &locked.nodes;
    8323            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8324            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8325            0 :             ))?;
    8326            0 :             let schedulable_nodes_count = nodes
    8327            0 :                 .iter()
    8328            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    8329            0 :                 .count();
    8330              : 
    8331              :             (
    8332            0 :                 locked
    8333            0 :                     .ongoing_operation
    8334            0 :                     .as_ref()
    8335            0 :                     .map(|ongoing| ongoing.operation),
    8336            0 :                 node.is_available(),
    8337            0 :                 node.get_scheduling(),
    8338            0 :                 schedulable_nodes_count,
    8339              :             )
    8340              :         };
    8341              : 
    8342            0 :         if let Some(ongoing) = ongoing_op {
    8343            0 :             return Err(ApiError::PreconditionFailed(
    8344            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8345            0 :             ));
    8346            0 :         }
    8347              : 
    8348            0 :         if !node_available {
    8349            0 :             return Err(ApiError::ResourceUnavailable(
    8350            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8351            0 :             ));
    8352            0 :         }
    8353              : 
    8354            0 :         if schedulable_nodes_count == 0 {
    8355            0 :             return Err(ApiError::PreconditionFailed(
    8356            0 :                 "No other schedulable nodes to drain to".into(),
    8357            0 :             ));
    8358            0 :         }
    8359              : 
    8360            0 :         match node_policy {
    8361              :             NodeSchedulingPolicy::Active => {
    8362            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
    8363            0 :                     .await?;
    8364              : 
    8365            0 :                 let cancel = self.cancel.child_token();
    8366            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8367              : 
    8368            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8369            0 :                     operation: Operation::Drain(Drain { node_id }),
    8370            0 :                     cancel: cancel.clone(),
    8371            0 :                 });
    8372              : 
    8373            0 :                 let span = tracing::info_span!(parent: None, "drain_node", %node_id);
    8374              : 
    8375            0 :                 tokio::task::spawn({
    8376            0 :                     let service = self.clone();
    8377            0 :                     let cancel = cancel.clone();
    8378            0 :                     async move {
    8379            0 :                         let _gate_guard = gate_guard;
    8380              : 
    8381            0 :                         scopeguard::defer! {
    8382              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    8383              : 
    8384              :                             if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
    8385              :                                 assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
    8386              :                             } else {
    8387              :                                 panic!("We always remove the same operation")
    8388              :                             }
    8389              :                         }
    8390              : 
    8391            0 :                         tracing::info!("Drain background operation starting");
    8392            0 :                         let res = service.drain_node(node_id, cancel).await;
    8393            0 :                         match res {
    8394              :                             Ok(()) => {
    8395            0 :                                 tracing::info!("Drain background operation completed successfully");
    8396              :                             }
    8397              :                             Err(OperationError::Cancelled) => {
    8398            0 :                                 tracing::info!("Drain background operation was cancelled");
    8399              :                             }
    8400            0 :                             Err(err) => {
    8401            0 :                                 tracing::error!("Drain background operation encountered: {err}")
    8402              :                             }
    8403              :                         }
    8404            0 :                     }
    8405            0 :                 }.instrument(span));
    8406              :             }
    8407              :             NodeSchedulingPolicy::Draining => {
    8408            0 :                 return Err(ApiError::Conflict(format!(
    8409            0 :                     "Node {node_id} has drain in progress"
    8410            0 :                 )));
    8411              :             }
    8412            0 :             policy => {
    8413            0 :                 return Err(ApiError::PreconditionFailed(
    8414            0 :                     format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
    8415            0 :                 ));
    8416              :             }
    8417              :         }
    8418              : 
    8419            0 :         Ok(())
    8420            0 :     }
    8421              : 
    8422            0 :     pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
    8423            0 :         let node_available = {
    8424            0 :             let locked = self.inner.read().unwrap();
    8425            0 :             let nodes = &locked.nodes;
    8426            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8427            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8428            0 :             ))?;
    8429              : 
    8430            0 :             node.is_available()
    8431              :         };
    8432              : 
    8433            0 :         if !node_available {
    8434            0 :             return Err(ApiError::ResourceUnavailable(
    8435            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8436            0 :             ));
    8437            0 :         }
    8438              : 
    8439            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8440            0 :             if let Operation::Drain(drain) = op_handler.operation {
    8441            0 :                 if drain.node_id == node_id {
    8442            0 :                     tracing::info!("Cancelling background drain operation for node {node_id}");
    8443            0 :                     op_handler.cancel.cancel();
    8444            0 :                     return Ok(());
    8445            0 :                 }
    8446            0 :             }
    8447            0 :         }
    8448              : 
    8449            0 :         Err(ApiError::PreconditionFailed(
    8450            0 :             format!("Node {node_id} has no drain in progress").into(),
    8451            0 :         ))
    8452            0 :     }
    8453              : 
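                      :     /// Start a background fill of a node: flip its scheduling policy to `Filling` and spawn
                      :     /// a task that moves shards back onto it. The node must be available and must not be
                      :     /// the only node in the cluster.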
    8454            0 :     pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
    8455            0 :         let (ongoing_op, node_available, node_policy, total_nodes_count) = {
    8456            0 :             let locked = self.inner.read().unwrap();
    8457            0 :             let nodes = &locked.nodes;
    8458            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8459            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8460            0 :             ))?;
    8461              : 
    8462              :             (
    8463            0 :                 locked
    8464            0 :                     .ongoing_operation
    8465            0 :                     .as_ref()
    8466            0 :                     .map(|ongoing| ongoing.operation),
    8467            0 :                 node.is_available(),
    8468            0 :                 node.get_scheduling(),
    8469            0 :                 nodes.len(),
    8470              :             )
    8471              :         };
    8472              : 
    8473            0 :         if let Some(ongoing) = ongoing_op {
    8474            0 :             return Err(ApiError::PreconditionFailed(
    8475            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8476            0 :             ));
    8477            0 :         }
    8478              : 
    8479            0 :         if !node_available {
    8480            0 :             return Err(ApiError::ResourceUnavailable(
    8481            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8482            0 :             ));
    8483            0 :         }
    8484              : 
    8485            0 :         if total_nodes_count <= 1 {
    8486            0 :             return Err(ApiError::PreconditionFailed(
    8487            0 :                 "No other nodes to fill from".into(),
    8488            0 :             ));
    8489            0 :         }
    8490              : 
    8491            0 :         match node_policy {
    8492              :             NodeSchedulingPolicy::Active => {
    8493            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
    8494            0 :                     .await?;
    8495              : 
    8496            0 :                 let cancel = self.cancel.child_token();
    8497            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8498              : 
    8499            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8500            0 :                     operation: Operation::Fill(Fill { node_id }),
    8501            0 :                     cancel: cancel.clone(),
    8502            0 :                 });
    8503              : 
    8504            0 :                 let span = tracing::info_span!(parent: None, "fill_node", %node_id);
    8505              : 
    8506            0 :                 tokio::task::spawn({
    8507            0 :                     let service = self.clone();
    8508            0 :                     let cancel = cancel.clone();
    8509            0 :                     async move {
    8510            0 :                         let _gate_guard = gate_guard;
    8511              : 
    8512            0 :                         scopeguard::defer! {
    8513              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    8514              : 
    8515              :                             if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
    8516              :                                 assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
    8517              :                             } else {
    8518              :                                 panic!("We always remove the same operation")
    8519              :                             }
    8520              :                         }
    8521              : 
    8522            0 :                         tracing::info!("Fill background operation starting");
    8523            0 :                         let res = service.fill_node(node_id, cancel).await;
    8524            0 :                         match res {
    8525              :                             Ok(()) => {
    8526            0 :                                 tracing::info!("Fill background operation completed successfully");
    8527              :                             }
    8528              :                             Err(OperationError::Cancelled) => {
    8529            0 :                                 tracing::info!("Fill background operation was cancelled");
    8530              :                             }
    8531            0 :                             Err(err) => {
    8532            0 :                                 tracing::error!("Fill background operation encountered: {err}")
    8533              :                             }
    8534              :                         }
    8535            0 :                     }
    8536            0 :                 }.instrument(span));
    8537              :             }
    8538              :             NodeSchedulingPolicy::Filling => {
    8539            0 :                 return Err(ApiError::Conflict(format!(
    8540            0 :                     "Node {node_id} has fill in progress"
    8541            0 :                 )));
    8542              :             }
    8543            0 :             policy => {
    8544            0 :                 return Err(ApiError::PreconditionFailed(
    8545            0 :                     format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
    8546            0 :                 ));
    8547              :             }
    8548              :         }
    8549              : 
    8550            0 :         Ok(())
    8551            0 :     }
    8552              : 
    8553            0 :     pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
    8554            0 :         let node_available = {
    8555            0 :             let locked = self.inner.read().unwrap();
    8556            0 :             let nodes = &locked.nodes;
    8557            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8558            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8559            0 :             ))?;
    8560              : 
    8561            0 :             node.is_available()
    8562              :         };
    8563              : 
    8564            0 :         if !node_available {
    8565            0 :             return Err(ApiError::ResourceUnavailable(
    8566            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8567            0 :             ));
    8568            0 :         }
    8569              : 
    8570            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8571            0 :             if let Operation::Fill(fill) = op_handler.operation {
    8572            0 :                 if fill.node_id == node_id {
    8573            0 :                     tracing::info!("Cancelling background drain operation for node {node_id}");
    8574            0 :                     op_handler.cancel.cancel();
    8575            0 :                     return Ok(());
    8576            0 :                 }
    8577            0 :             }
    8578            0 :         }
    8579              : 
    8580            0 :         Err(ApiError::PreconditionFailed(
    8581            0 :             format!("Node {node_id} has no fill in progress").into(),
    8582            0 :         ))
    8583            0 :     }
    8584              : 
    8585              :     /// Like [`Self::maybe_configured_reconcile_shard`], but uses the default reconciler
    8586              :     /// configuration
    8587            0 :     fn maybe_reconcile_shard(
    8588            0 :         &self,
    8589            0 :         shard: &mut TenantShard,
    8590            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    8591            0 :         priority: ReconcilerPriority,
    8592            0 :     ) -> Option<ReconcilerWaiter> {
    8593            0 :         self.maybe_configured_reconcile_shard(shard, nodes, ReconcilerConfig::new(priority))
    8594            0 :     }
    8595              : 
    8596              :     /// Before constructing a Reconciler, acquire semaphore units from the appropriate concurrency limit (depends on priority)
    8597            0 :     fn get_reconciler_units(
    8598            0 :         &self,
    8599            0 :         priority: ReconcilerPriority,
    8600            0 :     ) -> Result<ReconcileUnits, TryAcquireError> {
    8601            0 :         let units = match priority {
    8602            0 :             ReconcilerPriority::Normal => self.reconciler_concurrency.clone().try_acquire_owned(),
    8603              :             ReconcilerPriority::High => {
    8604            0 :                 match self
    8605            0 :                     .priority_reconciler_concurrency
    8606            0 :                     .clone()
    8607            0 :                     .try_acquire_owned()
    8608              :                 {
    8609            0 :                     Ok(u) => Ok(u),
    8610              :                     Err(TryAcquireError::NoPermits) => {
    8611              :                         // If the high priority semaphore is exhausted, then high priority tasks may steal units from
    8612              :                         // the normal priority semaphore.
    8613            0 :                         self.reconciler_concurrency.clone().try_acquire_owned()
    8614              :                     }
    8615            0 :                     Err(e) => Err(e),
    8616              :                 }
    8617              :             }
    8618              :         };
    8619              : 
    8620            0 :         units.map(ReconcileUnits::new)
    8621            0 :     }
    8622              : 
    8623              :     /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`],
    8624            0 :     fn maybe_configured_reconcile_shard(
    8625            0 :         &self,
    8626            0 :         shard: &mut TenantShard,
    8627            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    8628            0 :         reconciler_config: ReconcilerConfig,
    8629            0 :     ) -> Option<ReconcilerWaiter> {
    8630            0 :         let reconcile_needed = shard.get_reconcile_needed(nodes);
    8631              : 
    8632            0 :         let reconcile_reason = match reconcile_needed {
    8633            0 :             ReconcileNeeded::No => return None,
    8634            0 :             ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
    8635            0 :             ReconcileNeeded::Yes(reason) => {
    8636              :                 // Fall through to try and acquire units for spawning reconciler
    8637            0 :                 reason
    8638              :             }
    8639              :         };
    8640              : 
    8641            0 :         let units = match self.get_reconciler_units(reconciler_config.priority) {
    8642            0 :             Ok(u) => u,
    8643              :             Err(_) => {
    8644            0 :                 tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
    8645            0 :                     "Concurrency limited: enqueued for reconcile later");
    8646            0 :                 if !shard.delayed_reconcile {
    8647            0 :                     match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
    8648            0 :                         Err(TrySendError::Closed(_)) => {
    8649            0 :                             // Weird mid-shutdown case?
    8650            0 :                         }
    8651              :                         Err(TrySendError::Full(_)) => {
    8652              :                             // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
    8653            0 :                             tracing::warn!(
    8654            0 :                                 "Many shards are waiting to reconcile: delayed_reconcile queue is full"
    8655              :                             );
    8656              :                         }
    8657            0 :                         Ok(()) => {
    8658            0 :                             shard.delayed_reconcile = true;
    8659            0 :                         }
    8660              :                     }
    8661            0 :                 }
    8662              : 
    8663              :                 // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
    8664              :                 // number to advance.  When this function is eventually called again and succeeds in getting units,
    8665              :                 // it will spawn a reconciler that makes this waiter complete.
    8666            0 :                 return Some(shard.future_reconcile_waiter());
    8667              :             }
    8668              :         };
    8669              : 
    8670            0 :         let Ok(gate_guard) = self.reconcilers_gate.enter() else {
    8671              :             // Gate closed: we're shutting down, drop out.
    8672            0 :             return None;
    8673              :         };
    8674              : 
    8675            0 :         shard.spawn_reconciler(
    8676            0 :             reconcile_reason,
    8677            0 :             &self.result_tx,
    8678            0 :             nodes,
    8679            0 :             &self.compute_hook,
    8680            0 :             reconciler_config,
    8681            0 :             &self.config,
    8682            0 :             &self.persistence,
    8683            0 :             units,
    8684            0 :             gate_guard,
    8685            0 :             &self.reconcilers_cancel,
    8686            0 :             self.http_client.clone(),
    8687              :         )
    8688            0 :     }
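
// A standalone sketch of the delayed-reconcile backpressure pattern above,
// assuming a bounded tokio mpsc channel and a per-shard dedupe flag; the
// function name and the `u64` id are illustrative, not the controller's types:

use tokio::sync::mpsc::{self, error::TrySendError};

fn enqueue_delayed(tx: &mpsc::Sender<u64>, id: u64, delayed: &mut bool) {
    if *delayed {
        // Already queued: avoid pushing duplicate entries into the channel.
        return;
    }
    match tx.try_send(id) {
        Ok(()) => *delayed = true,
        // Queue full: safe to drop, the periodic background pass retries everything.
        Err(TrySendError::Full(_)) => {}
        // Receiver gone: only plausible during shutdown, nothing to do.
        Err(TrySendError::Closed(_)) => {}
    }
}
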
    8689              : 
    8690              :     /// Check all tenants for pending reconciliation work, and reconcile those in need.
    8691              :     /// Additionally, reschedule tenants that require it.
    8692              :     ///
    8693              :     /// Returns a [`ReconcileAllResult`] recording how many reconciliation tasks were started,
    8694              :     /// how many look stuck, and whether any were delayed because `reconciler_concurrency` units
    8695              :     /// were unavailable.  Zero spawned and zero delayed means everything is fully reconciled already.
    8696            0 :     fn reconcile_all(&self) -> ReconcileAllResult {
    8697            0 :         let mut locked = self.inner.write().unwrap();
    8698            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    8699            0 :         let pageservers = nodes.clone();
    8700              : 
    8701              :         // This function is an efficient place to update lazy statistics, since we are walking
    8702              :         // all tenants.
    8703            0 :         let mut pending_reconciles = 0;
    8704            0 :         let mut stuck_reconciles = 0;
    8705            0 :         let mut az_violations = 0;
    8706              : 
    8707              :         // If we find any tenants to drop from memory, stash them to offload after
    8708              :         // we're done traversing the map of tenants.
    8709            0 :         let mut drop_detached_tenants = Vec::new();
    8710              : 
    8711            0 :         let mut spawned_reconciles = 0;
    8712            0 :         let mut has_delayed_reconciles = false;
    8713              : 
    8714            0 :         for shard in tenants.values_mut() {
    8715              :             // Accumulate scheduling statistics
    8716            0 :             if let (Some(attached), Some(preferred)) =
    8717            0 :                 (shard.intent.get_attached(), shard.preferred_az())
    8718              :             {
    8719            0 :                 let node_az = nodes
    8720            0 :                     .get(attached)
    8721            0 :                     .expect("Nodes exist if referenced")
    8722            0 :                     .get_availability_zone_id();
    8723            0 :                 if node_az != preferred {
    8724            0 :                     az_violations += 1;
    8725            0 :                 }
    8726            0 :             }
    8727              : 
    8728              :             // Skip checking if this shard is already enqueued for reconciliation
    8729            0 :             if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
    8730              :                 // If there is something delayed, flag it in the result so that
    8731              :                 // callers like reconcile_all_now do not incorrectly get the impression
    8732              :                 // that the system is in a quiescent state.
    8733            0 :                 has_delayed_reconciles = true;
    8734            0 :                 pending_reconciles += 1;
    8735            0 :                 continue;
    8736            0 :             }
    8737              : 
    8738              :             // Eventual consistency: if an earlier reconcile job failed, and the shard is still
    8739              :             // dirty, spawn another one
    8740            0 :             if self
    8741            0 :                 .maybe_reconcile_shard(shard, &pageservers, ReconcilerPriority::Normal)
    8742            0 :                 .is_some()
    8743              :             {
    8744            0 :                 spawned_reconciles += 1;
    8745              : 
    8746            0 :                 if shard.consecutive_reconciles_count >= MAX_CONSECUTIVE_RECONCILES {
    8747              :                     // Count shards that are stuck, but we still want to reconcile them.
    8748              :                     // We don't want to consider them when deciding to run optimizations.
    8749            0 :                     tracing::warn!(
    8750              :                         tenant_id=%shard.tenant_shard_id.tenant_id,
    8751            0 :                         shard_id=%shard.tenant_shard_id.shard_slug(),
    8752            0 :                         "Shard reconciliation is stuck: {} consecutive launches",
    8753              :                         shard.consecutive_reconciles_count
    8754              :                     );
    8755            0 :                     stuck_reconciles += 1;
    8756            0 :                 }
    8757              :             } else {
    8758            0 :                 if shard.delayed_reconcile {
    8759            0 :                     // Shard wanted to reconcile but for some reason couldn't.
    8760            0 :                     pending_reconciles += 1;
    8761            0 :                 }
    8762              : 
    8763              :                 // Reset the counter when we don't need to launch a reconcile.
    8764            0 :                 shard.consecutive_reconciles_count = 0;
    8765              :             }
    8766              :             // If this tenant is detached, try dropping it from memory. This is usually done
    8767              :             // proactively in [`Self::process_results`], but we do it here to handle the edge
    8768              :             // case where a reconcile completes while someone else is holding an op lock for the tenant.
    8769            0 :             if shard.tenant_shard_id.shard_number == ShardNumber(0)
    8770            0 :                 && shard.policy == PlacementPolicy::Detached
    8771              :             {
    8772            0 :                 if let Some(guard) = self.tenant_op_locks.try_exclusive(
    8773            0 :                     shard.tenant_shard_id.tenant_id,
    8774            0 :                     TenantOperations::DropDetached,
    8775            0 :                 ) {
    8776            0 :                     drop_detached_tenants.push((shard.tenant_shard_id.tenant_id, guard));
    8777            0 :                 }
    8778            0 :             }
    8779              :         }
    8780              : 
    8781              :         // Some metrics are calculated from SchedulerNode state, update these periodically
    8782            0 :         scheduler.update_metrics();
    8783              : 
    8784              :         // Process any deferred tenant drops
    8785            0 :         for (tenant_id, guard) in drop_detached_tenants {
    8786            0 :             self.maybe_drop_tenant(tenant_id, &mut locked, &guard);
    8787            0 :         }
    8788              : 
    8789            0 :         metrics::METRICS_REGISTRY
    8790            0 :             .metrics_group
    8791            0 :             .storage_controller_schedule_az_violation
    8792            0 :             .set(az_violations as i64);
    8793              : 
    8794            0 :         metrics::METRICS_REGISTRY
    8795            0 :             .metrics_group
    8796            0 :             .storage_controller_pending_reconciles
    8797            0 :             .set(pending_reconciles as i64);
    8798              : 
    8799            0 :         metrics::METRICS_REGISTRY
    8800            0 :             .metrics_group
    8801            0 :             .storage_controller_stuck_reconciles
    8802            0 :             .set(stuck_reconciles as i64);
    8803              : 
    8804            0 :         ReconcileAllResult::new(spawned_reconciles, stuck_reconciles, has_delayed_reconciles)
    8805            0 :     }
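
// `ReconcileAllResult` is defined elsewhere in this crate. A plausible shape,
// consistent with how it is constructed above and consumed by
// [`Self::reconcile_all_now`] below; an assumption-labelled sketch, not the
// actual definition:

struct ReconcileAllResult {
    spawned_reconciles: usize,
    stuck_reconciles: usize,
    has_delayed_reconciles: bool,
}

impl ReconcileAllResult {
    fn new(spawned_reconciles: usize, stuck_reconciles: usize, has_delayed_reconciles: bool) -> Self {
        Self { spawned_reconciles, stuck_reconciles, has_delayed_reconciles }
    }

    /// Optimizations should only run when the system is otherwise quiet: nothing
    /// delayed, and any spawned reconciles are only the "stuck" ones, which the
    /// counting above deliberately excludes from this decision.
    fn can_run_optimizations(&self) -> bool {
        !self.has_delayed_reconciles && self.spawned_reconciles == self.stuck_reconciles
    }
}
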
    8806              : 
    8807              :     /// `optimize` in this context means identifying shards which have valid scheduled locations, but
    8808              :     /// could be scheduled somewhere better:
    8809              :     /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
    8810              :     ///    * e.g. after a node fails then recovers, to move some work back to it
    8811              :     /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
    8812              :     ///    * e.g. after a shard split, the initial attached locations will all be on the node where
    8813              :     ///      we did the split, but are probably better placed elsewhere.
    8814              :     /// - Creating new secondary locations if it improves the spreading of a sharded tenant
    8815              :     ///    * e.g. after a shard split, some locations will be on the same node (where the split
    8816              :     ///      happened), and will probably be better placed elsewhere.
    8817              :     ///
    8818              :     /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
    8819              :     /// the time of scheduling, this function looks for cases where a better-scoring location is available
    8820              :     /// according to those same soft constraints.
    8821            0 :     async fn optimize_all(&self) -> usize {
    8822              :         // Limit on how many shards' optimizations each call to this function will execute.  Combined
    8823              :         // with the frequency of background calls, this acts as an implicit rate limit that runs a small
    8824              :         // trickle of optimizations in the background, rather than executing a large number in parallel
    8825              :         // when a change occurs.
    8826              :         const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 16;
    8827              : 
    8828              :         // Synchronous prepare: scan shards for possible scheduling optimizations
    8829            0 :         let candidate_work = self.optimize_all_plan();
    8830            0 :         let candidate_work_len = candidate_work.len();
    8831              : 
    8832              :         // Asynchronous validate: I/O to pageservers to make sure shards are in a good state for the optimization to be applied
    8833            0 :         let validated_work = self.optimize_all_validate(candidate_work).await;
    8834              : 
    8835            0 :         let was_work_filtered = validated_work.len() != candidate_work_len;
    8836              : 
    8837              :         // Synchronous apply: update the shards' intent states according to validated optimisations
    8838            0 :         let mut reconciles_spawned = 0;
    8839            0 :         let mut optimizations_applied = 0;
    8840            0 :         let mut locked = self.inner.write().unwrap();
    8841            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    8842            0 :         for (tenant_shard_id, optimization) in validated_work {
    8843            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    8844              :                 // Shard was dropped between planning and execution.
    8845            0 :                 continue;
    8846              :             };
    8847            0 :             tracing::info!(tenant_shard_id=%tenant_shard_id, "Applying optimization: {optimization:?}");
    8848            0 :             if shard.apply_optimization(scheduler, optimization) {
    8849            0 :                 optimizations_applied += 1;
    8850            0 :                 if self
    8851            0 :                     .maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal)
    8852            0 :                     .is_some()
    8853            0 :                 {
    8854            0 :                     reconciles_spawned += 1;
    8855            0 :                 }
    8856            0 :             }
    8857              : 
    8858            0 :             if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
    8859            0 :                 break;
    8860            0 :             }
    8861              :         }
    8862              : 
    8863            0 :         if was_work_filtered {
    8864            0 :             // If we filtered any work out during validation, ensure we return a nonzero value to indicate
    8865            0 :             // to callers that the system is not in a truly quiet state, it's going to do some work as soon
    8866            0 :             // as these validations start passing.
    8867            0 :             reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
    8868            0 :         }
    8869              : 
    8870            0 :         reconciles_spawned
    8871            0 :     }
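
// Back-of-envelope on the implicit rate limit described above: if the background
// loop were to call optimize_all() every 20 seconds (an interval assumed here
// purely for illustration), MAX_OPTIMIZATIONS_EXEC_PER_PASS = 16 caps
// optimization-driven migrations at 16 per pass, i.e. 48 per minute cluster-wide.
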
    8872              : 
    8873            0 :     fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
    8874              :         // How many candidate optimizations we will generate, before evaluating them for readiness: setting
    8875              :         // this higher than the execution limit gives us a chance to execute some work even if the first
    8876              :         // few optimizations we find are not ready.
    8877              :         const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 64;
    8878              : 
    8879            0 :         let mut work = Vec::new();
    8880            0 :         let mut locked = self.inner.write().unwrap();
    8881            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    8882              : 
    8883              :         // We are going to plan a bunch of optimisations before applying any of them, so the
    8884              :         // utilisation stats on nodes will be effectively stale for all but the first optimisation we
    8885              :         // generate.  To avoid this causing unstable migrations/flapping, it's important that the
    8886              :         // code in TenantShard for finding optimisations uses [`NodeAttachmentSchedulingScore::disregard_utilization`]
    8887              :         // to ignore the utilisation component of the score.
    8888              : 
    8889            0 :         for (_tenant_id, schedule_context, shards) in
    8890            0 :             TenantShardExclusiveIterator::new(tenants, ScheduleMode::Speculative)
    8891              :         {
    8892            0 :             if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
    8893            0 :                 break;
    8894            0 :             }
    8895            0 :             for shard in shards {
    8896            0 :                 if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
    8897            0 :                     break;
    8898            0 :                 }
    8899            0 :                 match shard.get_scheduling_policy() {
    8900            0 :                     ShardSchedulingPolicy::Active => {
    8901            0 :                         // Ok to do optimization
    8902            0 :                     }
    8903            0 :                     ShardSchedulingPolicy::Essential if shard.get_preferred_node().is_some() => {
    8904            0 :                         // Ok to do optimization: we are executing a graceful migration that
    8905            0 :                         // has set preferred_node
    8906            0 :                     }
    8907              :                     ShardSchedulingPolicy::Essential
    8908              :                     | ShardSchedulingPolicy::Pause
    8909              :                     | ShardSchedulingPolicy::Stop => {
    8910              :                         // Policy prevents optimizing this shard.
    8911            0 :                         continue;
    8912              :                     }
    8913              :                 }
    8914              : 
    8915            0 :                 if !matches!(shard.splitting, SplitState::Idle)
    8916            0 :                     || matches!(shard.policy, PlacementPolicy::Detached)
    8917            0 :                     || shard.reconciler.is_some()
    8918              :                 {
    8919              :                     // Do not start any optimizations while another change to the tenant is ongoing: this
    8920              :                     // is not necessary for correctness, but simplifies operations and implicitly throttles
    8921              :                     // optimization changes to happen in a "trickle" over time.
    8922            0 :                     continue;
    8923            0 :                 }
    8924              : 
    8925              :                 // Fast path: we may quickly identify shards that don't have any possible optimisations
    8926            0 :                 if !shard.maybe_optimizable(scheduler, &schedule_context) {
    8927            0 :                     if cfg!(feature = "testing") {
    8928              :                         // Check that maybe_optimizable doesn't disagree with the actual optimization functions.
    8929              :                         // Only do this in testing builds because it is not a correctness-critical check, so we shouldn't
    8930              :                         // panic in prod if we hit this, or spend cycles on it in prod.
    8931            0 :                         assert!(
    8932            0 :                             shard
    8933            0 :                                 .optimize_attachment(scheduler, &schedule_context)
    8934            0 :                                 .is_none()
    8935              :                         );
    8936            0 :                         assert!(
    8937            0 :                             shard
    8938            0 :                                 .optimize_secondary(scheduler, &schedule_context)
    8939            0 :                                 .is_none()
    8940              :                         );
    8941            0 :                     }
    8942            0 :                     continue;
    8943            0 :                 }
    8944              : 
    8945            0 :                 if let Some(optimization) =
    8946              :                     // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
    8947              :                     // its primary location based on soft constraints, cut it over.
    8948            0 :                     shard.optimize_attachment(scheduler, &schedule_context)
    8949              :                 {
    8950            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for attachment: {optimization:?}");
    8951            0 :                     work.push((shard.tenant_shard_id, optimization));
    8952            0 :                     break;
    8953            0 :                 } else if let Some(optimization) =
    8954              :                     // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
    8955              :                     // better placed on another node, based on ScheduleContext, then adjust it.  This
    8956              :                     // covers cases like after a shard split, where we might have too many shards
    8957              :                     // in the same tenant with secondary locations on the node where they originally split.
    8958            0 :                     shard.optimize_secondary(scheduler, &schedule_context)
    8959              :                 {
    8960            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for secondary: {optimization:?}");
    8961            0 :                     work.push((shard.tenant_shard_id, optimization));
    8962            0 :                     break;
    8963            0 :                 }
    8964              :             }
    8965              :         }
    8966              : 
    8967            0 :         work
    8968            0 :     }
    8969              : 
    8970            0 :     async fn optimize_all_validate(
    8971            0 :         &self,
    8972            0 :         candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
    8973            0 :     ) -> Vec<(TenantShardId, ScheduleOptimization)> {
    8974              :         // Take a clone of the node map to use outside the lock in async validation phase
    8975            0 :         let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
    8976              : 
    8977            0 :         let mut want_secondary_status = Vec::new();
    8978              : 
    8979              :         // Validate our plans: this is an async phase where we may do I/O to pageservers to
    8980              :         // check that the state of locations is acceptable to run the optimization, such as
    8981              :         // checking that a secondary location is sufficiently warmed-up to cleanly cut over
    8982              :         // in a live migration.
    8983            0 :         let mut validated_work = Vec::new();
    8984            0 :         for (tenant_shard_id, optimization) in candidate_work {
    8985            0 :             match optimization.action {
    8986              :                 ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    8987              :                     old_attached_node_id: _,
    8988            0 :                     new_attached_node_id,
    8989              :                 }) => {
    8990            0 :                     match validation_nodes.get(&new_attached_node_id) {
    8991            0 :                         None => {
    8992            0 :                             // Node was dropped between planning and validation
    8993            0 :                         }
    8994            0 :                         Some(node) => {
    8995            0 :                             if !node.is_available() {
    8996            0 :                                 tracing::info!(
    8997            0 :                                     "Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable"
    8998              :                                 );
    8999            0 :                             } else {
    9000            0 :                                 // Accumulate optimizations that require fetching secondary status, so that we can execute these
    9001            0 :                                 // remote API requests concurrently.
    9002            0 :                                 want_secondary_status.push((
    9003            0 :                                     tenant_shard_id,
    9004            0 :                                     node.clone(),
    9005            0 :                                     optimization,
    9006            0 :                                 ));
    9007            0 :                             }
    9008              :                         }
    9009              :                     }
    9010              :                 }
    9011              :                 ScheduleOptimizationAction::ReplaceSecondary(_)
    9012              :                 | ScheduleOptimizationAction::CreateSecondary(_)
    9013              :                 | ScheduleOptimizationAction::RemoveSecondary(_) => {
    9014              :                     // No extra checks needed to manage secondaries: this does not interrupt client access
    9015            0 :                     validated_work.push((tenant_shard_id, optimization))
    9016              :                 }
    9017              :             };
    9018              :         }
    9019              : 
    9020              :         // Call into the pageserver API to find out whether the destination secondary location is warm enough for a
    9021              :         // reasonably smooth migration: this avoids spawning a Reconciler that would wait minutes/hours for the
    9022              :         // destination to warm up, holding a precious reconcile semaphore unit the whole time.
    9023            0 :         let results = self
    9024            0 :             .tenant_for_shards_api(
    9025            0 :                 want_secondary_status
    9026            0 :                     .iter()
    9027            0 :                     .map(|i| (i.0, i.1.clone()))
    9028            0 :                     .collect(),
    9029            0 :                 |tenant_shard_id, client| async move {
    9030            0 :                     client.tenant_secondary_status(tenant_shard_id).await
    9031            0 :                 },
    9032              :                 1,
    9033              :                 1,
    9034              :                 SHORT_RECONCILE_TIMEOUT,
    9035            0 :                 &self.cancel,
    9036              :             )
    9037            0 :             .await;
    9038              : 
    9039            0 :         for ((tenant_shard_id, node, optimization), (_, secondary_status)) in
    9040            0 :             want_secondary_status.into_iter().zip(results.into_iter())
    9041              :         {
    9042            0 :             match secondary_status {
    9043            0 :                 Err(e) => {
    9044            0 :                     tracing::info!(
    9045            0 :                         "Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}"
    9046              :                     );
    9047              :                 }
    9048            0 :                 Ok(progress) => {
    9049              :                     // We require secondary locations to have less than 10GiB of downloads pending before we will use
    9050              :                     // them in an optimization
    9051              :                     const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
    9052              : 
    9053            0 :                     if progress.heatmap_mtime.is_none()
    9054            0 :                         || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
    9055            0 :                             && progress.bytes_downloaded != progress.bytes_total
    9056            0 :                         || progress.bytes_total - progress.bytes_downloaded
    9057            0 :                             > DOWNLOAD_FRESHNESS_THRESHOLD
    9058              :                     {
    9059            0 :                         tracing::info!(
    9060            0 :                             "Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}"
    9061              :                         );
    9062              : 
    9063            0 :                         if progress.heatmap_mtime.is_none() {
    9064              :                             // No heatmap might mean the attached location has never uploaded one, or that
    9065              :                             // the secondary download hasn't happened yet.  This is relatively unusual in the field,
    9066              :                             // but fairly common in tests.
    9067            0 :                             self.kick_secondary_download(tenant_shard_id).await;
    9068            0 :                         }
    9069              :                     } else {
    9070              :                         // Location looks ready: proceed
    9071            0 :                         tracing::info!(
    9072            0 :                             "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
    9073              :                         );
    9074            0 :                         validated_work.push((tenant_shard_id, optimization))
    9075              :                     }
    9076              :                 }
    9077              :             }
    9078              :         }
    9079              : 
    9080            0 :         validated_work
    9081            0 :     }
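
// The freshness check above, extracted as a standalone predicate with worked
// examples. The parameters are simplified stand-ins for SecondaryProgress fields:

const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024; // 10 GiB

/// Returns true if a secondary looks warm enough to be a migration target.
fn secondary_is_fresh(has_heatmap: bool, bytes_downloaded: u64, bytes_total: u64) -> bool {
    has_heatmap
        // Small tenants (below the threshold) must be fully downloaded...
        && !(bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD && bytes_downloaded != bytes_total)
        // ...larger tenants may lag, but by no more than the threshold.
        && bytes_total - bytes_downloaded <= DOWNLOAD_FRESHNESS_THRESHOLD
}

#[test]
fn freshness_examples() {
    const GIB: u64 = 1024 * 1024 * 1024;
    // 5 GiB tenant, fully downloaded: fresh.
    assert!(secondary_is_fresh(true, 5 * GIB, 5 * GIB));
    // 5 GiB tenant with anything missing: not fresh (small tenants must be complete).
    assert!(!secondary_is_fresh(true, 5 * GIB - 1, 5 * GIB));
    // 100 GiB tenant with 8 GiB still pending: within the 10 GiB allowance, fresh.
    assert!(secondary_is_fresh(true, 92 * GIB, 100 * GIB));
    // No heatmap uploaded yet: never fresh.
    assert!(!secondary_is_fresh(false, 100 * GIB, 100 * GIB));
}
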
    9082              : 
    9083              :     /// Some aspects of scheduling optimisation wait for secondary locations to be warm.  This
    9084              :     /// happens on multi-minute timescales in the field, which is fine because optimisation is meant
    9085              :     /// to be a lazy background thing. However, when testing, it is not practical to wait around, so
    9086              :     /// we have this helper to move things along faster.
    9087            0 :     async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) {
    9088            0 :         if !self.config.kick_secondary_downloads {
    9089              :             // No-op if kick_secondary_downloads functionality is not configured
    9090            0 :             return;
    9091            0 :         }
    9092              : 
    9093            0 :         let (attached_node, secondaries) = {
    9094            0 :             let locked = self.inner.read().unwrap();
    9095            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    9096            0 :                 tracing::warn!(
    9097            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: not found"
    9098              :                 );
    9099            0 :                 return;
    9100              :             };
    9101              : 
    9102            0 :             let Some(attached) = shard.intent.get_attached() else {
    9103            0 :                 tracing::warn!(
    9104            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: no attached"
    9105              :                 );
    9106            0 :                 return;
    9107              :             };
    9108              : 
    9109            0 :             let secondaries = shard
    9110            0 :                 .intent
    9111            0 :                 .get_secondary()
    9112            0 :                 .iter()
    9113            0 :                 .map(|n| locked.nodes.get(n).unwrap().clone())
    9114            0 :                 .collect::<Vec<_>>();
    9115              : 
    9116            0 :             (locked.nodes.get(attached).unwrap().clone(), secondaries)
    9117              :         };
    9118              : 
    9119              :         // Make remote API calls to upload + download heatmaps: we ignore errors because this is just
    9120              :         // a 'kick' to let scheduling optimisation run more promptly.
    9121            0 :         match attached_node
    9122            0 :             .with_client_retries(
    9123            0 :                 |client| async move { client.tenant_heatmap_upload(tenant_shard_id).await },
    9124            0 :                 &self.http_client,
    9125            0 :                 &self.config.pageserver_jwt_token,
    9126              :                 3,
    9127              :                 10,
    9128              :                 SHORT_RECONCILE_TIMEOUT,
    9129            0 :                 &self.cancel,
    9130              :             )
    9131            0 :             .await
    9132              :         {
    9133            0 :             Some(Err(e)) => {
    9134            0 :                 tracing::info!(
    9135            0 :                     "Failed to upload heatmap from {attached_node} for {tenant_shard_id}: {e}"
    9136              :                 );
    9137              :             }
    9138              :             None => {
    9139            0 :                 tracing::info!(
    9140            0 :                     "Cancelled while uploading heatmap from {attached_node} for {tenant_shard_id}"
    9141              :                 );
    9142              :             }
    9143              :             Some(Ok(_)) => {
    9144            0 :                 tracing::info!(
    9145            0 :                     "Successfully uploaded heatmap from {attached_node} for {tenant_shard_id}"
    9146              :                 );
    9147              :             }
    9148              :         }
    9149              : 
    9150            0 :         for secondary_node in secondaries {
    9151            0 :             match secondary_node
    9152            0 :                 .with_client_retries(
    9153            0 :                     |client| async move {
    9154            0 :                         client
    9155            0 :                             .tenant_secondary_download(
    9156            0 :                                 tenant_shard_id,
    9157            0 :                                 Some(Duration::from_secs(1)),
    9158            0 :                             )
    9159            0 :                             .await
    9160            0 :                     },
    9161            0 :                     &self.http_client,
    9162            0 :                     &self.config.pageserver_jwt_token,
    9163              :                     3,
    9164              :                     10,
    9165              :                     SHORT_RECONCILE_TIMEOUT,
    9166            0 :                     &self.cancel,
    9167              :                 )
    9168            0 :                 .await
    9169              :             {
    9170            0 :                 Some(Err(e)) => {
    9171            0 :                     tracing::info!(
    9172            0 :                         "Failed to download heatmap from {secondary_node} for {tenant_shard_id}: {e}"
    9173              :                     );
    9174              :                 }
    9175              :                 None => {
    9176            0 :                     tracing::info!(
    9177            0 :                         "Cancelled while downloading heatmap from {secondary_node} for {tenant_shard_id}"
    9178              :                     );
    9179              :                 }
    9180            0 :                 Some(Ok(progress)) => {
    9181            0 :                     tracing::info!(
    9182            0 :                         "Successfully downloaded heatmap from {secondary_node} for {tenant_shard_id}: {progress:?}"
    9183              :                     );
    9184              :                 }
    9185              :             }
    9186              :         }
    9187            0 :     }
    9188              : 
    9189              :     /// Asynchronously split a tenant that's eligible for automatic splits. At most one tenant will
    9190              :     /// be split per call.
    9191              :     ///
    9192              :     /// Two sets of criteria are used: initial splits and size-based splits (in that order).
    9193              :     /// Initial splits are used to eagerly split unsharded tenants that may be performing initial
    9194              :     /// ingestion, since sharded tenants have significantly better ingestion throughput. Size-based
    9195              :     /// splits are used to bound the maximum shard size and balance out load.
    9196              :     ///
    9197              :     /// Splits are based on max_logical_size, i.e. the logical size of the largest timeline in a
    9198              :     /// tenant. We use this instead of the total logical size because branches will duplicate
    9199              :     /// logical size without actually using more storage. We could also use visible physical size,
    9200              :     /// but this might overestimate tenants that frequently churn branches.
    9201              :     ///
    9202              :     /// Initial splits (initial_split_threshold):
    9203              :     /// * Applies to tenants with 1 shard.
    9204              :     /// * The largest timeline (max_logical_size) exceeds initial_split_threshold.
    9205              :     /// * Splits into initial_split_shards.
    9206              :     ///
    9207              :     /// Size-based splits (split_threshold):
    9208              :     /// * Applies to all tenants.
    9209              :     /// * The largest timeline (max_logical_size) divided by shard count exceeds split_threshold.
    9210              :     /// * Splits such that max_logical_size / shard_count <= split_threshold, in powers of 2.
    9211              :     ///
    9212              :     /// Tenant shards are ordered by descending max_logical_size, first initial split candidates
    9213              :     /// then size-based split candidates. The first matching candidate is split.
    9214              :     ///
    9215              :     /// The shard count is clamped to max_split_shards. If a candidate is eligible for both initial
    9216              :     /// and size-based splits, the largest shard count will be used.
    9217              :     ///
    9218              :     /// An unsharded tenant will get DEFAULT_STRIPE_SIZE, regardless of what its ShardIdentity says.
    9219              :     /// A sharded tenant will retain its stripe size, as splits do not allow changing it.
    9220              :     ///
    9221              :     /// TODO: consider spawning multiple splits in parallel: this is only called once every 20
    9222              :     /// seconds, so a large backlog can take a long time, and if a tenant fails to split it will
    9223              :     /// block all other splits.
    9224            0 :     async fn autosplit_tenants(self: &Arc<Self>) {
    9225              :         // If max_split_shards is set to 0 or 1, we can't split.
    9226            0 :         let max_split_shards = self.config.max_split_shards;
    9227            0 :         if max_split_shards <= 1 {
    9228            0 :             return;
    9229            0 :         }
    9230              : 
    9231              :         // If initial_split_shards is set to 0 or 1, disable initial splits.
    9232            0 :         let mut initial_split_threshold = self.config.initial_split_threshold.unwrap_or(0);
    9233            0 :         let initial_split_shards = self.config.initial_split_shards;
    9234            0 :         if initial_split_shards <= 1 {
    9235            0 :             initial_split_threshold = 0;
    9236            0 :         }
    9237              : 
    9238              :         // If no split_threshold nor initial_split_threshold, disable autosplits.
    9239            0 :         let split_threshold = self.config.split_threshold.unwrap_or(0);
    9240            0 :         if split_threshold == 0 && initial_split_threshold == 0 {
    9241            0 :             return;
    9242            0 :         }
    9243              : 
    9244              :         // Fetch split candidates in prioritized order.
    9245              :         //
    9246              :         // If initial splits are enabled, fetch eligible tenants first. We prioritize initial splits
    9247              :         // over size-based splits, since these are often performing initial ingestion and rely on
    9248              :         // splits to improve ingest throughput.
    9249            0 :         let mut candidates = Vec::new();
    9250              : 
    9251            0 :         if initial_split_threshold > 0 {
    9252              :             // Initial splits: fetch tenants with 1 shard where the logical size of the largest
    9253              :             // timeline exceeds the initial split threshold.
    9254            0 :             let initial_candidates = self
    9255            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    9256            0 :                     order_by: TenantSorting::MaxLogicalSize,
    9257            0 :                     limit: 10,
    9258            0 :                     where_shards_lt: Some(ShardCount(2)),
    9259            0 :                     where_gt: Some(initial_split_threshold),
    9260            0 :                 })
    9261            0 :                 .await;
    9262            0 :             candidates.extend(initial_candidates);
    9263            0 :         }
    9264              : 
    9265            0 :         if split_threshold > 0 {
    9266              :             // Size-based splits: fetch tenants where the logical size of the largest timeline
    9267              :             // divided by shard count exceeds the split threshold.
    9268              :             //
    9269              :             // max_logical_size is only tracked on shard 0, and contains the total logical size
    9270              :             // across all shards. We have to order and filter by MaxLogicalSizePerShard, i.e.
    9271              :             // max_logical_size / shard_count, such that we only receive tenants that are actually
    9272              :             // eligible for splits. But we still use max_logical_size for later split calculations.
    9273            0 :             let size_candidates = self
    9274            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    9275            0 :                     order_by: TenantSorting::MaxLogicalSizePerShard,
    9276            0 :                     limit: 10,
    9277            0 :                     where_shards_lt: Some(ShardCount(max_split_shards)),
    9278            0 :                     where_gt: Some(split_threshold),
    9279            0 :                 })
    9280            0 :                 .await;
    9281              :             #[cfg(feature = "testing")]
    9282            0 :             assert!(
    9283            0 :                 size_candidates.iter().all(|c| c.id.is_shard_zero()),
    9284            0 :                 "MaxLogicalSizePerShard returned non-zero shard: {size_candidates:?}",
    9285              :             );
    9286            0 :             candidates.extend(size_candidates);
    9287            0 :         }
    9288              : 
    9289              :         // Filter out tenants in scheduling modes that prohibit splits,
    9290              :         // and tenants with an ongoing import.
    9291              :         //
    9292              :         // Note that the import check here is opportunistic. An import might start
    9293              :         // after the check, before we actually update [`TenantShard::splitting`].
    9294              :         // [`Self::tenant_shard_split`] checks the database whilst holding the exclusive
    9295              :         // tenant lock. Imports might take a long time, so the check here allows us
    9296              :         // to split something else instead of trying the same shard over and over.
    9297              :         {
    9298            0 :             let state = self.inner.read().unwrap();
    9299            0 :             candidates.retain(|i| {
    9300            0 :                 let shard = state.tenants.get(&i.id);
    9301            0 :                 match shard {
    9302            0 :                     Some(t) => {
    9303            0 :                         t.get_scheduling_policy() == ShardSchedulingPolicy::Active
    9304            0 :                             && t.importing == TimelineImportState::Idle
    9305              :                     }
    9306            0 :                     None => false,
    9307              :                 }
    9308            0 :             });
    9309              :         }
    9310              : 
    9311              :         // Pick the first candidate to split. This will generally always be the first one in
    9312              :         // candidates, but we defensively skip candidates that end up not actually splitting.
    9313            0 :         let Some((candidate, new_shard_count)) = candidates
    9314            0 :             .into_iter()
    9315            0 :             .filter_map(|candidate| {
    9316            0 :                 let new_shard_count = Self::compute_split_shards(ShardSplitInputs {
    9317            0 :                     shard_count: candidate.id.shard_count,
    9318            0 :                     max_logical_size: candidate.max_logical_size,
    9319            0 :                     split_threshold,
    9320            0 :                     max_split_shards,
    9321            0 :                     initial_split_threshold,
    9322            0 :                     initial_split_shards,
    9323            0 :                 });
    9324            0 :                 new_shard_count.map(|shards| (candidate, shards.count()))
    9325            0 :             })
    9326            0 :             .next()
    9327              :         else {
    9328            0 :             debug!("no split-eligible tenants found");
    9329            0 :             return;
    9330              :         };
    9331              : 
    9332              :         // Retain the stripe size of sharded tenants, as splits don't allow changing it. Otherwise,
    9333              :         // use DEFAULT_STRIPE_SIZE for unsharded tenants -- their stripe size doesn't really matter,
    9334              :         // and if we change the default stripe size we want to use the new default rather than an
    9335              :         // old, persisted stripe size.
    9336            0 :         let new_stripe_size = match candidate.id.shard_count.count() {
    9337            0 :             0 => panic!("invalid shard count 0"),
    9338            0 :             1 => Some(DEFAULT_STRIPE_SIZE),
    9339            0 :             2.. => None,
    9340              :         };
    9341              : 
    9342              :         // We spawn a task to run this, so it's exactly like some external API client requesting
    9343              :         // it.  We don't want to block the background reconcile loop on this.
    9344            0 :         let old_shard_count = candidate.id.shard_count.count();
    9345            0 :         info!(
    9346            0 :             "auto-splitting tenant {old_shard_count} → {new_shard_count} shards, \
    9347            0 :                 current size {candidate:?} (split_threshold={split_threshold} \
    9348            0 :                 initial_split_threshold={initial_split_threshold})"
    9349              :         );
    9350              : 
    9351            0 :         let this = self.clone();
    9352            0 :         tokio::spawn(
    9353            0 :             async move {
    9354            0 :                 match this
    9355            0 :                     .tenant_shard_split(
    9356            0 :                         candidate.id.tenant_id,
    9357            0 :                         TenantShardSplitRequest {
    9358            0 :                             new_shard_count,
    9359            0 :                             new_stripe_size,
    9360            0 :                         },
    9361            0 :                     )
    9362            0 :                     .await
    9363              :                 {
    9364              :                     Ok(_) => {
    9365            0 :                         info!("successful auto-split {old_shard_count} → {new_shard_count} shards")
    9366              :                     }
    9367            0 :                     Err(err) => error!("auto-split failed: {err}"),
    9368              :                 }
    9369            0 :             }
    9370            0 :             .instrument(info_span!("auto_split", tenant_id=%candidate.id.tenant_id)),
    9371              :         );
    9372            0 :     }
    9373              : 
    9374              :     /// Returns the number of shards to split a tenant into, or None if the tenant shouldn't split,
    9375              :     /// based on the total logical size of the largest timeline summed across all shards. Uses the
    9376              :     /// larger of size-based and initial splits, clamped to max_split_shards.
    9377              :     ///
    9378              :     /// NB: the thresholds are exclusive, since TopTenantShardsRequest uses where_gt.
    9379           25 :     fn compute_split_shards(inputs: ShardSplitInputs) -> Option<ShardCount> {
    9380              :         let ShardSplitInputs {
    9381           25 :             shard_count,
    9382           25 :             max_logical_size,
    9383           25 :             split_threshold,
    9384           25 :             max_split_shards,
    9385           25 :             initial_split_threshold,
    9386           25 :             initial_split_shards,
    9387           25 :         } = inputs;
    9388              : 
    9389           25 :         let mut new_shard_count: u8 = shard_count.count();
    9390              : 
    9391              :         // Size-based splits. Ensures max_logical_size / new_shard_count <= split_threshold, using
    9392              :         // power-of-two shard counts.
    9393              :         //
    9394              :         // If the current shard count is not a power of two, and does not exceed split_threshold,
    9395              :         // then we leave it alone rather than forcing a power-of-two split.
    9396           25 :         if split_threshold > 0
    9397           18 :             && max_logical_size.div_ceil(split_threshold) > shard_count.count() as u64
    9398           12 :         {
    9399           12 :             new_shard_count = max_logical_size
    9400           12 :                 .div_ceil(split_threshold)
    9401           12 :                 .checked_next_power_of_two()
    9402           12 :                 .unwrap_or(u8::MAX as u64)
    9403           12 :                 .try_into()
    9404           12 :                 .unwrap_or(u8::MAX);
    9405           13 :         }
    9406              : 
    9407              :         // Initial splits. Use the larger of size-based and initial split shard counts. This only
    9408              :         // applies to unsharded tenants, i.e. changes to initial_split_threshold or
    9409              :         // initial_split_shards are not retroactive for sharded tenants.
    9410           25 :         if initial_split_threshold > 0
    9411           14 :             && shard_count.count() <= 1
    9412           11 :             && max_logical_size > initial_split_threshold
    9413            8 :         {
    9414            8 :             new_shard_count = new_shard_count.max(initial_split_shards);
    9415           17 :         }
    9416              : 
    9417              :         // Clamp to max shards.
    9418           25 :         new_shard_count = new_shard_count.min(max_split_shards);
    9419              : 
    9420              :         // Don't split if we're not increasing the shard count.
    9421           25 :         if new_shard_count <= shard_count.count() {
    9422           10 :             return None;
    9423           15 :         }
    9424              : 
    9425           15 :         Some(ShardCount(new_shard_count))
    9426           25 :     }
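
// Worked example of the policy above, with illustrative numbers (all values
// are assumptions chosen for readability):
//
//   shard_count = 1, max_logical_size = 300 GiB, split_threshold = 64 GiB,
//   initial_split_threshold = 10 GiB, initial_split_shards = 4, max_split_shards = 8
//
//   size-based:  ceil(300 / 64) = 5 > 1, next power of two   -> 8 shards
//   initial:     unsharded and 300 GiB > 10 GiB, max(8, 4)   -> 8 shards
//   clamp/check: min(8, 8) = 8 > current count of 1          -> Some(ShardCount(8))
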
    9427              : 
    9428              :     /// Fetches the top tenant shards from every available node, in descending order of
    9429              :     /// max logical size. Offline nodes are skipped, and any errors from available nodes
    9430              :     /// will be logged and ignored.
    9431            0 :     async fn get_top_tenant_shards(
    9432            0 :         &self,
    9433            0 :         request: &TopTenantShardsRequest,
    9434            0 :     ) -> Vec<TopTenantShardItem> {
    9435            0 :         let nodes = self
    9436            0 :             .inner
    9437            0 :             .read()
    9438            0 :             .unwrap()
    9439            0 :             .nodes
    9440            0 :             .values()
    9441            0 :             .filter(|node| node.is_available())
    9442            0 :             .cloned()
    9443            0 :             .collect_vec();
    9444              : 
    9445            0 :         let mut futures = FuturesUnordered::new();
    9446            0 :         for node in nodes {
    9447            0 :             futures.push(async move {
    9448            0 :                 node.with_client_retries(
    9449            0 :                     |client| async move { client.top_tenant_shards(request.clone()).await },
    9450            0 :                     &self.http_client,
    9451            0 :                     &self.config.pageserver_jwt_token,
    9452              :                     3,
    9453              :                     3,
    9454            0 :                     Duration::from_secs(5),
    9455            0 :                     &self.cancel,
    9456              :                 )
    9457            0 :                 .await
    9458            0 :             });
    9459              :         }
    9460              : 
    9461            0 :         let mut top = Vec::new();
    9462            0 :         while let Some(output) = futures.next().await {
    9463            0 :             match output {
    9464            0 :                 Some(Ok(response)) => top.extend(response.shards),
    9465            0 :                 Some(Err(mgmt_api::Error::Cancelled)) => {}
    9466            0 :                 Some(Err(err)) => warn!("failed to fetch top tenants: {err}"),
    9467            0 :                 None => {} // node is shutting down
    9468              :             }
    9469              :         }
    9470              : 
    9471            0 :         top.sort_by_key(|i| i.max_logical_size);
    9472            0 :         top.reverse();
    9473            0 :         top
    9474            0 :     }
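// The sort-then-reverse above could equivalently use std::cmp::Reverse as the
// sort key and skip the second pass; a sketch over plain u64 sizes:
fn sort_descending(sizes: &mut [u64]) {
    sizes.sort_by_key(|s| std::cmp::Reverse(*s));
}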
    9475              : 
    9476              :     /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
    9477              :     /// also wait for any generated Reconcilers to complete.  Calling this until it returns zero should
    9478              :     /// put the system into a quiescent state where future background reconciliations won't do anything.
    9479            0 :     pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
    9480            0 :         let reconcile_all_result = self.reconcile_all();
    9481            0 :         let mut spawned_reconciles = reconcile_all_result.spawned_reconciles;
    9482            0 :         if reconcile_all_result.can_run_optimizations() {
    9483              :             // Only optimize when we are otherwise idle
    9484            0 :             let optimization_reconciles = self.optimize_all().await;
    9485            0 :             spawned_reconciles += optimization_reconciles;
    9486            0 :         }
    9487              : 
    9488            0 :         let waiters = {
    9489            0 :             let mut waiters = Vec::new();
    9490            0 :             let locked = self.inner.read().unwrap();
    9491            0 :             for (_tenant_shard_id, shard) in locked.tenants.iter() {
    9492            0 :                 if let Some(waiter) = shard.get_waiter() {
    9493            0 :                     waiters.push(waiter);
    9494            0 :                 }
    9495              :             }
    9496            0 :             waiters
    9497              :         };
    9498              : 
    9499            0 :         let waiter_count = waiters.len();
    9500            0 :         match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    9501            0 :             Ok(()) => {}
    9502            0 :             Err(e) => {
    9503            0 :                 if let ReconcileWaitError::Failed(_, reconcile_error) = &e {
    9504            0 :                     match **reconcile_error {
    9505              :                         ReconcileError::Cancel
    9506            0 :                         | ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    9507            0 :                             // Ignore reconciler cancel errors: this reconciler might have shut down
    9508            0 :                             // because some other change superseded it.  We will return a nonzero number,
    9509            0 :                             // so the caller knows they might have to call again to quiesce the system.
    9510            0 :                         }
    9511              :                         _ => {
    9512            0 :                             return Err(e);
    9513              :                         }
    9514              :                     }
    9515              :                 } else {
    9516            0 :                     return Err(e);
    9517              :                 }
    9518              :             }
    9519              :         };
    9520              : 
    9521            0 :         tracing::info!(
    9522            0 :             "{} reconciles in reconcile_all, {} waiters",
    9523              :             spawned_reconciles,
    9524              :             waiter_count
    9525              :         );
    9526              : 
    9527            0 :         Ok(std::cmp::max(waiter_count, spawned_reconciles))
    9528            0 :     }
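// A test-harness sketch of using the method above to drive the system to a
// quiescent state, assuming crate-internal access to the service; the
// iteration cap is an illustrative safety bound, not part of the real API.
async fn quiesce(service: &Service) -> Result<(), ReconcileWaitError> {
    for _ in 0..32 {
        if service.reconcile_all_now().await? == 0 {
            return Ok(()); // nothing spawned and nothing awaited: quiescent
        }
    }
    Ok(()) // still busy after the cap; callers may retry or fail the test
}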
    9529              : 
    9530            0 :     async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
    9531              :         // Cancel all on-going reconciles and wait for them to exit the gate.
    9532            0 :         tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
    9533            0 :         self.reconcilers_cancel.cancel();
    9534            0 :         self.reconcilers_gate.close().await;
    9535              : 
    9536              :         // Signal the background loop in [`Service::process_results`] to exit once
    9537              :         // it has processed the results from all the reconciles we cancelled earlier.
    9538            0 :         tracing::info!("{reason}: processing results from previously in-flight reconciles");
    9539            0 :         self.result_tx.send(ReconcileResultRequest::Stop).ok();
    9540            0 :         self.result_tx.closed().await;
    9541            0 :     }
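// The stop-and-drain idiom above, in miniature: send a sentinel, let the
// consumer drop its receiver, and await `closed()` so every result queued
// before the stop is known to have been seen. `Msg` is illustrative.
use tokio::sync::mpsc;

enum Msg {
    Work(u32),
    Stop,
}

async fn stop_and_drain_demo() {
    let (tx, mut rx) = mpsc::unbounded_channel();
    let consumer = tokio::spawn(async move {
        while let Some(msg) = rx.recv().await {
            match msg {
                Msg::Work(n) => println!("processed {n}"),
                Msg::Stop => break, // rx is dropped when the task returns
            }
        }
    });
    tx.send(Msg::Work(1)).ok();
    tx.send(Msg::Stop).ok();
    tx.closed().await; // resolves once the consumer has dropped rx
    consumer.await.unwrap();
}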
    9542              : 
    9543            0 :     pub async fn shutdown(&self) {
    9544            0 :         self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
    9545            0 :             .await;
    9546              : 
    9547              :         // Background tasks hold gate guards: this notifies them of the cancellation and
    9548              :         // waits for them all to complete.
    9549            0 :         tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
    9550            0 :         self.cancel.cancel();
    9551            0 :         self.gate.close().await;
    9552            0 :     }
    9553              : 
    9554              :     /// Spot check the download lag for a secondary location of a shard.
    9555              :     /// Should be used as a heuristic, since it's not always precise: the
    9556              :     /// secondary might not have downloaded the new heat map yet and, hence,
    9557              :     /// is not aware of the lag.
    9558              :     ///
    9559              :     /// Returns:
    9560              :     /// * Ok(None) if the lag could not be determined from the status,
    9561              :     /// * Ok(Some(_)) if the lag could be determined,
    9562              :     /// * Err on failures to query the pageserver.
    9563            0 :     async fn secondary_lag(
    9564            0 :         &self,
    9565            0 :         secondary: &NodeId,
    9566            0 :         tenant_shard_id: TenantShardId,
    9567            0 :     ) -> Result<Option<u64>, mgmt_api::Error> {
    9568            0 :         let nodes = self.inner.read().unwrap().nodes.clone();
    9569            0 :         let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
    9570            0 :             StatusCode::NOT_FOUND,
    9571            0 :             format!("Node with id {secondary} not found"),
    9572            0 :         ))?;
    9573              : 
    9574            0 :         match node
    9575            0 :             .with_client_retries(
    9576            0 :                 |client| async move { client.tenant_secondary_status(tenant_shard_id).await },
    9577            0 :                 &self.http_client,
    9578            0 :                 &self.config.pageserver_jwt_token,
    9579              :                 1,
    9580              :                 3,
    9581            0 :                 Duration::from_millis(250),
    9582            0 :                 &self.cancel,
    9583              :             )
    9584            0 :             .await
    9585              :         {
    9586            0 :             Some(Ok(status)) => match status.heatmap_mtime {
    9587            0 :                 Some(_) => Ok(Some(status.bytes_total - status.bytes_downloaded)),
    9588            0 :                 None => Ok(None),
    9589              :             },
    9590            0 :             Some(Err(e)) => Err(e),
    9591            0 :             None => Err(mgmt_api::Error::Cancelled),
    9592              :         }
    9593            0 :     }
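// Sketch of gating a migration on this heuristic, mirroring the threshold
// check in drain_node below; `max_lag_bytes` is the caller's policy knob.
async fn secondary_is_warm(
    service: &Service,
    node: NodeId,
    tenant_shard_id: TenantShardId,
    max_lag_bytes: u64,
) -> bool {
    matches!(
        service.secondary_lag(&node, tenant_shard_id).await,
        Ok(Some(lag)) if lag <= max_lag_bytes
    )
}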
    9594              : 
    9595              :     /// Drain a node by migrating away the shards for which it is the attached (primary) location.
    9596              :     /// This is a long-running operation and it should run as a separate Tokio task.
    9597            0 :     pub(crate) async fn drain_node(
    9598            0 :         self: &Arc<Self>,
    9599            0 :         node_id: NodeId,
    9600            0 :         cancel: CancellationToken,
    9601            0 :     ) -> Result<(), OperationError> {
    9602              :         const MAX_SECONDARY_LAG_BYTES_DEFAULT: u64 = 256 * 1024 * 1024;
    9603            0 :         let max_secondary_lag_bytes = self
    9604            0 :             .config
    9605            0 :             .max_secondary_lag_bytes
    9606            0 :             .unwrap_or(MAX_SECONDARY_LAG_BYTES_DEFAULT);
    9607              : 
    9608              :         // By default, live migrations are generous about the wait time for getting
    9609              :         // the secondary location up to speed. When draining, give up earlier in order
    9610              :         // to not stall the operation when a cold secondary is encountered.
    9611              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(30);
    9612              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    9613            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    9614            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    9615            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    9616            0 :             .build();
    9617              : 
    9618            0 :         let mut waiters = Vec::new();
    9619              : 
    9620            0 :         let mut tid_iter = create_shared_shard_iterator(self.clone());
    9621              : 
    9622            0 :         while !tid_iter.finished() {
    9623            0 :             if cancel.is_cancelled() {
    9624            0 :                 match self
    9625            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9626            0 :                     .await
    9627              :                 {
    9628            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9629            0 :                     Err(err) => {
    9630            0 :                         return Err(OperationError::FinalizeError(
    9631            0 :                             format!(
    9632            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9633            0 :                             )
    9634            0 :                             .into(),
    9635            0 :                         ));
    9636              :                     }
    9637              :                 }
    9638            0 :             }
    9639              : 
    9640            0 :             operation_utils::validate_node_state(
    9641            0 :                 &node_id,
    9642            0 :                 self.inner.read().unwrap().nodes.clone(),
    9643            0 :                 NodeSchedulingPolicy::Draining,
    9644            0 :             )?;
    9645              : 
    9646            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    9647            0 :                 let tid = match tid_iter.next() {
    9648            0 :                     Some(tid) => tid,
    9649              :                     None => {
    9650            0 :                         break;
    9651              :                     }
    9652              :                 };
    9653              : 
    9654            0 :                 let tid_drain = TenantShardDrain {
    9655            0 :                     drained_node: node_id,
    9656            0 :                     tenant_shard_id: tid,
    9657            0 :                 };
    9658              : 
    9659            0 :                 let drain_action = {
    9660            0 :                     let locked = self.inner.read().unwrap();
    9661            0 :                     tid_drain.tenant_shard_eligible_for_drain(&locked.tenants, &locked.scheduler)
    9662              :                 };
    9663              : 
    9664            0 :                 let dest_node_id = match drain_action {
    9665            0 :                     TenantShardDrainAction::RescheduleToSecondary(dest_node_id) => dest_node_id,
    9666            0 :                     TenantShardDrainAction::Reconcile(intent_node_id) => intent_node_id,
    9667              :                     TenantShardDrainAction::Skip => {
    9668            0 :                         continue;
    9669              :                     }
    9670              :                 };
    9671              : 
    9672            0 :                 match self.secondary_lag(&dest_node_id, tid).await {
    9673            0 :                     Ok(Some(lag)) if lag <= max_secondary_lag_bytes => {
    9674            0 :                         // The secondary is reasonably up to date.
    9675            0 :                         // Migrate to it
    9676            0 :                     }
    9677            0 :                     Ok(Some(lag)) => {
    9678            0 :                         tracing::info!(
    9679            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9680            0 :                             "Secondary on node {dest_node_id} is lagging by {lag}. Skipping reconcile."
    9681              :                         );
    9682            0 :                         continue;
    9683              :                     }
    9684              :                     Ok(None) => {
    9685            0 :                         tracing::info!(
    9686            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9687            0 :                             "Could not determine lag for secondary on node {dest_node_id}. Skipping reconcile."
    9688              :                         );
    9689            0 :                         continue;
    9690              :                     }
    9691            0 :                     Err(err) => {
    9692            0 :                         tracing::warn!(
    9693            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9694            0 :                             "Failed to get secondary lag from node {dest_node_id}. Skipping reconcile: {err}"
    9695              :                         );
    9696            0 :                         continue;
    9697              :                     }
    9698              :                 }
    9699              : 
    9700              :                 {
    9701            0 :                     let mut locked = self.inner.write().unwrap();
    9702            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    9703              : 
    9704            0 :                     let tenant_shard = match drain_action {
    9705            0 :                         TenantShardDrainAction::RescheduleToSecondary(dest_node_id) => tid_drain
    9706            0 :                             .reschedule_to_secondary(dest_node_id, tenants, scheduler, nodes)?,
    9707            0 :                         TenantShardDrainAction::Reconcile(_) => tenants.get_mut(&tid),
    9708              :                         // Note: Unreachable, handled above.
    9709            0 :                         TenantShardDrainAction::Skip => None,
    9710              :                     };
    9711              : 
    9712            0 :                     if let Some(tenant_shard) = tenant_shard {
    9713            0 :                         let waiter = self.maybe_configured_reconcile_shard(
    9714            0 :                             tenant_shard,
    9715            0 :                             nodes,
    9716            0 :                             reconciler_config,
    9717            0 :                         );
    9718            0 :                         if let Some(some) = waiter {
    9719            0 :                             waiters.push(some);
    9720            0 :                         }
    9721            0 :                     }
    9722              :                 }
    9723              :             }
    9724              : 
    9725            0 :             waiters = self
    9726            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    9727            0 :                 .await;
    9728              : 
    9729            0 :             failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel);
    9730              :         }
    9731              : 
    9732            0 :         while !waiters.is_empty() {
    9733            0 :             if cancel.is_cancelled() {
    9734            0 :                 match self
    9735            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9736            0 :                     .await
    9737              :                 {
    9738            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9739            0 :                     Err(err) => {
    9740            0 :                         return Err(OperationError::FinalizeError(
    9741            0 :                             format!(
    9742            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9743            0 :                             )
    9744            0 :                             .into(),
    9745            0 :                         ));
    9746              :                     }
    9747              :                 }
    9748            0 :             }
    9749              : 
    9750            0 :             tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
    9751              : 
    9752            0 :             waiters = self
    9753            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    9754            0 :                 .await;
    9755              :         }
    9756              : 
    9757              :         // At this point we have done the best we could to drain shards from this node.
    9758              :         // Set the node scheduling policy to [`NodeSchedulingPolicy::PauseForRestart`]
    9759              :         // to complete the drain.
    9760            0 :         if let Err(err) = self
    9761            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
    9762            0 :             .await
    9763              :         {
    9764              :             // This is not fatal. Anything that is polling the node scheduling policy to detect
    9765              :             // the end of the drain operations will hang, but all such places should enforce an
    9766              :             // overall timeout. The scheduling policy will be updated upon node re-attach and/or
    9767              :             // by the counterpart fill operation.
    9768            0 :             return Err(OperationError::FinalizeError(
    9769            0 :                 format!(
    9770            0 :                     "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
    9771            0 :                 )
    9772            0 :                 .into(),
    9773            0 :             ));
    9774            0 :         }
    9775              : 
    9776            0 :         Ok(())
    9777            0 :     }
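// Sketch of launching the drain as its own Tokio task with a linked
// cancellation token, as the doc comment above recommends; the names here are
// illustrative, not the controller's actual spawn path.
fn spawn_drain(
    service: Arc<Service>,
    node_id: NodeId,
) -> (
    tokio::task::JoinHandle<Result<(), OperationError>>,
    CancellationToken,
) {
    let cancel = CancellationToken::new();
    let task_cancel = cancel.clone();
    let handle = tokio::spawn(async move { service.drain_node(node_id, task_cancel).await });
    (handle, cancel)
}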
    9778              : 
    9779              :     /// Create a node fill plan (pick secondaries to promote), based on:
    9780              :     /// 1. Shards which have a secondary on this node, and this node is in their home AZ, and are currently attached to a node
    9781              :     ///    outside their home AZ, should be migrated back here.
    9782              :     /// 2. If after step 1 we have not migrated enough shards for this node to have its fair share of
    9783              :     ///    attached shards, we will promote more shards from the nodes with the most attached shards, unless
    9784              :     ///    those shards have a home AZ that doesn't match the node we're filling.
    9785            0 :     fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
    9786            0 :         let mut locked = self.inner.write().unwrap();
    9787            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    9788              : 
    9789            0 :         let node_az = nodes
    9790            0 :             .get(&node_id)
    9791            0 :             .expect("Node must exist")
    9792            0 :             .get_availability_zone_id()
    9793            0 :             .clone();
    9794              : 
    9795              :         // The tenant shard IDs that we plan to promote from secondary to attached on this node
    9796            0 :         let mut plan = Vec::new();
    9797              : 
    9798              :         // Collect shards which do not have a preferred AZ & are eligible for moving in step 2
    9799            0 :         let mut free_tids_by_node: HashMap<NodeId, Vec<TenantShardId>> = HashMap::new();
    9800              : 
    9801              :         // Don't respect AZ preferences if there is only one AZ.  This comes up in tests, but it could
    9802              :         // conceivably come up in real life if deploying a single-AZ region intentionally.
    9803            0 :         let respect_azs = nodes
    9804            0 :             .values()
    9805            0 :             .map(|n| n.get_availability_zone_id())
    9806            0 :             .unique()
    9807            0 :             .count()
    9808              :             > 1;
    9809              : 
    9810              :         // Step 1: collect all shards that we are required to migrate back to this node because their AZ preference
    9811              :         // requires it.
    9812            0 :         for (tsid, tenant_shard) in tenants {
    9813            0 :             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    9814              :                 // Shard doesn't have a secondary on this node, ignore it.
    9815            0 :                 continue;
    9816            0 :             }
    9817              : 
    9818              :             // AZ check: when filling nodes after a restart, our intent is to move _back_ the
    9819              :             // shards which belong on this node, not to promote shards whose scheduling preference
    9820              :             // would be on their currently attached node.  So we will avoid promoting shards whose
    9821              :             // home AZ doesn't match the AZ of the node we're filling.
    9822            0 :             match tenant_shard.preferred_az() {
    9823            0 :                 _ if !respect_azs => {
    9824            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9825            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    9826            0 :                     }
    9827              :                 }
    9828              :                 None => {
    9829              :                     // Shard doesn't have an AZ preference: it is eligible to be moved, but we
    9830              :                     // will only do so if our target shard count requires it.
    9831            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9832            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    9833            0 :                     }
    9834              :                 }
    9835            0 :                 Some(az) if az == &node_az => {
    9836              :                     // This shard's home AZ matches the AZ of the node we're filling: it should
    9837              :                     // be moved back to this node as part of filling, unless its currently
    9838              :                     // attached location is also in its home AZ.
    9839            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9840            0 :                         if nodes
    9841            0 :                             .get(primary)
    9842            0 :                             .expect("referenced node must exist")
    9843            0 :                             .get_availability_zone_id()
    9844            0 :                             != tenant_shard
    9845            0 :                                 .preferred_az()
    9846            0 :                                 .expect("tenant must have an AZ preference")
    9847              :                         {
    9848            0 :                             plan.push(*tsid)
    9849            0 :                         }
    9850              :                     } else {
    9851            0 :                         plan.push(*tsid)
    9852              :                     }
    9853              :                 }
    9854            0 :                 Some(_) => {
    9855            0 :                     // This shard's home AZ is somewhere other than the node we're filling,
    9856            0 :                     // it may not be moved back to this node as part of filling.  Ignore it
    9857            0 :                 }
    9858              :             }
    9859              :         }
    9860              : 
    9861              :         // Step 2: also promote any AZ-agnostic shards as required to achieve the target number of attachments
    9862            0 :         let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
    9863              : 
    9864            0 :         let expected_attached = locked.scheduler.expected_attached_shard_count();
    9865            0 :         let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
    9866              : 
    9867            0 :         let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
    9868              : 
    9869            0 :         for (node_id, attached) in nodes_by_load {
    9870            0 :             let available = locked.nodes.get(&node_id).is_some_and(|n| n.is_available());
    9871            0 :             if !available {
    9872            0 :                 continue;
    9873            0 :             }
    9874              : 
    9875            0 :             if plan.len() >= fill_requirement
    9876            0 :                 || free_tids_by_node.is_empty()
    9877            0 :                 || attached <= expected_attached
    9878              :             {
    9879            0 :                 break;
    9880            0 :             }
    9881              : 
    9882            0 :             let can_take = attached - expected_attached;
    9883            0 :             let needed = fill_requirement - plan.len();
    9884            0 :             let mut take = std::cmp::min(can_take, needed);
    9885              : 
    9886            0 :             let mut remove_node = false;
    9887            0 :             while take > 0 {
    9888            0 :                 match free_tids_by_node.get_mut(&node_id) {
    9889            0 :                     Some(tids) => match tids.pop() {
    9890            0 :                         Some(tid) => {
    9891            0 :                             let max_promote_for_tenant = std::cmp::max(
    9892            0 :                                 tid.shard_count.count() as usize / locked.nodes.len(),
    9893              :                                 1,
    9894              :                             );
    9895            0 :                             let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
    9896            0 :                             if *promoted < max_promote_for_tenant {
    9897            0 :                                 plan.push(tid);
    9898            0 :                                 *promoted += 1;
    9899            0 :                                 take -= 1;
    9900            0 :                             }
    9901              :                         }
    9902              :                         None => {
    9903            0 :                             remove_node = true;
    9904            0 :                             break;
    9905              :                         }
    9906              :                     },
    9907              :                     None => {
    9908            0 :                         break;
    9909              :                     }
    9910              :                 }
    9911              :             }
    9912              : 
    9913            0 :             if remove_node {
    9914            0 :                 free_tids_by_node.remove(&node_id);
    9915            0 :             }
    9916              :         }
    9917              : 
    9918            0 :         plan
    9919            0 :     }
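// Worked example of the per-tenant promotion cap used in step 2: with 8 shards
// and 3 nodes, max(8 / 3, 1) = 2, so at most two of that tenant's shards are
// promoted onto the node being filled. A standalone sketch of the rule:
fn max_promote_for_tenant(shard_count: usize, node_count: usize) -> usize {
    std::cmp::max(shard_count / node_count, 1)
}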
    9920              : 
    9921              :     /// Fill a node by promoting its secondaries until the cluster is balanced
    9922              :     /// with regard to attached shard counts. Note that this operation only
    9923              :     /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
    9924              :     /// This is a long-running operation and it should run as a separate Tokio task.
    9925            0 :     pub(crate) async fn fill_node(
    9926            0 :         &self,
    9927            0 :         node_id: NodeId,
    9928            0 :         cancel: CancellationToken,
    9929            0 :     ) -> Result<(), OperationError> {
    9930              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(30);
    9931              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    9932            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    9933            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    9934            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    9935            0 :             .build();
    9936              : 
    9937            0 :         let mut tids_to_promote = self.fill_node_plan(node_id);
    9938            0 :         let mut waiters = Vec::new();
    9939              : 
    9940              :         // Execute the plan we've composed above. Before applying each move from the plan,
    9941              :         // we validate to ensure that it has not gone stale in the meantime.
    9942            0 :         while !tids_to_promote.is_empty() {
    9943            0 :             if cancel.is_cancelled() {
    9944            0 :                 match self
    9945            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9946            0 :                     .await
    9947              :                 {
    9948            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9949            0 :                     Err(err) => {
    9950            0 :                         return Err(OperationError::FinalizeError(
    9951            0 :                             format!(
    9952            0 :                                 "Failed to finalise fill cancel of {node_id} by setting scheduling policy to Active: {err}"
    9953            0 :                             )
    9954            0 :                             .into(),
    9955            0 :                         ));
    9956              :                     }
    9957              :                 }
    9958            0 :             }
    9959              : 
    9960              :             {
    9961            0 :                 let mut locked = self.inner.write().unwrap();
    9962            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    9963              : 
    9964            0 :                 let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
    9965            0 :                     format!("node {node_id} was removed").into(),
    9966            0 :                 ))?;
    9967              : 
    9968            0 :                 let current_policy = node.get_scheduling();
    9969            0 :                 if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
    9970              :                     // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
    9971              :                     // about it
    9972            0 :                     return Err(OperationError::NodeStateChanged(
    9973            0 :                         format!("node {node_id} changed state to {current_policy:?}").into(),
    9974            0 :                     ));
    9975            0 :                 }
    9976              : 
    9977            0 :                 while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    9978            0 :                     if let Some(tid) = tids_to_promote.pop() {
    9979            0 :                         if let Some(tenant_shard) = tenants.get_mut(&tid) {
    9980              :                             // If the node being filled is not a secondary anymore,
    9981              :                             // skip the promotion.
    9982            0 :                             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    9983            0 :                                 continue;
    9984            0 :                             }
    9985              : 
    9986            0 :                             let previously_attached_to = *tenant_shard.intent.get_attached();
    9987            0 :                             match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
    9988            0 :                                 Err(e) => {
    9989            0 :                                     tracing::warn!(
    9990            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9991            0 :                                         "Scheduling error when filling pageserver {} : {e}", node_id
    9992              :                                     );
    9993              :                                 }
    9994              :                                 Ok(()) => {
    9995            0 :                                     tracing::info!(
    9996            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9997            0 :                                         "Rescheduled shard while filling node {}: {:?} -> {}",
    9998              :                                         node_id,
    9999              :                                         previously_attached_to,
   10000              :                                         node_id
   10001              :                                     );
   10002              : 
   10003            0 :                                     if let Some(waiter) = self.maybe_configured_reconcile_shard(
   10004            0 :                                         tenant_shard,
   10005            0 :                                         nodes,
   10006            0 :                                         reconciler_config,
   10007            0 :                                     ) {
   10008            0 :                                         waiters.push(waiter);
   10009            0 :                                     }
   10010              :                                 }
   10011              :                             }
   10012            0 :                         }
   10013              :                     } else {
   10014            0 :                         break;
   10015              :                     }
   10016              :                 }
   10017              :             }
   10018              : 
   10019            0 :             waiters = self
   10020            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
   10021            0 :                 .await;
   10022              :         }
   10023              : 
   10024            0 :         while !waiters.is_empty() {
   10025            0 :             if cancel.is_cancelled() {
   10026            0 :                 match self
   10027            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
   10028            0 :                     .await
   10029              :                 {
   10030            0 :                     Ok(()) => return Err(OperationError::Cancelled),
   10031            0 :                     Err(err) => {
   10032            0 :                         return Err(OperationError::FinalizeError(
   10033            0 :                             format!(
   10034            0 :                                 "Failed to finalise fill cancel of {node_id} by setting scheduling policy to Active: {err}"
   10035            0 :                             )
   10036            0 :                             .into(),
   10037            0 :                         ));
   10038              :                     }
   10039              :                 }
   10040            0 :             }
   10041              : 
   10042            0 :             tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
   10043              : 
   10044            0 :             waiters = self
   10045            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
   10046            0 :                 .await;
   10047              :         }
   10048              : 
   10049            0 :         if let Err(err) = self
   10050            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
   10051            0 :             .await
   10052              :         {
   10053              :             // This isn't a huge issue since the filling process starts upon request. However, it
   10054              :             // will prevent the next drain from starting. The only case in which this can fail
   10055              :             // is database unavailability. Such a case will require manual intervention.
   10056            0 :             return Err(OperationError::FinalizeError(
   10057            0 :                 format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
   10058            0 :                     .into(),
   10059            0 :             ));
   10060            0 :         }
   10061              : 
   10062            0 :         Ok(())
   10063            0 :     }
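// Sketch of detecting fill completion by polling the node scheduling policy,
// which returns to Active once the fill is finalised; `get_node` and the poll
// period are assumptions made for illustration.
async fn wait_for_fill_to_finish(service: &Service, node_id: NodeId) -> anyhow::Result<()> {
    loop {
        let policy = service.get_node(node_id).await?.get_scheduling();
        if matches!(policy, NodeSchedulingPolicy::Active) {
            return Ok(()); // node is schedulable again
        }
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}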
   10064              : 
   10065              :     /// Updates scrubber metadata health check results.
   10066            0 :     pub(crate) async fn metadata_health_update(
   10067            0 :         &self,
   10068            0 :         update_req: MetadataHealthUpdateRequest,
   10069            0 :     ) -> Result<(), ApiError> {
   10070            0 :         let now = chrono::offset::Utc::now();
   10071            0 :         let (healthy_records, unhealthy_records) = {
   10072            0 :             let locked = self.inner.read().unwrap();
   10073            0 :             let healthy_records = update_req
   10074            0 :                 .healthy_tenant_shards
   10075            0 :                 .into_iter()
   10076              :                 // Retain only health records associated with tenant shards managed by storage controller.
   10077            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
   10078            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
   10079            0 :                 .collect();
   10080            0 :             let unhealthy_records = update_req
   10081            0 :                 .unhealthy_tenant_shards
   10082            0 :                 .into_iter()
   10083            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
   10084            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
   10085            0 :                 .collect();
   10086              : 
   10087            0 :             (healthy_records, unhealthy_records)
   10088              :         };
   10089              : 
   10090            0 :         self.persistence
   10091            0 :             .update_metadata_health_records(healthy_records, unhealthy_records, now)
   10092            0 :             .await?;
   10093            0 :         Ok(())
   10094            0 :     }
   10095              : 
   10096              :     /// Lists the tenant shards that have an unhealthy metadata status.
   10097            0 :     pub(crate) async fn metadata_health_list_unhealthy(
   10098            0 :         &self,
   10099            0 :     ) -> Result<Vec<TenantShardId>, ApiError> {
   10100            0 :         let result = self
   10101            0 :             .persistence
   10102            0 :             .list_unhealthy_metadata_health_records()
   10103            0 :             .await?
   10104            0 :             .iter()
   10105            0 :             .map(|p| p.get_tenant_shard_id().unwrap())
   10106            0 :             .collect();
   10107              : 
   10108            0 :         Ok(result)
   10109            0 :     }
   10110              : 
   10111              :     /// Lists the tenant shards that have not been scrubbed for some duration.
   10112            0 :     pub(crate) async fn metadata_health_list_outdated(
   10113            0 :         &self,
   10114            0 :         not_scrubbed_for: Duration,
   10115            0 :     ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
   10116            0 :         let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
   10117            0 :         let result = self
   10118            0 :             .persistence
   10119            0 :             .list_outdated_metadata_health_records(earlier)
   10120            0 :             .await?
   10121            0 :             .into_iter()
   10122            0 :             .map(|record| record.into())
   10123            0 :             .collect();
   10124            0 :         Ok(result)
   10125            0 :     }
   10126              : 
   10127            0 :     pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
   10128            0 :         self.inner.read().unwrap().get_leadership_status()
   10129            0 :     }
   10130              : 
   10131              :     /// Handler for step down requests
   10132              :     ///
   10133              :     /// Step down runs in a separate task since, once it's called, it should
   10134              :     /// be driven to completion. Subsequent requests will wait on the same
   10135              :     /// step down task.
   10136            0 :     pub(crate) async fn step_down(self: &Arc<Self>) -> GlobalObservedState {
   10137            0 :         let handle = self.step_down_barrier.get_or_init(|| {
   10138            0 :             let step_down_self = self.clone();
   10139            0 :             let (tx, rx) = tokio::sync::watch::channel::<Option<GlobalObservedState>>(None);
   10140            0 :             tokio::spawn(async move {
   10141            0 :                 let state = step_down_self.step_down_task().await;
   10142            0 :                 tx.send(Some(state))
   10143            0 :                     .expect("Task Arc<Service> keeps receiver alive");
   10144            0 :             });
   10145              : 
   10146            0 :             rx
   10147            0 :         });
   10148              : 
   10149            0 :         handle
   10150            0 :             .clone()
   10151            0 :             .wait_for(|observed_state| observed_state.is_some())
   10152            0 :             .await
   10153            0 :             .expect("Task Arc<Service> keeps sender alive")
   10154            0 :             .deref()
   10155            0 :             .clone()
   10156            0 :             .expect("Checked above")
   10157            0 :     }
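// The deduplication pattern above, distilled: a OnceLock stores a watch
// receiver, so the first caller spawns the task and every caller awaits the
// same result. A self-contained sketch over a generic Clone value:
use std::ops::Deref;
use std::sync::OnceLock;

async fn run_once<T, F>(
    barrier: &OnceLock<tokio::sync::watch::Receiver<Option<T>>>,
    work: F,
) -> T
where
    T: Clone + Send + Sync + 'static,
    F: std::future::Future<Output = T> + Send + 'static,
{
    let rx = barrier.get_or_init(|| {
        let (tx, rx) = tokio::sync::watch::channel(None);
        tokio::spawn(async move {
            // ok(): ignore the error if every receiver was dropped meanwhile
            tx.send(Some(work.await)).ok();
        });
        rx
    });
    rx.clone()
        .wait_for(|v| v.is_some())
        .await
        .expect("task keeps the sender alive")
        .deref()
        .clone()
        .expect("checked above")
}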
   10158              : 
   10159            0 :     async fn step_down_task(&self) -> GlobalObservedState {
   10160            0 :         tracing::info!("Received step down request from peer");
   10161            0 :         failpoint_support::sleep_millis_async!("sleep-on-step-down-handling");
   10162              : 
   10163            0 :         self.inner.write().unwrap().step_down();
   10164              : 
   10165            0 :         let stop_reconciliations =
   10166            0 :             self.stop_reconciliations(StopReconciliationsReason::SteppingDown);
   10167            0 :         let mut stop_reconciliations = std::pin::pin!(stop_reconciliations);
   10168              : 
   10169            0 :         let started_at = Instant::now();
   10170              : 
   10171              :         // Wait for reconciliations to stop and warn if that's taking a long time
   10172              :         loop {
   10173            0 :             tokio::select! {
   10174            0 :                 _ = &mut stop_reconciliations => {
   10175            0 :                     tracing::info!("Reconciliations stopped, proceeding with step down");
   10176            0 :                     break;
   10177              :                 }
   10178            0 :                 _ = tokio::time::sleep(Duration::from_secs(10)) => {
   10179            0 :                     tracing::warn!(
   10180            0 :                         elapsed_sec=%started_at.elapsed().as_secs(),
   10181            0 :                         "Stopping reconciliations during step down is taking too long"
   10182              :                     );
   10183              :                 }
   10184              :             }
   10185              :         }
   10186              : 
   10187            0 :         let mut global_observed = GlobalObservedState::default();
   10188            0 :         let locked = self.inner.read().unwrap();
   10189            0 :         for (tid, tenant_shard) in locked.tenants.iter() {
   10190            0 :             global_observed
   10191            0 :                 .0
   10192            0 :                 .insert(*tid, tenant_shard.observed.clone());
   10193            0 :         }
   10194              : 
   10195            0 :         global_observed
   10196            0 :     }
   10197              : 
   10198            0 :     pub(crate) async fn update_shards_preferred_azs(
   10199            0 :         &self,
   10200            0 :         req: ShardsPreferredAzsRequest,
   10201            0 :     ) -> Result<ShardsPreferredAzsResponse, ApiError> {
   10202            0 :         let preferred_azs = req.preferred_az_ids.into_iter().collect::<Vec<_>>();
   10203            0 :         let updated = self
   10204            0 :             .persistence
   10205            0 :             .set_tenant_shard_preferred_azs(preferred_azs)
   10206            0 :             .await
   10207            0 :             .map_err(|err| {
   10208            0 :                 ApiError::InternalServerError(anyhow::anyhow!(
   10209            0 :                     "Failed to persist preferred AZs: {err}"
   10210            0 :                 ))
   10211            0 :             })?;
   10212              : 
   10213            0 :         let mut updated_in_mem_and_db = Vec::default();
   10214              : 
   10215            0 :         let mut locked = self.inner.write().unwrap();
   10216            0 :         let state = locked.deref_mut();
   10217            0 :         for (tid, az_id) in updated {
   10218            0 :             let shard = state.tenants.get_mut(&tid);
   10219            0 :             if let Some(shard) = shard {
   10220            0 :                 shard.set_preferred_az(&mut state.scheduler, az_id);
   10221            0 :                 updated_in_mem_and_db.push(tid);
   10222            0 :             }
   10223              :         }
   10224              : 
   10225            0 :         Ok(ShardsPreferredAzsResponse {
   10226            0 :             updated: updated_in_mem_and_db,
   10227            0 :         })
   10228            0 :     }
   10229              : }
   10230              : 
   10231              : #[cfg(test)]
   10232              : mod tests {
   10233              :     use super::*;
   10234              : 
   10235              :     /// Tests Service::compute_split_shards. For readability, this specifies sizes in GBs rather
   10236              :     /// than bytes. Note that max_logical_size is the total logical size of the largest timeline
   10237              :     /// summed across all shards.
   10238              :     #[test]
   10239            1 :     fn compute_split_shards() {
   10240              :         // Size-based split: two shards have a 500 GB timeline, which needs to split into 8 shards
   10241              :         // that are <= 64 GB each.
   10242            1 :         assert_eq!(
   10243            1 :             Service::compute_split_shards(ShardSplitInputs {
   10244            1 :                 shard_count: ShardCount(2),
   10245            1 :                 max_logical_size: 500,
   10246            1 :                 split_threshold: 64,
   10247            1 :                 max_split_shards: 16,
   10248            1 :                 initial_split_threshold: 0,
   10249            1 :                 initial_split_shards: 0,
   10250            1 :             }),
   10251              :             Some(ShardCount(8))
   10252              :         );
   10253              : 
   10254              :         // Size-based split: noop at or below threshold, fires above.
   10255            1 :         assert_eq!(
   10256            1 :             Service::compute_split_shards(ShardSplitInputs {
   10257            1 :                 shard_count: ShardCount(2),
   10258            1 :                 max_logical_size: 127,
   10259            1 :                 split_threshold: 64,
   10260            1 :                 max_split_shards: 16,
   10261            1 :                 initial_split_threshold: 0,
   10262            1 :                 initial_split_shards: 0,
   10263            1 :             }),
   10264              :             None,
   10265              :         );
   10266            1 :         assert_eq!(
   10267            1 :             Service::compute_split_shards(ShardSplitInputs {
   10268            1 :                 shard_count: ShardCount(2),
   10269            1 :                 max_logical_size: 128,
   10270            1 :                 split_threshold: 64,
   10271            1 :                 max_split_shards: 16,
   10272            1 :                 initial_split_threshold: 0,
   10273            1 :                 initial_split_shards: 0,
   10274            1 :             }),
   10275              :             None,
   10276              :         );
   10277            1 :         assert_eq!(
   10278            1 :             Service::compute_split_shards(ShardSplitInputs {
   10279            1 :                 shard_count: ShardCount(2),
   10280            1 :                 max_logical_size: 129,
   10281            1 :                 split_threshold: 64,
   10282            1 :                 max_split_shards: 16,
   10283            1 :                 initial_split_threshold: 0,
   10284            1 :                 initial_split_shards: 0,
   10285            1 :             }),
   10286              :             Some(ShardCount(4)),
   10287              :         );
   10288              : 
   10289              :         // Size-based split: clamped to max_split_shards.
   10290            1 :         assert_eq!(
   10291            1 :             Service::compute_split_shards(ShardSplitInputs {
   10292            1 :                 shard_count: ShardCount(2),
   10293            1 :                 max_logical_size: 10000,
   10294            1 :                 split_threshold: 64,
   10295            1 :                 max_split_shards: 16,
   10296            1 :                 initial_split_threshold: 0,
   10297            1 :                 initial_split_shards: 0,
   10298            1 :             }),
   10299              :             Some(ShardCount(16))
   10300              :         );
   10301              : 
   10302              :         // Size-based split: tenant already at or beyond max_split_shards is not split.
   10303            1 :         assert_eq!(
   10304            1 :             Service::compute_split_shards(ShardSplitInputs {
   10305            1 :                 shard_count: ShardCount(16),
   10306            1 :                 max_logical_size: 10000,
   10307            1 :                 split_threshold: 64,
   10308            1 :                 max_split_shards: 16,
   10309            1 :                 initial_split_threshold: 0,
   10310            1 :                 initial_split_shards: 0,
   10311            1 :             }),
   10312              :             None
   10313              :         );
   10314              : 
   10315            1 :         assert_eq!(
   10316            1 :             Service::compute_split_shards(ShardSplitInputs {
   10317            1 :                 shard_count: ShardCount(32),
   10318            1 :                 max_logical_size: 10000,
   10319            1 :                 split_threshold: 64,
   10320            1 :                 max_split_shards: 16,
   10321            1 :                 initial_split_threshold: 0,
   10322            1 :                 initial_split_shards: 0,
   10323            1 :             }),
   10324              :             None
   10325              :         );
   10326              : 
    10327              :         // Size-based split: a non-power-of-2 shard count is normalized to a power of 2 when
    10328              :         // the split fires (i.e. a 3-shard tenant splits into 8, not 6).
   10329            1 :         assert_eq!(
   10330            1 :             Service::compute_split_shards(ShardSplitInputs {
   10331            1 :                 shard_count: ShardCount(3),
   10332            1 :                 max_logical_size: 320,
   10333            1 :                 split_threshold: 64,
   10334            1 :                 max_split_shards: 16,
   10335            1 :                 initial_split_threshold: 0,
   10336            1 :                 initial_split_shards: 0,
   10337            1 :             }),
   10338              :             Some(ShardCount(8))
   10339              :         );
   10340              : 
    10341              :         // Size-based split: a non-power-of-2 shard count is not normalized to a power of 2 while
    10342              :         // the existing shards stay at or below split_threshold, but splits into 4 once the size exceeds it.
   10343            1 :         assert_eq!(
   10344            1 :             Service::compute_split_shards(ShardSplitInputs {
   10345            1 :                 shard_count: ShardCount(3),
   10346            1 :                 max_logical_size: 191,
   10347            1 :                 split_threshold: 64,
   10348            1 :                 max_split_shards: 16,
   10349            1 :                 initial_split_threshold: 0,
   10350            1 :                 initial_split_shards: 0,
   10351            1 :             }),
   10352              :             None
   10353              :         );
   10354            1 :         assert_eq!(
   10355            1 :             Service::compute_split_shards(ShardSplitInputs {
   10356            1 :                 shard_count: ShardCount(3),
   10357            1 :                 max_logical_size: 192,
   10358            1 :                 split_threshold: 64,
   10359            1 :                 max_split_shards: 16,
   10360            1 :                 initial_split_threshold: 0,
   10361            1 :                 initial_split_shards: 0,
   10362            1 :             }),
   10363              :             None
   10364              :         );
   10365            1 :         assert_eq!(
   10366            1 :             Service::compute_split_shards(ShardSplitInputs {
   10367            1 :                 shard_count: ShardCount(3),
   10368            1 :                 max_logical_size: 193,
   10369            1 :                 split_threshold: 64,
   10370            1 :                 max_split_shards: 16,
   10371            1 :                 initial_split_threshold: 0,
   10372            1 :                 initial_split_shards: 0,
   10373            1 :             }),
   10374              :             Some(ShardCount(4))
   10375              :         );
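                   :         // Likewise for 3 shards: the split point implied above is 3 * 64 = 192 GB,
                   :         // and 193 GB needs ceil(193 / 64) = 4 shards, already a power of two.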
   10376              : 
   10377              :         // Initial split: tenant has a 10 GB timeline, split into 4 shards.
   10378            1 :         assert_eq!(
   10379            1 :             Service::compute_split_shards(ShardSplitInputs {
   10380            1 :                 shard_count: ShardCount(1),
   10381            1 :                 max_logical_size: 10,
   10382            1 :                 split_threshold: 0,
   10383            1 :                 max_split_shards: 16,
   10384            1 :                 initial_split_threshold: 8,
   10385            1 :                 initial_split_shards: 4,
   10386            1 :             }),
   10387              :             Some(ShardCount(4))
   10388              :         );
   10389              : 
    10390              :         // Initial split: ShardCount(0) is equivalent to ShardCount(1).
   10391            1 :         assert_eq!(
   10392            1 :             Service::compute_split_shards(ShardSplitInputs {
   10393            1 :                 shard_count: ShardCount(0),
   10394            1 :                 max_logical_size: 10,
   10395            1 :                 split_threshold: 0,
   10396            1 :                 max_split_shards: 16,
   10397            1 :                 initial_split_threshold: 8,
   10398            1 :                 initial_split_shards: 4,
   10399            1 :             }),
   10400              :             Some(ShardCount(4))
   10401              :         );
   10402              : 
   10403              :         // Initial split: at or below threshold is noop.
   10404            1 :         assert_eq!(
   10405            1 :             Service::compute_split_shards(ShardSplitInputs {
   10406            1 :                 shard_count: ShardCount(1),
   10407            1 :                 max_logical_size: 7,
   10408            1 :                 split_threshold: 0,
   10409            1 :                 max_split_shards: 16,
   10410            1 :                 initial_split_threshold: 8,
   10411            1 :                 initial_split_shards: 4,
   10412            1 :             }),
   10413              :             None,
   10414              :         );
   10415            1 :         assert_eq!(
   10416            1 :             Service::compute_split_shards(ShardSplitInputs {
   10417            1 :                 shard_count: ShardCount(1),
   10418            1 :                 max_logical_size: 8,
   10419            1 :                 split_threshold: 0,
   10420            1 :                 max_split_shards: 16,
   10421            1 :                 initial_split_threshold: 8,
   10422            1 :                 initial_split_shards: 4,
   10423            1 :             }),
   10424              :             None,
   10425              :         );
   10426            1 :         assert_eq!(
   10427            1 :             Service::compute_split_shards(ShardSplitInputs {
   10428            1 :                 shard_count: ShardCount(1),
   10429            1 :                 max_logical_size: 9,
   10430            1 :                 split_threshold: 0,
   10431            1 :                 max_split_shards: 16,
   10432            1 :                 initial_split_threshold: 8,
   10433            1 :                 initial_split_shards: 4,
   10434            1 :             }),
   10435              :             Some(ShardCount(4))
   10436              :         );
   10437              : 
    10438              :         // Initial split: an already-sharded tenant is not affected, even if above the threshold
    10439              :         // and below initial_split_shards.
   10440            1 :         assert_eq!(
   10441            1 :             Service::compute_split_shards(ShardSplitInputs {
   10442            1 :                 shard_count: ShardCount(2),
   10443            1 :                 max_logical_size: 20,
   10444            1 :                 split_threshold: 0,
   10445            1 :                 max_split_shards: 16,
   10446            1 :                 initial_split_threshold: 8,
   10447            1 :                 initial_split_shards: 4,
   10448            1 :             }),
   10449              :             None,
   10450              :         );
   10451              : 
    10452              :         // Initial split: clamped to max_split_shards.
   10453            1 :         assert_eq!(
   10454            1 :             Service::compute_split_shards(ShardSplitInputs {
   10455            1 :                 shard_count: ShardCount(1),
   10456            1 :                 max_logical_size: 10,
   10457            1 :                 split_threshold: 0,
   10458            1 :                 max_split_shards: 3,
   10459            1 :                 initial_split_threshold: 8,
   10460            1 :                 initial_split_shards: 4,
   10461            1 :             }),
   10462              :             Some(ShardCount(3)),
   10463              :         );
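                   :         // Note that clamping can produce a non-power-of-2 result: initial_split_shards
                   :         // of 4 clamped to max_split_shards of 3 yields ShardCount(3).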
   10464              : 
   10465              :         // Initial+size split: tenant eligible for both will use the larger shard count.
   10466            1 :         assert_eq!(
   10467            1 :             Service::compute_split_shards(ShardSplitInputs {
   10468            1 :                 shard_count: ShardCount(1),
   10469            1 :                 max_logical_size: 10,
   10470            1 :                 split_threshold: 64,
   10471            1 :                 max_split_shards: 16,
   10472            1 :                 initial_split_threshold: 8,
   10473            1 :                 initial_split_shards: 4,
   10474            1 :             }),
   10475              :             Some(ShardCount(4)),
   10476              :         );
   10477            1 :         assert_eq!(
   10478            1 :             Service::compute_split_shards(ShardSplitInputs {
   10479            1 :                 shard_count: ShardCount(1),
   10480            1 :                 max_logical_size: 500,
   10481            1 :                 split_threshold: 64,
   10482            1 :                 max_split_shards: 16,
   10483            1 :                 initial_split_threshold: 8,
   10484            1 :                 initial_split_shards: 4,
   10485            1 :             }),
   10486              :             Some(ShardCount(8)),
   10487              :         );
   10488              : 
   10489              :         // Initial+size split: sharded tenant is only eligible for size-based split.
   10490            1 :         assert_eq!(
   10491            1 :             Service::compute_split_shards(ShardSplitInputs {
   10492            1 :                 shard_count: ShardCount(2),
   10493            1 :                 max_logical_size: 200,
   10494            1 :                 split_threshold: 64,
   10495            1 :                 max_split_shards: 16,
   10496            1 :                 initial_split_threshold: 8,
   10497            1 :                 initial_split_shards: 8,
   10498            1 :             }),
   10499              :             Some(ShardCount(4)),
   10500              :         );
   10501              : 
   10502              :         // Initial+size split: uses the larger shard count even with initial_split_threshold above
   10503              :         // split_threshold.
   10504            1 :         assert_eq!(
   10505            1 :             Service::compute_split_shards(ShardSplitInputs {
   10506            1 :                 shard_count: ShardCount(1),
   10507            1 :                 max_logical_size: 10,
   10508            1 :                 split_threshold: 4,
   10509            1 :                 max_split_shards: 16,
   10510            1 :                 initial_split_threshold: 8,
   10511            1 :                 initial_split_shards: 8,
   10512            1 :             }),
   10513              :             Some(ShardCount(8)),
   10514              :         );
   10515              : 
    10516              :         // Test backwards compatibility with production settings when initial/size-based splits were
    10517              :         // rolled out: a single split into 8 shards at 64 GB. Any already-sharded tenants with <8
    10518              :         // shards split according to split_threshold.
   10519            1 :         assert_eq!(
   10520            1 :             Service::compute_split_shards(ShardSplitInputs {
   10521            1 :                 shard_count: ShardCount(1),
   10522            1 :                 max_logical_size: 65,
   10523            1 :                 split_threshold: 64,
   10524            1 :                 max_split_shards: 8,
   10525            1 :                 initial_split_threshold: 64,
   10526            1 :                 initial_split_shards: 8,
   10527            1 :             }),
   10528              :             Some(ShardCount(8)),
   10529              :         );
   10530              : 
   10531            1 :         assert_eq!(
   10532            1 :             Service::compute_split_shards(ShardSplitInputs {
   10533            1 :                 shard_count: ShardCount(1),
   10534            1 :                 max_logical_size: 64,
   10535            1 :                 split_threshold: 64,
   10536            1 :                 max_split_shards: 8,
   10537            1 :                 initial_split_threshold: 64,
   10538            1 :                 initial_split_shards: 8,
   10539            1 :             }),
   10540              :             None,
   10541              :         );
   10542              : 
   10543            1 :         assert_eq!(
   10544            1 :             Service::compute_split_shards(ShardSplitInputs {
   10545            1 :                 shard_count: ShardCount(2),
   10546            1 :                 max_logical_size: 129,
   10547            1 :                 split_threshold: 64,
   10548            1 :                 max_split_shards: 8,
   10549            1 :                 initial_split_threshold: 64,
   10550            1 :                 initial_split_shards: 8,
   10551            1 :             }),
   10552              :             Some(ShardCount(4)),
   10553              :         );
   10554            1 :     }
   10555              : }
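
For reference, here is a sketch of the split policy these cases pin down. It is a
reconstruction derived purely from the test expectations above, not the production
implementation in service.rs; the stand-in types, integer widths, and GB units
(taken from the "10 GB timeline" comment) are assumptions.

    // Simplified stand-ins for the real ShardCount and ShardSplitInputs types.
    #[derive(Debug, PartialEq)]
    struct ShardCount(u8);

    struct ShardSplitInputs {
        shard_count: ShardCount,
        max_logical_size: u64, // GB; size of the tenant's largest timeline
        split_threshold: u64,
        max_split_shards: u8,
        initial_split_threshold: u64,
        initial_split_shards: u8,
    }

    fn compute_split_shards(inputs: ShardSplitInputs) -> Option<ShardCount> {
        // ShardCount(0) means "unsharded": treat it as one shard.
        let current = u64::from(inputs.shard_count.0.max(1));
        let mut target = current;

        // Size-based split: fires strictly above split_threshold per existing
        // shard, and picks the smallest power of two large enough to bring
        // every shard back at or under the threshold (hence a 3-shard tenant
        // splits into 8, never 6).
        if inputs.split_threshold > 0
            && inputs.max_logical_size > inputs.split_threshold * current
        {
            target = target.max(
                inputs
                    .max_logical_size
                    .div_ceil(inputs.split_threshold)
                    .next_power_of_two(),
            );
        }

        // Initial split: only unsharded tenants qualify, and only strictly
        // above the threshold; already-sharded tenants fall through to the
        // size rule alone.
        if inputs.initial_split_threshold > 0
            && inputs.shard_count.0 <= 1
            && inputs.max_logical_size > inputs.initial_split_threshold
        {
            target = target.max(u64::from(inputs.initial_split_shards));
        }

        // Clamp to the configured maximum (which may yield a non-power-of-2
        // count, as in the max_split_shards = 3 case), and never split to a
        // count at or below the current one.
        target = target.min(u64::from(inputs.max_split_shards));
        if target <= current {
            None
        } else {
            Some(ShardCount(target as u8))
        }
    }

Checked against the assertions above, this formulation reproduces the expected
results; for example, 2 shards at 500 GB with a 64 GB threshold gives
ceil(500 / 64) = 8, hence Some(ShardCount(8)).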
        

Generated by: LCOV version 2.1-beta