LCOV - code coverage report
Current view: top level - storage_controller/src - service.rs (source / functions)
Test:      17080b14f46954d6812ea0a7dad4b2247e0840a8.info
Test Date: 2025-07-08 18:30:10

              Coverage    Total    Hit
Lines:        4.6 %       5662     258
Functions:    0.4 %       500      2

            Line data    Source code
       1              : pub mod chaos_injector;
       2              : pub mod feature_flag;
       3              : pub(crate) mod safekeeper_reconciler;
       4              : mod safekeeper_service;
       5              : mod tenant_shard_iterator;
       6              : 
       7              : use std::borrow::Cow;
       8              : use std::cmp::Ordering;
       9              : use std::collections::{BTreeMap, HashMap, HashSet};
      10              : use std::error::Error;
      11              : use std::num::NonZeroU32;
      12              : use std::ops::{Deref, DerefMut};
      13              : use std::path::PathBuf;
      14              : use std::str::FromStr;
      15              : use std::sync::{Arc, OnceLock};
      16              : use std::time::{Duration, Instant, SystemTime};
      17              : 
      18              : use anyhow::Context;
      19              : use control_plane::storage_controller::{
      20              :     AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
      21              : };
      22              : use diesel::result::DatabaseErrorKind;
      23              : use futures::StreamExt;
      24              : use futures::stream::FuturesUnordered;
      25              : use http_utils::error::ApiError;
      26              : use hyper::Uri;
      27              : use itertools::Itertools;
      28              : use pageserver_api::config::PostHogConfig;
      29              : use pageserver_api::controller_api::{
      30              :     AvailabilityZone, MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability,
      31              :     NodeRegisterRequest, NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy,
      32              :     ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse,
      33              :     SkSchedulingPolicy, TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard,
      34              :     TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
      35              :     TenantShardMigrateRequest, TenantShardMigrateResponse,
      36              : };
      37              : use pageserver_api::models::{
      38              :     self, DetachBehavior, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
      39              :     PageserverUtilization, SecondaryProgress, ShardImportStatus, ShardParameters, TenantConfig,
      40              :     TenantConfigPatchRequest, TenantConfigRequest, TenantLocationConfigRequest,
      41              :     TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
      42              :     TenantShardSplitResponse, TenantSorting, TenantTimeTravelRequest,
      43              :     TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateResponseStorcon,
      44              :     TimelineInfo, TopTenantShardItem, TopTenantShardsRequest,
      45              : };
      46              : use pageserver_api::shard::{
      47              :     DEFAULT_STRIPE_SIZE, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      48              : };
      49              : use pageserver_api::upcall_api::{
      50              :     PutTimelineImportStatusRequest, ReAttachRequest, ReAttachResponse, ReAttachResponseTenant,
      51              :     TimelineImportStatusRequest, ValidateRequest, ValidateResponse, ValidateResponseTenant,
      52              : };
      53              : use pageserver_client::{BlockUnblock, mgmt_api};
      54              : use reqwest::{Certificate, StatusCode};
      55              : use safekeeper_api::models::SafekeeperUtilization;
      56              : use safekeeper_reconciler::SafekeeperReconcilers;
      57              : use tenant_shard_iterator::{TenantShardExclusiveIterator, create_shared_shard_iterator};
      58              : use tokio::sync::TryAcquireError;
      59              : use tokio::sync::mpsc::error::TrySendError;
      60              : use tokio_util::sync::CancellationToken;
      61              : use tracing::{Instrument, debug, error, info, info_span, instrument, warn};
      62              : use utils::completion::Barrier;
      63              : use utils::generation::Generation;
      64              : use utils::id::{NodeId, TenantId, TimelineId};
      65              : use utils::lsn::Lsn;
      66              : use utils::shard::ShardIndex;
      67              : use utils::sync::gate::{Gate, GateGuard};
      68              : use utils::{failpoint_support, pausable_failpoint};
      69              : 
      70              : use crate::background_node_operations::{
      71              :     Delete, Drain, Fill, MAX_RECONCILES_PER_OPERATION, Operation, OperationError, OperationHandler,
      72              : };
      73              : use crate::compute_hook::{self, ComputeHook, NotifyError};
      74              : use crate::heartbeater::{Heartbeater, PageserverState, SafekeeperState};
      75              : use crate::id_lock_map::{
      76              :     IdLockMap, TracingExclusiveGuard, trace_exclusive_lock, trace_shared_lock,
      77              : };
      78              : use crate::leadership::Leadership;
      79              : use crate::metrics;
      80              : use crate::node::{AvailabilityTransition, Node};
      81              : use crate::operation_utils::{self, TenantShardDrain};
      82              : use crate::pageserver_client::PageserverClient;
      83              : use crate::peer_client::GlobalObservedState;
      84              : use crate::persistence::split_state::SplitState;
      85              : use crate::persistence::{
      86              :     AbortShardSplitStatus, ControllerPersistence, DatabaseError, DatabaseResult,
      87              :     MetadataHealthPersistence, Persistence, ShardGenerationState, TenantFilter,
      88              :     TenantShardPersistence,
      89              : };
      90              : use crate::reconciler::{
      91              :     ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder, ReconcilerPriority,
      92              :     attached_location_conf,
      93              : };
      94              : use crate::safekeeper::Safekeeper;
      95              : use crate::scheduler::{
      96              :     AttachedShardTag, MaySchedule, ScheduleContext, ScheduleError, ScheduleMode, Scheduler,
      97              : };
      98              : use crate::tenant_shard::{
      99              :     IntentState, MigrateAttachment, ObservedState, ObservedStateDelta, ObservedStateLocation,
     100              :     ReconcileNeeded, ReconcileResult, ReconcileWaitError, ReconcilerStatus, ReconcilerWaiter,
     101              :     ScheduleOptimization, ScheduleOptimizationAction, TenantShard,
     102              : };
     103              : use crate::timeline_import::{
     104              :     FinalizingImport, ImportResult, ShardImportStatuses, TimelineImport,
     105              :     TimelineImportFinalizeError, TimelineImportState, UpcallClient,
     106              : };
     107              : 
     108              : const WAITER_OPERATION_POLL_TIMEOUT: Duration = Duration::from_millis(500);
     109              : 
     110              : // For operations that should be quick, like attaching a new tenant
     111              : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
     112              : 
     113              : // For operations that might be slow, like migrating a tenant with
     114              : // some data in it.
     115              : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     116              : 
     117              : // If we receive a call using Secondary mode initially, it will omit generation.  We will initialize
     118              : // tenant shards into this generation, and as long as it remains in this generation, we will accept
     119              : // input generation from future requests as authoritative.
     120              : const INITIAL_GENERATION: Generation = Generation::new(0);
     121              : 
     122              : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
     123              : /// up on unresponsive pageservers and proceed.
     124              : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     125              : 
     126              : /// How long a node may be unresponsive to heartbeats before we declare it offline.
      127              : /// This must be long enough to cover node restarts as well as normal operations.
     128              : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
     129              : 
     130              : /// How long a node may be unresponsive to heartbeats during start up before we declare it
     131              : /// offline.
     132              : ///
     133              : /// This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
     134              : /// handling of the re-attach response may take a long time and blocks heartbeats from
     135              : /// being handled on the pageserver side.
     136              : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
     137              : 
     138              : /// How often to send heartbeats to registered nodes?
     139              : pub const HEARTBEAT_INTERVAL_DEFAULT: Duration = Duration::from_secs(5);
     140              : 
     141              : /// How long is too long for a reconciliation?
     142              : pub const LONG_RECONCILE_THRESHOLD_DEFAULT: Duration = Duration::from_secs(120);
     143              : 
     144              : #[derive(Clone, strum_macros::Display)]
     145              : enum TenantOperations {
     146              :     Create,
     147              :     LocationConfig,
     148              :     ConfigSet,
     149              :     ConfigPatch,
     150              :     TimeTravelRemoteStorage,
     151              :     Delete,
     152              :     UpdatePolicy,
     153              :     ShardSplit,
     154              :     SecondaryDownload,
     155              :     TimelineCreate,
     156              :     TimelineDelete,
     157              :     AttachHook,
     158              :     TimelineArchivalConfig,
     159              :     TimelineDetachAncestor,
     160              :     TimelineGcBlockUnblock,
     161              :     DropDetached,
     162              :     DownloadHeatmapLayers,
     163              :     TimelineLsnLease,
     164              :     TimelineSafekeeperMigrate,
     165              : }
     166              : 
     167              : #[derive(Clone, strum_macros::Display)]
     168              : enum NodeOperations {
     169              :     Register,
     170              :     Configure,
     171              :     Delete,
     172              :     DeleteTombstone,
     173              : }
     174              : 
     175              : /// The leadership status for the storage controller process.
     176              : /// Allowed transitions are:
     177              : /// 1. Leader -> SteppedDown
     178              : /// 2. Candidate -> Leader
     179              : #[derive(
     180              :     Eq,
     181              :     PartialEq,
     182              :     Copy,
     183              :     Clone,
     184              :     strum_macros::Display,
     185              :     strum_macros::EnumIter,
     186              :     measured::FixedCardinalityLabel,
     187              : )]
     188              : #[strum(serialize_all = "snake_case")]
     189              : pub(crate) enum LeadershipStatus {
     190              :     /// This is the steady state where the storage controller can produce
     191              :     /// side effects in the cluster.
     192              :     Leader,
     193              :     /// We've been notified to step down by another candidate. No reconciliations
     194              :     /// take place in this state.
     195              :     SteppedDown,
     196              :     /// Initial state for a new storage controller instance. Will attempt to assume leadership.
     197              :     #[allow(unused)]
     198              :     Candidate,
     199              : }
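// Illustrative sketch (hypothetical helper, not part of this module): the transition
// rules stated in the doc comment above, expressed as a predicate. Only
// Candidate -> Leader and Leader -> SteppedDown are allowed.
fn leadership_transition_allowed(from: LeadershipStatus, to: LeadershipStatus) -> bool {
    use LeadershipStatus::*;
    matches!((from, to), (Candidate, Leader) | (Leader, SteppedDown))
}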
     200              : 
     201              : enum ShardGenerationValidity {
     202              :     Valid,
     203              :     Mismatched {
     204              :         claimed: Generation,
     205              :         actual: Option<Generation>,
     206              :     },
     207              : }
     208              : 
     209              : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
     210              : pub const PRIORITY_RECONCILER_CONCURRENCY_DEFAULT: usize = 256;
     211              : pub const SAFEKEEPER_RECONCILER_CONCURRENCY_DEFAULT: usize = 32;
     212              : 
      213              : // Number of consecutive reconciliation errors that occurred for one shard,
      214              : // after which the shard is ignored when considering whether to run optimizations.
     215              : const MAX_CONSECUTIVE_RECONCILIATION_ERRORS: usize = 5;
     216              : 
     217              : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
     218              : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
     219              : // than they're being pushed onto the queue.
     220              : const MAX_DELAYED_RECONCILES: usize = 10000;
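// Illustrative sketch (hypothetical helper, not part of this module): because the
// delayed-reconcile channel is bounded at MAX_DELAYED_RECONCILES, enqueueing can fail
// when the queue is full; the sender then drops the request rather than buffering
// without bound, and the shard is picked up again by a later background reconcile pass.
fn sketch_enqueue_delayed_reconcile(
    tx: &tokio::sync::mpsc::Sender<TenantShardId>,
    tenant_shard_id: TenantShardId,
) {
    match tx.try_send(tenant_shard_id) {
        Ok(()) => {}
        Err(TrySendError::Full(_)) => {
            // Queue is full: drop the request; a later reconcile_all pass will retry.
            tracing::debug!("Delayed reconcile queue is full; shard will be retried later");
        }
        Err(TrySendError::Closed(_)) => {
            // Receiver dropped: the service is shutting down, nothing to do.
        }
    }
}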
     221              : 
     222              : // Top level state available to all HTTP handlers
     223              : struct ServiceState {
     224              :     leadership_status: LeadershipStatus,
     225              : 
     226              :     tenants: BTreeMap<TenantShardId, TenantShard>,
     227              : 
     228              :     nodes: Arc<HashMap<NodeId, Node>>,
     229              : 
     230              :     safekeepers: Arc<HashMap<NodeId, Safekeeper>>,
     231              : 
     232              :     safekeeper_reconcilers: SafekeeperReconcilers,
     233              : 
     234              :     scheduler: Scheduler,
     235              : 
     236              :     /// Ongoing background operation on the cluster if any is running.
     237              :     /// Note that only one such operation may run at any given time,
     238              :     /// hence the type choice.
     239              :     ongoing_operation: Option<OperationHandler>,
     240              : 
     241              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     242              :     delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     243              : 
     244              :     /// Tracks ongoing timeline import finalization tasks
     245              :     imports_finalizing: BTreeMap<(TenantId, TimelineId), FinalizingImport>,
     246              : }
     247              : 
     248              : /// Transform an error from a pageserver into an error to return to callers of a storage
     249              : /// controller API.
     250            0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
     251            0 :     match e {
     252            0 :         mgmt_api::Error::SendRequest(e) => {
     253              :             // Presume errors sending requests are connectivity/availability issues
     254            0 :             ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
     255              :         }
     256            0 :         mgmt_api::Error::ReceiveErrorBody(str) => {
     257              :             // Presume errors receiving body are connectivity/availability issues
     258            0 :             ApiError::ResourceUnavailable(
     259            0 :                 format!("{node} error receiving error body: {str}").into(),
     260            0 :             )
     261              :         }
     262            0 :         mgmt_api::Error::ReceiveBody(err) if err.is_decode() => {
     263              :             // Return 500 for decoding errors.
     264            0 :             ApiError::InternalServerError(anyhow::Error::from(err).context("error decoding body"))
     265              :         }
     266            0 :         mgmt_api::Error::ReceiveBody(err) => {
     267              :             // Presume errors receiving body are connectivity/availability issues except for decoding errors
     268            0 :             let src_str = err.source().map(|e| e.to_string()).unwrap_or_default();
     269            0 :             ApiError::ResourceUnavailable(
     270            0 :                 format!("{node} error receiving error body: {err} {src_str}").into(),
     271            0 :             )
     272              :         }
     273            0 :         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
     274            0 :             ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
     275              :         }
     276            0 :         mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
     277            0 :             ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
     278              :         }
     279            0 :         mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
     280            0 :         | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
     281              :             // Auth errors talking to a pageserver are not auth errors for the caller: they are
     282              :             // internal server errors, showing that something is wrong with the pageserver or
     283              :             // storage controller's auth configuration.
     284            0 :             ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
     285              :         }
     286            0 :         mgmt_api::Error::ApiError(status @ StatusCode::TOO_MANY_REQUESTS, msg) => {
     287              :             // Pass through 429 errors: if pageserver is asking us to wait + retry, we in
     288              :             // turn ask our clients to wait + retry
     289            0 :             ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
     290              :         }
     291            0 :         mgmt_api::Error::ApiError(status, msg) => {
     292              :             // Presume general case of pageserver API errors is that we tried to do something
     293              :             // that can't be done right now.
     294            0 :             ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
     295              :         }
     296            0 :         mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
     297            0 :         mgmt_api::Error::Timeout(e) => ApiError::Timeout(e.into()),
     298              :     }
     299            0 : }
     300              : 
     301              : impl ServiceState {
     302            0 :     fn new(
     303            0 :         nodes: HashMap<NodeId, Node>,
     304            0 :         safekeepers: HashMap<NodeId, Safekeeper>,
     305            0 :         tenants: BTreeMap<TenantShardId, TenantShard>,
     306            0 :         scheduler: Scheduler,
     307            0 :         delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     308            0 :         initial_leadership_status: LeadershipStatus,
     309            0 :         reconcilers_cancel: CancellationToken,
     310            0 :     ) -> Self {
     311            0 :         metrics::update_leadership_status(initial_leadership_status);
     312              : 
     313            0 :         Self {
     314            0 :             leadership_status: initial_leadership_status,
     315            0 :             tenants,
     316            0 :             nodes: Arc::new(nodes),
     317            0 :             safekeepers: Arc::new(safekeepers),
     318            0 :             safekeeper_reconcilers: SafekeeperReconcilers::new(reconcilers_cancel),
     319            0 :             scheduler,
     320            0 :             ongoing_operation: None,
     321            0 :             delayed_reconcile_rx,
     322            0 :             imports_finalizing: Default::default(),
     323            0 :         }
     324            0 :     }
     325              : 
     326            0 :     fn parts_mut(
     327            0 :         &mut self,
     328            0 :     ) -> (
     329            0 :         &mut Arc<HashMap<NodeId, Node>>,
     330            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     331            0 :         &mut Scheduler,
     332            0 :     ) {
     333            0 :         (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
     334            0 :     }
     335              : 
     336              :     #[allow(clippy::type_complexity)]
     337            0 :     fn parts_mut_sk(
     338            0 :         &mut self,
     339            0 :     ) -> (
     340            0 :         &mut Arc<HashMap<NodeId, Node>>,
     341            0 :         &mut Arc<HashMap<NodeId, Safekeeper>>,
     342            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     343            0 :         &mut Scheduler,
     344            0 :     ) {
     345            0 :         (
     346            0 :             &mut self.nodes,
     347            0 :             &mut self.safekeepers,
     348            0 :             &mut self.tenants,
     349            0 :             &mut self.scheduler,
     350            0 :         )
     351            0 :     }
     352              : 
     353            0 :     fn get_leadership_status(&self) -> LeadershipStatus {
     354            0 :         self.leadership_status
     355            0 :     }
     356              : 
     357            0 :     fn step_down(&mut self) {
     358            0 :         self.leadership_status = LeadershipStatus::SteppedDown;
     359            0 :         metrics::update_leadership_status(self.leadership_status);
     360            0 :     }
     361              : 
     362            0 :     fn become_leader(&mut self) {
     363            0 :         self.leadership_status = LeadershipStatus::Leader;
     364            0 :         metrics::update_leadership_status(self.leadership_status);
     365            0 :     }
     366              : }
     367              : 
     368              : #[derive(Clone)]
     369              : pub struct Config {
     370              :     // All pageservers managed by one instance of this service must have
     371              :     // the same public key.  This JWT token will be used to authenticate
     372              :     // this service to the pageservers it manages.
     373              :     pub pageserver_jwt_token: Option<String>,
     374              : 
     375              :     // All safekeepers managed by one instance of this service must have
     376              :     // the same public key. This JWT token will be used to authenticate
     377              :     // this service to the safekeepers it manages.
     378              :     pub safekeeper_jwt_token: Option<String>,
     379              : 
     380              :     // This JWT token will be used to authenticate this service to the control plane.
     381              :     pub control_plane_jwt_token: Option<String>,
     382              : 
     383              :     // This JWT token will be used to authenticate with other storage controller instances
     384              :     pub peer_jwt_token: Option<String>,
     385              : 
     386              :     /// Prefix for storage API endpoints of the control plane. We use this prefix to compute
     387              :     /// URLs that we use to send pageserver and safekeeper attachment locations.
     388              :     /// If this is None, the compute hook will assume it is running in a test environment
     389              :     /// and try to invoke neon_local instead.
     390              :     pub control_plane_url: Option<String>,
     391              : 
     392              :     /// Grace period within which a pageserver does not respond to heartbeats, but is still
     393              :     /// considered active. Once the grace period elapses, the next heartbeat failure will
      394              :     /// mark the pageserver offline.
     395              :     pub max_offline_interval: Duration,
     396              : 
     397              :     /// Extended grace period within which pageserver may not respond to heartbeats.
     398              :     /// This extended grace period kicks in after the node has been drained for restart
     399              :     /// and/or upon handling the re-attach request from a node.
     400              :     pub max_warming_up_interval: Duration,
     401              : 
     402              :     /// How many normal-priority Reconcilers may be spawned concurrently
     403              :     pub reconciler_concurrency: usize,
     404              : 
     405              :     /// How many high-priority Reconcilers may be spawned concurrently
     406              :     pub priority_reconciler_concurrency: usize,
     407              : 
     408              :     /// How many safekeeper reconciles may happen concurrently (per safekeeper)
     409              :     pub safekeeper_reconciler_concurrency: usize,
     410              : 
     411              :     /// How many API requests per second to allow per tenant, across all
     412              :     /// tenant-scoped API endpoints. Further API requests queue until ready.
     413              :     pub tenant_rate_limit: NonZeroU32,
     414              : 
     415              :     /// If a tenant shard's largest timeline (max_logical_size) exceeds this value, all tenant
     416              :     /// shards will be split in 2 until they fall below split_threshold (up to max_split_shards).
     417              :     ///
     418              :     /// This will greedily split into as many shards as necessary to fall below split_threshold, as
     419              :     /// powers of 2: if a tenant shard is 7 times larger than split_threshold, it will split into 8
     420              :     /// immediately, rather than first 2 then 4 then 8.
     421              :     ///
     422              :     /// None or 0 disables auto-splitting.
     423              :     ///
     424              :     /// TODO: consider using total logical size of all timelines instead.
     425              :     pub split_threshold: Option<u64>,
     426              : 
     427              :     /// The maximum number of shards a tenant can be split into during autosplits. Does not affect
     428              :     /// manual split requests. 0 or 1 disables autosplits, as we already have 1 shard.
     429              :     pub max_split_shards: u8,
     430              : 
     431              :     /// The size at which an unsharded tenant should initially split. Ingestion is significantly
     432              :     /// faster with multiple shards, so eagerly splitting below split_threshold will typically speed
     433              :     /// up initial ingestion of large tenants.
     434              :     ///
     435              :     /// This should be below split_threshold, but it is not required. If both split_threshold and
     436              :     /// initial_split_threshold qualify, the largest number of target shards will be used.
     437              :     ///
     438              :     /// Does not apply to already sharded tenants: changing initial_split_threshold or
     439              :     /// initial_split_shards is not retroactive for already-sharded tenants.
     440              :     ///
     441              :     /// None or 0 disables initial splits.
     442              :     pub initial_split_threshold: Option<u64>,
     443              : 
     444              :     /// The number of shards to split into when reaching initial_split_threshold. Will
     445              :     /// be clamped to max_split_shards.
     446              :     ///
     447              :     /// 0 or 1 disables initial splits. Has no effect if initial_split_threshold is disabled.
     448              :     pub initial_split_shards: u8,
     449              : 
      450              :     // TODO: make this cfg(feature = "testing")
     451              :     pub neon_local_repo_dir: Option<PathBuf>,
     452              : 
     453              :     // Maximum acceptable download lag for the secondary location
     454              :     // while draining a node. If the secondary location is lagging
     455              :     // by more than the configured amount, then the secondary is not
     456              :     // upgraded to primary.
     457              :     pub max_secondary_lag_bytes: Option<u64>,
     458              : 
     459              :     pub heartbeat_interval: Duration,
     460              : 
     461              :     pub address_for_peers: Option<Uri>,
     462              : 
     463              :     pub start_as_candidate: bool,
     464              : 
     465              :     pub long_reconcile_threshold: Duration,
     466              : 
     467              :     pub use_https_pageserver_api: bool,
     468              : 
     469              :     pub use_https_safekeeper_api: bool,
     470              : 
     471              :     pub ssl_ca_certs: Vec<Certificate>,
     472              : 
     473              :     pub timelines_onto_safekeepers: bool,
     474              : 
     475              :     pub use_local_compute_notifications: bool,
     476              : 
     477              :     /// Number of safekeepers to choose for a timeline when creating it.
      478              :     /// Safekeepers will be chosen from different availability zones.
     479              :     pub timeline_safekeeper_count: usize,
     480              : 
     481              :     /// PostHog integration config
     482              :     pub posthog_config: Option<PostHogConfig>,
     483              : 
     484              :     /// When set, actively checks and initiates heatmap downloads/uploads.
     485              :     pub kick_secondary_downloads: bool,
     486              : }
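// Illustrative sketch (hypothetical helper and parameter names, not part of this module):
// the greedy power-of-two behaviour described for `split_threshold` above. A tenant whose
// largest timeline is N times the threshold jumps straight to the next power of two >= N
// shards, clamped to `max_split_shards` (cf. the ShardSplitInputs struct further down).
fn sketch_size_based_shard_count(
    max_logical_size: u64,
    split_threshold: u64,
    max_split_shards: u8,
) -> u8 {
    if split_threshold == 0 || max_split_shards <= 1 {
        return 1; // size-based splitting disabled
    }
    // Shards needed so that max_logical_size / shards <= split_threshold, rounded up
    // to a power of two: a 7x-oversized tenant goes to 8 shards in one step.
    let needed = max_logical_size.div_ceil(split_threshold).max(1);
    needed.next_power_of_two().min(max_split_shards as u64) as u8
}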
     487              : 
     488              : impl From<DatabaseError> for ApiError {
     489            0 :     fn from(err: DatabaseError) -> ApiError {
     490            0 :         match err {
     491            0 :             DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
     492              :             // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
     493              :             DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
     494            0 :                 ApiError::ShuttingDown
     495              :             }
     496            0 :             DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => {
     497            0 :                 ApiError::InternalServerError(anyhow::anyhow!(reason))
     498              :             }
     499            0 :             DatabaseError::Cas(reason) => ApiError::Conflict(reason),
     500              :         }
     501            0 :     }
     502              : }
     503              : 
     504              : enum InitialShardScheduleOutcome {
     505              :     Scheduled(TenantCreateResponseShard),
     506              :     NotScheduled,
     507              :     ShardScheduleError(ScheduleError),
     508              : }
     509              : 
     510              : pub struct Service {
     511              :     inner: Arc<std::sync::RwLock<ServiceState>>,
     512              :     config: Config,
     513              :     persistence: Arc<Persistence>,
     514              :     compute_hook: Arc<ComputeHook>,
     515              :     result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
     516              : 
     517              :     heartbeater_ps: Heartbeater<Node, PageserverState>,
     518              :     heartbeater_sk: Heartbeater<Safekeeper, SafekeeperState>,
     519              : 
     520              :     // Channel for background cleanup from failed operations that require cleanup, such as shard split
     521              :     abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
     522              : 
     523              :     // Locking on a tenant granularity (covers all shards in the tenant):
     524              :     // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
     525              :     // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
     526              :     tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
     527              : 
     528              :     // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
     529              :     // that transition it to/from Active.
     530              :     node_op_locks: IdLockMap<NodeId, NodeOperations>,
     531              : 
     532              :     // Limit how many Reconcilers we will spawn concurrently for normal-priority tasks such as background reconciliations
     533              :     // and reconciliation on startup.
     534              :     reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     535              : 
     536              :     // Limit how many Reconcilers we will spawn concurrently for high-priority tasks such as tenant/timeline CRUD, which
     537              :     // a human user might be waiting for.
     538              :     priority_reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     539              : 
     540              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     541              :     /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
     542              :     ///
     543              :     /// Note that this state logically lives inside ServiceState, but carrying Sender here makes the code simpler
     544              :     /// by avoiding needing a &mut ref to something inside the ServiceState.  This could be optimized to
     545              :     /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
     546              :     delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
     547              : 
     548              :     // Process shutdown will fire this token
     549              :     cancel: CancellationToken,
     550              : 
     551              :     // Child token of [`Service::cancel`] used by reconcilers
     552              :     reconcilers_cancel: CancellationToken,
     553              : 
     554              :     // Background tasks will hold this gate
     555              :     gate: Gate,
     556              : 
     557              :     // Reconcilers background tasks will hold this gate
     558              :     reconcilers_gate: Gate,
     559              : 
     560              :     /// This waits for initial reconciliation with pageservers to complete.  Until this barrier
     561              :     /// passes, it isn't safe to do any actions that mutate tenants.
     562              :     pub(crate) startup_complete: Barrier,
     563              : 
     564              :     /// HTTP client with proper CA certs.
     565              :     http_client: reqwest::Client,
     566              : 
     567              :     /// Handle for the step down background task if one was ever requested
     568              :     step_down_barrier: OnceLock<tokio::sync::watch::Receiver<Option<GlobalObservedState>>>,
     569              : }
     570              : 
     571              : impl From<ReconcileWaitError> for ApiError {
     572            0 :     fn from(value: ReconcileWaitError) -> Self {
     573            0 :         match value {
     574            0 :             ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
     575            0 :             e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
     576            0 :             e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
     577              :         }
     578            0 :     }
     579              : }
     580              : 
     581              : impl From<OperationError> for ApiError {
     582            0 :     fn from(value: OperationError) -> Self {
     583            0 :         match value {
     584            0 :             OperationError::NodeStateChanged(err)
     585            0 :             | OperationError::FinalizeError(err)
     586            0 :             | OperationError::ImpossibleConstraint(err) => {
     587            0 :                 ApiError::InternalServerError(anyhow::anyhow!(err))
     588              :             }
     589            0 :             OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
     590              :         }
     591            0 :     }
     592              : }
     593              : 
     594              : #[allow(clippy::large_enum_variant)]
     595              : enum TenantCreateOrUpdate {
     596              :     Create(TenantCreateRequest),
     597              :     Update(Vec<ShardUpdate>),
     598              : }
     599              : 
     600              : struct ShardSplitParams {
     601              :     old_shard_count: ShardCount,
     602              :     new_shard_count: ShardCount,
     603              :     new_stripe_size: Option<ShardStripeSize>,
     604              :     targets: Vec<ShardSplitTarget>,
     605              :     policy: PlacementPolicy,
     606              :     config: TenantConfig,
     607              :     shard_ident: ShardIdentity,
     608              :     preferred_az_id: Option<AvailabilityZone>,
     609              : }
     610              : 
     611              : // When preparing for a shard split, we may either choose to proceed with the split,
     612              : // or find that the work is already done and return NoOp.
     613              : enum ShardSplitAction {
     614              :     Split(Box<ShardSplitParams>),
     615              :     NoOp(TenantShardSplitResponse),
     616              : }
     617              : 
     618              : // A parent shard which will be split
     619              : struct ShardSplitTarget {
     620              :     parent_id: TenantShardId,
     621              :     node: Node,
     622              :     child_ids: Vec<TenantShardId>,
     623              : }
     624              : 
      625              : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
     626              : /// might not be available.  We therefore use a queue of abort operations processed in the background.
     627              : struct TenantShardSplitAbort {
     628              :     tenant_id: TenantId,
     629              :     /// The target values from the request that failed
     630              :     new_shard_count: ShardCount,
     631              :     new_stripe_size: Option<ShardStripeSize>,
     632              :     /// Until this abort op is complete, no other operations may be done on the tenant
     633              :     _tenant_lock: TracingExclusiveGuard<TenantOperations>,
     634              :     /// The reconciler gate for the duration of the split operation, and any included abort.
     635              :     _gate: GateGuard,
     636              : }
     637              : 
     638              : #[derive(thiserror::Error, Debug)]
     639              : enum TenantShardSplitAbortError {
     640              :     #[error(transparent)]
     641              :     Database(#[from] DatabaseError),
     642              :     #[error(transparent)]
     643              :     Remote(#[from] mgmt_api::Error),
     644              :     #[error("Unavailable")]
     645              :     Unavailable,
     646              : }
     647              : 
     648              : /// Inputs for computing a target shard count for a tenant.
     649              : struct ShardSplitInputs {
     650              :     /// Current shard count.
     651              :     shard_count: ShardCount,
     652              :     /// Total size of largest timeline summed across all shards.
     653              :     max_logical_size: u64,
     654              :     /// Size-based split threshold. Zero if size-based splits are disabled.
     655              :     split_threshold: u64,
     656              :     /// Upper bound on target shards. 0 or 1 disables splits.
     657              :     max_split_shards: u8,
     658              :     /// Initial split threshold. Zero if initial splits are disabled.
     659              :     initial_split_threshold: u64,
     660              :     /// Number of shards for initial splits. 0 or 1 disables initial splits.
     661              :     initial_split_shards: u8,
     662              : }
     663              : 
     664              : struct ShardUpdate {
     665              :     tenant_shard_id: TenantShardId,
     666              :     placement_policy: PlacementPolicy,
     667              :     tenant_config: TenantConfig,
     668              : 
     669              :     /// If this is None, generation is not updated.
     670              :     generation: Option<Generation>,
     671              : 
     672              :     /// If this is None, scheduling policy is not updated.
     673              :     scheduling_policy: Option<ShardSchedulingPolicy>,
     674              : }
     675              : 
     676              : enum StopReconciliationsReason {
     677              :     ShuttingDown,
     678              :     SteppingDown,
     679              : }
     680              : 
     681              : impl std::fmt::Display for StopReconciliationsReason {
     682            0 :     fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
     683            0 :         let s = match self {
     684            0 :             Self::ShuttingDown => "Shutting down",
     685            0 :             Self::SteppingDown => "Stepping down",
     686              :         };
     687            0 :         write!(writer, "{s}")
     688            0 :     }
     689              : }
     690              : 
     691              : pub(crate) enum ReconcileResultRequest {
     692              :     ReconcileResult(ReconcileResult),
     693              :     Stop,
     694              : }
     695              : 
     696              : #[derive(Clone)]
     697              : struct MutationLocation {
     698              :     node: Node,
     699              :     generation: Generation,
     700              : }
     701              : 
     702              : #[derive(Clone)]
     703              : struct ShardMutationLocations {
     704              :     latest: MutationLocation,
     705              :     other: Vec<MutationLocation>,
     706              : }
     707              : 
     708              : #[derive(Default, Clone)]
     709              : struct TenantMutationLocations(BTreeMap<TenantShardId, ShardMutationLocations>);
     710              : 
     711              : struct ReconcileAllResult {
     712              :     spawned_reconciles: usize,
     713              :     keep_failing_reconciles: usize,
     714              :     has_delayed_reconciles: bool,
     715              : }
     716              : 
     717              : impl ReconcileAllResult {
     718            0 :     fn new(
     719            0 :         spawned_reconciles: usize,
     720            0 :         keep_failing_reconciles: usize,
     721            0 :         has_delayed_reconciles: bool,
     722            0 :     ) -> Self {
     723            0 :         assert!(
     724            0 :             spawned_reconciles >= keep_failing_reconciles,
     725            0 :             "It is impossible to have more keep-failing reconciles than spawned reconciles"
     726              :         );
     727            0 :         Self {
     728            0 :             spawned_reconciles,
     729            0 :             keep_failing_reconciles,
     730            0 :             has_delayed_reconciles,
     731            0 :         }
     732            0 :     }
     733              : 
     734              :     /// We can run optimizations only if we don't have any delayed reconciles and
     735              :     /// all spawned reconciles are also keep-failing reconciles.
     736            0 :     fn can_run_optimizations(&self) -> bool {
     737            0 :         !self.has_delayed_reconciles && self.spawned_reconciles == self.keep_failing_reconciles
     738            0 :     }
     739              : }
     740              : 
     741              : impl Service {
     742            0 :     pub fn get_config(&self) -> &Config {
     743            0 :         &self.config
     744            0 :     }
     745              : 
     746            0 :     pub fn get_http_client(&self) -> &reqwest::Client {
     747            0 :         &self.http_client
     748            0 :     }
     749              : 
     750              :     /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
     751              :     /// view of the world, and determine which pageservers are responsive.
     752              :     #[instrument(skip_all)]
     753              :     async fn startup_reconcile(
     754              :         self: &Arc<Service>,
     755              :         current_leader: Option<ControllerPersistence>,
     756              :         leader_step_down_state: Option<GlobalObservedState>,
     757              :         bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
     758              :             Result<(), (TenantShardId, NotifyError)>,
     759              :         >,
     760              :     ) {
     761              :         // Startup reconciliation does I/O to other services: whether they
     762              :         // are responsive or not, we should aim to finish within our deadline, because:
     763              :         // - If we don't, a k8s readiness hook watching /ready will kill us.
     764              :         // - While we're waiting for startup reconciliation, we are not fully
     765              :         //   available for end user operations like creating/deleting tenants and timelines.
     766              :         //
     767              :         // We set multiple deadlines to break up the time available between the phases of work: this is
     768              :         // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
     769              :         let start_at = Instant::now();
     770              :         let node_scan_deadline = start_at
     771              :             .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
     772              :             .expect("Reconcile timeout is a modest constant");
     773              : 
     774              :         let observed = if let Some(state) = leader_step_down_state {
     775              :             tracing::info!(
     776              :                 "Using observed state received from leader at {}",
     777              :                 current_leader.as_ref().unwrap().address
     778              :             );
     779              : 
     780              :             state
     781              :         } else {
     782              :             self.build_global_observed_state(node_scan_deadline).await
     783              :         };
     784              : 
     785              :         // Accumulate a list of any tenant locations that ought to be detached
     786              :         let mut cleanup = Vec::new();
     787              : 
     788              :         // Send initial heartbeat requests to all nodes loaded from the database
     789              :         let all_nodes = {
     790              :             let locked = self.inner.read().unwrap();
     791              :             locked.nodes.clone()
     792              :         };
     793              :         let (mut nodes_online, mut sks_online) =
     794              :             self.initial_heartbeat_round(all_nodes.keys()).await;
     795              : 
     796              :         // List of tenants for which we will attempt to notify compute of their location at startup
     797              :         let mut compute_notifications = Vec::new();
     798              : 
     799              :         // Populate intent and observed states for all tenants, based on reported state on pageservers
     800              :         tracing::info!("Populating tenant shards' states from initial pageserver scan...");
     801              :         let shard_count = {
     802              :             let mut locked = self.inner.write().unwrap();
     803              :             let (nodes, safekeepers, tenants, scheduler) = locked.parts_mut_sk();
     804              : 
     805              :             // Mark nodes online if they responded to us: nodes are offline by default after a restart.
     806              :             let mut new_nodes = (**nodes).clone();
     807              :             for (node_id, node) in new_nodes.iter_mut() {
     808              :                 if let Some(utilization) = nodes_online.remove(node_id) {
     809              :                     node.set_availability(NodeAvailability::Active(utilization));
     810              :                     scheduler.node_upsert(node);
     811              :                 }
     812              :             }
     813              :             *nodes = Arc::new(new_nodes);
     814              : 
     815              :             let mut new_sks = (**safekeepers).clone();
     816              :             for (node_id, node) in new_sks.iter_mut() {
     817              :                 if let Some((utilization, last_seen_at)) = sks_online.remove(node_id) {
     818              :                     node.set_availability(SafekeeperState::Available {
     819              :                         utilization,
     820              :                         last_seen_at,
     821              :                     });
     822              :                 }
     823              :             }
     824              :             *safekeepers = Arc::new(new_sks);
     825              : 
     826              :             for (tenant_shard_id, observed_state) in observed.0 {
     827              :                 let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
     828              :                     for node_id in observed_state.locations.keys() {
     829              :                         cleanup.push((tenant_shard_id, *node_id));
     830              :                     }
     831              : 
     832              :                     continue;
     833              :                 };
     834              : 
     835              :                 tenant_shard.observed = observed_state;
     836              :             }
     837              : 
     838              :             // Populate each tenant's intent state
     839              :             let mut schedule_context = ScheduleContext::default();
     840              :             for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
     841              :                 if tenant_shard_id.shard_number == ShardNumber(0) {
     842              :                     // Reset scheduling context each time we advance to the next Tenant
     843              :                     schedule_context = ScheduleContext::default();
     844              :                 }
     845              : 
     846              :                 tenant_shard.intent_from_observed(scheduler);
     847              :                 if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
     848              :                     // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
     849              :                     // not enough pageservers are available.  The tenant may well still be available
     850              :                     // to clients.
     851              :                     tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
     852              :                 } else {
     853              :                     // If we're both intending and observed to be attached at a particular node, we will
     854              :                     // emit a compute notification for this. In the case where our observed state does not
     855              :                     // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
     856              :                     if let Some(attached_at) = tenant_shard.stably_attached() {
     857              :                         compute_notifications.push(compute_hook::ShardUpdate {
     858              :                             tenant_shard_id: *tenant_shard_id,
     859              :                             node_id: attached_at,
     860              :                             stripe_size: tenant_shard.shard.stripe_size,
     861              :                             preferred_az: tenant_shard
     862              :                                 .preferred_az()
     863            0 :                                 .map(|az| Cow::Owned(az.clone())),
     864              :                         });
     865              :                     }
     866              :                 }
     867              :             }
     868              : 
     869              :             tenants.len()
     870              :         };
     871              : 
      872              :         // Before making any observable changes to the cluster, persist self
     873              :         // as leader in database and memory.
     874              :         let leadership = Leadership::new(
     875              :             self.persistence.clone(),
     876              :             self.config.clone(),
     877              :             self.cancel.child_token(),
     878              :         );
     879              : 
     880              :         if let Err(e) = leadership.become_leader(current_leader).await {
     881              :             tracing::error!("Failed to persist self as leader: {e}. Aborting start-up ...");
     882              :             std::process::exit(1);
     883              :         }
     884              : 
     885              :         let safekeepers = self.inner.read().unwrap().safekeepers.clone();
     886              :         let sk_schedule_requests =
     887              :             match safekeeper_reconciler::load_schedule_requests(self, &safekeepers).await {
     888              :                 Ok(v) => v,
     889              :                 Err(e) => {
     890              :                     tracing::warn!(
     891              :                         "Failed to load safekeeper pending ops at startup: {e}." // Don't abort for now: " Aborting start-up..."
     892              :                     );
     893              :                     // std::process::exit(1);
     894              :                     Vec::new()
     895              :                 }
     896              :             };
     897              : 
     898              :         {
     899              :             let mut locked = self.inner.write().unwrap();
     900              :             locked.become_leader();
     901              : 
     902              :             for (sk_id, _sk) in locked.safekeepers.clone().iter() {
     903              :                 locked.safekeeper_reconcilers.start_reconciler(*sk_id, self);
     904              :             }
     905              : 
     906              :             locked
     907              :                 .safekeeper_reconcilers
     908              :                 .schedule_request_vec(sk_schedule_requests);
     909              :         }
     910              : 
     911              :         // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
     912              :         // generation_pageserver in the database.
     913              : 
     914              :         // Emit compute hook notifications for all tenants which are already stably attached.  Other tenants
     915              :         // will emit compute hook notifications when they reconcile.
     916              :         //
     917              :         // Ordering: our calls to notify_attach_background synchronously establish a relative order for these notifications vs. any later
     918              :         // calls into the ComputeHook for the same tenant, so we can leave these to run to completion in the background and any later
     919              :         // calls will be correctly ordered with respect to them.
     920              :         //
     921              :         // Concurrency: we call notify_attach_background for all tenants, which will create O(N) tokio tasks, but almost all of them
     922              :         // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so they are very cheap until they get that
     923              :         // semaphore unit and start doing I/O.
     924              :         tracing::info!(
     925              :             "Sending {} compute notifications",
     926              :             compute_notifications.len()
     927              :         );
     928              :         self.compute_hook.notify_attach_background(
     929              :             compute_notifications,
     930              :             bg_compute_notify_result_tx.clone(),
     931              :             &self.cancel,
     932              :         );
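                      :         // Any notifications that fail are reported back on bg_compute_notify_result_tx and handled
                      :         // in process_results, which sets the shard's pending_compute_notification flag so that a
                      :         // later reconciliation retries the notification.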
     933              : 
     934              :         // Finally, now that the service is up and running, launch reconcile operations for any tenants
     935              :         // which require it: under normal circumstances this should only include tenants that were in some
     936              :         // transient state before we restarted, or any tenants whose compute hooks failed above.
     937              :         tracing::info!("Checking for shards in need of reconciliation...");
     938              :         let reconcile_all_result = self.reconcile_all();
     939              :         // We will not wait for these reconciliation tasks to run here: we're now done with startup and
     940              :         // normal operations may proceed.
     941              : 
     942              :         // Clean up any tenants that were found on pageservers but are not known to us.  Do this in the
     943              :         // background because it does not need to complete in order to proceed with other work.
     944              :         if !cleanup.is_empty() {
     945              :             tracing::info!("Cleaning up {} locations in the background", cleanup.len());
     946              :             tokio::task::spawn({
     947              :                 let cleanup_self = self.clone();
     948            0 :                 async move { cleanup_self.cleanup_locations(cleanup).await }
     949              :             });
     950              :         }
     951              : 
     952              :         // Reconcile the timeline imports:
     953              :         // 1. Mark each tenant shard of tenants with an importing timeline as importing.
     954              :         // 2. Finalize the completed imports in the background. This handles the case where
     955              :         //    the previous storage controller instance shut down whilst finalizing imports.
     956              :         let imports = self.persistence.list_timeline_imports().await;
     957              :         match imports {
     958              :             Ok(mut imports) => {
     959              :                 {
     960              :                     let mut locked = self.inner.write().unwrap();
     961              :                     for import in &imports {
     962              :                         locked
     963              :                             .tenants
     964              :                             .range_mut(TenantShardId::tenant_range(import.tenant_id))
     965            0 :                             .for_each(|(_id, shard)| {
     966            0 :                                 shard.importing = TimelineImportState::Importing
     967            0 :                             });
     968              :                     }
     969              :                 }
     970              : 
     971            0 :                 imports.retain(|import| import.is_complete());
     972              :                 tokio::task::spawn({
     973              :                     let finalize_imports_self = self.clone();
     974            0 :                     async move {
     975            0 :                         finalize_imports_self
     976            0 :                             .finalize_timeline_imports(imports)
     977            0 :                             .await
     978            0 :                     }
     979              :                 });
     980              :             }
     981              :             Err(err) => {
     982              :                 tracing::error!("Could not retrieve completed imports from database: {err}");
     983              :             }
     984              :         }
     985              : 
     986              :         let spawned_reconciles = reconcile_all_result.spawned_reconciles;
     987              :         tracing::info!(
     988              :             "Startup complete, spawned {spawned_reconciles} reconciliation tasks ({shard_count} shards total)"
     989              :         );
     990              :     }
     991              : 
     992            0 :     async fn initial_heartbeat_round<'a>(
     993            0 :         &self,
     994            0 :         node_ids: impl Iterator<Item = &'a NodeId>,
     995            0 :     ) -> (
     996            0 :         HashMap<NodeId, PageserverUtilization>,
     997            0 :         HashMap<NodeId, (SafekeeperUtilization, Instant)>,
     998            0 :     ) {
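                      :         // Only called from the startup path, before startup_complete is signalled.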
     999            0 :         assert!(!self.startup_complete.is_ready());
    1000              : 
    1001            0 :         let all_nodes = {
    1002            0 :             let locked = self.inner.read().unwrap();
    1003            0 :             locked.nodes.clone()
    1004              :         };
    1005              : 
    1006            0 :         let mut nodes_to_heartbeat = HashMap::new();
    1007            0 :         for node_id in node_ids {
    1008            0 :             match all_nodes.get(node_id) {
    1009            0 :                 Some(node) => {
    1010            0 :                     nodes_to_heartbeat.insert(*node_id, node.clone());
    1011            0 :                 }
    1012              :                 None => {
    1013            0 :                     tracing::warn!("Node {node_id} was removed during start-up");
    1014              :                 }
    1015              :             }
    1016              :         }
    1017              : 
    1018            0 :         let all_sks = {
    1019            0 :             let locked = self.inner.read().unwrap();
    1020            0 :             locked.safekeepers.clone()
    1021              :         };
    1022              : 
    1023            0 :         tracing::info!("Sending initial heartbeats...");
    1024            0 :         let (res_ps, res_sk) = tokio::join!(
    1025            0 :             self.heartbeater_ps.heartbeat(Arc::new(nodes_to_heartbeat)),
    1026            0 :             self.heartbeater_sk.heartbeat(all_sks)
    1027              :         );
    1028              : 
    1029            0 :         let mut online_nodes = HashMap::new();
    1030            0 :         if let Ok(deltas) = res_ps {
    1031            0 :             for (node_id, status) in deltas.0 {
    1032            0 :                 match status {
    1033            0 :                     PageserverState::Available { utilization, .. } => {
    1034            0 :                         online_nodes.insert(node_id, utilization);
    1035            0 :                     }
    1036            0 :                     PageserverState::Offline => {}
    1037              :                     PageserverState::WarmingUp { .. } => {
    1038            0 :                         unreachable!("Nodes are never marked warming-up during startup reconcile")
    1039              :                     }
    1040              :                 }
    1041              :             }
    1042            0 :         }
    1043              : 
    1044            0 :         let mut online_sks = HashMap::new();
    1045            0 :         if let Ok(deltas) = res_sk {
    1046            0 :             for (node_id, status) in deltas.0 {
    1047            0 :                 match status {
    1048              :                     SafekeeperState::Available {
    1049            0 :                         utilization,
    1050            0 :                         last_seen_at,
    1051            0 :                     } => {
    1052            0 :                         online_sks.insert(node_id, (utilization, last_seen_at));
    1053            0 :                     }
    1054            0 :                     SafekeeperState::Offline => {}
    1055              :                 }
    1056              :             }
    1057            0 :         }
    1058              : 
    1059            0 :         (online_nodes, online_sks)
    1060            0 :     }
    1061              : 
    1062              :     /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
    1063              :     ///
    1064              :     /// The result includes only nodes which responded within the deadline.
    1065            0 :     async fn scan_node_locations(
    1066            0 :         &self,
    1067            0 :         deadline: Instant,
    1068            0 :     ) -> HashMap<NodeId, LocationConfigListResponse> {
    1069            0 :         let nodes = {
    1070            0 :             let locked = self.inner.read().unwrap();
    1071            0 :             locked.nodes.clone()
    1072              :         };
    1073              : 
    1074            0 :         let mut node_results = HashMap::new();
    1075              : 
    1076            0 :         let mut node_list_futs = FuturesUnordered::new();
    1077              : 
    1078            0 :         tracing::info!("Scanning shards on {} nodes...", nodes.len());
    1079            0 :         for node in nodes.values() {
    1080            0 :             node_list_futs.push({
    1081            0 :                 async move {
    1082            0 :                     tracing::info!("Scanning shards on node {node}...");
    1083            0 :                     let timeout = Duration::from_secs(5);
    1084            0 :                     let response = node
    1085            0 :                         .with_client_retries(
    1086            0 :                             |client| async move { client.list_location_config().await },
    1087            0 :                             &self.http_client,
    1088            0 :                             &self.config.pageserver_jwt_token,
    1089              :                             1,
    1090              :                             5,
    1091            0 :                             timeout,
    1092            0 :                             &self.cancel,
    1093              :                         )
    1094            0 :                         .await;
    1095            0 :                     (node.get_id(), response)
    1096            0 :                 }
    1097              :             });
    1098              :         }
    1099              : 
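                      :         // Drain responses as they arrive; once the deadline expires we stop waiting and return
                      :         // whatever we have, so a slow or unreachable node cannot hold up startup indefinitely.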
    1100              :         loop {
    1101            0 :             let (node_id, result) = tokio::select! {
    1102            0 :                 next = node_list_futs.next() => {
    1103            0 :                     match next {
    1104            0 :                         Some(result) => result,
    1105              :                         None =>{
    1106              :                             // We got results for all our nodes
    1107            0 :                             break;
    1108              :                         }
    1109              : 
    1110              :                     }
    1111              :                 },
    1112            0 :                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
    1113              :                     // Give up waiting for anyone who hasn't responded: we will yield the results that we have
    1114            0 :                     tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
    1115            0 :                     break;
    1116              :                 }
    1117              :             };
    1118              : 
    1119            0 :             let Some(list_response) = result else {
    1120            0 :                 tracing::info!("Shutdown during startup_reconcile");
    1121            0 :                 break;
    1122              :             };
    1123              : 
    1124            0 :             match list_response {
    1125            0 :                 Err(e) => {
    1126            0 :                     tracing::warn!("Could not scan node {} ({e})", node_id);
    1127              :                 }
    1128            0 :                 Ok(listing) => {
    1129            0 :                     node_results.insert(node_id, listing);
    1130            0 :                 }
    1131              :             }
    1132              :         }
    1133              : 
    1134            0 :         node_results
    1135            0 :     }
    1136              : 
    1137            0 :     async fn build_global_observed_state(&self, deadline: Instant) -> GlobalObservedState {
    1138            0 :         let node_listings = self.scan_node_locations(deadline).await;
    1139            0 :         let mut observed = GlobalObservedState::default();
    1140              : 
    1141            0 :         for (node_id, location_confs) in node_listings {
    1142            0 :             tracing::info!(
    1143            0 :                 "Received {} shard statuses from pageserver {}",
    1144            0 :                 location_confs.tenant_shards.len(),
    1145              :                 node_id
    1146              :             );
    1147              : 
    1148            0 :             for (tid, location_conf) in location_confs.tenant_shards {
    1149            0 :                 let entry = observed.0.entry(tid).or_default();
    1150            0 :                 entry.locations.insert(
    1151            0 :                     node_id,
    1152            0 :                     ObservedStateLocation {
    1153            0 :                         conf: location_conf,
    1154            0 :                     },
    1155            0 :                 );
    1156            0 :             }
    1157              :         }
    1158              : 
    1159            0 :         observed
    1160            0 :     }
    1161              : 
    1162              :     /// Used during [`Self::startup_reconcile`] and shard splits: detach a list of unknown-to-us
    1163              :     /// tenants from pageservers.
    1164              :     ///
    1165              :     /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
    1166              :     /// tenants, then it is probably something that was incompletely deleted earlier: we will not be fighting
    1167              :     /// with any other task trying to attach it.
    1168              :     #[instrument(skip_all)]
    1169              :     async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
    1170              :         let nodes = self.inner.read().unwrap().nodes.clone();
    1171              : 
    1172              :         for (tenant_shard_id, node_id) in cleanup {
    1173              :             // A node reported a tenant_shard_id which is unknown to us: detach it.
    1174              :             let Some(node) = nodes.get(&node_id) else {
    1175              :                 // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
    1176              :                 // a location to clean up on a node that has since been removed.
    1177              :                 tracing::info!(
    1178              :                     "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
    1179              :                 );
    1180              :                 continue;
    1181              :             };
    1182              : 
    1183              :             if self.cancel.is_cancelled() {
    1184              :                 break;
    1185              :             }
    1186              : 
    1187              :             let client = PageserverClient::new(
    1188              :                 node.get_id(),
    1189              :                 self.http_client.clone(),
    1190              :                 node.base_url(),
    1191              :                 self.config.pageserver_jwt_token.as_deref(),
    1192              :             );
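                      :             // Ask the pageserver to detach the shard by applying a Detached location config.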
    1193              :             match client
    1194              :                 .location_config(
    1195              :                     tenant_shard_id,
    1196              :                     LocationConfig {
    1197              :                         mode: LocationConfigMode::Detached,
    1198              :                         generation: None,
    1199              :                         secondary_conf: None,
    1200              :                         shard_number: tenant_shard_id.shard_number.0,
    1201              :                         shard_count: tenant_shard_id.shard_count.literal(),
    1202              :                         shard_stripe_size: 0,
    1203              :                         tenant_conf: models::TenantConfig::default(),
    1204              :                     },
    1205              :                     None,
    1206              :                     false,
    1207              :                 )
    1208              :                 .await
    1209              :             {
    1210              :                 Ok(()) => {
    1211              :                     tracing::info!(
    1212              :                         "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
    1213              :                     );
    1214              :                 }
    1215              :                 Err(e) => {
    1216              :                     // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
    1217              :                     // break anything.
    1218              :                     tracing::error!(
    1219              :                         "Failed to detach unknown shard {tenant_shard_id} on pageserver {node_id}: {e}"
    1220              :                     );
    1221              :                 }
    1222              :             }
    1223              :         }
    1224              :     }
    1225              : 
    1226              :     /// Long running background task that periodically wakes up and looks for shards that need
    1227              :     /// reconciliation.  Reconciliation is fallible, so any reconciliation tasks that fail during
    1228              :     /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
    1229              :     /// for those retries.
    1230              :     #[instrument(skip_all)]
    1231              :     async fn background_reconcile(self: &Arc<Self>) {
    1232              :         self.startup_complete.clone().wait().await;
    1233              : 
    1234              :         const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
    1235              :         let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
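                      :         // Note: a tokio interval yields its first tick immediately, so the first reconcile pass
                      :         // runs as soon as startup_complete is signalled rather than after one full period.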
    1236              :         while !self.reconcilers_cancel.is_cancelled() {
    1237              :             tokio::select! {
    1238              :               _ = interval.tick() => {
    1239              :                 let reconcile_all_result = self.reconcile_all();
    1240              :                 if reconcile_all_result.can_run_optimizations() {
    1241              :                     // Run optimizer only when we didn't find any other work to do
    1242              :                     self.optimize_all().await;
    1243              :                 }
    1244              :                 // Always attempt autosplits. Sharding is crucial for bulk ingest performance, so we
    1245              :                 // must be responsive when new projects begin ingesting and reach the threshold.
    1246              :                 self.autosplit_tenants().await;
    1247              :             }
    1248              :               _ = self.reconcilers_cancel.cancelled() => return
    1249              :             }
    1250              :         }
    1251              :     }
    1252              :     /// Heartbeat all storage nodes once in a while.
    1253              :     #[instrument(skip_all)]
    1254              :     async fn spawn_heartbeat_driver(self: &Arc<Self>) {
    1255              :         self.startup_complete.clone().wait().await;
    1256              : 
    1257              :         let mut interval = tokio::time::interval(self.config.heartbeat_interval);
    1258              :         while !self.cancel.is_cancelled() {
    1259              :             tokio::select! {
    1260              :               _ = interval.tick() => { }
    1261              :               _ = self.cancel.cancelled() => return
    1262              :             };
    1263              : 
    1264              :             let nodes = {
    1265              :                 let locked = self.inner.read().unwrap();
    1266              :                 locked.nodes.clone()
    1267              :             };
    1268              : 
    1269              :             let safekeepers = {
    1270              :                 let locked = self.inner.read().unwrap();
    1271              :                 locked.safekeepers.clone()
    1272              :             };
    1273              : 
    1274              :             let (res_ps, res_sk) = tokio::join!(
    1275              :                 self.heartbeater_ps.heartbeat(nodes),
    1276              :                 self.heartbeater_sk.heartbeat(safekeepers)
    1277              :             );
    1278              : 
    1279              :             if let Ok(deltas) = res_ps {
    1280              :                 let mut to_handle = Vec::default();
    1281              : 
    1282              :                 for (node_id, state) in deltas.0 {
    1283              :                     let new_availability = match state {
    1284              :                         PageserverState::Available { utilization, .. } => {
    1285              :                             NodeAvailability::Active(utilization)
    1286              :                         }
    1287              :                         PageserverState::WarmingUp { started_at } => {
    1288              :                             NodeAvailability::WarmingUp(started_at)
    1289              :                         }
    1290              :                         PageserverState::Offline => {
    1291              :                             // The node might have been placed in the WarmingUp state
    1292              :                             // while the heartbeat round was ongoing. Hence, filter out
    1293              :                             // offline transitions for WarmingUp nodes that are still within
    1294              :                             // their grace period.
    1295              :                             if let Ok(NodeAvailability::WarmingUp(started_at)) = self
    1296              :                                 .get_node(node_id)
    1297              :                                 .await
    1298              :                                 .as_ref()
    1299            0 :                                 .map(|n| n.get_availability())
    1300              :                             {
    1301              :                                 let now = Instant::now();
    1302              :                                 if now - *started_at >= self.config.max_warming_up_interval {
    1303              :                                     NodeAvailability::Offline
    1304              :                                 } else {
    1305              :                                     NodeAvailability::WarmingUp(*started_at)
    1306              :                                 }
    1307              :                             } else {
    1308              :                                 NodeAvailability::Offline
    1309              :                             }
    1310              :                         }
    1311              :                     };
    1312              : 
    1313              :                     let node_lock = trace_exclusive_lock(
    1314              :                         &self.node_op_locks,
    1315              :                         node_id,
    1316              :                         NodeOperations::Configure,
    1317              :                     )
    1318              :                     .await;
    1319              : 
    1320              :                     pausable_failpoint!("heartbeat-pre-node-state-configure");
    1321              : 
    1322              :                     // This is the code path for genuine availability transitions (i.e. node
    1323              :                     // goes unavailable and/or comes back online).
    1324              :                     let res = self
    1325              :                         .node_state_configure(node_id, Some(new_availability), None, &node_lock)
    1326              :                         .await;
    1327              : 
    1328              :                     match res {
    1329              :                         Ok(transition) => {
    1330              :                             // Keep hold of the lock until the availability transitions
    1331              :                             // have been handled in
    1332              :                             // [`Service::handle_node_availability_transitions`] in order to avoid
    1333              :                             // racing with [`Service::external_node_configure`].
    1334              :                             to_handle.push((node_id, node_lock, transition));
    1335              :                         }
    1336              :                         Err(ApiError::NotFound(_)) => {
    1337              :                             // This should be rare, but legitimate since the heartbeats are done
    1338              :                             // on a snapshot of the nodes.
    1339              :                             tracing::info!("Node {} was not found after heartbeat round", node_id);
    1340              :                         }
    1341              :                         Err(ApiError::ShuttingDown) => {
    1342              :                             // No-op: we're shutting down, no need to try and update any nodes' statuses
    1343              :                         }
    1344              :                         Err(err) => {
    1345              :                             // Transition to active involves reconciling: if a node responds to a heartbeat then
    1346              :                             // becomes unavailable again, we may get an error here.
    1347              :                             tracing::error!(
    1348              :                                 "Failed to update node state {} after heartbeat round: {}",
    1349              :                                 node_id,
    1350              :                                 err
    1351              :                             );
    1352              :                         }
    1353              :                     }
    1354              :                 }
    1355              : 
    1356              :                 // We collected all the transitions above and now we handle them.
    1357              :                 let res = self.handle_node_availability_transitions(to_handle).await;
    1358              :                 if let Err(errs) = res {
    1359              :                     for (node_id, err) in errs {
    1360              :                         match err {
    1361              :                             ApiError::NotFound(_) => {
    1362              :                                 // This should be rare, but legitimate since the heartbeats are done
    1363              :                                 // on a snapshot of the nodes.
    1364              :                                 tracing::info!(
    1365              :                                     "Node {} was not found after heartbeat round",
    1366              :                                     node_id
    1367              :                                 );
    1368              :                             }
    1369              :                             err => {
    1370              :                                 tracing::error!(
    1371              :                                     "Failed to handle availability transition for {} after heartbeat round: {}",
    1372              :                                     node_id,
    1373              :                                     err
    1374              :                                 );
    1375              :                             }
    1376              :                         }
    1377              :                     }
    1378              :                 }
    1379              :             }
    1380              :             if let Ok(deltas) = res_sk {
    1381              :                 let mut to_activate = Vec::new();
    1382              :                 {
    1383              :                     let mut locked = self.inner.write().unwrap();
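                      :                     // Clone the shared safekeeper map, apply the heartbeat deltas to the copy,
                      :                     // and swap the updated map back in below (copy-on-write).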
    1384              :                     let mut safekeepers = (*locked.safekeepers).clone();
    1385              : 
    1386              :                     for (id, state) in deltas.0 {
    1387              :                         let Some(sk) = safekeepers.get_mut(&id) else {
    1388              :                             tracing::info!(
    1389              :                                 "Couldn't update safekeeper state for id {id} from heartbeat={state:?}"
    1390              :                             );
    1391              :                             continue;
    1392              :                         };
    1393              :                         if sk.scheduling_policy() == SkSchedulingPolicy::Activating
    1394              :                             && let SafekeeperState::Available { .. } = state
    1395              :                         {
    1396              :                             to_activate.push(id);
    1397              :                         }
    1398              :                         sk.set_availability(state);
    1399              :                     }
    1400              :                     locked.safekeepers = Arc::new(safekeepers);
    1401              :                 }
    1402              :                 for sk_id in to_activate {
    1403              :                     // TODO this can race with set_scheduling_policy (can create disjoint DB <-> in-memory state)
    1404              :                     tracing::info!("Activating safekeeper {sk_id}");
    1405              :                     match self.persistence.activate_safekeeper(sk_id.0 as i64).await {
    1406              :                         Ok(Some(())) => {}
    1407              :                         Ok(None) => {
    1408              :                             tracing::info!(
    1409              :                                 "safekeeper {sk_id} has been removed from the db or has a scheduling policy other than active or activating"
    1410              :                             );
    1411              :                         }
    1412              :                         Err(e) => {
    1413              :                             tracing::warn!("couldn't apply activation of {sk_id} to db: {e}");
    1414              :                             continue;
    1415              :                         }
    1416              :                     }
    1417              :                     if let Err(e) = self
    1418              :                         .set_safekeeper_scheduling_policy_in_mem(sk_id, SkSchedulingPolicy::Active)
    1419              :                         .await
    1420              :                     {
    1421              :                         tracing::info!("couldn't activate safekeeper {sk_id} in memory: {e}");
    1422              :                         continue;
    1423              :                     }
    1424              :                     tracing::info!("Activation of safekeeper {sk_id} done");
    1425              :                 }
    1426              :             }
    1427              :         }
    1428              :     }
    1429              : 
    1430              :     /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
    1431              :     /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
    1432              :     /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
    1433              :     /// will indicate that reconciliation is not needed.
    1434              :     #[instrument(skip_all, fields(
    1435              :         seq=%result.sequence,
    1436              :         tenant_id=%result.tenant_shard_id.tenant_id,
    1437              :         shard_id=%result.tenant_shard_id.shard_slug(),
    1438              :     ))]
    1439              :     fn process_result(&self, result: ReconcileResult) {
    1440              :         let mut locked = self.inner.write().unwrap();
    1441              :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    1442              :         let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
    1443              :             // A reconciliation result might race with removing a tenant: drop results for
    1444              :             // tenants that aren't in our map.
    1445              :             return;
    1446              :         };
    1447              : 
    1448              :         // Usually generation should only be updated via this path, so the max() isn't
    1449              :         // needed, but it is used to handle out-of-band updates via e.g. a test hook.
    1450              :         tenant.generation = std::cmp::max(tenant.generation, result.generation);
    1451              : 
    1452              :         // If the reconciler signals that it failed to notify compute, set this state on
    1453              :         // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
    1454              :         tenant.pending_compute_notification = result.pending_compute_notification;
    1455              : 
    1456              :         // Let the TenantShard know it is idle.
    1457              :         tenant.reconcile_complete(result.sequence);
    1458              : 
    1459              :         // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
    1460              :         // make to the tenant
    1461            0 :         let deltas = result.observed_deltas.into_iter().flat_map(|delta| {
    1462              :             // Deltas that reference a node which has since been removed are dropped here
    1463              :             // by the `?` on the lookup below.
    1464            0 :             let node = nodes.get(delta.node_id())?;
    1465              : 
    1466            0 :             if node.is_available() {
    1467            0 :                 return Some(delta);
    1468            0 :             }
    1469              : 
    1470              :             // In case a node became unavailable concurrently with the reconcile, observed
    1471              :             // locations on it are now uncertain. By convention, set them to None in order
    1472              :             // for them to get refreshed when the node comes back online.
    1473            0 :             Some(ObservedStateDelta::Upsert(Box::new((
    1474            0 :                 node.get_id(),
    1475            0 :                 ObservedStateLocation { conf: None },
    1476            0 :             ))))
    1477            0 :         });
    1478              : 
    1479              :         match result.result {
    1480              :             Ok(()) => {
    1481              :                 tenant.consecutive_errors_count = 0;
    1482              :                 tenant.apply_observed_deltas(deltas);
    1483              :                 tenant.waiter.advance(result.sequence);
    1484              :             }
    1485              :             Err(e) => {
    1486              :                 match e {
    1487              :                     ReconcileError::Cancel => {
    1488              :                         tracing::info!("Reconciler was cancelled");
    1489              :                     }
    1490              :                     ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    1491              :                         // This might be due to the reconciler getting cancelled, or it might
    1492              :                         // be due to the `Node` being marked offline.
    1493              :                         tracing::info!("Reconciler cancelled during pageserver API call");
    1494              :                     }
    1495              :                     _ => {
    1496              :                         tracing::warn!("Reconcile error: {}", e);
    1497              :                     }
    1498              :                 }
    1499              : 
    1500              :                 tenant.consecutive_errors_count = tenant.consecutive_errors_count.saturating_add(1);
    1501              : 
    1502              :                 // Ordering: populate last_error before advancing error_seq,
    1503              :                 // so that waiters will see the correct error after waiting.
    1504              :                 tenant.set_last_error(result.sequence, e);
    1505              : 
    1506              :                 // Skip deletions on reconcile failures
    1507              :                 let upsert_deltas =
    1508            0 :                     deltas.filter(|delta| matches!(delta, ObservedStateDelta::Upsert(_)));
    1509              :                 tenant.apply_observed_deltas(upsert_deltas);
    1510              :             }
    1511              :         }
    1512              : 
    1513              :         // If we just finished detaching all shards for a tenant, it might be time to drop it from memory.
    1514              :         if tenant.policy == PlacementPolicy::Detached {
    1515              :             // We may only drop a tenant from memory while holding the exclusive lock on the tenant ID: this protects us
    1516              :             // from concurrent execution wrt a request handler that might expect the tenant to remain in memory for the
    1517              :             // duration of the request.
    1518              :             let guard = self.tenant_op_locks.try_exclusive(
    1519              :                 tenant.tenant_shard_id.tenant_id,
    1520              :                 TenantOperations::DropDetached,
    1521              :             );
    1522              :             if let Some(guard) = guard {
    1523              :                 self.maybe_drop_tenant(tenant.tenant_shard_id.tenant_id, &mut locked, &guard);
    1524              :             }
    1525              :         }
    1526              : 
    1527              :         // Maybe some other work can proceed now that this job finished.
    1528              :         //
    1529              :         // Only bother with this if we have some semaphore units available in the normal-priority semaphore (these
    1530              :         // reconciles are scheduled at [`ReconcilerPriority::Normal`]).
    1531              :         if self.reconciler_concurrency.available_permits() > 0 {
    1532              :             while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
    1533              :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    1534              :                 if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
    1535              :                     shard.delayed_reconcile = false;
    1536              :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    1537              :                 }
    1538              : 
    1539              :                 if self.reconciler_concurrency.available_permits() == 0 {
    1540              :                     break;
    1541              :                 }
    1542              :             }
    1543              :         }
    1544              :     }
    1545              : 
    1546            0 :     async fn process_results(
    1547            0 :         &self,
    1548            0 :         mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
    1549            0 :         mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
    1550            0 :             Result<(), (TenantShardId, NotifyError)>,
    1551            0 :         >,
    1552            0 :     ) {
    1553              :         loop {
    1554              :             // Wait for the next result, or for cancellation
    1555            0 :             tokio::select! {
    1556            0 :                 r = result_rx.recv() => {
    1557            0 :                     match r {
    1558            0 :                         Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
    1559            0 :                         None | Some(ReconcileResultRequest::Stop) => {break;}
    1560              :                     }
    1561              :                 }
    1562            0 :                 _ = async{
    1563            0 :                     match bg_compute_hook_result_rx.recv().await {
    1564            0 :                         Some(result) => {
    1565            0 :                             if let Err((tenant_shard_id, notify_error)) = result {
    1566            0 :                                 tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
    1567            0 :                                 let mut locked = self.inner.write().unwrap();
    1568            0 :                                 if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
    1569            0 :                                     shard.pending_compute_notification = true;
    1570            0 :                                 }
    1571              : 
    1572            0 :                             }
    1573              :                         },
    1574              :                         None => {
                            // This channel is dead, but we don't want to terminate the outer loop: just wait for shutdown.
    1576            0 :                             self.cancel.cancelled().await;
    1577              :                         }
    1578              :                     }
    1579            0 :                 } => {},
    1580            0 :                 _ = self.cancel.cancelled() => {
    1581            0 :                     break;
    1582              :                 }
    1583              :             };
    1584              :         }
    1585            0 :     }
    1586              : 
    1587            0 :     async fn process_aborts(
    1588            0 :         &self,
    1589            0 :         mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
    1590            0 :     ) {
    1591              :         loop {
    1592              :             // Wait for the next result, or for cancellation
    1593            0 :             let op = tokio::select! {
    1594            0 :                 r = abort_rx.recv() => {
    1595            0 :                     match r {
    1596            0 :                         Some(op) => {op},
    1597            0 :                         None => {break;}
    1598              :                     }
    1599              :                 }
    1600            0 :                 _ = self.cancel.cancelled() => {
    1601            0 :                     break;
    1602              :                 }
    1603              :             };
    1604              : 
    1605              :             // Retry until shutdown: we must keep this request object alive until it is properly
    1606              :             // processed, as it holds a lock guard that prevents other operations from acting on
    1607              :             // the tenant while it is in a partially-split state.
    1608            0 :             while !self.reconcilers_cancel.is_cancelled() {
    1609            0 :                 match self.abort_tenant_shard_split(&op).await {
    1610            0 :                     Ok(_) => break,
    1611            0 :                     Err(e) => {
    1612            0 :                         tracing::warn!(
    1613            0 :                             "Failed to abort shard split on {}, will retry: {e}",
    1614              :                             op.tenant_id
    1615              :                         );
    1616              : 
    1617              :                         // If a node is unavailable, we hope that it has been properly marked Offline
    1618              :                         // when we retry, so that the abort op will succeed.  If the abort op is failing
    1619              :                         // for some other reason, we will keep retrying forever, or until a human notices
    1620              :                         // and does something about it (either fixing a pageserver or restarting the controller).
    1621            0 :                         tokio::time::timeout(
    1622            0 :                             Duration::from_secs(5),
    1623            0 :                             self.reconcilers_cancel.cancelled(),
    1624            0 :                         )
    1625            0 :                         .await
    1626            0 :                         .ok();
    1627              :                     }
    1628              :                 }
    1629              :             }
    1630              :         }
    1631            0 :     }
    1632              : 
    1633            0 :     pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
    1634            0 :         let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
    1635            0 :         let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
    1636              : 
    1637            0 :         let leadership_cancel = CancellationToken::new();
    1638            0 :         let leadership = Leadership::new(persistence.clone(), config.clone(), leadership_cancel);
    1639            0 :         let (leader, leader_step_down_state) = leadership.step_down_current_leader().await?;
    1640              : 
    1641              :         // Apply the migrations **after** the current leader has stepped down
    1642              :         // (or we've given up waiting for it), but **before** reading from the
    1643              :         // database. The only exception is reading the current leader before
    1644              :         // migrating.
    1645            0 :         persistence.migration_run().await?;
    1646              : 
    1647            0 :         tracing::info!("Loading nodes from database...");
    1648            0 :         let nodes = persistence
    1649            0 :             .list_nodes()
    1650            0 :             .await?
    1651            0 :             .into_iter()
    1652            0 :             .map(|x| Node::from_persistent(x, config.use_https_pageserver_api))
    1653            0 :             .collect::<anyhow::Result<Vec<Node>>>()?;
    1654            0 :         let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
    1655            0 :         tracing::info!("Loaded {} nodes from database.", nodes.len());
    1656            0 :         metrics::METRICS_REGISTRY
    1657            0 :             .metrics_group
    1658            0 :             .storage_controller_pageserver_nodes
    1659            0 :             .set(nodes.len() as i64);
    1660            0 :         metrics::METRICS_REGISTRY
    1661            0 :             .metrics_group
    1662            0 :             .storage_controller_https_pageserver_nodes
    1663            0 :             .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    1664              : 
    1665            0 :         tracing::info!("Loading safekeepers from database...");
    1666            0 :         let safekeepers = persistence
    1667            0 :             .list_safekeepers()
    1668            0 :             .await?
    1669            0 :             .into_iter()
    1670            0 :             .map(|skp| {
    1671            0 :                 Safekeeper::from_persistence(
    1672            0 :                     skp,
    1673            0 :                     CancellationToken::new(),
    1674            0 :                     config.use_https_safekeeper_api,
    1675              :                 )
    1676            0 :             })
    1677            0 :             .collect::<anyhow::Result<Vec<_>>>()?;
    1678            0 :         let safekeepers: HashMap<NodeId, Safekeeper> =
    1679            0 :             safekeepers.into_iter().map(|n| (n.get_id(), n)).collect();
    1680            0 :         tracing::info!("Loaded {} safekeepers from database.", safekeepers.len());
    1681            0 :         metrics::METRICS_REGISTRY
    1682            0 :             .metrics_group
    1683            0 :             .storage_controller_safekeeper_nodes
    1684            0 :             .set(safekeepers.len() as i64);
    1685            0 :         metrics::METRICS_REGISTRY
    1686            0 :             .metrics_group
    1687            0 :             .storage_controller_https_safekeeper_nodes
    1688            0 :             .set(safekeepers.values().filter(|s| s.has_https_port()).count() as i64);
    1689              : 
    1690            0 :         tracing::info!("Loading shards from database...");
    1691            0 :         let mut tenant_shard_persistence = persistence.load_active_tenant_shards().await?;
    1692            0 :         tracing::info!(
    1693            0 :             "Loaded {} shards from database.",
    1694            0 :             tenant_shard_persistence.len()
    1695              :         );
    1696              : 
    1697              :         // If any shard splits were in progress, reset the database state to abort them
    1698            0 :         let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
    1699            0 :             HashMap::new();
    1700            0 :         for tsp in &mut tenant_shard_persistence {
    1701            0 :             let shard = tsp.get_shard_identity()?;
    1702            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1703            0 :             let entry = tenant_shard_count_min_max
    1704            0 :                 .entry(tenant_shard_id.tenant_id)
    1705            0 :                 .or_insert_with(|| (shard.count, shard.count));
    1706            0 :             entry.0 = std::cmp::min(entry.0, shard.count);
    1707            0 :             entry.1 = std::cmp::max(entry.1, shard.count);
    1708              :         }
    1709              : 
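                      :         // Example: if a tenant has shards persisted at both ShardCount(2) and ShardCount(4),
                      :         // a split from 2 to 4 shards was interrupted; the mismatch is detected below and the
                      :         // split is aborted, retaining only the count-2 parent shards.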
    1710            0 :         for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
    1711            0 :             if count_min != count_max {
    1712              :                 // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
    1713              :                 // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
    1714              :                 // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
    1715            0 :                 tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
    1716            0 :                 let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
    1717              : 
    1718              :                 // We may never see the Complete status here: if the split was complete, we wouldn't have
    1719              :                 // identified this tenant as having mismatching min/max counts.
    1720            0 :                 assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
    1721              : 
    1722              :                 // Clear the splitting status in-memory, to reflect that we just aborted in the database
    1723            0 :                 tenant_shard_persistence.iter_mut().for_each(|tsp| {
    1724              :                     // Set idle split state on those shards that we will retain.
    1725            0 :                     let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
    1726            0 :                     if tsp_tenant_id == tenant_id
    1727            0 :                         && tsp.get_shard_identity().unwrap().count == count_min
    1728            0 :                     {
    1729            0 :                         tsp.splitting = SplitState::Idle;
    1730            0 :                     } else if tsp_tenant_id == tenant_id {
    1731              :                         // Leave the splitting state on the child shards: this will be used next to
    1732              :                         // drop them.
    1733            0 :                         tracing::info!(
    1734            0 :                             "Shard {tsp_tenant_id} will be dropped after shard split abort",
    1735              :                         );
    1736            0 :                     }
    1737            0 :                 });
    1738              : 
    1739              :                 // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
    1740            0 :                 tenant_shard_persistence.retain(|tsp| {
    1741            0 :                     TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
    1742            0 :                         || tsp.splitting == SplitState::Idle
    1743            0 :                 });
    1744            0 :             }
    1745              :         }
    1746              : 
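             A minimal, hypothetical sketch of the interrupted-split detection above (illustrative names and simplified types, not part of service.rs): a tenant whose persisted shards report more than one distinct shard count must have had a split left half-finished, and becomes a candidate for abort.

                 use std::collections::HashMap;

                 /// Illustrative helper: returns tenants whose shards disagree on shard count,
                 /// i.e. tenants with an interrupted shard split (keys simplified to u64/u8).
                 fn tenants_with_interrupted_splits(shards: &[(u64, u8)]) -> Vec<u64> {
                     let mut min_max: HashMap<u64, (u8, u8)> = HashMap::new();
                     for &(tenant, count) in shards {
                         let entry = min_max.entry(tenant).or_insert((count, count));
                         entry.0 = entry.0.min(count);
                         entry.1 = entry.1.max(count);
                     }
                     min_max
                         .into_iter()
                         .filter(|&(_, (lo, hi))| lo != hi)
                         .map(|(tenant, _)| tenant)
                         .collect()
                 }
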
    1747            0 :         let mut tenants = BTreeMap::new();
    1748              : 
    1749            0 :         let mut scheduler = Scheduler::new(nodes.values());
    1750              : 
    1751              :         #[cfg(feature = "testing")]
    1752              :         {
    1753              :             use pageserver_api::controller_api::AvailabilityZone;
    1754              : 
    1755              :             // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
    1756              :             // tests only store the shards, not the nodes.  The nodes will be loaded shortly
    1757              :             // afterwards, when pageservers start up and register.
    1758            0 :             let mut node_ids = HashSet::new();
    1759            0 :             for tsp in &tenant_shard_persistence {
    1760            0 :                 if let Some(node_id) = tsp.generation_pageserver {
    1761            0 :                     node_ids.insert(node_id);
    1762            0 :                 }
    1763              :             }
    1764            0 :             for node_id in node_ids {
    1765            0 :                 tracing::info!("Creating node {} in scheduler for tests", node_id);
    1766            0 :                 let node = Node::new(
    1767            0 :                     NodeId(node_id as u64),
    1768            0 :                     "".to_string(),
    1769              :                     123,
    1770            0 :                     None,
    1771            0 :                     "".to_string(),
    1772              :                     123,
    1773            0 :                     None,
    1774            0 :                     None,
    1775            0 :                     AvailabilityZone("test_az".to_string()),
    1776              :                     false,
    1777              :                 )
    1778            0 :                 .unwrap();
    1779              : 
    1780            0 :                 scheduler.node_upsert(&node);
    1781              :             }
    1782              :         }
    1783            0 :         for tsp in tenant_shard_persistence {
    1784            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1785              : 
    1786              :             // We will populate intent properly later in [`Self::startup_reconcile`]; initially we populate
    1787              :             // it with what we can infer: the node for which a generation was most recently issued.
    1788            0 :             let mut intent = IntentState::new(
    1789            0 :                 tsp.preferred_az_id
    1790            0 :                     .as_ref()
    1791            0 :                     .map(|az| AvailabilityZone(az.clone())),
    1792              :             );
    1793            0 :             if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
    1794              :             {
    1795            0 :                 if nodes.contains_key(&generation_pageserver) {
    1796            0 :                     intent.set_attached(&mut scheduler, Some(generation_pageserver));
    1797            0 :                 } else {
    1798              :                     // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
    1799              :                     // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
    1800              :                     // on different pageservers.
    1801            0 :                     tracing::warn!(
    1802            0 :                         "Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled"
    1803              :                     );
    1804              :                 }
    1805            0 :             }
    1806            0 :             let new_tenant = TenantShard::from_persistent(tsp, intent)?;
    1807              : 
    1808            0 :             tenants.insert(tenant_shard_id, new_tenant);
    1809              :         }
    1810              : 
    1811            0 :         let (startup_completion, startup_complete) = utils::completion::channel();
    1812              : 
    1813              :         // This channel is continuously consumed by process_results, so doesn't need to be very large.
    1814            0 :         let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
    1815            0 :             tokio::sync::mpsc::channel(512);
    1816              : 
    1817            0 :         let (delayed_reconcile_tx, delayed_reconcile_rx) =
    1818            0 :             tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
    1819              : 
    1820            0 :         let cancel = CancellationToken::new();
    1821            0 :         let reconcilers_cancel = cancel.child_token();
    1822              : 
    1823            0 :         let mut http_client = reqwest::Client::builder();
    1824              :         // We intentionally disable the connection pool, so every request will create its own TCP connection.
    1825              :         // It's especially important for heartbeaters to notice more network problems.
    1826              :         //
    1827              :         // TODO: It makes sense to use this client only in heartbeaters and create a second one with
    1828              :         // connection pooling for everything else. But reqwest::Client may create a connection without
    1829              :         // ever using it (it uses hyper's Client under the hood):
    1830              :         // https://github.com/hyperium/hyper-util/blob/d51318df3461d40e5f5e5ca163cb3905ac960209/src/client/legacy/client.rs#L415
    1831              :         //
    1832              :         // Because of a bug in hyper0::Connection::graceful_shutdown such connections hang during
    1833              :         // graceful server shutdown: https://github.com/hyperium/hyper/issues/2730
    1834              :         //
    1835              :         // The bug has been fixed in hyper v1, so keep-alive may be enabled only after we migrate to hyper1.
    1836            0 :         http_client = http_client.pool_max_idle_per_host(0);
    1837            0 :         for ssl_ca_cert in &config.ssl_ca_certs {
    1838            0 :             http_client = http_client.add_root_certificate(ssl_ca_cert.clone());
    1839            0 :         }
    1840            0 :         let http_client = http_client.build()?;
    1841              : 
    1842            0 :         let heartbeater_ps = Heartbeater::new(
    1843            0 :             http_client.clone(),
    1844            0 :             config.pageserver_jwt_token.clone(),
    1845            0 :             config.max_offline_interval,
    1846            0 :             config.max_warming_up_interval,
    1847            0 :             cancel.clone(),
    1848              :         );
    1849              : 
    1850            0 :         let heartbeater_sk = Heartbeater::new(
    1851            0 :             http_client.clone(),
    1852            0 :             config.safekeeper_jwt_token.clone(),
    1853            0 :             config.max_offline_interval,
    1854            0 :             config.max_warming_up_interval,
    1855            0 :             cancel.clone(),
    1856              :         );
    1857              : 
    1858            0 :         let initial_leadership_status = if config.start_as_candidate {
    1859            0 :             LeadershipStatus::Candidate
    1860              :         } else {
    1861            0 :             LeadershipStatus::Leader
    1862              :         };
    1863              : 
    1864            0 :         let this = Arc::new(Self {
    1865            0 :             inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
    1866            0 :                 nodes,
    1867            0 :                 safekeepers,
    1868            0 :                 tenants,
    1869            0 :                 scheduler,
    1870            0 :                 delayed_reconcile_rx,
    1871            0 :                 initial_leadership_status,
    1872            0 :                 reconcilers_cancel.clone(),
    1873              :             ))),
    1874            0 :             config: config.clone(),
    1875            0 :             persistence,
    1876            0 :             compute_hook: Arc::new(ComputeHook::new(config.clone())?),
    1877            0 :             result_tx,
    1878            0 :             heartbeater_ps,
    1879            0 :             heartbeater_sk,
    1880            0 :             reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1881            0 :                 config.reconciler_concurrency,
    1882              :             )),
    1883            0 :             priority_reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1884            0 :                 config.priority_reconciler_concurrency,
    1885              :             )),
    1886            0 :             delayed_reconcile_tx,
    1887            0 :             abort_tx,
    1888            0 :             startup_complete: startup_complete.clone(),
    1889            0 :             cancel,
    1890            0 :             reconcilers_cancel,
    1891            0 :             gate: Gate::default(),
    1892            0 :             reconcilers_gate: Gate::default(),
    1893            0 :             tenant_op_locks: Default::default(),
    1894            0 :             node_op_locks: Default::default(),
    1895            0 :             http_client,
    1896            0 :             step_down_barrier: Default::default(),
    1897              :         });
    1898              : 
    1899            0 :         let result_task_this = this.clone();
    1900            0 :         tokio::task::spawn(async move {
    1901              :             // Block shutdown until we're done (we must respect self.cancel)
    1902            0 :             if let Ok(_gate) = result_task_this.gate.enter() {
    1903            0 :                 result_task_this
    1904            0 :                     .process_results(result_rx, bg_compute_notify_result_rx)
    1905            0 :                     .await
    1906            0 :             }
    1907            0 :         });
    1908              : 
    1909            0 :         tokio::task::spawn({
    1910            0 :             let this = this.clone();
    1911            0 :             async move {
    1912              :                 // Block shutdown until we're done (we must respect self.cancel)
    1913            0 :                 if let Ok(_gate) = this.gate.enter() {
    1914            0 :                     this.process_aborts(abort_rx).await
    1915            0 :                 }
    1916            0 :             }
    1917              :         });
    1918              : 
    1919            0 :         tokio::task::spawn({
    1920            0 :             let this = this.clone();
    1921            0 :             async move {
    1922            0 :                 if let Ok(_gate) = this.gate.enter() {
    1923              :                     loop {
    1924            0 :                         tokio::select! {
    1925            0 :                             _ = this.cancel.cancelled() => {
    1926            0 :                                 break;
    1927              :                             },
    1928            0 :                             _ = tokio::time::sleep(Duration::from_secs(60)) => {}
    1929              :                         };
    1930            0 :                         this.tenant_op_locks.housekeeping();
    1931              :                     }
    1932            0 :                 }
    1933            0 :             }
    1934              :         });
    1935              : 
    1936            0 :         tokio::task::spawn({
    1937            0 :             let this = this.clone();
    1938              :             // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
    1939              :             // is done.
    1940            0 :             let startup_completion = startup_completion.clone();
    1941            0 :             async move {
    1942              :                 // Block shutdown until we're done (we must respect self.cancel)
    1943            0 :                 let Ok(_gate) = this.gate.enter() else {
    1944            0 :                     return;
    1945              :                 };
    1946              : 
    1947            0 :                 this.startup_reconcile(leader, leader_step_down_state, bg_compute_notify_result_tx)
    1948            0 :                     .await;
    1949              : 
    1950            0 :                 drop(startup_completion);
    1951            0 :             }
    1952              :         });
    1953              : 
    1954            0 :         tokio::task::spawn({
    1955            0 :             let this = this.clone();
    1956            0 :             let startup_complete = startup_complete.clone();
    1957            0 :             async move {
    1958            0 :                 startup_complete.wait().await;
    1959            0 :                 this.background_reconcile().await;
    1960            0 :             }
    1961              :         });
    1962              : 
    1963            0 :         tokio::task::spawn({
    1964            0 :             let this = this.clone();
    1965            0 :             let startup_complete = startup_complete.clone();
    1966            0 :             async move {
    1967            0 :                 startup_complete.wait().await;
    1968            0 :                 this.spawn_heartbeat_driver().await;
    1969            0 :             }
    1970              :         });
    1971              : 
    1972            0 :         Ok(this)
    1973            0 :     }
    1974              : 
    1975            0 :     pub(crate) async fn attach_hook(
    1976            0 :         &self,
    1977            0 :         attach_req: AttachHookRequest,
    1978            0 :     ) -> anyhow::Result<AttachHookResponse> {
    1979            0 :         let _tenant_lock = trace_exclusive_lock(
    1980            0 :             &self.tenant_op_locks,
    1981            0 :             attach_req.tenant_shard_id.tenant_id,
    1982            0 :             TenantOperations::AttachHook,
    1983            0 :         )
    1984            0 :         .await;
    1985              : 
    1986              :         // This is a test hook.  To enable using it on tenants that were created directly with
    1987              :         // the pageserver API (not via this service), we will auto-create any missing tenant
    1988              :         // shards with default state.
    1989            0 :         let insert = {
    1990            0 :             match self
    1991            0 :                 .maybe_load_tenant(attach_req.tenant_shard_id.tenant_id, &_tenant_lock)
    1992            0 :                 .await
    1993              :             {
    1994            0 :                 Ok(_) => false,
    1995            0 :                 Err(ApiError::NotFound(_)) => true,
    1996            0 :                 Err(e) => return Err(e.into()),
    1997              :             }
    1998              :         };
    1999              : 
    2000            0 :         if insert {
    2001            0 :             let config = attach_req.config.clone().unwrap_or_default();
    2002            0 :             let tsp = TenantShardPersistence {
    2003            0 :                 tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
    2004            0 :                 shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
    2005            0 :                 shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
    2006            0 :                 shard_stripe_size: 0,
    2007            0 :                 generation: attach_req.generation_override.or(Some(0)),
    2008            0 :                 generation_pageserver: None,
    2009            0 :                 placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
    2010            0 :                 config: serde_json::to_string(&config).unwrap(),
    2011            0 :                 splitting: SplitState::default(),
    2012            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2013            0 :                     .unwrap(),
    2014            0 :                 preferred_az_id: None,
    2015            0 :             };
    2016              : 
    2017            0 :             match self.persistence.insert_tenant_shards(vec![tsp]).await {
    2018            0 :                 Err(e) => match e {
    2019              :                     DatabaseError::Query(diesel::result::Error::DatabaseError(
    2020              :                         DatabaseErrorKind::UniqueViolation,
    2021              :                         _,
    2022              :                     )) => {
    2023            0 :                         tracing::info!(
    2024            0 :                             "Raced with another request to insert tenant {}",
    2025              :                             attach_req.tenant_shard_id
    2026              :                         )
    2027              :                     }
    2028            0 :                     _ => return Err(e.into()),
    2029              :                 },
    2030              :                 Ok(()) => {
    2031            0 :                     tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
    2032              : 
    2033            0 :                     let mut shard = TenantShard::new(
    2034            0 :                         attach_req.tenant_shard_id,
    2035            0 :                         ShardIdentity::unsharded(),
    2036            0 :                         PlacementPolicy::Attached(0),
    2037            0 :                         None,
    2038              :                     );
    2039            0 :                     shard.config = config;
    2040              : 
    2041            0 :                     let mut locked = self.inner.write().unwrap();
    2042            0 :                     locked.tenants.insert(attach_req.tenant_shard_id, shard);
    2043            0 :                     tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
    2044              :                 }
    2045              :             }
    2046            0 :         }
    2047              : 
    2048            0 :         let new_generation = if let Some(req_node_id) = attach_req.node_id {
    2049            0 :             let maybe_tenant_conf = {
    2050            0 :                 let locked = self.inner.write().unwrap();
    2051            0 :                 locked
    2052            0 :                     .tenants
    2053            0 :                     .get(&attach_req.tenant_shard_id)
    2054            0 :                     .map(|t| t.config.clone())
    2055              :             };
    2056              : 
    2057            0 :             match maybe_tenant_conf {
    2058            0 :                 Some(conf) => {
    2059            0 :                     let new_generation = self
    2060            0 :                         .persistence
    2061            0 :                         .increment_generation(attach_req.tenant_shard_id, req_node_id)
    2062            0 :                         .await?;
    2063              : 
    2064              :                     // Persist the placement policy update. This is required
    2065              :                     // when re-attaching a detached tenant.
    2066            0 :                     self.persistence
    2067            0 :                         .update_tenant_shard(
    2068            0 :                             TenantFilter::Shard(attach_req.tenant_shard_id),
    2069            0 :                             Some(PlacementPolicy::Attached(0)),
    2070            0 :                             Some(conf),
    2071            0 :                             None,
    2072            0 :                             None,
    2073            0 :                         )
    2074            0 :                         .await?;
    2075            0 :                     Some(new_generation)
    2076              :                 }
    2077              :                 None => {
    2078            0 :                     anyhow::bail!("Attach hook handling raced with tenant removal")
    2079              :                 }
    2080              :             }
    2081              :         } else {
    2082            0 :             self.persistence.detach(attach_req.tenant_shard_id).await?;
    2083            0 :             None
    2084              :         };
    2085              : 
    2086            0 :         let mut locked = self.inner.write().unwrap();
    2087            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2088              : 
    2089            0 :         let tenant_shard = tenants
    2090            0 :             .get_mut(&attach_req.tenant_shard_id)
    2091            0 :             .expect("Checked for existence above");
    2092              : 
    2093            0 :         if let Some(new_generation) = new_generation {
    2094            0 :             tenant_shard.generation = Some(new_generation);
    2095            0 :             tenant_shard.policy = PlacementPolicy::Attached(0);
    2096            0 :         } else {
    2097              :             // This is a detach notification.  We must update placement policy to avoid re-attaching
    2098              :             // during background scheduling/reconciliation, or during storage controller restart.
    2099            0 :             assert!(attach_req.node_id.is_none());
    2100            0 :             tenant_shard.policy = PlacementPolicy::Detached;
    2101              :         }
    2102              : 
    2103            0 :         if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
    2104            0 :             tracing::info!(
    2105              :                 tenant_id = %attach_req.tenant_shard_id,
    2106              :                 ps_id = %attaching_pageserver,
    2107              :                 generation = ?tenant_shard.generation,
    2108            0 :                 "issuing",
    2109              :             );
    2110            0 :         } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
    2111            0 :             tracing::info!(
    2112              :                 tenant_id = %attach_req.tenant_shard_id,
    2113              :                 %ps_id,
    2114              :                 generation = ?tenant_shard.generation,
    2115            0 :                 "dropping",
    2116              :             );
    2117              :         } else {
    2118            0 :             tracing::info!(
    2119              :             tenant_id = %attach_req.tenant_shard_id,
    2120            0 :             "no-op: tenant already has no pageserver");
    2121              :         }
    2122            0 :         tenant_shard
    2123            0 :             .intent
    2124            0 :             .set_attached(scheduler, attach_req.node_id);
    2125              : 
    2126            0 :         tracing::info!(
    2127            0 :             "attach_hook: tenant {} set generation {:?}, pageserver {}, config {:?}",
    2128              :             attach_req.tenant_shard_id,
    2129              :             tenant_shard.generation,
    2130              :             // TODO: this is an odd number of 0xf's
    2131            0 :             attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff)),
    2132              :             attach_req.config,
    2133              :         );
    2134              : 
    2135              :         // Trick the reconciler into not doing anything for this tenant: this helps
    2136              :         // tests that manually configure a tenant on the pageserver, and then call this
    2137              :         // attach hook: they don't want background reconciliation to modify what they
    2138              :         // did to the pageserver.
    2139              :         #[cfg(feature = "testing")]
    2140              :         {
    2141            0 :             if let Some(node_id) = attach_req.node_id {
    2142            0 :                 tenant_shard.observed.locations = HashMap::from([(
    2143            0 :                     node_id,
    2144            0 :                     ObservedStateLocation {
    2145            0 :                         conf: Some(attached_location_conf(
    2146            0 :                             tenant_shard.generation.unwrap(),
    2147            0 :                             &tenant_shard.shard,
    2148            0 :                             &tenant_shard.config,
    2149            0 :                             &PlacementPolicy::Attached(0),
    2150            0 :                             tenant_shard.intent.get_secondary().len(),
    2151            0 :                         )),
    2152            0 :                     },
    2153            0 :                 )]);
    2154            0 :             } else {
    2155            0 :                 tenant_shard.observed.locations.clear();
    2156            0 :             }
    2157              :         }
    2158              : 
    2159              :         Ok(AttachHookResponse {
    2160            0 :             generation: attach_req
    2161            0 :                 .node_id
    2162            0 :                 .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
    2163              :         })
    2164            0 :     }
    2165              : 
    2166            0 :     pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
    2167            0 :         let locked = self.inner.read().unwrap();
    2168              : 
    2169            0 :         let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
    2170              : 
    2171              :         InspectResponse {
    2172            0 :             attachment: tenant_shard.and_then(|s| {
    2173            0 :                 s.intent
    2174            0 :                     .get_attached()
    2175            0 :                     .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
    2176            0 :             }),
    2177              :         }
    2178            0 :     }
    2179              : 
    2180              :     // When the availability state of a node transitions to active, we must do a full reconciliation
    2181              :     // of LocationConfigs on that node.  This is because while a node was offline:
    2182              :     // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
    2183              :     // - aborting a tenant shard split might have left rogue child shards behind on this node.
    2184              :     //
    2185              :     // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
    2186              :     // Reconcilers might communicate with the node, and these must not overlap with the work we do in
    2187              :     // this function.
    2188              :     //
    2189              :     // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
    2190              :     // is written for a single node rather than as a batch job for all nodes.
    2191              :     #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
    2192              :     async fn node_activate_reconcile(
    2193              :         &self,
    2194              :         mut node: Node,
    2195              :         _lock: &TracingExclusiveGuard<NodeOperations>,
    2196              :     ) -> Result<(), ApiError> {
    2197              :         // This Node is a mutable local copy: we will set it active so that we can use its
    2198              :         // API client to reconcile with the node.  The Node in [`Self::nodes`] will get updated
    2199              :         // later.
    2200              :         node.set_availability(NodeAvailability::Active(PageserverUtilization::full()));
    2201              : 
    2202              :         let configs = match node
    2203              :             .with_client_retries(
    2204            0 :                 |client| async move { client.list_location_config().await },
    2205              :                 &self.http_client,
    2206              :                 &self.config.pageserver_jwt_token,
    2207              :                 1,
    2208              :                 5,
    2209              :                 SHORT_RECONCILE_TIMEOUT,
    2210              :                 &self.cancel,
    2211              :             )
    2212              :             .await
    2213              :         {
    2214              :             None => {
    2215              :                 // We're shutting down (the Node's cancellation token can't have fired, because
    2216              :                 // we're the only scope that has a reference to it, and we didn't fire it).
    2217              :                 return Err(ApiError::ShuttingDown);
    2218              :             }
    2219              :             Some(Err(e)) => {
    2220              :                 // This node didn't succeed listing its locations: it may not proceed to active state
    2221              :                 // as it is apparently unavailable.
    2222              :                 return Err(ApiError::PreconditionFailed(
    2223              :                     format!("Failed to query node location configs, cannot activate ({e})").into(),
    2224              :                 ));
    2225              :             }
    2226              :             Some(Ok(configs)) => configs,
    2227              :         };
    2228              :         tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
    2229              : 
    2230              :         let mut cleanup = Vec::new();
    2231              :         let mut mismatched_locations = 0;
    2232              :         {
    2233              :             let mut locked = self.inner.write().unwrap();
    2234              : 
    2235              :             for (tenant_shard_id, reported) in configs.tenant_shards {
    2236              :                 let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
    2237              :                     cleanup.push(tenant_shard_id);
    2238              :                     continue;
    2239              :                 };
    2240              : 
    2241              :                 let on_record = &mut tenant_shard
    2242              :                     .observed
    2243              :                     .locations
    2244              :                     .entry(node.get_id())
    2245            0 :                     .or_insert_with(|| ObservedStateLocation { conf: None })
    2246              :                     .conf;
    2247              : 
    2248              :                 // If the location reported by the node does not match our observed state,
    2249              :                 // then we mark it as uncertain and let the background reconciliation loop
    2250              :                 // deal with it.
    2251              :                 //
    2252              :                 // Note that this also covers net new locations reported by the node.
    2253              :                 if *on_record != reported {
    2254              :                     mismatched_locations += 1;
    2255              :                     *on_record = None;
    2256              :                 }
    2257              :             }
    2258              :         }
    2259              : 
    2260              :         if mismatched_locations > 0 {
    2261              :             tracing::info!(
    2262              :                 "Set observed state to None for {mismatched_locations} mismatched locations"
    2263              :             );
    2264              :         }
    2265              : 
    2266              :         for tenant_shard_id in cleanup {
    2267              :             tracing::info!("Detaching {tenant_shard_id}");
    2268              :             match node
    2269              :                 .with_client_retries(
    2270            0 :                     |client| async move {
    2271            0 :                         let config = LocationConfig {
    2272            0 :                             mode: LocationConfigMode::Detached,
    2273            0 :                             generation: None,
    2274            0 :                             secondary_conf: None,
    2275            0 :                             shard_number: tenant_shard_id.shard_number.0,
    2276            0 :                             shard_count: tenant_shard_id.shard_count.literal(),
    2277            0 :                             shard_stripe_size: 0,
    2278            0 :                             tenant_conf: models::TenantConfig::default(),
    2279            0 :                         };
    2280            0 :                         client
    2281            0 :                             .location_config(tenant_shard_id, config, None, false)
    2282            0 :                             .await
    2283            0 :                     },
    2284              :                     &self.http_client,
    2285              :                     &self.config.pageserver_jwt_token,
    2286              :                     1,
    2287              :                     5,
    2288              :                     SHORT_RECONCILE_TIMEOUT,
    2289              :                     &self.cancel,
    2290              :                 )
    2291              :                 .await
    2292              :             {
    2293              :                 None => {
    2294              :                     // We're shutting down (the Node's cancellation token can't have fired, because
    2295              :                     // we're the only scope that has a reference to it, and we didn't fire it).
    2296              :                     return Err(ApiError::ShuttingDown);
    2297              :                 }
    2298              :                 Some(Err(e)) => {
    2299              :                     // Do not let the node proceed to Active state if it is not responsive to requests
    2300              :                     // to detach.  This could happen if e.g. a shutdown bug in the pageserver is preventing
    2301              :                     // the detach from completing: we should not let this node back into the set of nodes considered
    2302              :                     // okay for scheduling.
    2303              :                     return Err(ApiError::Conflict(format!(
    2304              :                         "Node {node} failed to detach {tenant_shard_id}: {e}"
    2305              :                     )));
    2306              :                 }
    2307              :                 Some(Ok(_)) => {}
    2308              :             };
    2309              :         }
    2310              : 
    2311              :         Ok(())
    2312              :     }
    2313              : 
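             In essence, the activation pass above makes one decision per location the node reports: detach what the controller does not know at all, mark as uncertain (observed = None) anything that disagrees with the controller's record, and leave matching locations alone. A hedged, simplified sketch of that decision (configs reduced to strings; not the controller's real types):

                 /// Illustrative only: the per-location outcome of node_activate_reconcile.
                 enum ActivationAction {
                     Detach,        // the controller has no record of this shard at all
                     MarkUncertain, // the observed record disagrees with the node's report
                     LeaveAsIs,     // the observed record matches the node's report
                 }

                 fn classify_location(shard_known: bool, observed: Option<&str>, reported: &str) -> ActivationAction {
                     if !shard_known {
                         return ActivationAction::Detach;
                     }
                     match observed {
                         Some(conf) if conf == reported => ActivationAction::LeaveAsIs,
                         _ => ActivationAction::MarkUncertain,
                     }
                 }
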
    2314            0 :     pub(crate) async fn re_attach(
    2315            0 :         &self,
    2316            0 :         reattach_req: ReAttachRequest,
    2317            0 :     ) -> Result<ReAttachResponse, ApiError> {
    2318            0 :         if let Some(register_req) = reattach_req.register {
    2319            0 :             self.node_register(register_req).await?;
    2320            0 :         }
    2321              : 
    2322              :         // Ordering: we must persist generation number updates before making them visible in the in-memory state
    2323            0 :         let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
    2324              : 
    2325            0 :         tracing::info!(
    2326              :             node_id=%reattach_req.node_id,
    2327            0 :             "Incremented {} tenant shards' generations",
    2328            0 :             incremented_generations.len()
    2329              :         );
    2330              : 
    2331              :         // Apply the updated generation to our in-memory state, and
    2332              :         // discover secondary locations.
    2333            0 :         let mut locked = self.inner.write().unwrap();
    2334            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    2335              : 
    2336            0 :         let mut response = ReAttachResponse {
    2337            0 :             tenants: Vec::new(),
    2338            0 :         };
    2339              : 
    2340              :         // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
    2341              :         // to call location_conf API with an old generation.  Wait for cancellation to complete
    2342              :         // before responding to this request.  Requires well-implemented CancellationToken logic
    2343              :         // all the way to where we call location_conf.  Even then, there can still be a location_conf
    2344              :         // request in flight over the network: TODO handle that by making location_conf API refuse
    2345              :         // to go backward in generations.
    2346              : 
    2347              :         // Scan through all shards, applying updates for ones where we updated generation
    2348              :         // and identifying shards that intend to have a secondary location on this node.
    2349            0 :         for (tenant_shard_id, shard) in tenants {
    2350            0 :             if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
    2351            0 :                 let new_gen = *new_gen;
    2352            0 :                 response.tenants.push(ReAttachResponseTenant {
    2353            0 :                     id: *tenant_shard_id,
    2354            0 :                     r#gen: Some(new_gen.into().unwrap()),
    2355            0 :                     // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
    2356            0 :                     // execution.  If a pageserver is restarted during that process, then the reconcile pass will
    2357            0 :                     // fail, and start from scratch, so it doesn't make sense for us to try and preserve
    2358            0 :                     // the stale/multi states at this point.
    2359            0 :                     mode: LocationConfigMode::AttachedSingle,
    2360            0 :                     stripe_size: shard.shard.stripe_size,
    2361            0 :                 });
    2362              : 
    2363            0 :                 shard.generation = std::cmp::max(shard.generation, Some(new_gen));
    2364            0 :                 if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
    2365              :                     // Why can we update `observed` even though we're not sure our response will be received
    2366              :                     // by the pageserver?  Because the pageserver will not proceed with startup until
    2367              :                     // it has processed the response: if it loses it, we'll see another request and increment
    2368              :                     // generation again, avoiding any uncertainty about dirtiness of tenant's state.
    2369            0 :                     if let Some(conf) = observed.conf.as_mut() {
    2370            0 :                         conf.generation = new_gen.into();
    2371            0 :                     }
    2372            0 :                 } else {
    2373            0 :                     // This node has no observed state for the shard: perhaps it was offline
    2374            0 :                     // when the pageserver restarted.  Insert a None, so that the Reconciler
    2375            0 :                     // will be prompted to learn the location's state before it makes changes.
    2376            0 :                     shard
    2377            0 :                         .observed
    2378            0 :                         .locations
    2379            0 :                         .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
    2380            0 :                 }
    2381            0 :             } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
    2382            0 :                 // Ordering: pageserver will not accept /location_config requests until it has
    2383            0 :                 // finished processing the response from re-attach.  So we can update our in-memory state
    2384            0 :                 // now, and be confident that we are not stamping on the result of some later location config.
    2385            0 :                 // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
    2386            0 :                 // so we might update observed state here, and then get over-written by some racing
    2387            0 :                 // ReconcileResult.  The impact is low however, since the state we have set on the pageserver is something
    2388            0 :                 // that matches intent, so worst case if we race then we end up doing a spurious reconcile.
    2389            0 : 
    2390            0 :                 response.tenants.push(ReAttachResponseTenant {
    2391            0 :                     id: *tenant_shard_id,
    2392            0 :                     r#gen: None,
    2393            0 :                     mode: LocationConfigMode::Secondary,
    2394            0 :                     stripe_size: shard.shard.stripe_size,
    2395            0 :                 });
    2396            0 : 
    2397            0 :                 // We must not update observed, because we have no guarantee that our
    2398            0 :                 // response will be received by the pageserver. This could leave it
    2399            0 :                 // falsely dirty, but the resulting reconcile should be idempotent.
    2400            0 :             }
    2401              :         }
    2402              : 
    2403              :         // We consider a node Active once we have composed a re-attach response, but we
    2404              :         // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
    2405              :         // implicitly synchronizes the LocationConfigs on the node.
    2406              :         //
    2407              :         // Setting a node active unblocks any Reconcilers that might write to the location config API,
    2408              :         // but those requests will not be accepted by the node until it has finished processing
    2409              :         // the re-attach response.
    2410              :         //
    2411              :         // Additionally, reset the node's scheduling policy to match the conditional update done
    2412              :         // in [`Persistence::re_attach`].
    2413            0 :         if let Some(node) = nodes.get(&reattach_req.node_id) {
    2414            0 :             let reset_scheduling = matches!(
    2415            0 :                 node.get_scheduling(),
    2416              :                 NodeSchedulingPolicy::PauseForRestart
    2417              :                     | NodeSchedulingPolicy::Draining
    2418              :                     | NodeSchedulingPolicy::Filling
    2419              :                     | NodeSchedulingPolicy::Deleting
    2420              :             );
    2421              : 
    2422            0 :             let mut new_nodes = (**nodes).clone();
    2423            0 :             if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
    2424            0 :                 if reset_scheduling {
    2425            0 :                     node.set_scheduling(NodeSchedulingPolicy::Active);
    2426            0 :                 }
    2427              : 
    2428            0 :                 tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
    2429            0 :                 node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
    2430              : 
    2431            0 :                 scheduler.node_upsert(node);
    2432            0 :                 let new_nodes = Arc::new(new_nodes);
    2433            0 :                 *nodes = new_nodes;
    2434              :             } else {
    2435            0 :                 tracing::error!(
    2436            0 :                     "Reattaching node {} was removed while processing the request",
    2437              :                     reattach_req.node_id
    2438              :                 );
    2439              :             }
    2440            0 :         }
    2441              : 
    2442            0 :         Ok(response)
    2443            0 :     }
    2444              : 
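             One detail of the re-attach handling above worth calling out: the in-memory generation is only ever moved forward (std::cmp::max of the existing and the newly issued value). A tiny, hypothetical sketch of that invariant with simplified types:

                 /// Illustrative only: the generation adopted after re-attach never moves backwards.
                 fn apply_reattach_generation(current: Option<u32>, issued: u32) -> Option<u32> {
                     Some(current.map_or(issued, |g| g.max(issued)))
                 }

             For example, apply_reattach_generation(Some(7), 5) keeps 7, while apply_reattach_generation(None, 5) adopts 5.
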
    2445            0 :     pub(crate) async fn validate(
    2446            0 :         &self,
    2447            0 :         validate_req: ValidateRequest,
    2448            0 :     ) -> Result<ValidateResponse, DatabaseError> {
    2449              :         // Fast in-memory check: we may reject validation on anything that doesn't match our
    2450              :         // in-memory generation for a shard
    2451            0 :         let in_memory_result = {
    2452            0 :             let mut in_memory_result = Vec::new();
    2453            0 :             let locked = self.inner.read().unwrap();
    2454            0 :             for req_tenant in validate_req.tenants {
    2455            0 :                 if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
    2456            0 :                     let valid = tenant_shard.generation == Some(Generation::new(req_tenant.r#gen));
    2457            0 :                     tracing::info!(
    2458            0 :                         "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
    2459              :                         req_tenant.id,
    2460              :                         req_tenant.r#gen,
    2461              :                         tenant_shard.generation
    2462              :                     );
    2463              : 
    2464            0 :                     in_memory_result.push((
    2465            0 :                         req_tenant.id,
    2466            0 :                         Generation::new(req_tenant.r#gen),
    2467            0 :                         valid,
    2468            0 :                     ));
    2469              :                 } else {
    2470              :                     // This is legal: for example during a shard split the pageserver may still
    2471              :                     // have deletions in its queue from the old pre-split shard, or after deletion
    2472              :                     // of a tenant that was busy with compaction/gc while being deleted.
    2473            0 :                     tracing::info!(
    2474            0 :                         "Refusing deletion validation for missing shard {}",
    2475              :                         req_tenant.id
    2476              :                     );
    2477              :                 }
    2478              :             }
    2479              : 
    2480            0 :             in_memory_result
    2481              :         };
    2482              : 
    2483              :         // Database calls to confirm validity for anything that passed the in-memory check.  We must do this
    2484              :         // in case of controller split-brain, where some other controller process might have incremented the generation.
    2485            0 :         let db_generations = self
    2486            0 :             .persistence
    2487            0 :             .shard_generations(
    2488            0 :                 in_memory_result
    2489            0 :                     .iter()
    2490            0 :                     .filter_map(|i| if i.2 { Some(&i.0) } else { None }),
    2491              :             )
    2492            0 :             .await?;
    2493            0 :         let db_generations = db_generations.into_iter().collect::<HashMap<_, _>>();
    2494              : 
    2495            0 :         let mut response = ValidateResponse {
    2496            0 :             tenants: Vec::new(),
    2497            0 :         };
    2498            0 :         for (tenant_shard_id, validate_generation, valid) in in_memory_result.into_iter() {
    2499            0 :             let valid = if valid {
    2500            0 :                 let db_generation = db_generations.get(&tenant_shard_id);
    2501            0 :                 db_generation == Some(&Some(validate_generation))
    2502              :             } else {
    2503              :                 // If in-memory state says it's invalid, trust that.  It's always safe to fail a validation; at worst
    2504              :                 // this prevents a pageserver from cleaning up an object in S3.
    2505            0 :                 false
    2506              :             };
    2507              : 
    2508            0 :             response.tenants.push(ValidateResponseTenant {
    2509            0 :                 id: tenant_shard_id,
    2510            0 :                 valid,
    2511            0 :             })
    2512              :         }
    2513              : 
    2514            0 :         Ok(response)
    2515            0 :     }
    2516              : 
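             The validation above is deliberately two-phase: the in-memory generation check can only veto, and anything it accepts is re-confirmed against the database, which stays authoritative if another controller process has incremented the generation in the meantime. A compact, hypothetical sketch of that shape (simplified key and generation types, not the controller's real ones):

                 use std::collections::HashMap;

                 /// Illustrative only: phase 1 filters on the in-memory generation, phase 2
                 /// confirms survivors against a database snapshot. Answering `false` is always
                 /// safe; it merely delays object cleanup on the pageserver.
                 fn validate_generations(
                     requested: &[(u64, u32)],
                     in_memory: &HashMap<u64, u32>,
                     database: &HashMap<u64, u32>,
                 ) -> Vec<(u64, bool)> {
                     requested
                         .iter()
                         .map(|&(shard, generation)| {
                             let passes_memory = in_memory.get(&shard) == Some(&generation);
                             let valid = passes_memory && database.get(&shard) == Some(&generation);
                             (shard, valid)
                         })
                         .collect()
                 }
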
    2517            0 :     pub(crate) async fn tenant_create(
    2518            0 :         &self,
    2519            0 :         create_req: TenantCreateRequest,
    2520            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    2521            0 :         let tenant_id = create_req.new_tenant_id.tenant_id;
    2522              : 
    2523              :         // Exclude any concurrent attempts to create/access the same tenant ID
    2524            0 :         let _tenant_lock = trace_exclusive_lock(
    2525            0 :             &self.tenant_op_locks,
    2526            0 :             create_req.new_tenant_id.tenant_id,
    2527            0 :             TenantOperations::Create,
    2528            0 :         )
    2529            0 :         .await;
    2530            0 :         let (response, waiters) = self.do_tenant_create(create_req).await?;
    2531              : 
    2532            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    2533              :             // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
    2534              :             // accept compute notifications while it is in the process of creating the tenant.  Reconciliation will
    2535              :             // be retried in the background.
    2536            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
    2537            0 :         }
    2538            0 :         Ok(response)
    2539            0 :     }
    2540              : 
    2541            0 :     pub(crate) async fn do_tenant_create(
    2542            0 :         &self,
    2543            0 :         create_req: TenantCreateRequest,
    2544            0 :     ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
    2545            0 :         let placement_policy = create_req
    2546            0 :             .placement_policy
    2547            0 :             .clone()
    2548              :             // As a default, zero secondaries is convenient for tests that don't choose a policy.
    2549            0 :             .unwrap_or(PlacementPolicy::Attached(0));
    2550              : 
    2551              :         // This service expects to handle sharding itself: it is an error to try to directly create
    2552              :         // a particular shard here.
    2553            0 :         let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
    2554            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2555            0 :                 "Attempted to create a specific shard, this API is for creating the whole tenant"
    2556            0 :             )));
    2557              :         } else {
    2558            0 :             create_req.new_tenant_id.tenant_id
    2559              :         };
    2560              : 
    2561            0 :         tracing::info!(
    2562            0 :             "Creating tenant {}, shard_count={:?}",
    2563              :             create_req.new_tenant_id,
    2564              :             create_req.shard_parameters.count,
    2565              :         );
    2566              : 
    2567            0 :         let create_ids = (0..create_req.shard_parameters.count.count())
    2568            0 :             .map(|i| TenantShardId {
    2569            0 :                 tenant_id,
    2570            0 :                 shard_number: ShardNumber(i),
    2571            0 :                 shard_count: create_req.shard_parameters.count,
    2572            0 :             })
    2573            0 :             .collect::<Vec<_>>();
    2574              : 
    2575              :         // If the caller specifies a None generation, it means "start from default".  This is different
    2576              :         // to [`Self::tenant_location_config`], where a None generation is used to represent
    2577              :         // an incompletely-onboarded tenant.
    2578            0 :         let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
    2579            0 :             tracing::info!(
    2580            0 :                 "tenant_create: secondary mode, generation is_some={}",
    2581            0 :                 create_req.generation.is_some()
    2582              :             );
    2583            0 :             create_req.generation.map(Generation::new)
    2584              :         } else {
    2585            0 :             tracing::info!(
    2586            0 :                 "tenant_create: not secondary mode, generation is_some={}",
    2587            0 :                 create_req.generation.is_some()
    2588              :             );
    2589            0 :             Some(
    2590            0 :                 create_req
    2591            0 :                     .generation
    2592            0 :                     .map(Generation::new)
    2593            0 :                     .unwrap_or(INITIAL_GENERATION),
    2594            0 :             )
    2595              :         };
    2596              : 
    2597            0 :         let preferred_az_id = {
    2598            0 :             let locked = self.inner.read().unwrap();
    2599              :             // Idempotency: take the existing value if the tenant already exists
    2600            0 :             if let Some(shard) = locked.tenants.get(create_ids.first().unwrap()) {
    2601            0 :                 shard.preferred_az().cloned()
    2602              :             } else {
    2603            0 :                 locked.scheduler.get_az_for_new_tenant()
    2604              :             }
    2605              :         };
    2606              : 
    2607              :         // Ordering: we persist tenant shards before creating them on the pageserver.  This enables a caller
    2608              :         // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
    2609              :         // during the creation, rather than risking leaving orphan objects in S3.
    2610            0 :         let persist_tenant_shards = create_ids
    2611            0 :             .iter()
    2612            0 :             .map(|tenant_shard_id| TenantShardPersistence {
    2613            0 :                 tenant_id: tenant_shard_id.tenant_id.to_string(),
    2614            0 :                 shard_number: tenant_shard_id.shard_number.0 as i32,
    2615            0 :                 shard_count: tenant_shard_id.shard_count.literal() as i32,
    2616            0 :                 shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
    2617            0 :                 generation: initial_generation.map(|g| g.into().unwrap() as i32),
    2618              :                 // The pageserver is not known until scheduling happens: we will set this column when
    2619              :                 // incrementing the generation the first time we attach to a pageserver.
    2620            0 :                 generation_pageserver: None,
    2621            0 :                 placement_policy: serde_json::to_string(&placement_policy).unwrap(),
    2622            0 :                 config: serde_json::to_string(&create_req.config).unwrap(),
    2623            0 :                 splitting: SplitState::default(),
    2624            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2625            0 :                     .unwrap(),
    2626            0 :                 preferred_az_id: preferred_az_id.as_ref().map(|az| az.to_string()),
    2627            0 :             })
    2628            0 :             .collect();
    2629              : 
    2630            0 :         match self
    2631            0 :             .persistence
    2632            0 :             .insert_tenant_shards(persist_tenant_shards)
    2633            0 :             .await
    2634              :         {
    2635            0 :             Ok(_) => {}
    2636              :             Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
    2637              :                 DatabaseErrorKind::UniqueViolation,
    2638              :                 _,
    2639              :             ))) => {
    2640              :                 // Unique key violation: this is probably a retry.  Because the shard count is part of the unique key,
    2641              :                 // if we see a unique key violation it means that the creation request's shard count matches the previous
    2642              :                 // creation's shard count.
    2643            0 :                 tracing::info!(
    2644            0 :                     "Tenant shards already present in database, proceeding with idempotent creation..."
    2645              :                 );
    2646              :             }
    2647              :             // Any other database error is unexpected and a bug.
    2648            0 :             Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
    2649              :         };
    2650              : 
    2651            0 :         let mut schedule_context = ScheduleContext::default();
    2652            0 :         let mut schedule_error = None;
    2653            0 :         let mut response_shards = Vec::new();
    2654            0 :         for tenant_shard_id in create_ids {
    2655            0 :             tracing::info!("Creating shard {tenant_shard_id}...");
    2656              : 
    2657            0 :             let outcome = self
    2658            0 :                 .do_initial_shard_scheduling(
    2659            0 :                     tenant_shard_id,
    2660            0 :                     initial_generation,
    2661            0 :                     create_req.shard_parameters,
    2662            0 :                     create_req.config.clone(),
    2663            0 :                     placement_policy.clone(),
    2664            0 :                     preferred_az_id.as_ref(),
    2665            0 :                     &mut schedule_context,
    2666            0 :                 )
    2667            0 :                 .await;
    2668              : 
    2669            0 :             match outcome {
    2670            0 :                 InitialShardScheduleOutcome::Scheduled(resp) => response_shards.push(resp),
    2671            0 :                 InitialShardScheduleOutcome::NotScheduled => {}
    2672            0 :                 InitialShardScheduleOutcome::ShardScheduleError(err) => {
    2673            0 :                     schedule_error = Some(err);
    2674            0 :                 }
    2675              :             }
    2676              :         }
    2677              : 
    2678              :         // If we failed to schedule shards, then they are still created in the controller,
    2679              :         // but we return an error to the requester to avoid a silent failure when someone
    2680              :         // tries to e.g. create a tenant whose placement policy requires more nodes than
    2681              :         // are present in the system.  We do this here rather than in the above loop, to
    2682              :         // avoid situations where we only create a subset of shards in the tenant.
    2683            0 :         if let Some(e) = schedule_error {
    2684            0 :             return Err(ApiError::Conflict(format!(
    2685            0 :                 "Failed to schedule shard(s): {e}"
    2686            0 :             )));
    2687            0 :         }
    2688              : 
    2689            0 :         let waiters = {
    2690            0 :             let mut locked = self.inner.write().unwrap();
    2691            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    2692            0 :             let config = ReconcilerConfigBuilder::new(ReconcilerPriority::High)
    2693            0 :                 .tenant_creation_hint(true)
    2694            0 :                 .build();
    2695            0 :             tenants
    2696            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    2697            0 :                 .filter_map(|(_shard_id, shard)| {
    2698            0 :                     self.maybe_configured_reconcile_shard(shard, nodes, config)
    2699            0 :                 })
    2700            0 :                 .collect::<Vec<_>>()
    2701              :         };
    2702              : 
    2703            0 :         Ok((
    2704            0 :             TenantCreateResponse {
    2705            0 :                 shards: response_shards,
    2706            0 :             },
    2707            0 :             waiters,
    2708            0 :         ))
    2709            0 :     }
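
// Illustrative sketch (not part of the service code above): a minimal, self-contained view of
// how shard identities are enumerated from a requested shard count, and why re-running the
// creation with the same count is idempotent. `ShardId` and `enumerate_shards` are hypothetical
// stand-ins for `TenantShardId` and the enumeration loop in `do_tenant_create`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct ShardId {
    tenant: u64, // stand-in for TenantId
    shard_number: u8,
    shard_count: u8,
}

fn enumerate_shards(tenant: u64, shard_count: u8) -> Vec<ShardId> {
    // A count of zero conventionally means "unsharded", which is treated as a single shard.
    let effective = if shard_count == 0 { 1 } else { shard_count };
    (0..effective)
        .map(|i| ShardId { tenant, shard_number: i, shard_count })
        .collect()
}

fn main() {
    // The same (tenant, shard_number, shard_count) triples come out of a retry, which is why a
    // unique-key violation on the database insert can be treated as an idempotent re-creation.
    assert_eq!(enumerate_shards(7, 4), enumerate_shards(7, 4));
    assert_eq!(enumerate_shards(7, 0).len(), 1);
}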
    2710              : 
    2711              :     /// Helper for tenant creation that does the scheduling for an individual shard. Covers both the
    2712              :     /// case of a new tenant and a pre-existing one.
    2713              :     #[allow(clippy::too_many_arguments)]
    2714            0 :     async fn do_initial_shard_scheduling(
    2715            0 :         &self,
    2716            0 :         tenant_shard_id: TenantShardId,
    2717            0 :         initial_generation: Option<Generation>,
    2718            0 :         shard_params: ShardParameters,
    2719            0 :         config: TenantConfig,
    2720            0 :         placement_policy: PlacementPolicy,
    2721            0 :         preferred_az_id: Option<&AvailabilityZone>,
    2722            0 :         schedule_context: &mut ScheduleContext,
    2723            0 :     ) -> InitialShardScheduleOutcome {
    2724            0 :         let mut locked = self.inner.write().unwrap();
    2725            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2726              : 
    2727              :         use std::collections::btree_map::Entry;
    2728            0 :         match tenants.entry(tenant_shard_id) {
    2729            0 :             Entry::Occupied(mut entry) => {
    2730            0 :                 tracing::info!("Tenant shard {tenant_shard_id} already exists while creating");
    2731              : 
    2732            0 :                 if let Err(err) = entry.get_mut().schedule(scheduler, schedule_context) {
    2733            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(err);
    2734            0 :                 }
    2735              : 
    2736            0 :                 if let Some(node_id) = entry.get().intent.get_attached() {
    2737            0 :                     let generation = entry
    2738            0 :                         .get()
    2739            0 :                         .generation
    2740            0 :                         .expect("Generation is set when in attached mode");
    2741            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2742            0 :                         shard_id: tenant_shard_id,
    2743            0 :                         node_id: *node_id,
    2744            0 :                         generation: generation.into().unwrap(),
    2745            0 :                     })
    2746              :                 } else {
    2747            0 :                     InitialShardScheduleOutcome::NotScheduled
    2748              :                 }
    2749              :             }
    2750            0 :             Entry::Vacant(entry) => {
    2751            0 :                 let state = entry.insert(TenantShard::new(
    2752            0 :                     tenant_shard_id,
    2753            0 :                     ShardIdentity::from_params(tenant_shard_id.shard_number, shard_params),
    2754            0 :                     placement_policy,
    2755            0 :                     preferred_az_id.cloned(),
    2756              :                 ));
    2757              : 
    2758            0 :                 state.generation = initial_generation;
    2759            0 :                 state.config = config;
    2760            0 :                 if let Err(e) = state.schedule(scheduler, schedule_context) {
    2761            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(e);
    2762            0 :                 }
    2763              : 
    2764              :                 // Only include shards in result if we are attaching: the purpose
    2765              :                 // of the response is to tell the caller where the shards are attached.
    2766            0 :                 if let Some(node_id) = state.intent.get_attached() {
    2767            0 :                     let generation = state
    2768            0 :                         .generation
    2769            0 :                         .expect("Generation is set when in attached mode");
    2770            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2771            0 :                         shard_id: tenant_shard_id,
    2772            0 :                         node_id: *node_id,
    2773            0 :                         generation: generation.into().unwrap(),
    2774            0 :                     })
    2775              :                 } else {
    2776            0 :                     InitialShardScheduleOutcome::NotScheduled
    2777              :                 }
    2778              :             }
    2779              :         }
    2780            0 :     }
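
// Illustrative sketch: the occupied/vacant entry pattern used above, reduced to a standalone
// example. An occupied entry means the shard already exists (an idempotent creation retry); a
// vacant entry means it is seen for the first time; both paths then proceed to scheduling.
// The map and value types here are hypothetical stand-ins for the tenants map and TenantShard.
use std::collections::BTreeMap;
use std::collections::btree_map::Entry;

/// Returns true if the key was newly inserted, false if it already existed.
fn upsert_shard(map: &mut BTreeMap<u32, String>, key: u32, new_value: &str) -> bool {
    match map.entry(key) {
        Entry::Occupied(entry) => {
            // Pre-existing shard: leave it in place and report that it was already there.
            let _existing = entry.get();
            false
        }
        Entry::Vacant(entry) => {
            entry.insert(new_value.to_string());
            true
        }
    }
}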
    2781              : 
    2782              :     /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
    2783              :     /// wait for reconciliation to complete before responding.
    2784            0 :     async fn await_waiters(
    2785            0 :         &self,
    2786            0 :         waiters: Vec<ReconcilerWaiter>,
    2787            0 :         timeout: Duration,
    2788            0 :     ) -> Result<(), ReconcileWaitError> {
    2789            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2790            0 :         for waiter in waiters {
    2791            0 :             let timeout = deadline.duration_since(Instant::now());
    2792            0 :             waiter.wait_timeout(timeout).await?;
    2793              :         }
    2794              : 
    2795            0 :         Ok(())
    2796            0 :     }
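
// Illustrative sketch: the deadline-sharing pattern used in await_waiters, as a standalone
// function over plain futures. A single deadline is computed up front and every sequential
// wait only gets the remaining time, so the total wait is bounded by the original timeout.
// `await_all_with_deadline` is a hypothetical helper, not part of the service.
use std::time::{Duration, Instant};

async fn await_all_with_deadline<F, T>(
    futures: Vec<F>,
    timeout: Duration,
) -> Result<Vec<T>, tokio::time::error::Elapsed>
where
    F: std::future::Future<Output = T>,
{
    let deadline = Instant::now() + timeout;
    let mut results = Vec::with_capacity(futures.len());
    for fut in futures {
        // Saturates to zero once the deadline has passed, so later waiters fail fast.
        let remaining = deadline.saturating_duration_since(Instant::now());
        results.push(tokio::time::timeout(remaining, fut).await?);
    }
    Ok(results)
}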
    2797              : 
    2798              :     /// Same as [`Service::await_waiters`], but returns the waiters which are still
    2799              :     /// in progress
    2800            0 :     async fn await_waiters_remainder(
    2801            0 :         &self,
    2802            0 :         waiters: Vec<ReconcilerWaiter>,
    2803            0 :         timeout: Duration,
    2804            0 :     ) -> Vec<ReconcilerWaiter> {
    2805            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2806            0 :         for waiter in waiters.iter() {
    2807            0 :             let timeout = deadline.duration_since(Instant::now());
    2808            0 :             let _ = waiter.wait_timeout(timeout).await;
    2809              :         }
    2810              : 
    2811            0 :         waiters
    2812            0 :             .into_iter()
    2813            0 :             .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
    2814            0 :             .collect::<Vec<_>>()
    2815            0 :     }
    2816              : 
    2817              :     /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
    2818              :     /// and transform it into either a tenant creation or a series of shard updates.
    2819              :     ///
    2820              :     /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
    2821              :     /// still be returned.
    2822            0 :     fn tenant_location_config_prepare(
    2823            0 :         &self,
    2824            0 :         tenant_id: TenantId,
    2825            0 :         req: TenantLocationConfigRequest,
    2826            0 :     ) -> TenantCreateOrUpdate {
    2827            0 :         let mut updates = Vec::new();
    2828            0 :         let mut locked = self.inner.write().unwrap();
    2829            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    2830            0 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2831              : 
    2832              :         // Use location config mode as an indicator of policy.
    2833            0 :         let placement_policy = match req.config.mode {
    2834            0 :             LocationConfigMode::Detached => PlacementPolicy::Detached,
    2835            0 :             LocationConfigMode::Secondary => PlacementPolicy::Secondary,
    2836              :             LocationConfigMode::AttachedMulti
    2837              :             | LocationConfigMode::AttachedSingle
    2838              :             | LocationConfigMode::AttachedStale => {
    2839            0 :                 if nodes.len() > 1 {
    2840            0 :                     PlacementPolicy::Attached(1)
    2841              :                 } else {
    2842              :                     // Convenience for dev/test: if we just have one pageserver, import
    2843              :                     // tenants into non-HA mode so that scheduling will succeed.
    2844            0 :                     PlacementPolicy::Attached(0)
    2845              :                 }
    2846              :             }
    2847              :         };
    2848              : 
    2849              :         // Ordinarily we do not update scheduling policy, but when making major changes
    2850              :         // like detaching or demoting to secondary-only, we need to force the scheduling
    2851              :         // mode to Active, or the caller's expected outcome (detach it) will not happen.
    2852            0 :         let scheduling_policy = match req.config.mode {
    2853              :             LocationConfigMode::Detached | LocationConfigMode::Secondary => {
    2854              :                 // Special case: when making major changes like detaching or demoting to secondary-only,
    2855              :                 // we need to force the scheduling mode to Active, or nothing will happen.
    2856            0 :                 Some(ShardSchedulingPolicy::Active)
    2857              :             }
    2858              :             LocationConfigMode::AttachedMulti
    2859              :             | LocationConfigMode::AttachedSingle
    2860              :             | LocationConfigMode::AttachedStale => {
    2861              :                 // While attached, continue to respect whatever the existing scheduling mode is.
    2862            0 :                 None
    2863              :             }
    2864              :         };
    2865              : 
    2866            0 :         let mut create = true;
    2867            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2868              :             // Saw an existing shard: this is not a creation
    2869            0 :             create = false;
    2870              : 
    2871              :             // Shards may have initially been created by a Secondary request, where we
    2872              :             // would have left generation as None.
    2873              :             //
    2874              :             // We only update generation the first time we see an attached-mode request,
    2875              :             // and if there is no existing generation set. The caller is responsible for
    2876              :             // ensuring that no non-storage-controller pageserver ever uses a higher
    2877              :             // generation than they passed in here.
    2878              :             use LocationConfigMode::*;
    2879            0 :             let set_generation = match req.config.mode {
    2880            0 :                 AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
    2881            0 :                     req.config.generation.map(Generation::new)
    2882              :                 }
    2883            0 :                 _ => None,
    2884              :             };
    2885              : 
    2886            0 :             updates.push(ShardUpdate {
    2887            0 :                 tenant_shard_id: *shard_id,
    2888            0 :                 placement_policy: placement_policy.clone(),
    2889            0 :                 tenant_config: req.config.tenant_conf.clone(),
    2890            0 :                 generation: set_generation,
    2891            0 :                 scheduling_policy,
    2892            0 :             });
    2893              :         }
    2894              : 
    2895            0 :         if create {
    2896              :             use LocationConfigMode::*;
    2897            0 :             let generation = match req.config.mode {
    2898            0 :                 AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
    2899              :                 // If a caller provided a generation in a non-attached request, ignore it
    2900              :                 // and leave our generation as None: this enables a subsequent update to set
    2901              :                 // the generation when setting an attached mode for the first time.
    2902            0 :                 _ => None,
    2903              :             };
    2904              : 
    2905            0 :             TenantCreateOrUpdate::Create(
    2906            0 :                 // Synthesize a creation request
    2907            0 :                 TenantCreateRequest {
    2908            0 :                     new_tenant_id: tenant_shard_id,
    2909            0 :                     generation,
    2910            0 :                     shard_parameters: ShardParameters {
    2911            0 :                         count: tenant_shard_id.shard_count,
    2912            0 :                         // We only import un-sharded or single-sharded tenants, so stripe
    2913            0 :                         // size can be made up arbitrarily here.
    2914            0 :                         stripe_size: DEFAULT_STRIPE_SIZE,
    2915            0 :                     },
    2916            0 :                     placement_policy: Some(placement_policy),
    2917            0 :                     config: req.config.tenant_conf,
    2918            0 :                 },
    2919            0 :             )
    2920              :         } else {
    2921            0 :             assert!(!updates.is_empty());
    2922            0 :             TenantCreateOrUpdate::Update(updates)
    2923              :         }
    2924            0 :     }
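
// Illustrative sketch: the two key decisions made by tenant_location_config_prepare, reduced
// to standalone functions with hypothetical stand-in types. First, the requested location mode
// determines the placement policy (with a single-pageserver convenience case); second, a
// request generation is only adopted the first time an attached mode is seen on a shard that
// has no generation yet.
#[derive(Clone, Copy)]
enum Mode { Attached, Secondary, Detached }

#[derive(Debug, PartialEq)]
enum Policy { Attached { secondaries: u8 }, Secondary, Detached }

fn policy_for(mode: Mode, pageserver_count: usize) -> Policy {
    match mode {
        Mode::Detached => Policy::Detached,
        Mode::Secondary => Policy::Secondary,
        // With more than one pageserver, attach with one secondary; otherwise non-HA.
        Mode::Attached if pageserver_count > 1 => Policy::Attached { secondaries: 1 },
        Mode::Attached => Policy::Attached { secondaries: 0 },
    }
}

fn generation_to_set(mode: Mode, existing: Option<u32>, requested: Option<u32>) -> Option<u32> {
    match (mode, existing) {
        // Only adopt the caller's generation on the first attached-mode request.
        (Mode::Attached, None) => requested,
        _ => None,
    }
}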
    2925              : 
    2926              :     /// For APIs that might act on tenants with [`PlacementPolicy::Detached`], first check if
    2927              :     /// the tenant is present in memory. If not, load it from the database.  If it is found
    2928              :     /// in neither location, return a NotFound error.
    2929              :     ///
    2930              :     /// Caller must demonstrate they hold a lock guard, as otherwise two callers might try to load
    2931              :     /// it at the same time, or we might race with [`Self::maybe_drop_tenant`]
    2932            0 :     async fn maybe_load_tenant(
    2933            0 :         &self,
    2934            0 :         tenant_id: TenantId,
    2935            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    2936            0 :     ) -> Result<(), ApiError> {
    2937              :         // Check if the tenant is present in memory, and select an AZ to use when loading
    2938              :         // if we will load it.
    2939            0 :         let load_in_az = {
    2940            0 :             let locked = self.inner.read().unwrap();
    2941            0 :             let existing = locked
    2942            0 :                 .tenants
    2943            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    2944            0 :                 .next();
    2945              : 
    2946              :             // If the tenant is not present in memory, we expect to load it from the database,
    2947              :             // so let's figure out what AZ to load it into while we have self.inner locked.
    2948            0 :             if existing.is_none() {
    2949            0 :                 locked
    2950            0 :                     .scheduler
    2951            0 :                     .get_az_for_new_tenant()
    2952            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    2953            0 :                         "No AZ with nodes found to load tenant"
    2954            0 :                     )))?
    2955              :             } else {
    2956              :                 // We already have this tenant in memory
    2957            0 :                 return Ok(());
    2958              :             }
    2959              :         };
    2960              : 
    2961            0 :         let tenant_shards = self.persistence.load_tenant(tenant_id).await?;
    2962            0 :         if tenant_shards.is_empty() {
    2963            0 :             return Err(ApiError::NotFound(
    2964            0 :                 anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    2965            0 :             ));
    2966            0 :         }
    2967              : 
    2968              :         // Update the persistent shards with the AZ that we are about to apply to in-memory state
    2969            0 :         self.persistence
    2970            0 :             .set_tenant_shard_preferred_azs(
    2971            0 :                 tenant_shards
    2972            0 :                     .iter()
    2973            0 :                     .map(|t| {
    2974            0 :                         (
    2975            0 :                             t.get_tenant_shard_id().expect("Corrupt shard in database"),
    2976            0 :                             Some(load_in_az.clone()),
    2977            0 :                         )
    2978            0 :                     })
    2979            0 :                     .collect(),
    2980              :             )
    2981            0 :             .await?;
    2982              : 
    2983            0 :         let mut locked = self.inner.write().unwrap();
    2984            0 :         tracing::info!(
    2985            0 :             "Loaded {} shards for tenant {}",
    2986            0 :             tenant_shards.len(),
    2987              :             tenant_id
    2988              :         );
    2989              : 
    2990            0 :         locked.tenants.extend(tenant_shards.into_iter().map(|p| {
    2991            0 :             let intent = IntentState::new(Some(load_in_az.clone()));
    2992            0 :             let shard =
    2993            0 :                 TenantShard::from_persistent(p, intent).expect("Corrupt shard row in database");
    2994              : 
    2995              :             // Sanity check: when loading on-demand, we should only ever be loading something Detached
    2996            0 :             debug_assert!(shard.policy == PlacementPolicy::Detached);
    2997            0 :             if shard.policy != PlacementPolicy::Detached {
    2998            0 :                 tracing::error!(
    2999            0 :                     "Tenant shard {} loaded on-demand, but has non-Detached policy {:?}",
    3000              :                     shard.tenant_shard_id,
    3001              :                     shard.policy
    3002              :                 );
    3003            0 :             }
    3004              : 
    3005            0 :             (shard.tenant_shard_id, shard)
    3006            0 :         }));
    3007              : 
    3008            0 :         Ok(())
    3009            0 :     }
    3010              : 
    3011              :     /// If all shards for a tenant are detached, and in a fully quiescent state (no observed locations on pageservers),
    3012              :     /// and have no reconciler running, then we can drop the tenant from memory.  It will be reloaded on-demand
    3013              :     /// if we are asked to attach it again (see [`Self::maybe_load_tenant`]).
    3014              :     ///
    3015              :     /// Caller must demonstrate they hold a lock guard, as otherwise it is unsafe to drop a tenant from
    3016              :     /// memory while some other function that does not hold the lock on Self::inner assumes it continues to exist.
    3017            0 :     fn maybe_drop_tenant(
    3018            0 :         &self,
    3019            0 :         tenant_id: TenantId,
    3020            0 :         locked: &mut std::sync::RwLockWriteGuard<ServiceState>,
    3021            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    3022            0 :     ) {
    3023            0 :         let mut tenant_shards = locked.tenants.range(TenantShardId::tenant_range(tenant_id));
    3024            0 :         if tenant_shards.all(|(_id, shard)| {
    3025            0 :             shard.policy == PlacementPolicy::Detached
    3026            0 :                 && shard.reconciler.is_none()
    3027            0 :                 && shard.observed.is_empty()
    3028            0 :         }) {
    3029            0 :             let keys = locked
    3030            0 :                 .tenants
    3031            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3032            0 :                 .map(|(id, _)| id)
    3033            0 :                 .copied()
    3034            0 :                 .collect::<Vec<_>>();
    3035            0 :             for key in keys {
    3036            0 :                 tracing::info!("Dropping detached tenant shard {} from memory", key);
    3037            0 :                 locked.tenants.remove(&key);
    3038              :             }
    3039            0 :         }
    3040            0 :     }
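
// Illustrative sketch: the quiescence predicate described in the doc comment above, over a
// hypothetical simplified shard view. A tenant is only dropped from memory when every one of
// its shards is Detached, has no reconciler running, and has no observed locations left.
struct ShardView {
    detached: bool,
    reconciler_running: bool,
    observed_locations: usize,
}

fn tenant_is_quiescent(shards: &[ShardView]) -> bool {
    shards
        .iter()
        .all(|s| s.detached && !s.reconciler_running && s.observed_locations == 0)
}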
    3041              : 
    3042              :     /// This API is used by the cloud control plane to migrate unsharded tenants that it created
    3043              :     /// directly with pageservers into this service.
    3044              :     ///
    3045              :     /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
    3046              :     /// has attempted to call this API. Failure to comply with this rule may lead to S3 corruption.
    3047              :     /// Think of the first attempt to call this API as a transfer of absolute authority over the
    3048              :     /// tenant's source of generation numbers.
    3049              :     ///
    3050              :     /// The mode in this request provides coarse-grained control of tenants:
    3051              :     /// - Call with mode Attached* to upsert the tenant.
    3052              :     /// - Call with mode Secondary to either onboard a tenant without attaching it, or
    3053              :     ///   to set an existing tenant to PolicyMode::Secondary
    3054              :     /// - Call with mode Detached to switch to PolicyMode::Detached
    3055            0 :     pub(crate) async fn tenant_location_config(
    3056            0 :         &self,
    3057            0 :         tenant_shard_id: TenantShardId,
    3058            0 :         req: TenantLocationConfigRequest,
    3059            0 :     ) -> Result<TenantLocationConfigResponse, ApiError> {
    3060              :         // We require an exclusive lock, because we are updating both persistent and in-memory state
    3061            0 :         let _tenant_lock = trace_exclusive_lock(
    3062            0 :             &self.tenant_op_locks,
    3063            0 :             tenant_shard_id.tenant_id,
    3064            0 :             TenantOperations::LocationConfig,
    3065            0 :         )
    3066            0 :         .await;
    3067              : 
    3068            0 :         let tenant_id = if !tenant_shard_id.is_unsharded() {
    3069            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    3070            0 :                 "This API is for importing single-sharded or unsharded tenants"
    3071            0 :             )));
    3072              :         } else {
    3073            0 :             tenant_shard_id.tenant_id
    3074              :         };
    3075              : 
    3076              :         // In case we are waking up a Detached tenant
    3077            0 :         match self.maybe_load_tenant(tenant_id, &_tenant_lock).await {
    3078            0 :             Ok(()) | Err(ApiError::NotFound(_)) => {
    3079            0 :                 // This is a creation or an update
    3080            0 :             }
    3081            0 :             Err(e) => {
    3082            0 :                 return Err(e);
    3083              :             }
    3084              :         };
    3085              : 
    3086              :         // First check if this is a creation or an update
    3087            0 :         let create_or_update = self.tenant_location_config_prepare(tenant_id, req);
    3088              : 
    3089            0 :         let mut result = TenantLocationConfigResponse {
    3090            0 :             shards: Vec::new(),
    3091            0 :             stripe_size: None,
    3092            0 :         };
    3093            0 :         let waiters = match create_or_update {
    3094            0 :             TenantCreateOrUpdate::Create(create_req) => {
    3095            0 :                 let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
    3096            0 :                 result.shards = create_resp
    3097            0 :                     .shards
    3098            0 :                     .into_iter()
    3099            0 :                     .map(|s| TenantShardLocation {
    3100            0 :                         node_id: s.node_id,
    3101            0 :                         shard_id: s.shard_id,
    3102            0 :                     })
    3103            0 :                     .collect();
    3104            0 :                 waiters
    3105              :             }
    3106            0 :             TenantCreateOrUpdate::Update(updates) => {
    3107              :                 // Persist updates
    3108              :                 // Ordering: write to the database before applying changes in-memory, so that
    3109              :                 // we will not appear time-travel backwards on a restart.
    3110              : 
    3111            0 :                 let mut schedule_context = ScheduleContext::default();
    3112              :                 for ShardUpdate {
    3113            0 :                     tenant_shard_id,
    3114            0 :                     placement_policy,
    3115            0 :                     tenant_config,
    3116            0 :                     generation,
    3117            0 :                     scheduling_policy,
    3118            0 :                 } in &updates
    3119              :                 {
    3120            0 :                     self.persistence
    3121            0 :                         .update_tenant_shard(
    3122            0 :                             TenantFilter::Shard(*tenant_shard_id),
    3123            0 :                             Some(placement_policy.clone()),
    3124            0 :                             Some(tenant_config.clone()),
    3125            0 :                             *generation,
    3126            0 :                             *scheduling_policy,
    3127            0 :                         )
    3128            0 :                         .await?;
    3129              :                 }
    3130              : 
    3131              :                 // Apply updates in-memory
    3132            0 :                 let mut waiters = Vec::new();
    3133              :                 {
    3134            0 :                     let mut locked = self.inner.write().unwrap();
    3135            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    3136              : 
    3137              :                     for ShardUpdate {
    3138            0 :                         tenant_shard_id,
    3139            0 :                         placement_policy,
    3140            0 :                         tenant_config,
    3141            0 :                         generation: update_generation,
    3142            0 :                         scheduling_policy,
    3143            0 :                     } in updates
    3144              :                     {
    3145            0 :                         let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    3146            0 :                             tracing::warn!("Shard {tenant_shard_id} removed while updating");
    3147            0 :                             continue;
    3148              :                         };
    3149              : 
    3150              :                         // Update stripe size
    3151            0 :                         if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
    3152            0 :                             result.stripe_size = Some(shard.shard.stripe_size);
    3153            0 :                         }
    3154              : 
    3155            0 :                         shard.policy = placement_policy;
    3156            0 :                         shard.config = tenant_config;
    3157            0 :                         if let Some(generation) = update_generation {
    3158            0 :                             shard.generation = Some(generation);
    3159            0 :                         }
    3160              : 
    3161            0 :                         if let Some(scheduling_policy) = scheduling_policy {
    3162            0 :                             shard.set_scheduling_policy(scheduling_policy);
    3163            0 :                         }
    3164              : 
    3165            0 :                         shard.schedule(scheduler, &mut schedule_context)?;
    3166              : 
    3167            0 :                         let maybe_waiter =
    3168            0 :                             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3169            0 :                         if let Some(waiter) = maybe_waiter {
    3170            0 :                             waiters.push(waiter);
    3171            0 :                         }
    3172              : 
    3173            0 :                         if let Some(node_id) = shard.intent.get_attached() {
    3174            0 :                             result.shards.push(TenantShardLocation {
    3175            0 :                                 shard_id: tenant_shard_id,
    3176            0 :                                 node_id: *node_id,
    3177            0 :                             })
    3178            0 :                         }
    3179              :                     }
    3180              :                 }
    3181            0 :                 waiters
    3182              :             }
    3183              :         };
    3184              : 
    3185            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3186              :             // Do not treat a reconcile error as fatal: we have already applied any requested
    3187              :             // Intent changes, and the reconcile can fail for external reasons like unavailable
    3188              :             // compute notification API.  In these cases, it is important that we do not
    3189              :             // cause the cloud control plane to retry forever on this API.
    3190            0 :             tracing::warn!(
    3191            0 :                 "Failed to reconcile after /location_config: {e}, returning success anyway"
    3192              :             );
    3193            0 :         }
    3194              : 
    3195              :         // Logging the full result is useful because it lets us cross-check what the cloud control
    3196              :         // plane's tenant_shards table should contain.
    3197            0 :         tracing::info!("Complete, returning {result:?}");
    3198              : 
    3199            0 :         Ok(result)
    3200            0 :     }
    3201              : 
    3202            0 :     pub(crate) async fn tenant_config_patch(
    3203            0 :         &self,
    3204            0 :         req: TenantConfigPatchRequest,
    3205            0 :     ) -> Result<(), ApiError> {
    3206            0 :         let _tenant_lock = trace_exclusive_lock(
    3207            0 :             &self.tenant_op_locks,
    3208            0 :             req.tenant_id,
    3209            0 :             TenantOperations::ConfigPatch,
    3210            0 :         )
    3211            0 :         .await;
    3212              : 
    3213            0 :         let tenant_id = req.tenant_id;
    3214            0 :         let patch = req.config;
    3215              : 
    3216            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3217              : 
    3218            0 :         let base = {
    3219            0 :             let locked = self.inner.read().unwrap();
    3220            0 :             let shards = locked
    3221            0 :                 .tenants
    3222            0 :                 .range(TenantShardId::tenant_range(req.tenant_id));
    3223              : 
    3224            0 :             let mut configs = shards.map(|(_sid, shard)| &shard.config).peekable();
    3225              : 
    3226            0 :             let first = match configs.peek() {
    3227            0 :                 Some(first) => (*first).clone(),
    3228              :                 None => {
    3229            0 :                     return Err(ApiError::NotFound(
    3230            0 :                         anyhow::anyhow!("Tenant {} not found", req.tenant_id).into(),
    3231            0 :                     ));
    3232              :                 }
    3233              :             };
    3234              : 
    3235            0 :             if !configs.all_equal() {
    3236            0 :                 tracing::error!("Tenant configs for {} are mismatched.", req.tenant_id);
    3237              :                 // This can't happen because we atomically update the database records
    3238              :                 // of all shards to the new value in [`Self::set_tenant_config_and_reconcile`].
    3239            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3240            0 :                     "Tenant configs for {} are mismatched",
    3241            0 :                     req.tenant_id
    3242            0 :                 )));
    3243            0 :             }
    3244              : 
    3245            0 :             first
    3246              :         };
    3247              : 
    3248            0 :         let updated_config = base
    3249            0 :             .apply_patch(patch)
    3250            0 :             .map_err(|err| ApiError::BadRequest(anyhow::anyhow!(err)))?;
    3251            0 :         self.set_tenant_config_and_reconcile(tenant_id, updated_config)
    3252            0 :             .await
    3253            0 :     }
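
// Illustrative sketch: the cross-shard consistency check used when patching the tenant config.
// The first config is peeked (without consuming it) and then all shards are required to agree
// before it is used as the patch base. `common_config` is a hypothetical helper; the service
// itself returns an internal error on mismatch rather than None.
use itertools::Itertools;

fn common_config<T: Clone + PartialEq>(configs: impl IntoIterator<Item = T>) -> Option<T> {
    let mut configs = configs.into_iter().peekable();
    let first = configs.peek()?.clone();
    if configs.all_equal() { Some(first) } else { None }
}

fn main() {
    assert_eq!(common_config([1, 1, 1]), Some(1));
    assert_eq!(common_config([1, 2, 1]), None);
    assert_eq!(common_config(Vec::<i32>::new()), None); // no shards at all: tenant not found
}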
    3254              : 
    3255            0 :     pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
    3256              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3257            0 :         let _tenant_lock = trace_exclusive_lock(
    3258            0 :             &self.tenant_op_locks,
    3259            0 :             req.tenant_id,
    3260            0 :             TenantOperations::ConfigSet,
    3261            0 :         )
    3262            0 :         .await;
    3263              : 
    3264            0 :         self.maybe_load_tenant(req.tenant_id, &_tenant_lock).await?;
    3265              : 
    3266            0 :         self.set_tenant_config_and_reconcile(req.tenant_id, req.config)
    3267            0 :             .await
    3268            0 :     }
    3269              : 
    3270            0 :     async fn set_tenant_config_and_reconcile(
    3271            0 :         &self,
    3272            0 :         tenant_id: TenantId,
    3273            0 :         config: TenantConfig,
    3274            0 :     ) -> Result<(), ApiError> {
    3275            0 :         self.persistence
    3276            0 :             .update_tenant_shard(
    3277            0 :                 TenantFilter::Tenant(tenant_id),
    3278            0 :                 None,
    3279            0 :                 Some(config.clone()),
    3280            0 :                 None,
    3281            0 :                 None,
    3282            0 :             )
    3283            0 :             .await?;
    3284              : 
    3285            0 :         let waiters = {
    3286            0 :             let mut waiters = Vec::new();
    3287            0 :             let mut locked = self.inner.write().unwrap();
    3288            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    3289            0 :             for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3290            0 :                 shard.config = config.clone();
    3291            0 :                 if let Some(waiter) =
    3292            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3293            0 :                 {
    3294            0 :                     waiters.push(waiter);
    3295            0 :                 }
    3296              :             }
    3297            0 :             waiters
    3298              :         };
    3299              : 
    3300            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3301              :             // Treat this as success because we have stored the configuration.  If e.g.
    3302              :             // a node was unavailable at this time, it should not stop us accepting a
    3303              :             // configuration change.
    3304            0 :             tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
    3305            0 :         }
    3306              : 
    3307            0 :         Ok(())
    3308            0 :     }
    3309              : 
    3310            0 :     pub(crate) fn tenant_config_get(
    3311            0 :         &self,
    3312            0 :         tenant_id: TenantId,
    3313            0 :     ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
    3314            0 :         let config = {
    3315            0 :             let locked = self.inner.read().unwrap();
    3316              : 
    3317            0 :             match locked
    3318            0 :                 .tenants
    3319            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3320            0 :                 .next()
    3321              :             {
    3322            0 :                 Some((_tenant_shard_id, shard)) => shard.config.clone(),
    3323              :                 None => {
    3324            0 :                     return Err(ApiError::NotFound(
    3325            0 :                         anyhow::anyhow!("Tenant not found").into(),
    3326            0 :                     ));
    3327              :                 }
    3328              :             }
    3329              :         };
    3330              : 
    3331              :         // Unlike the pageserver, we do not have a set of global defaults: the config is
    3332              :         // entirely per-tenant.  Therefore the distinction between `tenant_specific_overrides`
    3333              :         // and `effective_config` in the response is meaningless, but we retain that syntax
    3334              :         // in order to remain compatible with the pageserver API.
    3335              : 
    3336            0 :         let response = HashMap::from([
    3337              :             (
    3338              :                 "tenant_specific_overrides",
    3339            0 :                 serde_json::to_value(&config)
    3340            0 :                     .context("serializing tenant specific overrides")
    3341            0 :                     .map_err(ApiError::InternalServerError)?,
    3342              :             ),
    3343              :             (
    3344            0 :                 "effective_config",
    3345            0 :                 serde_json::to_value(&config)
    3346            0 :                     .context("serializing effective config")
    3347            0 :                     .map_err(ApiError::InternalServerError)?,
    3348              :             ),
    3349              :         ]);
    3350              : 
    3351            0 :         Ok(response)
    3352            0 :     }
    3353              : 
    3354            0 :     pub(crate) async fn tenant_time_travel_remote_storage(
    3355            0 :         &self,
    3356            0 :         time_travel_req: &TenantTimeTravelRequest,
    3357            0 :         tenant_id: TenantId,
    3358            0 :         timestamp: Cow<'_, str>,
    3359            0 :         done_if_after: Cow<'_, str>,
    3360            0 :     ) -> Result<(), ApiError> {
    3361            0 :         let _tenant_lock = trace_exclusive_lock(
    3362            0 :             &self.tenant_op_locks,
    3363            0 :             tenant_id,
    3364            0 :             TenantOperations::TimeTravelRemoteStorage,
    3365            0 :         )
    3366            0 :         .await;
    3367              : 
    3368            0 :         let node = {
    3369            0 :             let mut locked = self.inner.write().unwrap();
    3370              :             // Just a sanity check to prevent misuse: the API expects that the tenant is fully
    3371              :             // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
    3372              :             // but only at the start of the process, so it's really just to prevent operator
    3373              :             // mistakes.
    3374            0 :             for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
    3375            0 :                 if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
    3376              :                 {
    3377            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3378            0 :                         "We want tenant to be detached in shard with tenant_shard_id={shard_id}"
    3379            0 :                     )));
    3380            0 :                 }
    3381            0 :                 let maybe_attached = shard
    3382            0 :                     .observed
    3383            0 :                     .locations
    3384            0 :                     .iter()
    3385            0 :                     .filter_map(|(node_id, observed_location)| {
    3386            0 :                         observed_location
    3387            0 :                             .conf
    3388            0 :                             .as_ref()
    3389            0 :                             .map(|loc| (node_id, observed_location, loc.mode))
    3390            0 :                     })
    3391            0 :                     .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
    3392            0 :                 if let Some((node_id, _observed_location, mode)) = maybe_attached {
    3393            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3394            0 :                         "We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}"
    3395            0 :                     )));
    3396            0 :                 }
    3397              :             }
    3398            0 :             let scheduler = &mut locked.scheduler;
    3399              :             // Right now we only perform the operation on a single node without parallelization
    3400              :             // TODO fan out the operation to multiple nodes for better performance
    3401            0 :             let node_id = scheduler.any_available_node()?;
    3402            0 :             let node = locked
    3403            0 :                 .nodes
    3404            0 :                 .get(&node_id)
    3405            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3406            0 :             node.clone()
    3407              :         };
    3408              : 
    3409              :         // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
    3410            0 :         let mut counts = time_travel_req
    3411            0 :             .shard_counts
    3412            0 :             .iter()
    3413            0 :             .copied()
    3414            0 :             .collect::<HashSet<_>>()
    3415            0 :             .into_iter()
    3416            0 :             .collect::<Vec<_>>();
    3417            0 :         counts.sort_unstable();
    3418              : 
    3419            0 :         for count in counts {
    3420            0 :             let shard_ids = (0..count.count())
    3421            0 :                 .map(|i| TenantShardId {
    3422            0 :                     tenant_id,
    3423            0 :                     shard_number: ShardNumber(i),
    3424            0 :                     shard_count: count,
    3425            0 :                 })
    3426            0 :                 .collect::<Vec<_>>();
    3427            0 :             for tenant_shard_id in shard_ids {
    3428            0 :                 let client = PageserverClient::new(
    3429            0 :                     node.get_id(),
    3430            0 :                     self.http_client.clone(),
    3431            0 :                     node.base_url(),
    3432            0 :                     self.config.pageserver_jwt_token.as_deref(),
    3433              :                 );
    3434              : 
    3435            0 :                 tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
    3436              : 
    3437            0 :                 client
    3438            0 :                     .tenant_time_travel_remote_storage(
    3439            0 :                         tenant_shard_id,
    3440            0 :                         &timestamp,
    3441            0 :                         &done_if_after,
    3442            0 :                     )
    3443            0 :                     .await
    3444            0 :                     .map_err(|e| {
    3445            0 :                         ApiError::InternalServerError(anyhow::anyhow!(
    3446            0 :                             "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
    3447            0 :                             node
    3448            0 :                         ))
    3449            0 :                     })?;
    3450              :             }
    3451              :         }
    3452            0 :         Ok(())
    3453            0 :     }
    3454              : 
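For illustration, a minimal self-contained sketch of the shard-count handling above, using an invented ShardSelector stand-in rather than pageserver_api's ShardNumber/ShardCount/TenantShardId; it only shows the dedup-sort-enumerate shape, not the controller's real API:

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct ShardSelector {
    shard_number: u8,
    shard_count: u8,
}

fn historical_shard_selectors(shard_counts: &[u8]) -> Vec<ShardSelector> {
    use std::collections::HashSet;

    // Deduplicate and sort the caller-supplied counts, mirroring the HashSet + sort above.
    let mut counts: Vec<u8> = shard_counts
        .iter()
        .copied()
        .collect::<HashSet<_>>()
        .into_iter()
        .collect();
    counts.sort_unstable();

    counts
        .into_iter()
        .flat_map(|count| {
            // Assumption: an unsharded tenant is modelled as count 0 with a single shard 0.
            (0..count.max(1)).map(move |number| ShardSelector {
                shard_number: number,
                shard_count: count,
            })
        })
        .collect()
}
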
    3455            0 :     pub(crate) async fn tenant_secondary_download(
    3456            0 :         &self,
    3457            0 :         tenant_id: TenantId,
    3458            0 :         wait: Option<Duration>,
    3459            0 :     ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
    3460            0 :         let _tenant_lock = trace_shared_lock(
    3461            0 :             &self.tenant_op_locks,
    3462            0 :             tenant_id,
    3463            0 :             TenantOperations::SecondaryDownload,
    3464            0 :         )
    3465            0 :         .await;
    3466              : 
    3467              :         // Acquire the lock and collect the shard-node tuples to which we will send requests
    3468            0 :         let targets = {
    3469            0 :             let locked = self.inner.read().unwrap();
    3470            0 :             let mut targets = Vec::new();
    3471              : 
    3472            0 :             for (tenant_shard_id, shard) in
    3473            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3474              :             {
    3475            0 :                 for node_id in shard.intent.get_secondary() {
    3476            0 :                     let node = locked
    3477            0 :                         .nodes
    3478            0 :                         .get(node_id)
    3479            0 :                         .expect("Pageservers may not be deleted while referenced");
    3480            0 : 
    3481            0 :                     targets.push((*tenant_shard_id, node.clone()));
    3482            0 :                 }
    3483              :             }
    3484            0 :             targets
    3485              :         };
    3486              : 
    3487              :         // Issue concurrent requests to all shards' locations
    3488            0 :         let mut futs = FuturesUnordered::new();
    3489            0 :         for (tenant_shard_id, node) in targets {
    3490            0 :             let client = PageserverClient::new(
    3491            0 :                 node.get_id(),
    3492            0 :                 self.http_client.clone(),
    3493            0 :                 node.base_url(),
    3494            0 :                 self.config.pageserver_jwt_token.as_deref(),
    3495              :             );
    3496            0 :             futs.push(async move {
    3497            0 :                 let result = client
    3498            0 :                     .tenant_secondary_download(tenant_shard_id, wait)
    3499            0 :                     .await;
    3500            0 :                 (result, node, tenant_shard_id)
    3501            0 :             })
    3502              :         }
    3503              : 
    3504              :         // Handle any errors returned by pageservers.  This includes cases like this request racing with
    3505              :         // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
    3506              :         // well as more general cases like 503s, 500s, or timeouts.
    3507            0 :         let mut aggregate_progress = SecondaryProgress::default();
    3508            0 :         let mut aggregate_status: Option<StatusCode> = None;
    3509            0 :         let mut error: Option<mgmt_api::Error> = None;
    3510            0 :         while let Some((result, node, tenant_shard_id)) = futs.next().await {
    3511            0 :             match result {
    3512            0 :                 Err(e) => {
    3513              :                     // Secondary downloads are always advisory: if something fails, we nevertheless report success, so that whoever
    3514              :                     // is calling us will proceed with whatever migration they're doing, albeit with a slightly less warm cache
    3515              :                     // than they had hoped for.
    3516            0 :                     tracing::warn!("Secondary download error from pageserver {node}: {e}",);
    3517            0 :                     error = Some(e)
    3518              :                 }
    3519            0 :                 Ok((status_code, progress)) => {
    3520            0 :                     tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
    3521            0 :                     aggregate_progress.layers_downloaded += progress.layers_downloaded;
    3522            0 :                     aggregate_progress.layers_total += progress.layers_total;
    3523            0 :                     aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
    3524            0 :                     aggregate_progress.bytes_total += progress.bytes_total;
    3525            0 :                     aggregate_progress.heatmap_mtime =
    3526            0 :                         std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
    3527            0 :                     aggregate_status = match aggregate_status {
    3528            0 :                         None => Some(status_code),
    3529            0 :                         Some(StatusCode::OK) => Some(status_code),
    3530            0 :                         Some(cur) => {
    3531              :                             // Other status codes (e.g. 202) -- do not overwrite.
    3532            0 :                             Some(cur)
    3533              :                         }
    3534              :                     };
    3535              :                 }
    3536              :             }
    3537              :         }
    3538              : 
    3539              :         // If any of the shards return 202, indicate our result as 202.
    3540            0 :         match aggregate_status {
    3541              :             None => {
    3542            0 :                 match error {
    3543            0 :                     Some(e) => {
    3544              :                         // No successes, and an error: surface it
    3545            0 :                         Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
    3546              :                     }
    3547              :                     None => {
    3548              :                         // No shards found
    3549            0 :                         Err(ApiError::NotFound(
    3550            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    3551            0 :                         ))
    3552              :                     }
    3553              :                 }
    3554              :             }
    3555            0 :             Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
    3556              :         }
    3557            0 :     }
    3558              : 
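A minimal sketch of the status-aggregation rule used in the loop above, with plain u16 codes standing in for http StatusCode; the helper name is invented for illustration:

fn fold_status(aggregate: Option<u16>, next: u16) -> Option<u16> {
    match aggregate {
        // First response seen: adopt its status.
        None => Some(next),
        // A plain 200 can be upgraded by a later non-OK code such as 202 ("still downloading").
        Some(200) => Some(next),
        // Any other already-recorded status (e.g. 202) is kept.
        Some(current) => Some(current),
    }
}

// fold_status(None, 200) == Some(200)
// fold_status(Some(200), 202) == Some(202)
// fold_status(Some(202), 200) == Some(202)
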
    3559            0 :     pub(crate) async fn tenant_delete(
    3560            0 :         self: &Arc<Self>,
    3561            0 :         tenant_id: TenantId,
    3562            0 :     ) -> Result<StatusCode, ApiError> {
    3563            0 :         let _tenant_lock =
    3564            0 :             trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
    3565              : 
    3566            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3567              : 
    3568              :         // Detach all shards. This also deletes local pageserver shard data.
    3569            0 :         let (detach_waiters, node) = {
    3570            0 :             let mut detach_waiters = Vec::new();
    3571            0 :             let mut locked = self.inner.write().unwrap();
    3572            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3573            0 :             for (_, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3574              :                 // Update the tenant's intent to remove all attachments
    3575            0 :                 shard.policy = PlacementPolicy::Detached;
    3576            0 :                 shard
    3577            0 :                     .schedule(scheduler, &mut ScheduleContext::default())
    3578            0 :                     .expect("De-scheduling is infallible");
    3579            0 :                 debug_assert!(shard.intent.get_attached().is_none());
    3580            0 :                 debug_assert!(shard.intent.get_secondary().is_empty());
    3581              : 
    3582            0 :                 if let Some(waiter) =
    3583            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3584            0 :                 {
    3585            0 :                     detach_waiters.push(waiter);
    3586            0 :                 }
    3587              :             }
    3588              : 
    3589              :             // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
    3590              :             // was attached, just has to be able to see the S3 content)
    3591            0 :             let node_id = scheduler.any_available_node()?;
    3592            0 :             let node = nodes
    3593            0 :                 .get(&node_id)
    3594            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3595            0 :             (detach_waiters, node.clone())
    3596              :         };
    3597              : 
    3598              :         // This reconcile wait can fail in a few ways:
    3599              :         //  A: there is a very long queue for the reconciler semaphore
    3600              :         //  B: some pageserver is failing to handle a detach promptly
    3601              :         //  C: some pageserver goes offline right at the moment we send it a request.
    3602              :         //
    3603              :         // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
    3604              :         // the next attempt to reconcile will silently skip detaches for an offline node and succeed.  If B happens,
    3605              :         // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
    3606              :         // deleting the underlying data).
    3607            0 :         self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
    3608            0 :             .await?;
    3609              : 
    3610              :         // Delete the entire tenant (all shards) from remote storage via a random pageserver.
    3611              :         // Passing an unsharded tenant ID will cause the pageserver to remove all remote paths with
    3612              :         // the tenant ID prefix, including all shards (even possibly stale ones).
    3613            0 :         match node
    3614            0 :             .with_client_retries(
    3615            0 :                 |client| async move {
    3616            0 :                     client
    3617            0 :                         .tenant_delete(TenantShardId::unsharded(tenant_id))
    3618            0 :                         .await
    3619            0 :                 },
    3620            0 :                 &self.http_client,
    3621            0 :                 &self.config.pageserver_jwt_token,
    3622              :                 1,
    3623              :                 3,
    3624              :                 RECONCILE_TIMEOUT,
    3625            0 :                 &self.cancel,
    3626              :             )
    3627            0 :             .await
    3628            0 :             .unwrap_or(Err(mgmt_api::Error::Cancelled))
    3629              :         {
    3630            0 :             Ok(_) => {}
    3631              :             Err(mgmt_api::Error::Cancelled) => {
    3632            0 :                 return Err(ApiError::ShuttingDown);
    3633              :             }
    3634            0 :             Err(e) => {
    3635              :                 // This is unexpected: remote deletion should be infallible, unless the object store
    3636              :                 // at large is unavailable.
    3637            0 :                 tracing::error!("Error deleting via node {node}: {e}");
    3638            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    3639              :             }
    3640              :         }
    3641              : 
    3642              :         // Fall through: deletion of the tenant on pageservers is complete; we may proceed to drop
    3643              :         // our in-memory state and database state.
    3644              : 
    3645              :         // Ordering: we delete persistent state first. If we then crash,
    3646              :         // the in-memory state is simply not reloaded at the next start-up.
    3647              : 
    3648              :         // Drop persistent state.
    3649            0 :         self.persistence.delete_tenant(tenant_id).await?;
    3650              : 
    3651              :         // Drop in-memory state
    3652              :         {
    3653            0 :             let mut locked = self.inner.write().unwrap();
    3654            0 :             let (_nodes, tenants, scheduler) = locked.parts_mut();
    3655              : 
    3656              :             // Dereference Scheduler from shards before dropping them
    3657            0 :             for (_tenant_shard_id, shard) in
    3658            0 :                 tenants.range_mut(TenantShardId::tenant_range(tenant_id))
    3659            0 :             {
    3660            0 :                 shard.intent.clear(scheduler);
    3661            0 :             }
    3662              : 
    3663            0 :             tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
    3664            0 :             tracing::info!(
    3665            0 :                 "Deleted tenant {tenant_id}, now have {} tenants",
    3666            0 :                 locked.tenants.len()
    3667              :             );
    3668              :         };
    3669              : 
    3670              :         // Delete the tenant from safekeepers (if needed)
    3671            0 :         self.tenant_delete_safekeepers(tenant_id)
    3672            0 :             .instrument(tracing::info_span!("tenant_delete_safekeepers", %tenant_id))
    3673            0 :             .await?;
    3674              : 
    3675              :         // Success is represented as 404, to imitate the existing pageserver deletion API
    3676            0 :         Ok(StatusCode::NOT_FOUND)
    3677            0 :     }
    3678              : 
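The ordering argument above can be sketched generically; the collections below are stand-ins, not the controller's Persistence layer or tenant map:

use std::collections::{HashMap, HashSet};

fn delete_tenant_state(
    durable: &mut HashSet<u64>,           // stands in for the controller's database rows
    in_memory: &mut HashMap<u64, String>, // stands in for the in-memory tenant map
    tenant_id: u64,
) {
    // 1. Drop durable state first. If we crash right after this, the tenant simply
    //    is not reloaded at the next start-up, so in-memory state vanishes anyway.
    durable.remove(&tenant_id);
    // 2. Only then drop the in-memory entry. The reverse order could leave a durable
    //    record behind with nothing left in memory to drive its deletion.
    in_memory.remove(&tenant_id);
}
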
    3679              :     /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
    3680              :     /// for a tenant.  The TenantConfig is passed through to pageservers, whereas this function modifies
    3681              :     /// the tenant's policies (configuration) within the storage controller
    3682            0 :     pub(crate) async fn tenant_update_policy(
    3683            0 :         &self,
    3684            0 :         tenant_id: TenantId,
    3685            0 :         req: TenantPolicyRequest,
    3686            0 :     ) -> Result<(), ApiError> {
    3687              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3688            0 :         let _tenant_lock = trace_exclusive_lock(
    3689            0 :             &self.tenant_op_locks,
    3690            0 :             tenant_id,
    3691            0 :             TenantOperations::UpdatePolicy,
    3692            0 :         )
    3693            0 :         .await;
    3694              : 
    3695            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3696              : 
    3697            0 :         failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
    3698              : 
    3699              :         let TenantPolicyRequest {
    3700            0 :             placement,
    3701            0 :             mut scheduling,
    3702            0 :         } = req;
    3703              : 
    3704            0 :         if let Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) = placement {
    3705              :             // When someone configures a tenant to detach or become secondary-only, we force the
    3706              :             // scheduling policy to Active so that the change can take effect.
    3707            0 :             if scheduling.is_none() {
    3708            0 :                 scheduling = Some(ShardSchedulingPolicy::Active);
    3709            0 :             }
    3710            0 :         }
    3711              : 
    3712            0 :         self.persistence
    3713            0 :             .update_tenant_shard(
    3714            0 :                 TenantFilter::Tenant(tenant_id),
    3715            0 :                 placement.clone(),
    3716            0 :                 None,
    3717            0 :                 None,
    3718            0 :                 scheduling,
    3719            0 :             )
    3720            0 :             .await?;
    3721              : 
    3722            0 :         let mut schedule_context = ScheduleContext::default();
    3723            0 :         let mut locked = self.inner.write().unwrap();
    3724            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    3725            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3726            0 :             if let Some(placement) = &placement {
    3727            0 :                 shard.policy = placement.clone();
    3728              : 
    3729            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3730            0 :                                "Updated placement policy to {placement:?}");
    3731            0 :             }
    3732              : 
    3733            0 :             if let Some(scheduling) = &scheduling {
    3734            0 :                 shard.set_scheduling_policy(*scheduling);
    3735              : 
    3736            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3737            0 :                                "Updated scheduling policy to {scheduling:?}");
    3738            0 :             }
    3739              : 
    3740              :             // In case scheduling is being switched back on, try it now.
    3741            0 :             shard.schedule(scheduler, &mut schedule_context).ok();
    3742            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3743              :         }
    3744              : 
    3745            0 :         Ok(())
    3746            0 :     }
    3747              : 
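A simplified sketch of that placement/scheduling coupling, with local stand-in enums instead of pageserver_api's PlacementPolicy and ShardSchedulingPolicy:

#[derive(Debug, Clone, Copy, PartialEq)]
enum Placement { Attached, Secondary, Detached }

#[derive(Debug, Clone, Copy, PartialEq)]
enum Scheduling { Active, Paused }

fn effective_scheduling(
    placement: Option<Placement>,
    requested: Option<Scheduling>,
) -> Option<Scheduling> {
    match (placement, requested) {
        // A detach or secondary-only request must be able to take effect even if the
        // shard's scheduling policy was previously paused.
        (Some(Placement::Detached) | Some(Placement::Secondary), None) => Some(Scheduling::Active),
        (_, requested) => requested,
    }
}
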
    3748            0 :     pub(crate) async fn tenant_timeline_create_pageservers(
    3749            0 :         &self,
    3750            0 :         tenant_id: TenantId,
    3751            0 :         mut create_req: TimelineCreateRequest,
    3752            0 :     ) -> Result<TimelineInfo, ApiError> {
    3753            0 :         tracing::info!(
    3754            0 :             "Creating timeline {}/{}",
    3755              :             tenant_id,
    3756              :             create_req.new_timeline_id,
    3757              :         );
    3758              : 
    3759            0 :         self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    3760            0 :             if targets.0.is_empty() {
    3761            0 :                 return Err(ApiError::NotFound(
    3762            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3763            0 :                 ));
    3764            0 :             };
    3765              : 
    3766            0 :             let (shard_zero_tid, shard_zero_locations) =
    3767            0 :                 targets.0.pop_first().expect("Must have at least one shard");
    3768            0 :             assert!(shard_zero_tid.is_shard_zero());
    3769              : 
    3770            0 :             async fn create_one(
    3771            0 :                 tenant_shard_id: TenantShardId,
    3772            0 :                 locations: ShardMutationLocations,
    3773            0 :                 http_client: reqwest::Client,
    3774            0 :                 jwt: Option<String>,
    3775            0 :                 mut create_req: TimelineCreateRequest,
    3776            0 :             ) -> Result<TimelineInfo, ApiError> {
    3777            0 :                 let latest = locations.latest.node;
    3778              : 
    3779            0 :                 tracing::info!(
    3780            0 :                     "Creating timeline on shard {}/{}, attached to node {latest} in generation {:?}",
    3781              :                     tenant_shard_id,
    3782              :                     create_req.new_timeline_id,
    3783              :                     locations.latest.generation
    3784              :                 );
    3785              : 
    3786            0 :                 let client =
    3787            0 :                     PageserverClient::new(latest.get_id(), http_client.clone(), latest.base_url(), jwt.as_deref());
    3788              : 
    3789            0 :                 let timeline_info = client
    3790            0 :                     .timeline_create(tenant_shard_id, &create_req)
    3791            0 :                     .await
    3792            0 :                     .map_err(|e| passthrough_api_error(&latest, e))?;
    3793              : 
    3794              :                 // If we are going to create the timeline on some stale locations for shard 0, then ask them to re-use
    3795              :                 // the initdb generated by the latest location, rather than generating their own.  This avoids racing uploads
    3796              :                 // of initdb to S3 which might not be binary-identical if different pageservers have different postgres binaries.
    3797            0 :                 if tenant_shard_id.is_shard_zero() {
    3798            0 :                     if let models::TimelineCreateRequestMode::Bootstrap { existing_initdb_timeline_id, .. } = &mut create_req.mode {
    3799            0 :                         *existing_initdb_timeline_id = Some(create_req.new_timeline_id);
    3800            0 :                     }
    3801            0 :                 }
    3802              : 
    3803              :                 // We propagate timeline creations to all attached locations such that a compute
    3804              :                 // for the new timeline is able to start regardless of the current state of the
    3805              :                 // tenant shard reconciliation.
    3806            0 :                 for location in locations.other {
    3807            0 :                     tracing::info!(
    3808            0 :                         "Creating timeline on shard {}/{}, stale attached to node {} in generation {:?}",
    3809              :                         tenant_shard_id,
    3810              :                         create_req.new_timeline_id,
    3811              :                         location.node,
    3812              :                         location.generation
    3813              :                     );
    3814              : 
    3815            0 :                     let client = PageserverClient::new(
    3816            0 :                         location.node.get_id(),
    3817            0 :                         http_client.clone(),
    3818            0 :                         location.node.base_url(),
    3819            0 :                         jwt.as_deref(),
    3820              :                     );
    3821              : 
    3822            0 :                     let res = client
    3823            0 :                         .timeline_create(tenant_shard_id, &create_req)
    3824            0 :                         .await;
    3825              : 
    3826            0 :                     if let Err(e) = res {
    3827            0 :                         match e {
    3828            0 :                             mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _) => {
    3829            0 :                                 // Tenant might have been detached from the stale location,
    3830            0 :                                 // so ignore 404s.
    3831            0 :                             },
    3832              :                             _ => {
    3833            0 :                                 return Err(passthrough_api_error(&location.node, e));
    3834              :                             }
    3835              :                         }
    3836            0 :                     }
    3837              :                 }
    3838              : 
    3839            0 :                 Ok(timeline_info)
    3840            0 :             }
    3841              : 
    3842              :             // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
    3843              :             // use whatever LSN that shard picked when creating on subsequent shards.  We arbitrarily use shard zero as the shard
    3844              :             // that will get the first creation request, and propagate the LSN to all the >0 shards.
    3845              :             //
    3846              :             // This also enables non-zero shards to use the initdb that shard 0 generated and uploaded to S3, rather than
    3847              :             // independently generating their own initdb.  This guarantees that shards cannot end up with different initial
    3848              :             // states if e.g. they have different postgres binary versions.
    3849            0 :             let timeline_info = create_one(
    3850            0 :                 shard_zero_tid,
    3851            0 :                 shard_zero_locations,
    3852            0 :                 self.http_client.clone(),
    3853            0 :                 self.config.pageserver_jwt_token.clone(),
    3854            0 :                 create_req.clone(),
    3855            0 :             )
    3856            0 :             .await?;
    3857              : 
    3858              :             // Update the create request for shards >= 1
    3859            0 :             match &mut create_req.mode {
    3860            0 :                 models::TimelineCreateRequestMode::Branch { ancestor_start_lsn, .. } if ancestor_start_lsn.is_none() => {
    3861            0 :                     // Propagate the LSN that shard zero picked, if caller didn't provide one
    3862            0 :                     *ancestor_start_lsn = timeline_info.ancestor_lsn;
    3863            0 :                 },
    3864            0 :                 models::TimelineCreateRequestMode::Bootstrap { existing_initdb_timeline_id, .. } => {
    3865              :                     // For shards >= 1, do not run initdb: use the one that shard 0 uploaded to S3
    3866            0 :                     *existing_initdb_timeline_id = Some(create_req.new_timeline_id)
    3867              :                 }
    3868            0 :                 _ => {}
    3869              :             }
    3870              : 
    3871              :             // Create timeline on remaining shards with number >0
    3872            0 :             if !targets.0.is_empty() {
    3873              :                 // If we had multiple shards, issue requests for the remainder now.
    3874            0 :                 let jwt = &self.config.pageserver_jwt_token;
    3875            0 :                 self.tenant_for_shards(
    3876            0 :                     targets
    3877            0 :                         .0
    3878            0 :                         .iter()
    3879            0 :                         .map(|t| (*t.0, t.1.latest.node.clone()))
    3880            0 :                         .collect(),
    3881            0 :                     |tenant_shard_id: TenantShardId, _node: Node| {
    3882            0 :                         let create_req = create_req.clone();
    3883            0 :                         let mutation_locations = targets.0.remove(&tenant_shard_id).unwrap();
    3884            0 :                         Box::pin(create_one(
    3885            0 :                             tenant_shard_id,
    3886            0 :                             mutation_locations,
    3887            0 :                             self.http_client.clone(),
    3888            0 :                             jwt.clone(),
    3889            0 :                             create_req,
    3890            0 :                         ))
    3891            0 :                     },
    3892              :                 )
    3893            0 :                 .await?;
    3894            0 :             }
    3895              : 
    3896            0 :             Ok(timeline_info)
    3897            0 :         })
    3898            0 :         .await?
    3899            0 :     }
    3900              : 
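A hedged, self-contained sketch of the creation order described in the comments above (shard zero first, then propagate its LSN to shards > 0); the types and helper below are invented for illustration only:

#[derive(Clone)]
struct CreateReq { ancestor_start_lsn: Option<u64> }

struct CreatedTimeline { ancestor_lsn: u64 }

fn create_timeline_everywhere(
    shard_numbers: &[u8],
    mut req: CreateReq,
    mut create_on_shard: impl FnMut(u8, &CreateReq) -> CreatedTimeline,
) -> CreatedTimeline {
    // Shard zero goes first and decides the branch point if the caller left it open.
    let zero = create_on_shard(0, &req);
    if req.ancestor_start_lsn.is_none() {
        req.ancestor_start_lsn = Some(zero.ancestor_lsn);
    }
    // Every other shard reuses shard zero's decision instead of picking its own.
    for &number in shard_numbers.iter().filter(|&&n| n != 0) {
        create_on_shard(number, &req);
    }
    zero
}
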
    3901            0 :     pub(crate) async fn tenant_timeline_create(
    3902            0 :         self: &Arc<Self>,
    3903            0 :         tenant_id: TenantId,
    3904            0 :         create_req: TimelineCreateRequest,
    3905            0 :     ) -> Result<TimelineCreateResponseStorcon, ApiError> {
    3906            0 :         let safekeepers = self.config.timelines_onto_safekeepers;
    3907            0 :         let timeline_id = create_req.new_timeline_id;
    3908              : 
    3909            0 :         tracing::info!(
    3910            0 :             mode=%create_req.mode_tag(),
    3911              :             %safekeepers,
    3912            0 :             "Creating timeline {}/{}",
    3913              :             tenant_id,
    3914              :             timeline_id,
    3915              :         );
    3916              : 
    3917            0 :         let _tenant_lock = trace_shared_lock(
    3918            0 :             &self.tenant_op_locks,
    3919            0 :             tenant_id,
    3920            0 :             TenantOperations::TimelineCreate,
    3921            0 :         )
    3922            0 :         .await;
    3923            0 :         failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
    3924            0 :         let is_import = create_req.is_import();
    3925            0 :         let read_only = matches!(
    3926            0 :             create_req.mode,
    3927              :             models::TimelineCreateRequestMode::Branch {
    3928              :                 read_only: true,
    3929              :                 ..
    3930              :             }
    3931              :         );
    3932              : 
    3933            0 :         if is_import {
    3934              :             // Ensure that there is no ongoing split.
    3935              :             // [`Self::tenant_shard_split`] holds the exclusive tenant lock
    3936              :             // for the duration of the split, but here we handle the case
    3937              :             // where we restarted and the split is being aborted.
    3938            0 :             let locked = self.inner.read().unwrap();
    3939            0 :             let splitting = locked
    3940            0 :                 .tenants
    3941            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3942            0 :                 .any(|(_id, shard)| shard.splitting != SplitState::Idle);
    3943              : 
    3944            0 :             if splitting {
    3945            0 :                 return Err(ApiError::Conflict("Tenant is splitting shard".to_string()));
    3946            0 :             }
    3947            0 :         }
    3948              : 
    3949            0 :         let timeline_info = self
    3950            0 :             .tenant_timeline_create_pageservers(tenant_id, create_req)
    3951            0 :             .await?;
    3952              : 
    3953            0 :         let selected_safekeepers = if is_import {
    3954            0 :             let shards = {
    3955            0 :                 let locked = self.inner.read().unwrap();
    3956            0 :                 locked
    3957            0 :                     .tenants
    3958            0 :                     .range(TenantShardId::tenant_range(tenant_id))
    3959            0 :                     .map(|(ts_id, _)| ts_id.to_index())
    3960            0 :                     .collect::<Vec<_>>()
    3961              :             };
    3962              : 
    3963            0 :             if !shards
    3964            0 :                 .iter()
    3965            0 :                 .map(|shard_index| shard_index.shard_count)
    3966            0 :                 .all_equal()
    3967              :             {
    3968            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3969            0 :                     "Inconsistent shard count"
    3970            0 :                 )));
    3971            0 :             }
    3972              : 
    3973            0 :             let import = TimelineImport {
    3974            0 :                 tenant_id,
    3975            0 :                 timeline_id,
    3976            0 :                 shard_statuses: ShardImportStatuses::new(shards),
    3977            0 :             };
    3978              : 
    3979            0 :             let inserted = self
    3980            0 :                 .persistence
    3981            0 :                 .insert_timeline_import(import.to_persistent())
    3982            0 :                 .await
    3983            0 :                 .context("timeline import insert")
    3984            0 :                 .map_err(ApiError::InternalServerError)?;
    3985              : 
    3986              :             // Set the importing flag on the tenant shards
    3987            0 :             self.inner
    3988            0 :                 .write()
    3989            0 :                 .unwrap()
    3990            0 :                 .tenants
    3991            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    3992            0 :                 .for_each(|(_id, shard)| shard.importing = TimelineImportState::Importing);
    3993              : 
    3994            0 :             match inserted {
    3995              :                 true => {
    3996            0 :                     tracing::info!(%tenant_id, %timeline_id, "Inserted timeline import");
    3997              :                 }
    3998              :                 false => {
    3999            0 :                     tracing::info!(%tenant_id, %timeline_id, "Timeline import entry already present");
    4000              :                 }
    4001              :             }
    4002              : 
    4003            0 :             None
    4004            0 :         } else if safekeepers || read_only {
    4005              :             // Note that for imported timelines, we do not create the timeline on the safekeepers
    4006              :             // straight away. Instead, we do it once the import has finalized, so that we know what
    4007              :             // start LSN to provide for the safekeepers. This is done in
    4008              :             // [`Self::finalize_timeline_import`].
    4009            0 :             let res = self
    4010            0 :                 .tenant_timeline_create_safekeepers(tenant_id, &timeline_info, read_only)
    4011            0 :                 .instrument(tracing::info_span!("timeline_create_safekeepers", %tenant_id, timeline_id=%timeline_info.timeline_id))
    4012            0 :                 .await?;
    4013            0 :             Some(res)
    4014              :         } else {
    4015            0 :             None
    4016              :         };
    4017              : 
    4018            0 :         Ok(TimelineCreateResponseStorcon {
    4019            0 :             timeline_info,
    4020            0 :             safekeepers: selected_safekeepers,
    4021            0 :         })
    4022            0 :     }
    4023              : 
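A stand-in sketch of when the safekeeper-side creation happens during tenant_timeline_create: imports defer it to finalization, otherwise it happens when safekeeper timelines are enabled globally or the branch is read-only; the helper name is invented for illustration:

fn create_on_safekeepers_now(is_import: bool, safekeepers_enabled: bool, read_only: bool) -> bool {
    // Imports are finalized later (once the start LSN is known); everything else follows the flag.
    !is_import && (safekeepers_enabled || read_only)
}
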
    4024              :     #[instrument(skip_all, fields(
    4025              :         tenant_id=%req.tenant_shard_id.tenant_id,
    4026              :         shard_id=%req.tenant_shard_id.shard_slug(),
    4027              :         timeline_id=%req.timeline_id,
    4028              :     ))]
    4029              :     pub(crate) async fn handle_timeline_shard_import_progress(
    4030              :         self: &Arc<Self>,
    4031              :         req: TimelineImportStatusRequest,
    4032              :     ) -> Result<ShardImportStatus, ApiError> {
    4033              :         let validity = self
    4034              :             .validate_shard_generation(req.tenant_shard_id, req.generation)
    4035              :             .await?;
    4036              :         match validity {
    4037              :             ShardGenerationValidity::Valid => {
    4038              :                 // fallthrough
    4039              :             }
    4040              :             ShardGenerationValidity::Mismatched { claimed, actual } => {
    4041              :                 tracing::info!(
    4042              :                     claimed=?claimed.into(),
    4043            0 :                     actual=?actual.and_then(|g| g.into()),
    4044              :                     "Rejecting import progress fetch from stale generation"
    4045              :                 );
    4046              : 
    4047              :                 return Err(ApiError::BadRequest(anyhow::anyhow!("Invalid generation")));
    4048              :             }
    4049              :         }
    4050              : 
    4051              :         let maybe_import = self
    4052              :             .persistence
    4053              :             .get_timeline_import(req.tenant_shard_id.tenant_id, req.timeline_id)
    4054              :             .await?;
    4055              : 
    4056            0 :         let import = maybe_import.ok_or_else(|| {
    4057            0 :             ApiError::NotFound(
    4058            0 :                 format!(
    4059            0 :                     "import for {}/{} not found",
    4060            0 :                     req.tenant_shard_id.tenant_id, req.timeline_id
    4061            0 :                 )
    4062            0 :                 .into(),
    4063            0 :             )
    4064            0 :         })?;
    4065              : 
    4066              :         import
    4067              :             .shard_statuses
    4068              :             .0
    4069              :             .get(&req.tenant_shard_id.to_index())
    4070              :             .cloned()
    4071            0 :             .ok_or_else(|| {
    4072            0 :                 ApiError::NotFound(
    4073            0 :                     format!("shard {} not found", req.tenant_shard_id.shard_slug()).into(),
    4074            0 :                 )
    4075            0 :             })
    4076              :     }
    4077              : 
    4078              :     #[instrument(skip_all, fields(
    4079              :         tenant_id=%req.tenant_shard_id.tenant_id,
    4080              :         shard_id=%req.tenant_shard_id.shard_slug(),
    4081              :         timeline_id=%req.timeline_id,
    4082              :     ))]
    4083              :     pub(crate) async fn handle_timeline_shard_import_progress_upcall(
    4084              :         self: &Arc<Self>,
    4085              :         req: PutTimelineImportStatusRequest,
    4086              :     ) -> Result<(), ApiError> {
    4087              :         let validity = self
    4088              :             .validate_shard_generation(req.tenant_shard_id, req.generation)
    4089              :             .await?;
    4090              :         match validity {
    4091              :             ShardGenerationValidity::Valid => {
    4092              :                 // fallthrough
    4093              :             }
    4094              :             ShardGenerationValidity::Mismatched { claimed, actual } => {
    4095              :                 tracing::info!(
    4096              :                     claimed=?claimed.into(),
    4097            0 :                     actual=?actual.and_then(|g| g.into()),
    4098              :                     "Rejecting import progress update from stale generation"
    4099              :                 );
    4100              : 
    4101              :                 return Err(ApiError::PreconditionFailed("Invalid generation".into()));
    4102              :             }
    4103              :         }
    4104              : 
    4105              :         let res = self
    4106              :             .persistence
    4107              :             .update_timeline_import(req.tenant_shard_id, req.timeline_id, req.status)
    4108              :             .await;
    4109              :         let timeline_import = match res {
    4110              :             Ok(Ok(Some(timeline_import))) => timeline_import,
    4111              :             Ok(Ok(None)) => {
    4112              :                 // Idempotency: we've already seen and handled this update.
    4113              :                 return Ok(());
    4114              :             }
    4115              :             Ok(Err(logical_err)) => {
    4116              :                 return Err(logical_err.into());
    4117              :             }
    4118              :             Err(db_err) => {
    4119              :                 return Err(db_err.into());
    4120              :             }
    4121              :         };
    4122              : 
    4123              :         tracing::info!(
    4124              :             tenant_id=%req.tenant_shard_id.tenant_id,
    4125              :             timeline_id=%req.timeline_id,
    4126              :             shard_id=%req.tenant_shard_id.shard_slug(),
    4127              :             "Updated timeline import status to: {timeline_import:?}");
    4128              : 
    4129              :         if timeline_import.is_complete() {
    4130              :             tokio::task::spawn({
    4131              :                 let this = self.clone();
    4132            0 :                 async move { this.finalize_timeline_import(timeline_import).await }
    4133              :             });
    4134              :         }
    4135              : 
    4136              :         Ok(())
    4137              :     }
    4138              : 
    4139              :     /// Check that a provided generation for some tenant shard is the most recent one.
    4140              :     ///
    4141              :     /// Validate against the in-memory state first and, if that passes, validate against the
    4142              :     /// database state, which is authoritative.
    4143            0 :     async fn validate_shard_generation(
    4144            0 :         self: &Arc<Self>,
    4145            0 :         tenant_shard_id: TenantShardId,
    4146            0 :         generation: Generation,
    4147            0 :     ) -> Result<ShardGenerationValidity, ApiError> {
    4148              :         {
    4149            0 :             let locked = self.inner.read().unwrap();
    4150            0 :             let tenant_shard =
    4151            0 :                 locked
    4152            0 :                     .tenants
    4153            0 :                     .get(&tenant_shard_id)
    4154            0 :                     .ok_or(ApiError::InternalServerError(anyhow::anyhow!(
    4155            0 :                         "{} shard not found",
    4156            0 :                         tenant_shard_id
    4157            0 :                     )))?;
    4158              : 
    4159            0 :             if tenant_shard.generation != Some(generation) {
    4160            0 :                 return Ok(ShardGenerationValidity::Mismatched {
    4161            0 :                     claimed: generation,
    4162            0 :                     actual: tenant_shard.generation,
    4163            0 :                 });
    4164            0 :             }
    4165              :         }
    4166              : 
    4167            0 :         let mut db_generations = self
    4168            0 :             .persistence
    4169            0 :             .shard_generations(std::iter::once(&tenant_shard_id))
    4170            0 :             .await?;
    4171            0 :         let (_tid, db_generation) =
    4172            0 :             db_generations
    4173            0 :                 .pop()
    4174            0 :                 .ok_or(ApiError::InternalServerError(anyhow::anyhow!(
    4175            0 :                     "{} shard not found",
    4176            0 :                     tenant_shard_id
    4177            0 :                 )))?;
    4178              : 
    4179            0 :         if db_generation != Some(generation) {
    4180            0 :             return Ok(ShardGenerationValidity::Mismatched {
    4181            0 :                 claimed: generation,
    4182            0 :                 actual: db_generation,
    4183            0 :             });
    4184            0 :         }
    4185              : 
    4186            0 :         Ok(ShardGenerationValidity::Valid)
    4187            0 :     }
    4188              : 
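A simplified sketch of that two-phase validation, using plain u32 generations and an invented lookup closure instead of the controller's persistence layer:

enum GenerationValidity {
    Valid,
    Mismatched { claimed: u32, actual: Option<u32> },
}

fn validate_generation(
    claimed: u32,
    in_memory: Option<u32>,
    authoritative_lookup: impl FnOnce() -> Option<u32>,
) -> GenerationValidity {
    // Cheap in-memory check first: reject obviously stale callers without a database round-trip.
    if in_memory != Some(claimed) {
        return GenerationValidity::Mismatched { claimed, actual: in_memory };
    }
    // Then confirm against the authoritative store, since the cached value could itself be stale.
    let stored = authoritative_lookup();
    if stored != Some(claimed) {
        return GenerationValidity::Mismatched { claimed, actual: stored };
    }
    GenerationValidity::Valid
}
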
    4189              :     /// Finalize the import of a timeline
    4190              :     ///
    4191              :     /// This method should be called once all shards have reported that the import is complete.
    4192              :     /// Firstly, it polls the post-import timeline activation endpoint exposed by the pageserver.
    4193              :     /// Once the timeline is active on all shards, the timeline also gets created on the
    4194              :     /// safekeepers. Finally, it notifies cplane of the import completion (whether failed or
    4195              :     /// successful), and removes the import from the database and from in-memory state.
    4196              :     ///
    4197              :     /// If this method gets pre-empted by shutdown, it will be called again at start-up (ongoing
    4198              :     /// imports are stored in the database).
    4199              :     ///
    4200              :     /// # Cancel-Safety
    4201              :     /// Not cancel safe.
    4202              :     /// If the caller stops polling, the import will not be removed from
    4203              :     /// [`ServiceState::imports_finalizing`].
    4204              :     #[instrument(skip_all, fields(
    4205              :         tenant_id=%import.tenant_id,
    4206              :         timeline_id=%import.timeline_id,
    4207              :     ))]
    4208              : 
    4209              :     async fn finalize_timeline_import(
    4210              :         self: &Arc<Self>,
    4211              :         import: TimelineImport,
    4212              :     ) -> Result<(), TimelineImportFinalizeError> {
    4213              :         let tenant_timeline = (import.tenant_id, import.timeline_id);
    4214              : 
    4215              :         let (_finalize_import_guard, cancel) = {
    4216              :             let mut locked = self.inner.write().unwrap();
    4217              :             let gate = Gate::default();
    4218              :             let cancel = CancellationToken::default();
    4219              : 
    4220              :             let guard = gate.enter().unwrap();
    4221              : 
    4222              :             locked.imports_finalizing.insert(
    4223              :                 tenant_timeline,
    4224              :                 FinalizingImport {
    4225              :                     gate,
    4226              :                     cancel: cancel.clone(),
    4227              :                 },
    4228              :             );
    4229              : 
    4230              :             (guard, cancel)
    4231              :         };
    4232              : 
    4233              :         let res = tokio::select! {
    4234              :             res = self.finalize_timeline_import_impl(import) => {
    4235              :                 res
    4236              :             },
    4237              :             _ = cancel.cancelled() => {
    4238              :                 Err(TimelineImportFinalizeError::Cancelled)
    4239              :             }
    4240              :         };
    4241              : 
    4242              :         let mut locked = self.inner.write().unwrap();
    4243              :         locked.imports_finalizing.remove(&tenant_timeline);
    4244              : 
    4245              :         res
    4246              :     }
    4247              : 
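A minimal sketch of the cancellation pattern used above, assuming the tokio and tokio_util crates; the Gate that the real code also registers for shutdown tracking is omitted here:

use tokio_util::sync::CancellationToken;

async fn run_cancellable<T>(
    work: impl std::future::Future<Output = T>,
    cancel: &CancellationToken,
) -> Option<T> {
    tokio::select! {
        // The work future and the cancellation signal race; whichever finishes first wins.
        res = work => Some(res),
        _ = cancel.cancelled() => None, // the caller maps None to a "Cancelled" error
    }
}
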
    4248            0 :     async fn finalize_timeline_import_impl(
    4249            0 :         self: &Arc<Self>,
    4250            0 :         import: TimelineImport,
    4251            0 :     ) -> Result<(), TimelineImportFinalizeError> {
    4252            0 :         tracing::info!("Finalizing timeline import");
    4253              : 
    4254            0 :         pausable_failpoint!("timeline-import-pre-cplane-notification");
    4255              : 
    4256            0 :         let tenant_id = import.tenant_id;
    4257            0 :         let timeline_id = import.timeline_id;
    4258              : 
    4259            0 :         let import_error = import.completion_error();
    4260            0 :         match import_error {
    4261            0 :             Some(err) => {
    4262            0 :                 self.notify_cplane_and_delete_import(tenant_id, timeline_id, Err(err))
    4263            0 :                     .await?;
    4264            0 :                 tracing::warn!("Timeline import completed with shard errors");
    4265            0 :                 Ok(())
    4266              :             }
    4267            0 :             None => match self.activate_timeline_post_import(&import).await {
    4268            0 :                 Ok(timeline_info) => {
    4269            0 :                     tracing::info!("Post import timeline activation complete");
    4270              : 
    4271            0 :                     if self.config.timelines_onto_safekeepers {
    4272              :                         // Now that we know the start LSN of this timeline, create it on the
    4273              :                         // safekeepers.
    4274            0 :                         self.tenant_timeline_create_safekeepers_until_success(
    4275            0 :                             import.tenant_id,
    4276            0 :                             timeline_info,
    4277            0 :                         )
    4278            0 :                         .await?;
    4279            0 :                     }
    4280              : 
    4281            0 :                     self.notify_cplane_and_delete_import(tenant_id, timeline_id, Ok(()))
    4282            0 :                         .await?;
    4283              : 
    4284            0 :                     tracing::info!("Timeline import completed successfully");
    4285            0 :                     Ok(())
    4286              :                 }
    4287              :                 Err(TimelineImportFinalizeError::ShuttingDown) => {
    4288              :                     // We got pre-empted by shut down and will resume after the restart.
    4289            0 :                     Err(TimelineImportFinalizeError::ShuttingDown)
    4290              :                 }
    4291            0 :                 Err(err) => {
    4292              :                     // Any finalize error apart from shut down is permanent and requires us to notify
    4293              :                     // cplane such that it can clean up.
    4294            0 :                     tracing::error!("Import finalize failed with permanent error: {err}");
    4295            0 :                     self.notify_cplane_and_delete_import(
    4296            0 :                         tenant_id,
    4297            0 :                         timeline_id,
    4298            0 :                         Err(err.to_string()),
    4299            0 :                     )
    4300            0 :                     .await?;
    4301            0 :                     Err(err)
    4302              :                 }
    4303              :             },
    4304              :         }
    4305            0 :     }
    4306              : 
    4307            0 :     async fn notify_cplane_and_delete_import(
    4308            0 :         self: &Arc<Self>,
    4309            0 :         tenant_id: TenantId,
    4310            0 :         timeline_id: TimelineId,
    4311            0 :         import_result: ImportResult,
    4312            0 :     ) -> Result<(), TimelineImportFinalizeError> {
    4313            0 :         let import_failed = import_result.is_err();
    4314            0 :         tracing::info!(%import_failed, "Notifying cplane of import completion");
    4315              : 
    4316            0 :         let client = UpcallClient::new(self.get_config(), self.cancel.child_token());
    4317            0 :         client
    4318            0 :             .notify_import_complete(tenant_id, timeline_id, import_result)
    4319            0 :             .await
    4320            0 :             .map_err(|_err| TimelineImportFinalizeError::ShuttingDown)?;
    4321              : 
    4322            0 :         if let Err(err) = self
    4323            0 :             .persistence
    4324            0 :             .delete_timeline_import(tenant_id, timeline_id)
    4325            0 :             .await
    4326              :         {
    4327            0 :             tracing::warn!("Failed to delete timeline import entry from database: {err}");
    4328            0 :         }
    4329              : 
    4330            0 :         self.inner
    4331            0 :             .write()
    4332            0 :             .unwrap()
    4333            0 :             .tenants
    4334            0 :             .range_mut(TenantShardId::tenant_range(tenant_id))
    4335            0 :             .for_each(|(_id, shard)| shard.importing = TimelineImportState::Idle);
    4336              : 
    4337            0 :         Ok(())
    4338            0 :     }
    4339              : 
    4340              :     /// Activate an imported timeline on all shards once the import is complete.
    4341              :     /// Returns the [`TimelineInfo`] reported by shard zero.
    4342            0 :     async fn activate_timeline_post_import(
    4343            0 :         self: &Arc<Self>,
    4344            0 :         import: &TimelineImport,
    4345            0 :     ) -> Result<TimelineInfo, TimelineImportFinalizeError> {
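                      :         // Per-request timeout for each shard's activation call; the loop below retries
                      :         // failed shards with a short pause until all succeed or we are shut down.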
    4346              :         const TIMELINE_ACTIVATE_TIMEOUT: Duration = Duration::from_millis(128);
    4347              : 
    4348            0 :         let mut shards_to_activate: HashSet<ShardIndex> =
    4349            0 :             import.shard_statuses.0.keys().cloned().collect();
    4350            0 :         let mut shard_zero_timeline_info = None;
    4351              : 
    4352            0 :         while !shards_to_activate.is_empty() {
    4353            0 :             if self.cancel.is_cancelled() {
    4354            0 :                 return Err(TimelineImportFinalizeError::ShuttingDown);
    4355            0 :             }
    4356              : 
    4357            0 :             let targets = {
    4358            0 :                 let locked = self.inner.read().unwrap();
    4359            0 :                 let mut targets = Vec::new();
    4360              : 
    4361            0 :                 for (tenant_shard_id, shard) in locked
    4362            0 :                     .tenants
    4363            0 :                     .range(TenantShardId::tenant_range(import.tenant_id))
    4364              :                 {
    4365            0 :                     if !import
    4366            0 :                         .shard_statuses
    4367            0 :                         .0
    4368            0 :                         .contains_key(&tenant_shard_id.to_index())
    4369              :                     {
    4370            0 :                         return Err(TimelineImportFinalizeError::MismatchedShards(
    4371            0 :                             tenant_shard_id.to_index(),
    4372            0 :                         ));
    4373            0 :                     }
    4374              : 
    4375            0 :                     if let Some(node_id) = shard.intent.get_attached() {
    4376            0 :                         let node = locked
    4377            0 :                             .nodes
    4378            0 :                             .get(node_id)
    4379            0 :                             .expect("Pageservers may not be deleted while referenced");
    4380            0 :                         targets.push((*tenant_shard_id, node.clone()));
    4381            0 :                     }
    4382              :                 }
    4383              : 
    4384            0 :                 targets
    4385              :             };
    4386              : 
    4387            0 :             let targeted_tenant_shards: Vec<_> = targets.iter().map(|(tid, _node)| *tid).collect();
    4388              : 
    4389            0 :             let results = self
    4390            0 :                 .tenant_for_shards_api(
    4391            0 :                     targets,
    4392            0 :                     |tenant_shard_id, client| async move {
    4393            0 :                         client
    4394            0 :                             .activate_post_import(
    4395            0 :                                 tenant_shard_id,
    4396            0 :                                 import.timeline_id,
    4397            0 :                                 TIMELINE_ACTIVATE_TIMEOUT,
    4398            0 :                             )
    4399            0 :                             .await
    4400            0 :                     },
    4401              :                     1,
    4402              :                     1,
    4403              :                     SHORT_RECONCILE_TIMEOUT,
    4404            0 :                     &self.cancel,
    4405              :                 )
    4406            0 :                 .await;
    4407              : 
    4408            0 :             let mut failed = 0;
    4409            0 :             for (tid, result) in targeted_tenant_shards.iter().zip(results.into_iter()) {
    4410            0 :                 match result {
    4411            0 :                     Ok(ok) => {
    4412            0 :                         if tid.is_shard_zero() {
    4413            0 :                             shard_zero_timeline_info = Some(ok);
    4414            0 :                         }
    4415              : 
    4416            0 :                         shards_to_activate.remove(&tid.to_index());
    4417              :                     }
    4418            0 :                     Err(_err) => {
    4419            0 :                         failed += 1;
    4420            0 :                     }
    4421              :                 }
    4422              :             }
    4423              : 
    4424            0 :             if failed > 0 {
    4425            0 :                 tracing::info!(
    4426            0 :                     "Failed to activate timeline on {failed} shards post import. Will retry"
    4427              :                 );
    4428            0 :             }
    4429              : 
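                      :             // Brief backoff before retrying the remaining shards, while staying responsive
                      :             // to shutdown.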
    4430            0 :             tokio::select! {
    4431            0 :                 _ = tokio::time::sleep(Duration::from_millis(250)) => {},
    4432            0 :                 _ = self.cancel.cancelled() => {
    4433            0 :                     return Err(TimelineImportFinalizeError::ShuttingDown);
    4434              :                 }
    4435              :             }
    4436              :         }
    4437              : 
    4438            0 :         Ok(shard_zero_timeline_info.expect("All shards replied"))
    4439            0 :     }
    4440              : 
    4441            0 :     async fn finalize_timeline_imports(self: &Arc<Self>, imports: Vec<TimelineImport>) {
    4442            0 :         futures::future::join_all(
    4443            0 :             imports
    4444            0 :                 .into_iter()
    4445            0 :                 .map(|import| self.finalize_timeline_import(import)),
    4446              :         )
    4447            0 :         .await;
    4448            0 :     }
    4449              : 
    4450              :     /// Delete a timeline import if it exists
    4451              :     ///
    4452              :     /// Firstly, delete the entry from the database. Any status updates
    4453              :     /// from pageservers after the deletion will fail with a 404, so the
    4454              :     /// import cannot progress into the finalizing state unless it is already there.
    4455              :     /// Secondly, cancel the finalization if one is in progress.
    4456            0 :     pub(crate) async fn maybe_delete_timeline_import(
    4457            0 :         self: &Arc<Self>,
    4458            0 :         tenant_id: TenantId,
    4459            0 :         timeline_id: TimelineId,
    4460            0 :     ) -> Result<(), DatabaseError> {
    4461            0 :         let tenant_has_ongoing_import = {
    4462            0 :             let locked = self.inner.read().unwrap();
    4463            0 :             locked
    4464            0 :                 .tenants
    4465            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4466            0 :                 .any(|(_tid, shard)| shard.importing == TimelineImportState::Importing)
    4467              :         };
    4468              : 
    4469            0 :         if !tenant_has_ongoing_import {
    4470            0 :             return Ok(());
    4471            0 :         }
    4472              : 
    4473            0 :         self.persistence
    4474            0 :             .delete_timeline_import(tenant_id, timeline_id)
    4475            0 :             .await?;
    4476              : 
    4477            0 :         let maybe_finalizing = {
    4478            0 :             let mut locked = self.inner.write().unwrap();
    4479            0 :             locked.imports_finalizing.remove(&(tenant_id, timeline_id))
    4480              :         };
    4481              : 
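                      :         // If a finalization task was in flight, cancel it and wait on its gate so that
                      :         // it has fully stopped before we return.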
    4482            0 :         if let Some(finalizing) = maybe_finalizing {
    4483            0 :             finalizing.cancel.cancel();
    4484            0 :             finalizing.gate.close().await;
    4485            0 :         }
    4486              : 
    4487            0 :         Ok(())
    4488            0 :     }
    4489              : 
    4490            0 :     pub(crate) async fn tenant_timeline_archival_config(
    4491            0 :         &self,
    4492            0 :         tenant_id: TenantId,
    4493            0 :         timeline_id: TimelineId,
    4494            0 :         req: TimelineArchivalConfigRequest,
    4495            0 :     ) -> Result<(), ApiError> {
    4496            0 :         tracing::info!(
    4497            0 :             "Setting archival config of timeline {tenant_id}/{timeline_id} to '{:?}'",
    4498              :             req.state
    4499              :         );
    4500              : 
    4501            0 :         let _tenant_lock = trace_shared_lock(
    4502            0 :             &self.tenant_op_locks,
    4503            0 :             tenant_id,
    4504            0 :             TenantOperations::TimelineArchivalConfig,
    4505            0 :         )
    4506            0 :         .await;
    4507              : 
    4508            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4509            0 :             if targets.0.is_empty() {
    4510            0 :                 return Err(ApiError::NotFound(
    4511            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4512            0 :                 ));
    4513            0 :             }
    4514            0 :             async fn config_one(
    4515            0 :                 tenant_shard_id: TenantShardId,
    4516            0 :                 timeline_id: TimelineId,
    4517            0 :                 node: Node,
    4518            0 :                 http_client: reqwest::Client,
    4519            0 :                 jwt: Option<String>,
    4520            0 :                 req: TimelineArchivalConfigRequest,
    4521            0 :             ) -> Result<(), ApiError> {
    4522            0 :                 tracing::info!(
    4523            0 :                     "Setting archival config of timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4524              :                 );
    4525              : 
    4526            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    4527              : 
    4528            0 :                 client
    4529            0 :                     .timeline_archival_config(tenant_shard_id, timeline_id, &req)
    4530            0 :                     .await
    4531            0 :                     .map_err(|e| match e {
    4532            0 :                         mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg) => {
    4533            0 :                             ApiError::PreconditionFailed(msg.into_boxed_str())
    4534              :                         }
    4535            0 :                         _ => passthrough_api_error(&node, e),
    4536            0 :                     })
    4537            0 :             }
    4538              : 
    4539              :             // no shard needs to go first/last; the operation should be idempotent
    4540              :             // TODO: it would be great to ensure that all shards return the same error
    4541            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4542            0 :             let results = self
    4543            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    4544            0 :                     futures::FutureExt::boxed(config_one(
    4545            0 :                         tenant_shard_id,
    4546            0 :                         timeline_id,
    4547            0 :                         node,
    4548            0 :                         self.http_client.clone(),
    4549            0 :                         self.config.pageserver_jwt_token.clone(),
    4550            0 :                         req.clone(),
    4551            0 :                     ))
    4552            0 :                 })
    4553            0 :                 .await?;
    4554            0 :             assert!(!results.is_empty(), "must have at least one result");
    4555              : 
    4556            0 :             Ok(())
    4557            0 :         }).await?
    4558            0 :     }
    4559              : 
    4560            0 :     pub(crate) async fn tenant_timeline_detach_ancestor(
    4561            0 :         &self,
    4562            0 :         tenant_id: TenantId,
    4563            0 :         timeline_id: TimelineId,
    4564            0 :         behavior: Option<DetachBehavior>,
    4565            0 :     ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
    4566            0 :         tracing::info!("Detaching timeline {tenant_id}/{timeline_id}");
    4567              : 
    4568            0 :         let _tenant_lock = trace_shared_lock(
    4569            0 :             &self.tenant_op_locks,
    4570            0 :             tenant_id,
    4571            0 :             TenantOperations::TimelineDetachAncestor,
    4572            0 :         )
    4573            0 :         .await;
    4574              : 
    4575            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4576            0 :             if targets.0.is_empty() {
    4577            0 :                 return Err(ApiError::NotFound(
    4578            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4579            0 :                 ));
    4580            0 :             }
    4581              : 
    4582            0 :             async fn detach_one(
    4583            0 :                 tenant_shard_id: TenantShardId,
    4584            0 :                 timeline_id: TimelineId,
    4585            0 :                 node: Node,
    4586            0 :                 http_client: reqwest::Client,
    4587            0 :                 jwt: Option<String>,
    4588            0 :                 behavior: Option<DetachBehavior>,
    4589            0 :             ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
    4590            0 :                 tracing::info!(
    4591            0 :                     "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4592              :                 );
    4593              : 
    4594            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    4595              : 
    4596            0 :                 client
    4597            0 :                     .timeline_detach_ancestor(tenant_shard_id, timeline_id, behavior)
    4598            0 :                     .await
    4599            0 :                     .map_err(|e| {
    4600              :                         use mgmt_api::Error;
    4601              : 
    4602            0 :                         match e {
    4603              :                             // no ancestor (ever)
    4604            0 :                             Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
    4605            0 :                                 "{node}: {}",
    4606            0 :                                 msg.strip_prefix("Conflict: ").unwrap_or(&msg)
    4607            0 :                             )),
    4608              :                             // too many ancestors
    4609            0 :                             Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
    4610            0 :                                 ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
    4611              :                             }
    4612            0 :                             Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, msg) => {
    4613              :                                 // Avoid turning these into conflicts, to remain compatible with
    4614              :                                 // pageservers: 500 errors are, sadly, retryable for timeline ancestor
    4615              :                                 // detach.
    4616            0 :                                 ApiError::InternalServerError(anyhow::anyhow!("{node}: {msg}"))
    4617              :                             }
    4618              :                             // rest can be mapped as usual
    4619            0 :                             other => passthrough_api_error(&node, other),
    4620              :                         }
    4621            0 :                     })
    4622            0 :                     .map(|res| (tenant_shard_id.shard_number, res))
    4623            0 :             }
    4624              : 
    4625              :             // no shard needs to go first/last; the operation should be idempotent
    4626            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4627            0 :             let mut results = self
    4628            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    4629            0 :                     futures::FutureExt::boxed(detach_one(
    4630            0 :                         tenant_shard_id,
    4631            0 :                         timeline_id,
    4632            0 :                         node,
    4633            0 :                         self.http_client.clone(),
    4634            0 :                         self.config.pageserver_jwt_token.clone(),
    4635            0 :                         behavior,
    4636            0 :                     ))
    4637            0 :                 })
    4638            0 :                 .await?;
    4639              : 
    4640            0 :             let any = results.pop().expect("we must have at least one response");
    4641              : 
    4642            0 :             let mismatching = results
    4643            0 :                 .iter()
    4644            0 :                 .filter(|(_, res)| res != &any.1)
    4645            0 :                 .collect::<Vec<_>>();
    4646            0 :             if !mismatching.is_empty() {
    4647              :                 // this can be hit by races, which should not happen because of the operation lock on the cplane side
    4648            0 :                 let matching = results.len() - mismatching.len();
    4649            0 :                 tracing::error!(
    4650              :                     matching,
    4651              :                     compared_against=?any,
    4652              :                     ?mismatching,
    4653            0 :                     "shards returned different results"
    4654              :                 );
    4655              : 
    4656            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!("pageservers returned mixed results for ancestor detach; manual intervention is required.")));
    4657            0 :             }
    4658              : 
    4659            0 :             Ok(any.1)
    4660            0 :         }).await?
    4661            0 :     }
    4662              : 
    4663            0 :     pub(crate) async fn tenant_timeline_block_unblock_gc(
    4664            0 :         &self,
    4665            0 :         tenant_id: TenantId,
    4666            0 :         timeline_id: TimelineId,
    4667            0 :         dir: BlockUnblock,
    4668            0 :     ) -> Result<(), ApiError> {
    4669            0 :         let _tenant_lock = trace_shared_lock(
    4670            0 :             &self.tenant_op_locks,
    4671            0 :             tenant_id,
    4672            0 :             TenantOperations::TimelineGcBlockUnblock,
    4673            0 :         )
    4674            0 :         .await;
    4675              : 
    4676            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4677            0 :             if targets.0.is_empty() {
    4678            0 :                 return Err(ApiError::NotFound(
    4679            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4680            0 :                 ));
    4681            0 :             }
    4682              : 
    4683            0 :             async fn do_one(
    4684            0 :                 tenant_shard_id: TenantShardId,
    4685            0 :                 timeline_id: TimelineId,
    4686            0 :                 node: Node,
    4687            0 :                 http_client: reqwest::Client,
    4688            0 :                 jwt: Option<String>,
    4689            0 :                 dir: BlockUnblock,
    4690            0 :             ) -> Result<(), ApiError> {
    4691            0 :                 let client = PageserverClient::new(
    4692            0 :                     node.get_id(),
    4693            0 :                     http_client,
    4694            0 :                     node.base_url(),
    4695            0 :                     jwt.as_deref(),
    4696              :                 );
    4697              : 
    4698            0 :                 client
    4699            0 :                     .timeline_block_unblock_gc(tenant_shard_id, timeline_id, dir)
    4700            0 :                     .await
    4701            0 :                     .map_err(|e| passthrough_api_error(&node, e))
    4702            0 :             }
    4703              : 
    4704              :             // no shard needs to go first/last; the operation should be idempotent
    4705            0 :             let locations = targets
    4706            0 :                 .0
    4707            0 :                 .iter()
    4708            0 :                 .map(|t| (*t.0, t.1.latest.node.clone()))
    4709            0 :                 .collect();
    4710            0 :             self.tenant_for_shards(locations, |tenant_shard_id, node| {
    4711            0 :                 futures::FutureExt::boxed(do_one(
    4712            0 :                     tenant_shard_id,
    4713            0 :                     timeline_id,
    4714            0 :                     node,
    4715            0 :                     self.http_client.clone(),
    4716            0 :                     self.config.pageserver_jwt_token.clone(),
    4717            0 :                     dir,
    4718            0 :                 ))
    4719            0 :             })
    4720            0 :             .await
    4721            0 :         })
    4722            0 :         .await??;
    4723            0 :         Ok(())
    4724            0 :     }
    4725              : 
    4726            0 :     pub(crate) async fn tenant_timeline_lsn_lease(
    4727            0 :         &self,
    4728            0 :         tenant_id: TenantId,
    4729            0 :         timeline_id: TimelineId,
    4730            0 :         lsn: Lsn,
    4731            0 :     ) -> Result<LsnLease, ApiError> {
    4732            0 :         let _tenant_lock = trace_shared_lock(
    4733            0 :             &self.tenant_op_locks,
    4734            0 :             tenant_id,
    4735            0 :             TenantOperations::TimelineLsnLease,
    4736            0 :         )
    4737            0 :         .await;
    4738              : 
    4739            0 :         let targets = {
    4740            0 :             let locked = self.inner.read().unwrap();
    4741            0 :             let mut targets = Vec::new();
    4742              : 
    4743              :             // If the request got an unsharded tenant id, then apply
    4744              :             // the operation to all shards. Otherwise, apply it to a specific shard.
    4745            0 :             let shards_range = TenantShardId::tenant_range(tenant_id);
    4746              : 
    4747            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4748            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4749            0 :                     let node = locked
    4750            0 :                         .nodes
    4751            0 :                         .get(node_id)
    4752            0 :                         .expect("Pageservers may not be deleted while referenced");
    4753            0 : 
    4754            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4755            0 :                 }
    4756              :             }
    4757            0 :             targets
    4758              :         };
    4759              : 
    4760            0 :         let res = self
    4761            0 :             .tenant_for_shards_api(
    4762            0 :                 targets,
    4763            0 :                 |tenant_shard_id, client| async move {
    4764            0 :                     client
    4765            0 :                         .timeline_lease_lsn(tenant_shard_id, timeline_id, lsn)
    4766            0 :                         .await
    4767            0 :                 },
    4768              :                 1,
    4769              :                 1,
    4770              :                 SHORT_RECONCILE_TIMEOUT,
    4771            0 :                 &self.cancel,
    4772              :             )
    4773            0 :             .await;
    4774              : 
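                      :         // Each shard grants its own lease; report the earliest expiry so that the
                      :         // returned lease is valid on every shard.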
    4775            0 :         let mut valid_until = None;
    4776            0 :         for r in res {
    4777            0 :             match r {
    4778            0 :                 Ok(lease) => {
    4779            0 :                     if let Some(ref mut valid_until) = valid_until {
    4780            0 :                         *valid_until = std::cmp::min(*valid_until, lease.valid_until);
    4781            0 :                     } else {
    4782            0 :                         valid_until = Some(lease.valid_until);
    4783            0 :                     }
    4784              :                 }
    4785            0 :                 Err(e) => {
    4786            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    4787              :                 }
    4788              :             }
    4789              :         }
    4790            0 :         Ok(LsnLease {
    4791            0 :             valid_until: valid_until.unwrap_or_else(SystemTime::now),
    4792            0 :         })
    4793            0 :     }
    4794              : 
    4795            0 :     pub(crate) async fn tenant_timeline_download_heatmap_layers(
    4796            0 :         &self,
    4797            0 :         tenant_shard_id: TenantShardId,
    4798            0 :         timeline_id: TimelineId,
    4799            0 :         concurrency: Option<usize>,
    4800            0 :         recurse: bool,
    4801            0 :     ) -> Result<(), ApiError> {
    4802            0 :         let _tenant_lock = trace_shared_lock(
    4803            0 :             &self.tenant_op_locks,
    4804            0 :             tenant_shard_id.tenant_id,
    4805            0 :             TenantOperations::DownloadHeatmapLayers,
    4806            0 :         )
    4807            0 :         .await;
    4808              : 
    4809            0 :         let targets = {
    4810            0 :             let locked = self.inner.read().unwrap();
    4811            0 :             let mut targets = Vec::new();
    4812              : 
    4813              :             // If the request got an unsharded tenant id, then apply
    4814              :             // the operation to all shards. Otherwise, apply it to a specific shard.
    4815            0 :             let shards_range = if tenant_shard_id.is_unsharded() {
    4816            0 :                 TenantShardId::tenant_range(tenant_shard_id.tenant_id)
    4817              :             } else {
    4818            0 :                 tenant_shard_id.range()
    4819              :             };
    4820              : 
    4821            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4822            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4823            0 :                     let node = locked
    4824            0 :                         .nodes
    4825            0 :                         .get(node_id)
    4826            0 :                         .expect("Pageservers may not be deleted while referenced");
    4827            0 : 
    4828            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4829            0 :                 }
    4830              :             }
    4831            0 :             targets
    4832              :         };
    4833              : 
    4834            0 :         self.tenant_for_shards_api(
    4835            0 :             targets,
    4836            0 :             |tenant_shard_id, client| async move {
    4837            0 :                 client
    4838            0 :                     .timeline_download_heatmap_layers(
    4839            0 :                         tenant_shard_id,
    4840            0 :                         timeline_id,
    4841            0 :                         concurrency,
    4842            0 :                         recurse,
    4843            0 :                     )
    4844            0 :                     .await
    4845            0 :             },
    4846              :             1,
    4847              :             1,
    4848              :             SHORT_RECONCILE_TIMEOUT,
    4849            0 :             &self.cancel,
    4850              :         )
    4851            0 :         .await;
    4852              : 
    4853            0 :         Ok(())
    4854            0 :     }
    4855              : 
    4856              :     /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
    4857              :     ///
    4858              :     /// On success, the returned vector contains exactly the same number of elements as the input `locations`
    4859              :     /// and the element at index `i` is the result of `req_fn(locations[i])`.
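                      :     ///
                      :     /// A minimal usage sketch (`do_one` is a hypothetical helper returning
                      :     /// `Result<R, ApiError>`; see the archival config and timeline deletion paths in
                      :     /// this file for real call sites):
                      :     ///
                      :     /// ```ignore
                      :     /// let results = self
                      :     ///     .tenant_for_shards(locations, |tenant_shard_id, node| {
                      :     ///         futures::FutureExt::boxed(do_one(tenant_shard_id, node))
                      :     ///     })
                      :     ///     .await?;
                      :     /// ```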
    4860            0 :     async fn tenant_for_shards<F, R>(
    4861            0 :         &self,
    4862            0 :         locations: Vec<(TenantShardId, Node)>,
    4863            0 :         mut req_fn: F,
    4864            0 :     ) -> Result<Vec<R>, ApiError>
    4865            0 :     where
    4866            0 :         F: FnMut(
    4867            0 :             TenantShardId,
    4868            0 :             Node,
    4869            0 :         )
    4870            0 :             -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
    4871            0 :     {
    4872            0 :         let mut futs = FuturesUnordered::new();
    4873            0 :         let mut results = Vec::with_capacity(locations.len());
    4874              : 
    4875            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4876            0 :             let fut = req_fn(tenant_shard_id, node);
    4877            0 :             futs.push(async move { (idx, fut.await) });
    4878              :         }
    4879              : 
    4880            0 :         while let Some((idx, r)) = futs.next().await {
    4881            0 :             results.push((idx, r?));
    4882              :         }
    4883              : 
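                      :         // FuturesUnordered yields completions in arbitrary order; sort by the original
                      :         // index to restore the order of the input `locations`.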
    4884            0 :         results.sort_by_key(|(idx, _)| *idx);
    4885            0 :         Ok(results.into_iter().map(|(_, r)| r).collect())
    4886            0 :     }
    4887              : 
    4888              :     /// Concurrently invoke a pageserver API call on many shards at once.
    4889              :     ///
    4890              :     /// The returned Vec has the same length as the `locations` Vec,
    4891              :     /// and the element at index `i` is the result of `op(locations[i])`.
    4892            0 :     pub(crate) async fn tenant_for_shards_api<T, O, F>(
    4893            0 :         &self,
    4894            0 :         locations: Vec<(TenantShardId, Node)>,
    4895            0 :         op: O,
    4896            0 :         warn_threshold: u32,
    4897            0 :         max_retries: u32,
    4898            0 :         timeout: Duration,
    4899            0 :         cancel: &CancellationToken,
    4900            0 :     ) -> Vec<mgmt_api::Result<T>>
    4901            0 :     where
    4902            0 :         O: Fn(TenantShardId, PageserverClient) -> F + Copy,
    4903            0 :         F: std::future::Future<Output = mgmt_api::Result<T>>,
    4904            0 :     {
    4905            0 :         let mut futs = FuturesUnordered::new();
    4906            0 :         let mut results = Vec::with_capacity(locations.len());
    4907              : 
    4908            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4909            0 :             futs.push(async move {
    4910            0 :                 let r = node
    4911            0 :                     .with_client_retries(
    4912            0 :                         |client| op(tenant_shard_id, client),
    4913            0 :                         &self.http_client,
    4914            0 :                         &self.config.pageserver_jwt_token,
    4915            0 :                         warn_threshold,
    4916            0 :                         max_retries,
    4917            0 :                         timeout,
    4918            0 :                         cancel,
    4919              :                     )
    4920            0 :                     .await;
    4921            0 :                 (idx, r)
    4922            0 :             });
    4923              :         }
    4924              : 
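                      :         // A `None` from `with_client_retries` means the call did not complete (e.g. it
                      :         // was cancelled); map it to `Error::Cancelled` so that every location still gets
                      :         // an entry in the result vector.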
    4925            0 :         while let Some((idx, r)) = futs.next().await {
    4926            0 :             results.push((idx, r.unwrap_or(Err(mgmt_api::Error::Cancelled))));
    4927            0 :         }
    4928              : 
    4929            0 :         results.sort_by_key(|(idx, _)| *idx);
    4930            0 :         results.into_iter().map(|(_, r)| r).collect()
    4931            0 :     }
    4932              : 
    4933              :     /// Helper for safely working with the shards in a tenant remotely on pageservers, for example
    4934              :     /// when creating and deleting timelines:
    4935              :     /// - Makes sure shards are attached somewhere if they weren't already
    4936              :     /// - Looks up the shards and the nodes where they were most recently attached
    4937              :     /// - Guarantees that after the inner function returns, the shards' generations haven't moved on: this
    4938              :     ///   ensures that the remote operation acted on the most recent generation, and is therefore durable.
    4939            0 :     async fn tenant_remote_mutation<R, O, F>(
    4940            0 :         &self,
    4941            0 :         tenant_id: TenantId,
    4942            0 :         op: O,
    4943            0 :     ) -> Result<R, ApiError>
    4944            0 :     where
    4945            0 :         O: FnOnce(TenantMutationLocations) -> F,
    4946            0 :         F: std::future::Future<Output = R>,
    4947            0 :     {
    4948            0 :         let mutation_locations = {
    4949            0 :             let mut locations = TenantMutationLocations::default();
    4950              : 
    4951              :             // Load the currently attached pageservers for the latest generation of each shard.  This can
    4952              :             // run concurrently with reconciliations, and it is not guaranteed that the node we find here
    4953              :             // will still be the latest when we're done: we will check generations again at the end of
    4954              :             // this function to handle that.
    4955            0 :             let generations = self.persistence.tenant_generations(tenant_id).await?;
    4956              : 
    4957            0 :             if generations
    4958            0 :                 .iter()
    4959            0 :                 .any(|i| i.generation.is_none() || i.generation_pageserver.is_none())
    4960              :             {
    4961            0 :                 let shard_generations = generations
    4962            0 :                     .into_iter()
    4963            0 :                     .map(|i| (i.tenant_shard_id, (i.generation, i.generation_pageserver)))
    4964            0 :                     .collect::<HashMap<_, _>>();
    4965              : 
    4966              :                 // One or more shards has not been attached to a pageserver.  Check if this is because it's configured
    4967              :                 // to be detached (409: caller should give up), or because it's meant to be attached but isn't yet (503: caller should retry)
    4968            0 :                 let locked = self.inner.read().unwrap();
    4969            0 :                 for (shard_id, shard) in
    4970            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    4971              :                 {
    4972            0 :                     match shard.policy {
    4973              :                         PlacementPolicy::Attached(_) => {
    4974              :                             // This shard is meant to be attached: the caller is not wrong to try and
    4975              :                             // use this function, but we can't service the request right now.
    4976            0 :                             let Some(generation) = shard_generations.get(shard_id) else {
    4977              :                                 // This can only happen if there is a split brain controller modifying the database.  This should
    4978              :                                 // never happen when testing, and if it happens in production we can only log the issue.
    4979            0 :                                 debug_assert!(false);
    4980            0 :                                 tracing::error!(
    4981            0 :                                     "Shard {shard_id} not found in generation state!  Is another rogue controller running?"
    4982              :                                 );
    4983            0 :                                 continue;
    4984              :                             };
    4985            0 :                             let (generation, generation_pageserver) = generation;
    4986            0 :                             if let Some(generation) = generation {
    4987            0 :                                 if generation_pageserver.is_none() {
    4988              :                                     // This is legitimate only in a very narrow window where the shard was only just configured into
    4989              :                                     // Attached mode after being created in Secondary or Detached mode, and it has had its generation
    4990              :                                     // set but not yet had a Reconciler run (reconciler is the only thing that sets generation_pageserver).
    4991            0 :                                     tracing::warn!(
    4992            0 :                                         "Shard {shard_id} generation is set ({generation:?}) but generation_pageserver is None, reconciler not run yet?"
    4993              :                                     );
    4994            0 :                                 }
    4995              :                             } else {
    4996              :                                 // This should never happen: a shard with no generation is only permitted when it was created in some state
    4997              :                                 // other than PlacementPolicy::Attached (and generation is always written to DB before setting Attached in memory)
    4998            0 :                                 debug_assert!(false);
    4999            0 :                                 tracing::error!(
    5000            0 :                                     "Shard {shard_id} generation is None, but it is in PlacementPolicy::Attached mode!"
    5001              :                                 );
    5002            0 :                                 continue;
    5003              :                             }
    5004              :                         }
    5005              :                         PlacementPolicy::Secondary | PlacementPolicy::Detached => {
    5006            0 :                             return Err(ApiError::Conflict(format!(
    5007            0 :                                 "Shard {shard_id} tenant has policy {:?}",
    5008            0 :                                 shard.policy
    5009            0 :                             )));
    5010              :                         }
    5011              :                     }
    5012              :                 }
    5013              : 
    5014            0 :                 return Err(ApiError::ResourceUnavailable(
    5015            0 :                     "One or more shards in tenant is not yet attached".into(),
    5016            0 :                 ));
    5017            0 :             }
    5018              : 
    5019            0 :             let locked = self.inner.read().unwrap();
    5020              :             for ShardGenerationState {
    5021            0 :                 tenant_shard_id,
    5022            0 :                 generation,
    5023            0 :                 generation_pageserver,
    5024            0 :             } in generations
    5025              :             {
    5026            0 :                 let node_id = generation_pageserver.expect("We checked for None above");
    5027            0 :                 let node = locked
    5028            0 :                     .nodes
    5029            0 :                     .get(&node_id)
    5030            0 :                     .ok_or(ApiError::Conflict(format!(
    5031            0 :                         "Raced with removal of node {node_id}"
    5032            0 :                     )))?;
    5033            0 :                 let generation = generation.expect("Checked above");
    5034              : 
    5035            0 :                 let tenant = locked.tenants.get(&tenant_shard_id);
    5036              : 
    5037              :                 // TODO(vlad): Abstract the logic that finds stale attached locations
    5038              :                 // from observed state into a [`Service`] method.
    5039            0 :                 let other_locations = match tenant {
    5040            0 :                     Some(tenant) => {
    5041            0 :                         let mut other = tenant.attached_locations();
    5042            0 :                         let latest_location_index =
    5043            0 :                             other.iter().position(|&l| l == (node.get_id(), generation));
    5044            0 :                         if let Some(idx) = latest_location_index {
    5045            0 :                             other.remove(idx);
    5046            0 :                         }
    5047              : 
    5048            0 :                         other
    5049              :                     }
    5050            0 :                     None => Vec::default(),
    5051              :                 };
    5052              : 
    5053            0 :                 let location = ShardMutationLocations {
    5054            0 :                     latest: MutationLocation {
    5055            0 :                         node: node.clone(),
    5056            0 :                         generation,
    5057            0 :                     },
    5058            0 :                     other: other_locations
    5059            0 :                         .into_iter()
    5060            0 :                         .filter_map(|(node_id, generation)| {
    5061            0 :                             let node = locked.nodes.get(&node_id)?;
    5062              : 
    5063            0 :                             Some(MutationLocation {
    5064            0 :                                 node: node.clone(),
    5065            0 :                                 generation,
    5066            0 :                             })
    5067            0 :                         })
    5068            0 :                         .collect(),
    5069              :                 };
    5070            0 :                 locations.0.insert(tenant_shard_id, location);
    5071              :             }
    5072              : 
    5073            0 :             locations
    5074              :         };
    5075              : 
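                      :         // Run the caller's operation against the snapshot of attached locations gathered
                      :         // above; the generation re-check below verifies that it acted on the latest
                      :         // attachment.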
    5076            0 :         let result = op(mutation_locations.clone()).await;
    5077              : 
    5078              :         // Post-check: are all the generations of all the shards the same as they were initially?  This proves that
    5079              :         // our remote operation executed on the latest generation and is therefore persistent.
    5080              :         {
    5081            0 :             let latest_generations = self.persistence.tenant_generations(tenant_id).await?;
    5082            0 :             if latest_generations
    5083            0 :                 .into_iter()
    5084            0 :                 .map(
    5085              :                     |ShardGenerationState {
    5086              :                          tenant_shard_id,
    5087              :                          generation,
    5088              :                          generation_pageserver: _,
    5089            0 :                      }| (tenant_shard_id, generation),
    5090              :                 )
    5091            0 :                 .collect::<Vec<_>>()
    5092            0 :                 != mutation_locations
    5093            0 :                     .0
    5094            0 :                     .into_iter()
    5095            0 :                     .map(|i| (i.0, Some(i.1.latest.generation)))
    5096            0 :                     .collect::<Vec<_>>()
    5097              :             {
    5098              :                 // We raced with something that incremented the generation, and therefore cannot be
    5099              :                 // confident that our actions are persistent (they might have hit an old generation).
    5100              :                 //
    5101              :                 // This is safe but requires a retry: ask the client to do that by giving them a 503 response.
    5102            0 :                 return Err(ApiError::ResourceUnavailable(
    5103            0 :                     "Tenant attachment changed, please retry".into(),
    5104            0 :                 ));
    5105            0 :             }
    5106              :         }
    5107              : 
    5108            0 :         Ok(result)
    5109            0 :     }
    5110              : 
    5111            0 :     pub(crate) async fn tenant_timeline_delete(
    5112            0 :         self: &Arc<Self>,
    5113            0 :         tenant_id: TenantId,
    5114            0 :         timeline_id: TimelineId,
    5115            0 :     ) -> Result<StatusCode, ApiError> {
    5116            0 :         tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id);
    5117            0 :         let _tenant_lock = trace_shared_lock(
    5118            0 :             &self.tenant_op_locks,
    5119            0 :             tenant_id,
    5120            0 :             TenantOperations::TimelineDelete,
    5121            0 :         )
    5122            0 :         .await;
    5123              : 
    5124            0 :         let status_code = self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    5125            0 :             if targets.0.is_empty() {
    5126            0 :                 return Err(ApiError::NotFound(
    5127            0 :                     anyhow::anyhow!("Tenant not found").into(),
    5128            0 :                 ));
    5129            0 :             }
    5130              : 
    5131            0 :             let (shard_zero_tid, shard_zero_locations) = targets.0.pop_first().expect("Must have at least one shard");
    5132            0 :             assert!(shard_zero_tid.is_shard_zero());
    5133              : 
    5134            0 :             async fn delete_one(
    5135            0 :                 tenant_shard_id: TenantShardId,
    5136            0 :                 timeline_id: TimelineId,
    5137            0 :                 node: Node,
    5138            0 :                 http_client: reqwest::Client,
    5139            0 :                 jwt: Option<String>,
    5140            0 :             ) -> Result<StatusCode, ApiError> {
    5141            0 :                 tracing::info!(
    5142            0 :                     "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    5143              :                 );
    5144              : 
    5145            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    5146            0 :                 let res = client
    5147            0 :                     .timeline_delete(tenant_shard_id, timeline_id)
    5148            0 :                     .await;
    5149              : 
    5150            0 :                 match res {
    5151            0 :                     Ok(ok) => Ok(ok),
    5152            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::CONFLICT, _)) => Ok(StatusCode::CONFLICT),
    5153            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg)) => Err(ApiError::ResourceUnavailable(msg.into())),
    5154            0 :                     Err(e) => {
    5155            0 :                         Err(
    5156            0 :                             ApiError::InternalServerError(anyhow::anyhow!(
    5157            0 :                                 "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
    5158            0 :                             ))
    5159            0 :                         )
    5160              :                     }
    5161              :                 }
    5162            0 :             }
    5163              : 
    5164            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    5165            0 :             let statuses = self
    5166            0 :                 .tenant_for_shards(locations, |tenant_shard_id: TenantShardId, node: Node| {
    5167            0 :                     Box::pin(delete_one(
    5168            0 :                         tenant_shard_id,
    5169            0 :                         timeline_id,
    5170            0 :                         node,
    5171            0 :                         self.http_client.clone(),
    5172            0 :                         self.config.pageserver_jwt_token.clone(),
    5173            0 :                     ))
    5174            0 :                 })
    5175            0 :                 .await?;
    5176              : 
    5177              :             // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero.
    5178              :             // We return 409 (Conflict) if deletion was already in progress on any of the shards
    5179              :             // and 202 (Accepted) if deletion was not already in progress on any of the shards.
    5180            0 :             if statuses.iter().any(|s| s == &StatusCode::CONFLICT) {
    5181            0 :                 return Ok(StatusCode::CONFLICT);
    5182            0 :             }
    5183              : 
    5184            0 :             if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
    5185            0 :                 return Ok(StatusCode::ACCEPTED);
    5186            0 :             }
    5187              : 
    5188              :             // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
    5189              :             // to shard zero, it gives the more intuitive behavior that a GET starts returning 404 only once the deletion is complete.
    5190            0 :             let shard_zero_status = delete_one(
    5191            0 :                 shard_zero_tid,
    5192            0 :                 timeline_id,
    5193            0 :                 shard_zero_locations.latest.node,
    5194            0 :                 self.http_client.clone(),
    5195            0 :                 self.config.pageserver_jwt_token.clone(),
    5196            0 :             )
    5197            0 :             .await?;
    5198            0 :             Ok(shard_zero_status)
    5199            0 :         }).await?;
    5200              : 
    5201            0 :         self.tenant_timeline_delete_safekeepers(tenant_id, timeline_id)
    5202            0 :             .await?;
    5203              : 
    5204            0 :         status_code
    5205            0 :     }
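The per-shard status handling above follows a simple aggregation rule: 409 if deletion was already in progress on any shard > 0, 202 if any of them only just started deleting, and otherwise shard zero is deleted last and its status is returned. A minimal sketch of that rule in isolation, assuming plain `http::StatusCode` values; the function name is illustrative and not part of this source:

use http::StatusCode;

/// Combine deletion statuses reported by shards > 0.
/// `None` means every shard reported NotFound, so the caller should go on to
/// delete shard zero and use its status as the overall result.
fn aggregate_shard_delete_statuses(statuses: &[StatusCode]) -> Option<StatusCode> {
    if statuses.iter().any(|s| *s == StatusCode::CONFLICT) {
        // Deletion was already in progress on at least one shard.
        Some(StatusCode::CONFLICT)
    } else if statuses.iter().any(|s| *s != StatusCode::NOT_FOUND) {
        // At least one shard accepted a fresh deletion request.
        Some(StatusCode::ACCEPTED)
    } else {
        // All shards > 0 are already gone; shard zero decides.
        None
    }
}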
    5206              :     /// Use this when you know the TenantId but not a specific shard, and would like to get the node holding shard 0.
    5207            0 :     pub(crate) async fn tenant_shard0_node(
    5208            0 :         &self,
    5209            0 :         tenant_id: TenantId,
    5210            0 :     ) -> Result<(Node, TenantShardId), ApiError> {
    5211            0 :         let tenant_shard_id = {
    5212            0 :             let locked = self.inner.read().unwrap();
    5213            0 :             let Some((tenant_shard_id, _shard)) = locked
    5214            0 :                 .tenants
    5215            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5216            0 :                 .next()
    5217              :             else {
    5218            0 :                 return Err(ApiError::NotFound(
    5219            0 :                     anyhow::anyhow!("Tenant {tenant_id} not found").into(),
    5220            0 :                 ));
    5221              :             };
    5222              : 
    5223            0 :             *tenant_shard_id
    5224              :         };
    5225              : 
    5226            0 :         self.tenant_shard_node(tenant_shard_id)
    5227            0 :             .await
    5228            0 :             .map(|node| (node, tenant_shard_id))
    5229            0 :     }
    5230              : 
    5231              :     /// When you need to send an HTTP request to the pageserver that holds a shard of a tenant, this
    5232              :     /// function looks up and returns the node. If the shard isn't found, it returns Err(ApiError::NotFound).
    5233            0 :     pub(crate) async fn tenant_shard_node(
    5234            0 :         &self,
    5235            0 :         tenant_shard_id: TenantShardId,
    5236            0 :     ) -> Result<Node, ApiError> {
    5237              :         // Look up in-memory state and maybe use the node from there.
    5238              :         {
    5239            0 :             let locked = self.inner.read().unwrap();
    5240            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    5241            0 :                 return Err(ApiError::NotFound(
    5242            0 :                     anyhow::anyhow!("Tenant shard {tenant_shard_id} not found").into(),
    5243            0 :                 ));
    5244              :             };
    5245              : 
    5246            0 :             let Some(intent_node_id) = shard.intent.get_attached() else {
    5247            0 :                 tracing::warn!(
    5248            0 :                     tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
    5249            0 :                     "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
    5250              :                     shard.policy
    5251              :                 );
    5252            0 :                 return Err(ApiError::Conflict(
    5253            0 :                     "Cannot call timeline API on non-attached tenant".to_string(),
    5254            0 :                 ));
    5255              :             };
    5256              : 
    5257            0 :             if shard.reconciler.is_none() {
    5258              :                 // Optimization: while no reconcile is in flight, we may trust our in-memory state
    5259              :                 // to tell us which pageserver to use. Otherwise we will fall through and hit the database
    5260            0 :                 let Some(node) = locked.nodes.get(intent_node_id) else {
    5261              :                     // This should never happen
    5262            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5263            0 :                         "Shard refers to nonexistent node"
    5264            0 :                     )));
    5265              :                 };
    5266            0 :                 return Ok(node.clone());
    5267            0 :             }
    5268              :         };
    5269              : 
    5270              :         // Look up the latest attached pageserver location from the database
    5271              :         // generation state: this will reflect the progress of any ongoing migration.
    5272              :         // Note that it is not guaranteed to _stay_ here, our caller must still handle
    5273              :         // the case where they call through to the pageserver and get a 404.
    5274            0 :         let db_result = self
    5275            0 :             .persistence
    5276            0 :             .tenant_generations(tenant_shard_id.tenant_id)
    5277            0 :             .await?;
    5278              :         let Some(ShardGenerationState {
    5279              :             tenant_shard_id: _,
    5280              :             generation: _,
    5281            0 :             generation_pageserver: Some(node_id),
    5282            0 :         }) = db_result
    5283            0 :             .into_iter()
    5284            0 :             .find(|s| s.tenant_shard_id == tenant_shard_id)
    5285              :         else {
    5286              :             // This can happen if we raced with a tenant deletion or a shard split.  On a retry
    5287              :             // the caller will either succeed (shard split case), get a proper 404 (deletion case),
    5288              :             // or a conflict response (case where tenant was detached in background)
    5289            0 :             return Err(ApiError::ResourceUnavailable(
    5290            0 :                 format!("Shard {tenant_shard_id} not found in database, or is not attached").into(),
    5291            0 :             ));
    5292              :         };
    5293            0 :         let locked = self.inner.read().unwrap();
    5294            0 :         let Some(node) = locked.nodes.get(&node_id) else {
    5295              :             // This should never happen
    5296            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5297            0 :                 "Shard refers to nonexistent node"
    5298            0 :             )));
    5299              :         };
    5300              : 
    5301            0 :         Ok(node.clone())
    5302            0 :     }
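The function above trusts the in-memory intent only while no reconcile is in flight; otherwise it falls back to the database generation state, which tracks migration progress. A minimal sketch of that decision, using plain u64 node ids and illustrative names rather than the controller's actual types:

/// Where to resolve a shard's currently attached pageserver from.
enum LookupSource {
    /// No reconcile in flight: the in-memory intent is current.
    InMemory(u64),
    /// A reconcile is running: consult the generation record in the database,
    /// which reflects the progress of any ongoing migration.
    Database,
}

fn choose_lookup_source(
    intent_attached: Option<u64>,
    reconcile_in_flight: bool,
) -> Result<LookupSource, &'static str> {
    match intent_attached {
        // Not attached anywhere: pass-through APIs cannot be routed.
        None => Err("shard is not attached"),
        Some(node) if !reconcile_in_flight => Ok(LookupSource::InMemory(node)),
        Some(_) => Ok(LookupSource::Database),
    }
}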
    5303              : 
    5304            0 :     pub(crate) fn tenant_locate(
    5305            0 :         &self,
    5306            0 :         tenant_id: TenantId,
    5307            0 :     ) -> Result<TenantLocateResponse, ApiError> {
    5308            0 :         let locked = self.inner.read().unwrap();
    5309            0 :         tracing::info!("Locating shards for tenant {tenant_id}");
    5310              : 
    5311            0 :         let mut result = Vec::new();
    5312            0 :         let mut shard_params: Option<ShardParameters> = None;
    5313              : 
    5314            0 :         for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5315              :         {
    5316            0 :             let node_id =
    5317            0 :                 shard
    5318            0 :                     .intent
    5319            0 :                     .get_attached()
    5320            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    5321            0 :                         "Cannot locate a tenant that is not attached"
    5322            0 :                     )))?;
    5323              : 
    5324            0 :             let node = locked
    5325            0 :                 .nodes
    5326            0 :                 .get(&node_id)
    5327            0 :                 .expect("Pageservers may not be deleted while referenced");
    5328              : 
    5329            0 :             result.push(node.shard_location(*tenant_shard_id));
    5330              : 
    5331            0 :             match &shard_params {
    5332            0 :                 None => {
    5333            0 :                     shard_params = Some(ShardParameters {
    5334            0 :                         stripe_size: shard.shard.stripe_size,
    5335            0 :                         count: shard.shard.count,
    5336            0 :                     });
    5337            0 :                 }
    5338            0 :                 Some(params) => {
    5339            0 :                     if params.stripe_size != shard.shard.stripe_size {
    5340              :                         // This should never happen.  We enforce at runtime because it's simpler than
    5341              :                         // adding an extra per-tenant data structure to store the things that should be the same
    5342            0 :                         return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5343            0 :                             "Inconsistent shard stripe size parameters!"
    5344            0 :                         )));
    5345            0 :                     }
    5346              :                 }
    5347              :             }
    5348              :         }
    5349              : 
    5350            0 :         if result.is_empty() {
    5351            0 :             return Err(ApiError::NotFound(
    5352            0 :                 anyhow::anyhow!("No shards for this tenant ID found").into(),
    5353            0 :             ));
    5354            0 :         }
    5355            0 :         let shard_params = shard_params.expect("result is non-empty, therefore this is set");
    5356            0 :         tracing::info!(
    5357            0 :             "Located tenant {} with params {:?} on shards {}",
    5358              :             tenant_id,
    5359              :             shard_params,
    5360            0 :             result
    5361            0 :                 .iter()
    5362            0 :                 .map(|s| format!("{s:?}"))
    5363            0 :                 .collect::<Vec<_>>()
    5364            0 :                 .join(",")
    5365              :         );
    5366              : 
    5367            0 :         Ok(TenantLocateResponse {
    5368            0 :             shards: result,
    5369            0 :             shard_params,
    5370            0 :         })
    5371            0 :     }
    5372              : 
    5373              :     /// Returns None if the input iterator of shards does not include a shard with number=0
    5374            0 :     fn tenant_describe_impl<'a>(
    5375            0 :         &self,
    5376            0 :         shards: impl Iterator<Item = &'a TenantShard>,
    5377            0 :     ) -> Option<TenantDescribeResponse> {
    5378            0 :         let mut shard_zero = None;
    5379            0 :         let mut describe_shards = Vec::new();
    5380              : 
    5381            0 :         for shard in shards {
    5382            0 :             if shard.tenant_shard_id.is_shard_zero() {
    5383            0 :                 shard_zero = Some(shard);
    5384            0 :             }
    5385              : 
    5386            0 :             describe_shards.push(TenantDescribeResponseShard {
    5387            0 :                 tenant_shard_id: shard.tenant_shard_id,
    5388            0 :                 node_attached: *shard.intent.get_attached(),
    5389            0 :                 node_secondary: shard.intent.get_secondary().to_vec(),
    5390            0 :                 last_error: shard
    5391            0 :                     .last_error
    5392            0 :                     .lock()
    5393            0 :                     .unwrap()
    5394            0 :                     .as_ref()
    5395            0 :                     .map(|e| format!("{e}"))
    5396            0 :                     .unwrap_or("".to_string())
    5397            0 :                     .clone(),
    5398            0 :                 is_reconciling: shard.reconciler.is_some(),
    5399            0 :                 is_pending_compute_notification: shard.pending_compute_notification,
    5400            0 :                 is_splitting: matches!(shard.splitting, SplitState::Splitting),
    5401            0 :                 is_importing: shard.importing == TimelineImportState::Importing,
    5402            0 :                 scheduling_policy: shard.get_scheduling_policy(),
    5403            0 :                 preferred_az_id: shard.preferred_az().map(ToString::to_string),
    5404              :             })
    5405              :         }
    5406              : 
    5407            0 :         let shard_zero = shard_zero?;
    5408              : 
    5409            0 :         Some(TenantDescribeResponse {
    5410            0 :             tenant_id: shard_zero.tenant_shard_id.tenant_id,
    5411            0 :             shards: describe_shards,
    5412            0 :             stripe_size: shard_zero.shard.stripe_size,
    5413            0 :             policy: shard_zero.policy.clone(),
    5414            0 :             config: shard_zero.config.clone(),
    5415            0 :         })
    5416            0 :     }
    5417              : 
    5418            0 :     pub(crate) fn tenant_describe(
    5419            0 :         &self,
    5420            0 :         tenant_id: TenantId,
    5421            0 :     ) -> Result<TenantDescribeResponse, ApiError> {
    5422            0 :         let locked = self.inner.read().unwrap();
    5423              : 
    5424            0 :         self.tenant_describe_impl(
    5425            0 :             locked
    5426            0 :                 .tenants
    5427            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5428            0 :                 .map(|(_k, v)| v),
    5429              :         )
    5430            0 :         .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
    5431            0 :     }
    5432              : 
    5433              :     /// `limit` and `start_after` are pagination parameters. Since we are walking an in-memory map, `start_after` does not
    5434              :     /// avoid traversing data, it just avoids returning it. This is suitable for our purposes, since our in-memory
    5435              :     /// maps are small enough to traverse quickly; our pagination is just to avoid serializing huge JSON responses
    5436              :     /// in our external API.
    5437            0 :     pub(crate) fn tenant_list(
    5438            0 :         &self,
    5439            0 :         limit: Option<usize>,
    5440            0 :         start_after: Option<TenantId>,
    5441            0 :     ) -> Vec<TenantDescribeResponse> {
    5442            0 :         let locked = self.inner.read().unwrap();
    5443              : 
    5444              :         // Apply start_from parameter
    5445            0 :         let shard_range = match start_after {
    5446            0 :             None => locked.tenants.range(..),
    5447            0 :             Some(tenant_id) => locked.tenants.range(
    5448            0 :                 TenantShardId {
    5449            0 :                     tenant_id,
    5450            0 :                     shard_number: ShardNumber(u8::MAX),
    5451            0 :                     shard_count: ShardCount(u8::MAX),
    5452            0 :                 }..,
    5453              :             ),
    5454              :         };
    5455              : 
    5456            0 :         let mut result = Vec::new();
    5457            0 :         for (_tenant_id, tenant_shards) in &shard_range.group_by(|(id, _shard)| id.tenant_id) {
    5458            0 :             result.push(
    5459            0 :                 self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
    5460            0 :                     .expect("Groups are always non-empty"),
    5461              :             );
    5462              : 
    5463              :             // Enforce `limit` parameter
    5464            0 :             if let Some(limit) = limit {
    5465            0 :                 if result.len() >= limit {
    5466            0 :                     break;
    5467            0 :                 }
    5468            0 :             }
    5469              :         }
    5470              : 
    5471            0 :         result
    5472            0 :     }
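The `start_after` cursor is made effectively exclusive by starting the range at a synthetic key with the maximum shard number and shard count, which sorts at or after every real shard of that tenant. A minimal sketch of the same trick over a BTreeMap keyed by illustrative `(tenant, shard_number, shard_count)` tuples instead of real TenantShardIds, paginating per shard rather than per tenant:

use std::collections::BTreeMap;

fn list_after(
    map: &BTreeMap<(u32, u8, u8), String>,
    start_after: Option<u32>,
    limit: usize,
) -> Vec<String> {
    let start = match start_after {
        // No cursor: start from the smallest possible key.
        None => (0u32, 0u8, 0u8),
        // (u8::MAX, u8::MAX) sorts at or after any real shard of tenant `t`,
        // so the range skips everything belonging to the cursor tenant.
        Some(t) => (t, u8::MAX, u8::MAX),
    };
    map.range(start..).take(limit).map(|(_, v)| v.clone()).collect()
}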
    5473              : 
    5474              :     #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
    5475              :     async fn abort_tenant_shard_split(
    5476              :         &self,
    5477              :         op: &TenantShardSplitAbort,
    5478              :     ) -> Result<(), TenantShardSplitAbortError> {
    5479              :         // Cleaning up a split:
    5480              :         // - Parent shards are not destroyed during a split, just detached.
    5481              :         // - Failed pageserver split API calls can leave the remote node with just the parent attached,
    5482              :         //   just the children attached, or both.
    5483              :         //
    5484              :         // Therefore our work to do is to:
    5485              :         // 1. Clean up storage controller's internal state to just refer to parents, no children
    5486              :         // 2. Call out to pageservers to ensure that children are detached
    5487              :         // 3. Call out to pageservers to ensure that parents are attached.
    5488              :         //
    5489              :         // Crash safety:
    5490              :         // - If the storage controller stops running during this cleanup *after* clearing the splitting state
    5491              :         //   from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
    5492              :         //   and detach them.
    5493              :         // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
    5494              :         //   from our database, then we will re-enter this cleanup routine on startup.
    5495              : 
    5496              :         let TenantShardSplitAbort {
    5497              :             tenant_id,
    5498              :             new_shard_count,
    5499              :             new_stripe_size,
    5500              :             ..
    5501              :         } = op;
    5502              : 
    5503              :         // First abort persistent state, if any exists.
    5504              :         match self
    5505              :             .persistence
    5506              :             .abort_shard_split(*tenant_id, *new_shard_count)
    5507              :             .await?
    5508              :         {
    5509              :             AbortShardSplitStatus::Aborted => {
    5510              :                 // Proceed to roll back any child shards created on pageservers
    5511              :             }
    5512              :             AbortShardSplitStatus::Complete => {
    5513              :                 // The split completed (we might hit that path if e.g. our database transaction
    5514              :                 // to write the completion landed in the database, but we dropped connection
    5515              :                 // before seeing the result).
    5516              :                 //
    5517              :                 // We must update in-memory state to reflect the successful split.
    5518              :                 self.tenant_shard_split_commit_inmem(
    5519              :                     *tenant_id,
    5520              :                     *new_shard_count,
    5521              :                     *new_stripe_size,
    5522              :                 );
    5523              :                 return Ok(());
    5524              :             }
    5525              :         }
    5526              : 
    5527              :         // Clean up in-memory state, and accumulate the list of child locations that need detaching
    5528              :         let detach_locations: Vec<(Node, TenantShardId)> = {
    5529              :             let mut detach_locations = Vec::new();
    5530              :             let mut locked = self.inner.write().unwrap();
    5531              :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5532              : 
    5533              :             for (tenant_shard_id, shard) in
    5534              :                 tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
    5535              :             {
    5536              :                 if shard.shard.count == op.new_shard_count {
    5537              :                     // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
    5538              :                     // is infallible, so if we got an error we shouldn't have got that far.
    5539              :                     tracing::warn!(
    5540              :                         "During split abort, child shard {tenant_shard_id} found in-memory"
    5541              :                     );
    5542              :                     continue;
    5543              :                 }
    5544              : 
    5545              :                 // Add the children of this shard to this list of things to detach
    5546              :                 if let Some(node_id) = shard.intent.get_attached() {
    5547              :                     for child_id in tenant_shard_id.split(*new_shard_count) {
    5548              :                         detach_locations.push((
    5549              :                             nodes
    5550              :                                 .get(node_id)
    5551              :                                 .expect("Intent references nonexistent node")
    5552              :                                 .clone(),
    5553              :                             child_id,
    5554              :                         ));
    5555              :                     }
    5556              :                 } else {
    5557              :                     tracing::warn!(
    5558              :                         "During split abort, shard {tenant_shard_id} has no attached location"
    5559              :                     );
    5560              :                 }
    5561              : 
    5562              :                 tracing::info!("Restoring parent shard {tenant_shard_id}");
    5563              : 
    5564              :                 // Drop any intents that refer to unavailable nodes, to enable this abort to proceed even
    5565              :                 // if the original attachment location is offline.
    5566              :                 if let Some(node_id) = shard.intent.get_attached() {
    5567              :                     if !nodes.get(node_id).unwrap().is_available() {
    5568              :                         tracing::info!(
    5569              :                             "Demoting attached intent for {tenant_shard_id} on unavailable node {node_id}"
    5570              :                         );
    5571              :                         shard.intent.demote_attached(scheduler, *node_id);
    5572              :                     }
    5573              :                 }
    5574              :                 for node_id in shard.intent.get_secondary().clone() {
    5575              :                     if !nodes.get(&node_id).unwrap().is_available() {
    5576              :                         tracing::info!(
    5577              :                             "Dropping secondary intent for {tenant_shard_id} on unavailable node {node_id}"
    5578              :                         );
    5579              :                         shard.intent.remove_secondary(scheduler, node_id);
    5580              :                     }
    5581              :                 }
    5582              : 
    5583              :                 shard.splitting = SplitState::Idle;
    5584              :                 if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
    5585              :                     // If this shard can't be scheduled now (perhaps due to offline nodes or
    5586              :                     // capacity issues), that must not prevent us from rolling back a split.  In this
    5587              :                     // case it should eventually be scheduled in the background.
    5588              :                     tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
    5589              :                 }
    5590              : 
    5591              :                 self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    5592              :             }
    5593              : 
    5594              :             // We don't expect any new_shard_count shards to exist here, but drop them just in case
    5595              :             tenants
    5596            0 :                 .retain(|id, s| !(id.tenant_id == *tenant_id && s.shard.count == *new_shard_count));
    5597              : 
    5598              :             detach_locations
    5599              :         };
    5600              : 
    5601              :         for (node, child_id) in detach_locations {
    5602              :             if !node.is_available() {
    5603              :                 // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
    5604              :                 // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
    5605              :                 // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
    5606              :                 // them from the node.
    5607              :                 tracing::warn!(
    5608              :                     "Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated."
    5609              :                 );
    5610              :                 continue;
    5611              :             }
    5612              : 
    5613              :             // Detach the remote child.  If the pageserver split API call is still in progress, this call will get
    5614              :             // a 503 and retry, up to our limit.
    5615              :             tracing::info!("Detaching {child_id} on {node}...");
    5616              :             match node
    5617              :                 .with_client_retries(
    5618            0 :                     |client| async move {
    5619            0 :                         let config = LocationConfig {
    5620            0 :                             mode: LocationConfigMode::Detached,
    5621            0 :                             generation: None,
    5622            0 :                             secondary_conf: None,
    5623            0 :                             shard_number: child_id.shard_number.0,
    5624            0 :                             shard_count: child_id.shard_count.literal(),
    5625            0 :                             // Stripe size and tenant config don't matter when detaching
    5626            0 :                             shard_stripe_size: 0,
    5627            0 :                             tenant_conf: TenantConfig::default(),
    5628            0 :                         };
    5629              : 
    5630            0 :                         client.location_config(child_id, config, None, false).await
    5631            0 :                     },
    5632              :                     &self.http_client,
    5633              :                     &self.config.pageserver_jwt_token,
    5634              :                     1,
    5635              :                     10,
    5636              :                     Duration::from_secs(5),
    5637              :                     &self.reconcilers_cancel,
    5638              :                 )
    5639              :                 .await
    5640              :             {
    5641              :                 Some(Ok(_)) => {}
    5642              :                 Some(Err(e)) => {
    5643              :                     // We failed to communicate with the remote node.  This is problematic: we may be
    5644              :                     // leaving it with a rogue child shard.
    5645              :                     tracing::warn!(
    5646              :                         "Failed to detach child {child_id} from node {node} during abort"
    5647              :                     );
    5648              :                     return Err(e.into());
    5649              :                 }
    5650              :                 None => {
    5651              :                     // Cancellation: we were shutdown or the node went offline. Shutdown is fine, we'll
    5652              :                     // clean up on restart. The node going offline requires a retry.
    5653              :                     return Err(TenantShardSplitAbortError::Unavailable);
    5654              :                 }
    5655              :             };
    5656              :         }
    5657              : 
    5658              :         tracing::info!("Successfully aborted split");
    5659              :         Ok(())
    5660              :     }
    5661              : 
    5662              :     /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
    5663              :     /// of the tenant map to reflect the child shards that exist after the split.
    5664            0 :     fn tenant_shard_split_commit_inmem(
    5665            0 :         &self,
    5666            0 :         tenant_id: TenantId,
    5667            0 :         new_shard_count: ShardCount,
    5668            0 :         new_stripe_size: Option<ShardStripeSize>,
    5669            0 :     ) -> (
    5670            0 :         TenantShardSplitResponse,
    5671            0 :         Vec<(TenantShardId, NodeId, ShardStripeSize)>,
    5672            0 :         Vec<ReconcilerWaiter>,
    5673            0 :     ) {
    5674            0 :         let mut response = TenantShardSplitResponse {
    5675            0 :             new_shards: Vec::new(),
    5676            0 :         };
    5677            0 :         let mut child_locations = Vec::new();
    5678            0 :         let mut waiters = Vec::new();
    5679              : 
    5680              :         {
    5681            0 :             let mut locked = self.inner.write().unwrap();
    5682              : 
    5683            0 :             let parent_ids = locked
    5684            0 :                 .tenants
    5685            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5686            0 :                 .map(|(shard_id, _)| *shard_id)
    5687            0 :                 .collect::<Vec<_>>();
    5688              : 
    5689            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5690            0 :             for parent_id in parent_ids {
    5691            0 :                 let child_ids = parent_id.split(new_shard_count);
    5692              : 
    5693              :                 let (
    5694            0 :                     pageserver,
    5695            0 :                     generation,
    5696            0 :                     policy,
    5697            0 :                     parent_ident,
    5698            0 :                     config,
    5699            0 :                     preferred_az,
    5700            0 :                     secondary_count,
    5701              :                 ) = {
    5702            0 :                     let mut old_state = tenants
    5703            0 :                         .remove(&parent_id)
    5704            0 :                         .expect("It was present, we just split it");
    5705              : 
    5706              :                     // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
    5707              :                     // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
    5708              :                     // nothing else can clear this.
    5709            0 :                     assert!(matches!(old_state.splitting, SplitState::Splitting));
    5710              : 
    5711            0 :                     let old_attached = old_state.intent.get_attached().unwrap();
    5712            0 :                     old_state.intent.clear(scheduler);
    5713            0 :                     let generation = old_state.generation.expect("Shard must have been attached");
    5714            0 :                     (
    5715            0 :                         old_attached,
    5716            0 :                         generation,
    5717            0 :                         old_state.policy.clone(),
    5718            0 :                         old_state.shard,
    5719            0 :                         old_state.config.clone(),
    5720            0 :                         old_state.preferred_az().cloned(),
    5721            0 :                         old_state.intent.get_secondary().len(),
    5722            0 :                     )
    5723              :                 };
    5724              : 
    5725            0 :                 let mut schedule_context = ScheduleContext::default();
    5726            0 :                 for child in child_ids {
    5727            0 :                     let mut child_shard = parent_ident;
    5728            0 :                     child_shard.number = child.shard_number;
    5729            0 :                     child_shard.count = child.shard_count;
    5730            0 :                     if let Some(stripe_size) = new_stripe_size {
    5731            0 :                         child_shard.stripe_size = stripe_size;
    5732            0 :                     }
    5733              : 
    5734            0 :                     let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
    5735            0 :                     child_observed.insert(
    5736            0 :                         pageserver,
    5737            0 :                         ObservedStateLocation {
    5738            0 :                             conf: Some(attached_location_conf(
    5739            0 :                                 generation,
    5740            0 :                                 &child_shard,
    5741            0 :                                 &config,
    5742            0 :                                 &policy,
    5743            0 :                                 secondary_count,
    5744            0 :                             )),
    5745            0 :                         },
    5746              :                     );
    5747              : 
    5748            0 :                     let mut child_state =
    5749            0 :                         TenantShard::new(child, child_shard, policy.clone(), preferred_az.clone());
    5750            0 :                     child_state.intent =
    5751            0 :                         IntentState::single(scheduler, Some(pageserver), preferred_az.clone());
    5752            0 :                     child_state.observed = ObservedState {
    5753            0 :                         locations: child_observed,
    5754            0 :                     };
    5755            0 :                     child_state.generation = Some(generation);
    5756            0 :                     child_state.config = config.clone();
    5757              : 
    5758              :                     // The child's TenantShard::splitting is intentionally left at the default value of Idle,
    5759              :                     // as at this point in the split process we have succeeded and this part is infallible:
    5760              :                     // we will never need to do any special recovery from this state.
    5761              : 
    5762            0 :                     child_locations.push((child, pageserver, child_shard.stripe_size));
    5763              : 
    5764            0 :                     if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
    5765              :                         // This is not fatal, because we've implicitly already got an attached
    5766              :                         // location for the child shard.  Failure here just means we couldn't
    5767              :                         // find a secondary (e.g. because cluster is overloaded).
    5768            0 :                         tracing::warn!("Failed to schedule child shard {child}: {e}");
    5769            0 :                     }
    5770              :                     // In the background, attach secondary locations for the new shards
    5771            0 :                     if let Some(waiter) = self.maybe_reconcile_shard(
    5772            0 :                         &mut child_state,
    5773            0 :                         nodes,
    5774            0 :                         ReconcilerPriority::High,
    5775            0 :                     ) {
    5776            0 :                         waiters.push(waiter);
    5777            0 :                     }
    5778              : 
    5779            0 :                     tenants.insert(child, child_state);
    5780            0 :                     response.new_shards.push(child);
    5781              :                 }
    5782              :             }
    5783            0 :             (response, child_locations, waiters)
    5784              :         }
    5785            0 :     }
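Within the loop above, each child shard's identity is just a copy of the parent's with the number, count, and (optionally) stripe size overridden. A minimal sketch with a simplified identity struct; the field names mirror the code above, but the type and function are illustrative only:

#[derive(Clone, Copy, Debug, PartialEq)]
struct SimpleShardIdentity {
    number: u8,
    count: u8,
    stripe_size: u32,
}

fn child_identities(
    parent: SimpleShardIdentity,
    child_numbers: &[u8],
    new_count: u8,
    new_stripe_size: Option<u32>,
) -> Vec<SimpleShardIdentity> {
    child_numbers
        .iter()
        .map(|&number| {
            let mut child = parent; // inherit everything from the parent
            child.number = number;
            child.count = new_count;
            if let Some(stripe_size) = new_stripe_size {
                child.stripe_size = stripe_size;
            }
            child
        })
        .collect()
}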
    5786              : 
    5787            0 :     async fn tenant_shard_split_start_secondaries(
    5788            0 :         &self,
    5789            0 :         tenant_id: TenantId,
    5790            0 :         waiters: Vec<ReconcilerWaiter>,
    5791            0 :     ) {
    5792              :         // Wait for initial reconcile of child shards, this creates the secondary locations
    5793            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    5794              :             // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
    5795              :             // their secondaries couldn't be attached.
    5796            0 :             tracing::warn!("Failed to reconcile after split: {e}");
    5797            0 :             return;
    5798            0 :         }
    5799              : 
    5800              :         // Take the state lock to discover the attached & secondary intents for all shards
    5801            0 :         let (attached, secondary) = {
    5802            0 :             let locked = self.inner.read().unwrap();
    5803            0 :             let mut attached = Vec::new();
    5804            0 :             let mut secondary = Vec::new();
    5805              : 
    5806            0 :             for (tenant_shard_id, shard) in
    5807            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5808              :             {
    5809            0 :                 let Some(node_id) = shard.intent.get_attached() else {
    5810              :                     // Unexpected.  Race with a PlacementPolicy change?
    5811            0 :                     tracing::warn!(
    5812            0 :                         "No attached node on {tenant_shard_id} immediately after shard split!"
    5813              :                     );
    5814            0 :                     continue;
    5815              :                 };
    5816              : 
    5817            0 :                 let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
    5818              :                     // No secondary location.  Nothing for us to do.
    5819            0 :                     continue;
    5820              :                 };
    5821              : 
    5822            0 :                 let attached_node = locked
    5823            0 :                     .nodes
    5824            0 :                     .get(node_id)
    5825            0 :                     .expect("Pageservers may not be deleted while referenced");
    5826              : 
    5827            0 :                 let secondary_node = locked
    5828            0 :                     .nodes
    5829            0 :                     .get(secondary_node_id)
    5830            0 :                     .expect("Pageservers may not be deleted while referenced");
    5831              : 
    5832            0 :                 attached.push((*tenant_shard_id, attached_node.clone()));
    5833            0 :                 secondary.push((*tenant_shard_id, secondary_node.clone()));
    5834              :             }
    5835            0 :             (attached, secondary)
    5836              :         };
    5837              : 
    5838            0 :         if secondary.is_empty() {
    5839              :             // No secondary locations; nothing for us to do
    5840            0 :             return;
    5841            0 :         }
    5842              : 
    5843            0 :         for result in self
    5844            0 :             .tenant_for_shards_api(
    5845            0 :                 attached,
    5846            0 :                 |tenant_shard_id, client| async move {
    5847            0 :                     client.tenant_heatmap_upload(tenant_shard_id).await
    5848            0 :                 },
    5849              :                 1,
    5850              :                 1,
    5851              :                 SHORT_RECONCILE_TIMEOUT,
    5852            0 :                 &self.cancel,
    5853              :             )
    5854            0 :             .await
    5855              :         {
    5856            0 :             if let Err(e) = result {
    5857            0 :                 tracing::warn!("Error calling heatmap upload after shard split: {e}");
    5858            0 :                 return;
    5859            0 :             }
    5860              :         }
    5861              : 
    5862            0 :         for result in self
    5863            0 :             .tenant_for_shards_api(
    5864            0 :                 secondary,
    5865            0 :                 |tenant_shard_id, client| async move {
    5866            0 :                     client
    5867            0 :                         .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
    5868            0 :                         .await
    5869            0 :                 },
    5870              :                 1,
    5871              :                 1,
    5872              :                 SHORT_RECONCILE_TIMEOUT,
    5873            0 :                 &self.cancel,
    5874              :             )
    5875            0 :             .await
    5876              :         {
    5877            0 :             if let Err(e) = result {
    5878            0 :                 tracing::warn!("Error calling secondary download after shard split: {e}");
    5879            0 :                 return;
    5880            0 :             }
    5881              :         }
    5882            0 :     }
    5883              : 
    5884            0 :     pub(crate) async fn tenant_shard_split(
    5885            0 :         &self,
    5886            0 :         tenant_id: TenantId,
    5887            0 :         split_req: TenantShardSplitRequest,
    5888            0 :     ) -> Result<TenantShardSplitResponse, ApiError> {
    5889              :         // TODO: return 503 if we get stuck waiting for this lock
    5890              :         // (issue https://github.com/neondatabase/neon/issues/7108)
    5891            0 :         let _tenant_lock = trace_exclusive_lock(
    5892            0 :             &self.tenant_op_locks,
    5893            0 :             tenant_id,
    5894            0 :             TenantOperations::ShardSplit,
    5895            0 :         )
    5896            0 :         .await;
    5897              : 
    5898            0 :         let _gate = self
    5899            0 :             .reconcilers_gate
    5900            0 :             .enter()
    5901            0 :             .map_err(|_| ApiError::ShuttingDown)?;
    5902              : 
    5903              :         // Timeline imports on the pageserver side can't handle shard-splits.
    5904              :         // If the tenant is importing a timeline, don't shard split it.
    5905            0 :         match self
    5906            0 :             .persistence
    5907            0 :             .is_tenant_importing_timeline(tenant_id)
    5908            0 :             .await
    5909              :         {
    5910            0 :             Ok(importing) => {
    5911            0 :                 if importing {
    5912            0 :                     return Err(ApiError::Conflict(
    5913            0 :                         "Cannot shard split during timeline import".to_string(),
    5914            0 :                     ));
    5915            0 :                 }
    5916              :             }
    5917            0 :             Err(err) => {
    5918            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5919            0 :                     "Failed to check for running imports: {err}"
    5920            0 :                 )));
    5921              :             }
    5922              :         }
    5923              : 
    5924            0 :         let new_shard_count = ShardCount::new(split_req.new_shard_count);
    5925            0 :         let new_stripe_size = split_req.new_stripe_size;
    5926              : 
    5927              :         // Validate the request and construct parameters.  This phase is fallible, but does not require
    5928              :         // rollback on errors, as it does no I/O and mutates no state.
    5929            0 :         let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
    5930            0 :             ShardSplitAction::NoOp(resp) => return Ok(resp),
    5931            0 :             ShardSplitAction::Split(params) => params,
    5932              :         };
    5933              : 
    5934              :         // Execute this split: this phase mutates state and does remote I/O on pageservers.  If it fails,
    5935              :         // we must roll back.
    5936            0 :         let r = self
    5937            0 :             .do_tenant_shard_split(tenant_id, shard_split_params)
    5938            0 :             .await;
    5939              : 
    5940            0 :         let (response, waiters) = match r {
    5941            0 :             Ok(r) => r,
    5942            0 :             Err(e) => {
    5943              :                 // Split might be part-done, we must do work to abort it.
    5944            0 :                 tracing::warn!("Enqueuing background abort of split on {tenant_id}");
    5945            0 :                 self.abort_tx
    5946            0 :                     .send(TenantShardSplitAbort {
    5947            0 :                         tenant_id,
    5948            0 :                         new_shard_count,
    5949            0 :                         new_stripe_size,
    5950            0 :                         _tenant_lock,
    5951            0 :                         _gate,
    5952            0 :                     })
    5953              :                     // Ignore error sending: that just means we're shutting down: aborts are ephemeral so it's fine to drop it.
    5954            0 :                     .ok();
    5955            0 :                 return Err(e);
    5956              :             }
    5957              :         };
    5958              : 
    5959              :         // The split is now complete.  As an optimization, we will trigger all the child shards to upload
    5960              :         // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
    5961              :         // for the background heatmap/download interval before secondaries get warm enough to migrate shards
    5962              :         // in [`Self::optimize_all`]
    5963            0 :         self.tenant_shard_split_start_secondaries(tenant_id, waiters)
    5964            0 :             .await;
    5965            0 :         Ok(response)
    5966            0 :     }
    5967              : 
    5968            0 :     fn prepare_tenant_shard_split(
    5969            0 :         &self,
    5970            0 :         tenant_id: TenantId,
    5971            0 :         split_req: TenantShardSplitRequest,
    5972            0 :     ) -> Result<ShardSplitAction, ApiError> {
    5973            0 :         fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
    5974            0 :             anyhow::anyhow!("failpoint")
    5975            0 :         )));
    5976              : 
    5977            0 :         let mut policy = None;
    5978            0 :         let mut config = None;
    5979            0 :         let mut shard_ident = None;
    5980            0 :         let mut preferred_az_id = None;
    5981              :         // Validate input, and calculate which shards we will create
    5982            0 :         let (old_shard_count, targets) =
    5983              :             {
    5984            0 :                 let locked = self.inner.read().unwrap();
    5985              : 
    5986            0 :                 let pageservers = locked.nodes.clone();
    5987              : 
    5988            0 :                 let mut targets = Vec::new();
    5989              : 
    5990              :                 // In case this is a retry, count how many already-split shards we found
    5991            0 :                 let mut children_found = Vec::new();
    5992            0 :                 let mut old_shard_count = None;
    5993              : 
    5994            0 :                 for (tenant_shard_id, shard) in
    5995            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5996              :                 {
    5997            0 :                     match shard.shard.count.count().cmp(&split_req.new_shard_count) {
    5998              :                         Ordering::Equal => {
    5999              :                             //  Already split this
    6000            0 :                             children_found.push(*tenant_shard_id);
    6001            0 :                             continue;
    6002              :                         }
    6003              :                         Ordering::Greater => {
    6004            0 :                             return Err(ApiError::BadRequest(anyhow::anyhow!(
    6005            0 :                                 "Requested count {} but already have shards at count {}",
    6006            0 :                                 split_req.new_shard_count,
    6007            0 :                                 shard.shard.count.count()
    6008            0 :                             )));
    6009              :                         }
    6010            0 :                         Ordering::Less => {
    6011            0 :                             // Fall through: this shard has a lower count than requested,
    6012            0 :                             // so it is a candidate for splitting.
    6013            0 :                         }
    6014              :                     }
    6015              : 
    6016            0 :                     match old_shard_count {
    6017            0 :                         None => old_shard_count = Some(shard.shard.count),
    6018            0 :                         Some(old_shard_count) => {
    6019            0 :                             if old_shard_count != shard.shard.count {
    6020              :                                 // We may hit this case if a caller asked for two splits to
    6021              :                                 // different sizes, before the first one is complete.
    6022              :                                 // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
    6023              :                                 // of shard_count=1 and shard_count=2 shards in the map.
    6024            0 :                                 return Err(ApiError::Conflict(
    6025            0 :                                     "Cannot split, currently mid-split".to_string(),
    6026            0 :                                 ));
    6027            0 :                             }
    6028              :                         }
    6029              :                     }
    6030            0 :                     if policy.is_none() {
    6031            0 :                         policy = Some(shard.policy.clone());
    6032            0 :                     }
    6033            0 :                     if shard_ident.is_none() {
    6034            0 :                         shard_ident = Some(shard.shard);
    6035            0 :                     }
    6036            0 :                     if config.is_none() {
    6037            0 :                         config = Some(shard.config.clone());
    6038            0 :                     }
    6039            0 :                     if preferred_az_id.is_none() {
    6040            0 :                         preferred_az_id = shard.preferred_az().cloned();
    6041            0 :                     }
    6042              : 
    6043            0 :                     if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
    6044            0 :                         tracing::info!(
    6045            0 :                             "Tenant shard {} already has shard count {}",
    6046              :                             tenant_shard_id,
    6047              :                             split_req.new_shard_count
    6048              :                         );
    6049            0 :                         continue;
    6050            0 :                     }
    6051              : 
    6052            0 :                     let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
    6053            0 :                         anyhow::anyhow!("Cannot split a tenant that is not attached"),
    6054            0 :                     ))?;
    6055              : 
    6056            0 :                     let node = pageservers
    6057            0 :                         .get(&node_id)
    6058            0 :                         .expect("Pageservers may not be deleted while referenced");
    6059              : 
    6060            0 :                     targets.push(ShardSplitTarget {
    6061            0 :                         parent_id: *tenant_shard_id,
    6062            0 :                         node: node.clone(),
    6063            0 :                         child_ids: tenant_shard_id
    6064            0 :                             .split(ShardCount::new(split_req.new_shard_count)),
    6065            0 :                     });
    6066              :                 }
    6067              : 
    6068            0 :                 if targets.is_empty() {
    6069            0 :                     if children_found.len() == split_req.new_shard_count as usize {
    6070            0 :                         return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
    6071            0 :                             new_shards: children_found,
    6072            0 :                         }));
    6073              :                     } else {
    6074              :                         // No shards found to split, and no existing children found: the
    6075              :                         // tenant doesn't exist at all.
    6076            0 :                         return Err(ApiError::NotFound(
    6077            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    6078            0 :                         ));
    6079              :                     }
    6080            0 :                 }
    6081              : 
    6082            0 :                 (old_shard_count, targets)
    6083              :             };
    6084              : 
    6085              :         // unwrap safety: we would have returned above if we didn't find at least one shard to split
    6086            0 :         let old_shard_count = old_shard_count.unwrap();
    6087            0 :         let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
    6088              :             // This ShardIdentity will be used as the template for all children, so this implicitly
    6089              :             // applies the new stripe size to the children.
    6090            0 :             let mut shard_ident = shard_ident.unwrap();
    6091            0 :             if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
    6092            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6093            0 :                     "Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards",
    6094            0 :                     shard_ident.stripe_size
    6095            0 :                 )));
    6096            0 :             }
    6097              : 
    6098            0 :             shard_ident.stripe_size = new_stripe_size;
    6099            0 :             tracing::info!("applied stripe size {}", shard_ident.stripe_size.0);
    6100            0 :             shard_ident
    6101              :         } else {
    6102            0 :             shard_ident.unwrap()
    6103              :         };
    6104            0 :         let policy = policy.unwrap();
    6105            0 :         let config = config.unwrap();
    6106              : 
    6107            0 :         Ok(ShardSplitAction::Split(Box::new(ShardSplitParams {
    6108            0 :             old_shard_count,
    6109            0 :             new_shard_count: ShardCount::new(split_req.new_shard_count),
    6110            0 :             new_stripe_size: split_req.new_stripe_size,
    6111            0 :             targets,
    6112            0 :             policy,
    6113            0 :             config,
    6114            0 :             shard_ident,
    6115            0 :             preferred_az_id,
    6116            0 :         })))
    6117            0 :     }
    6118              : 
    6119            0 :     async fn do_tenant_shard_split(
    6120            0 :         &self,
    6121            0 :         tenant_id: TenantId,
    6122            0 :         params: Box<ShardSplitParams>,
    6123            0 :     ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
    6124              :         // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
    6125              :         // request could occur here, deleting or mutating the tenant.  begin_shard_split checks that the
    6126              :         // parent shards exist as expected, but it would be neater to do the above pre-checks within the
    6127              :         // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
    6128              :         // (https://github.com/neondatabase/neon/issues/6676)
    6129              : 
    6130              :         let ShardSplitParams {
    6131            0 :             old_shard_count,
    6132            0 :             new_shard_count,
    6133            0 :             new_stripe_size,
    6134            0 :             mut targets,
    6135            0 :             policy,
    6136            0 :             config,
    6137            0 :             shard_ident,
    6138            0 :             preferred_az_id,
    6139            0 :         } = *params;
    6140              : 
    6141              :         // Drop any secondary locations: pageservers do not support splitting these, and in any case the
    6142              :         // end-state for a split tenant will usually be to have secondary locations on different nodes.
    6143              :         // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
    6144              :         // at the time of split.
    6145            0 :         let waiters = {
    6146            0 :             let mut locked = self.inner.write().unwrap();
    6147            0 :             let mut waiters = Vec::new();
    6148            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6149            0 :             for target in &mut targets {
    6150            0 :                 let Some(shard) = tenants.get_mut(&target.parent_id) else {
    6151              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    6152            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6153            0 :                         "Shard {} not found",
    6154            0 :                         target.parent_id
    6155            0 :                     )));
    6156              :                 };
    6157              : 
    6158            0 :                 if shard.intent.get_attached() != &Some(target.node.get_id()) {
    6159              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    6160            0 :                     return Err(ApiError::Conflict(format!(
    6161            0 :                         "Shard {} unexpectedly rescheduled during split",
    6162            0 :                         target.parent_id
    6163            0 :                     )));
    6164            0 :                 }
    6165              : 
    6166              :                 // Irrespective of PlacementPolicy, clear secondary locations from intent
    6167            0 :                 shard.intent.clear_secondary(scheduler);
    6168              : 
    6169              :                 // Run Reconciler to execute detach of secondary locations.
    6170            0 :                 if let Some(waiter) =
    6171            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    6172            0 :                 {
    6173            0 :                     waiters.push(waiter);
    6174            0 :                 }
    6175              :             }
    6176            0 :             waiters
    6177              :         };
    6178            0 :         self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
    6179              : 
    6180              :         // Before creating any new child shards in memory or on the pageservers, persist them: this
    6181              :         // enables us to ensure that we will always be able to clean up if something goes wrong.  This also
    6182              :         // acts as the protection against two concurrent attempts to split: one of them will get a database
    6183              :         // error trying to insert the child shards.
    6184            0 :         let mut child_tsps = Vec::new();
    6185            0 :         for target in &targets {
    6186            0 :             let mut this_child_tsps = Vec::new();
    6187            0 :             for child in &target.child_ids {
    6188            0 :                 let mut child_shard = shard_ident;
    6189            0 :                 child_shard.number = child.shard_number;
    6190            0 :                 child_shard.count = child.shard_count;
    6191              : 
    6192            0 :                 tracing::info!(
    6193            0 :                     "Create child shard persistence with stripe size {}",
    6194              :                     shard_ident.stripe_size.0
    6195              :                 );
    6196              : 
    6197            0 :                 this_child_tsps.push(TenantShardPersistence {
    6198            0 :                     tenant_id: child.tenant_id.to_string(),
    6199            0 :                     shard_number: child.shard_number.0 as i32,
    6200            0 :                     shard_count: child.shard_count.literal() as i32,
    6201            0 :                     shard_stripe_size: shard_ident.stripe_size.0 as i32,
    6202              :                     // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
    6203              :                     // populate the correct generation as part of its transaction, to protect us
    6204              :                     // against racing with changes in the state of the parent.
    6205            0 :                     generation: None,
    6206            0 :                     generation_pageserver: Some(target.node.get_id().0 as i64),
    6207            0 :                     placement_policy: serde_json::to_string(&policy).unwrap(),
    6208            0 :                     config: serde_json::to_string(&config).unwrap(),
    6209            0 :                     splitting: SplitState::Splitting,
    6210              : 
    6211              :                     // Scheduling policies and preferred AZ do not carry through to children
    6212            0 :                     scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    6213            0 :                         .unwrap(),
    6214            0 :                     preferred_az_id: preferred_az_id.as_ref().map(|az| az.0.clone()),
    6215              :                 });
    6216              :             }
    6217              : 
    6218            0 :             child_tsps.push((target.parent_id, this_child_tsps));
    6219              :         }
    6220              : 
    6221            0 :         if let Err(e) = self
    6222            0 :             .persistence
    6223            0 :             .begin_shard_split(old_shard_count, tenant_id, child_tsps)
    6224            0 :             .await
    6225              :         {
    6226            0 :             match e {
    6227              :                 DatabaseError::Query(diesel::result::Error::DatabaseError(
    6228              :                     DatabaseErrorKind::UniqueViolation,
    6229              :                     _,
    6230              :                 )) => {
    6231              :                     // Inserting a child shard violated a unique constraint: we raced with another call to
    6232              :                     // this function
    6233            0 :                     tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
    6234            0 :                     return Err(ApiError::Conflict("Tenant is already splitting".into()));
    6235              :                 }
    6236            0 :                 _ => return Err(ApiError::InternalServerError(e.into())),
    6237              :             }
    6238            0 :         }
    6239            0 :         fail::fail_point!("shard-split-post-begin", |_| Err(
    6240            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    6241              :         ));
    6242              : 
    6243              :         // Now that I have persisted the splitting state, apply it in-memory.  This is infallible, so
    6244              :         // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
    6245              :         // is not set in memory, then it was not persisted.
    6246              :         {
    6247            0 :             let mut locked = self.inner.write().unwrap();
    6248            0 :             for target in &targets {
    6249            0 :                 if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
    6250            0 :                     parent_shard.splitting = SplitState::Splitting;
    6251            0 :                     // Set the observed state to None, to reflect that it is indeterminate once we start the
    6252            0 :                     // split operation.
    6253            0 :                     parent_shard
    6254            0 :                         .observed
    6255            0 :                         .locations
    6256            0 :                         .insert(target.node.get_id(), ObservedStateLocation { conf: None });
    6257            0 :                 }
    6258              :             }
    6259              :         }
    6260              : 
    6261              :         // TODO: issue split calls concurrently (this only matters once we're splitting
    6262              :         // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
    6263              : 
    6264            0 :         for target in &targets {
    6265              :             let ShardSplitTarget {
    6266            0 :                 parent_id,
    6267            0 :                 node,
    6268            0 :                 child_ids,
    6269            0 :             } = target;
    6270            0 :             let client = PageserverClient::new(
    6271            0 :                 node.get_id(),
    6272            0 :                 self.http_client.clone(),
    6273            0 :                 node.base_url(),
    6274            0 :                 self.config.pageserver_jwt_token.as_deref(),
    6275              :             );
    6276            0 :             let response = client
    6277            0 :                 .tenant_shard_split(
    6278            0 :                     *parent_id,
    6279            0 :                     TenantShardSplitRequest {
    6280            0 :                         new_shard_count: new_shard_count.literal(),
    6281            0 :                         new_stripe_size,
    6282            0 :                     },
    6283            0 :                 )
    6284            0 :                 .await
    6285            0 :                 .map_err(|e| ApiError::Conflict(format!("Failed to split {parent_id}: {e}")))?;
    6286              : 
    6287            0 :             fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
    6288            0 :                 "failpoint".to_string()
    6289            0 :             )));
    6290              : 
    6291            0 :             failpoint_support::sleep_millis_async!(
    6292              :                 "shard-split-post-remote-sleep",
    6293            0 :                 &self.reconcilers_cancel
    6294              :             );
    6295              : 
    6296            0 :             tracing::info!(
    6297            0 :                 "Split {} into {}",
    6298              :                 parent_id,
    6299            0 :                 response
    6300            0 :                     .new_shards
    6301            0 :                     .iter()
    6302            0 :                     .map(|s| format!("{s:?}"))
    6303            0 :                     .collect::<Vec<_>>()
    6304            0 :                     .join(",")
    6305              :             );
    6306              : 
    6307            0 :             if &response.new_shards != child_ids {
    6308              :                 // This should never happen: the pageserver should agree with us on how shard splits work.
    6309            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6310            0 :                     "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
    6311            0 :                     parent_id,
    6312            0 :                     response.new_shards,
    6313            0 :                     child_ids
    6314            0 :                 )));
    6315            0 :             }
    6316              :         }
    6317              : 
    6318            0 :         fail::fail_point!("shard-split-pre-complete", |_| Err(ApiError::Conflict(
    6319            0 :             "failpoint".to_string()
    6320            0 :         )));
    6321              : 
    6322            0 :         pausable_failpoint!("shard-split-pre-complete-pause");
    6323              : 
    6324              :         // TODO: if the pageserver restarted concurrently with our split API call,
    6325              :         // the actual generation of the child shard might differ from the generation
    6326              :         // we expect it to have.  In order for our in-database generation to end up
    6327              :         // correct, we should carry the child generation back in the response and apply it here
    6328              :         // in complete_shard_split (and apply the correct generation in memory)
    6329              :         // (or, we can carry generation in the request and reject the request if
    6330              :         //  it doesn't match, but that requires more retry logic on this side)
    6331              : 
    6332            0 :         self.persistence
    6333            0 :             .complete_shard_split(tenant_id, old_shard_count, new_shard_count)
    6334            0 :             .await?;
    6335              : 
    6336            0 :         fail::fail_point!("shard-split-post-complete", |_| Err(
    6337            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    6338              :         ));
    6339              : 
    6340              :         // Replace all the shards we just split with their children: this phase is infallible.
    6341            0 :         let (response, child_locations, waiters) =
    6342            0 :             self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
    6343              : 
    6344              :         // Notify all pageservers to detach and clean up the old shards because they will no longer
    6345              :         // be needed. This is best-effort: if it fails, it will be cleaned up on a subsequent
    6346              :         // Pageserver re-attach/startup.
    6347            0 :         let shards_to_cleanup = targets
    6348            0 :             .iter()
    6349            0 :             .map(|target| (target.parent_id, target.node.get_id()))
    6350            0 :             .collect();
    6351            0 :         self.cleanup_locations(shards_to_cleanup).await;
    6352              : 
    6353              :         // Send compute notifications for all the new shards
    6354            0 :         let mut failed_notifications = Vec::new();
    6355            0 :         for (child_id, child_ps, stripe_size) in child_locations {
    6356            0 :             if let Err(e) = self
    6357            0 :                 .compute_hook
    6358            0 :                 .notify_attach(
    6359            0 :                     compute_hook::ShardUpdate {
    6360            0 :                         tenant_shard_id: child_id,
    6361            0 :                         node_id: child_ps,
    6362            0 :                         stripe_size,
    6363            0 :                         preferred_az: preferred_az_id.as_ref().map(Cow::Borrowed),
    6364            0 :                     },
    6365            0 :                     &self.reconcilers_cancel,
    6366            0 :                 )
    6367            0 :                 .await
    6368              :             {
    6369            0 :                 tracing::warn!(
    6370            0 :                     "Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
    6371              :                     child_id,
    6372              :                     child_ps
    6373              :                 );
    6374            0 :                 failed_notifications.push(child_id);
    6375            0 :             }
    6376              :         }
    6377              : 
    6378              :         // If we failed any compute notifications, make a note to retry later.
    6379            0 :         if !failed_notifications.is_empty() {
    6380            0 :             let mut locked = self.inner.write().unwrap();
    6381            0 :             for failed in failed_notifications {
    6382            0 :                 if let Some(shard) = locked.tenants.get_mut(&failed) {
    6383            0 :                     shard.pending_compute_notification = true;
    6384            0 :                 }
    6385              :             }
    6386            0 :         }
    6387              : 
    6388            0 :         Ok((response, waiters))
    6389            0 :     }
    6390              : 
    6391              :     /// A graceful migration: update the preferred node and let optimisation handle the migration
    6392              :     /// in the background (may take a long time as it will fully warm up a location before cutting over)
    6393              :     ///
    6394              :     /// Our external API calls this a 'prewarm=true' migration, but internally it isn't a special prewarm step: it's
    6395              :     /// just a migration that uses the same graceful procedure as our background scheduling optimisations would use.
    6396            0 :     fn tenant_shard_migrate_with_prewarm(
    6397            0 :         &self,
    6398            0 :         migrate_req: &TenantShardMigrateRequest,
    6399            0 :         shard: &mut TenantShard,
    6400            0 :         scheduler: &mut Scheduler,
    6401            0 :         schedule_context: ScheduleContext,
    6402            0 :     ) -> Result<Option<ScheduleOptimization>, ApiError> {
    6403            0 :         shard.set_preferred_node(Some(migrate_req.node_id));
    6404              : 
    6405              :         // Generate whatever the initial change to the intent is: this could be creation of a secondary, or
    6406              :         // cutting over to an existing secondary.  Caller is responsible for validating this before applying it,
    6407              :         // e.g. by checking that the secondary is warm enough.
    6408            0 :         Ok(shard.optimize_attachment(scheduler, &schedule_context))
    6409            0 :     }
    6410              : 
    6411              :     /// Immediate migration: directly update the intent state and kick off a reconciler
    6412            0 :     fn tenant_shard_migrate_immediate(
    6413            0 :         &self,
    6414            0 :         migrate_req: &TenantShardMigrateRequest,
    6415            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    6416            0 :         shard: &mut TenantShard,
    6417            0 :         scheduler: &mut Scheduler,
    6418            0 :     ) -> Result<Option<ReconcilerWaiter>, ApiError> {
    6419              :         // Non-graceful migration: update the intent state immediately
    6420            0 :         let old_attached = *shard.intent.get_attached();
    6421            0 :         match shard.policy {
    6422            0 :             PlacementPolicy::Attached(n) => {
    6423              :                 // If our new attached node was a secondary, it should no longer be.
    6424            0 :                 shard
    6425            0 :                     .intent
    6426            0 :                     .remove_secondary(scheduler, migrate_req.node_id);
    6427              : 
    6428            0 :                 shard
    6429            0 :                     .intent
    6430            0 :                     .set_attached(scheduler, Some(migrate_req.node_id));
    6431              : 
    6432              :                 // If we were already attached to something, demote that to a secondary
    6433            0 :                 if let Some(old_attached) = old_attached {
    6434            0 :                     if n > 0 {
    6435              :                         // Remove other secondaries to make room for the location we'll demote
    6436            0 :                         while shard.intent.get_secondary().len() >= n {
    6437            0 :                             shard.intent.pop_secondary(scheduler);
    6438            0 :                         }
    6439              : 
    6440            0 :                         shard.intent.push_secondary(scheduler, old_attached);
    6441            0 :                     }
    6442            0 :                 }
    6443              :             }
    6444            0 :             PlacementPolicy::Secondary => {
    6445            0 :                 shard.intent.clear(scheduler);
    6446            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    6447            0 :             }
    6448              :             PlacementPolicy::Detached => {
    6449            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6450            0 :                     "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
    6451            0 :                 )));
    6452              :             }
    6453              :         }
    6454              : 
    6455            0 :         tracing::info!("Migrating: new intent {:?}", shard.intent);
    6456            0 :         shard.sequence = shard.sequence.next();
    6457            0 :         shard.set_preferred_node(None); // Abort any in-flight graceful migration
    6458            0 :         Ok(self.maybe_configured_reconcile_shard(
    6459            0 :             shard,
    6460            0 :             nodes,
    6461            0 :             (&migrate_req.migration_config).into(),
    6462            0 :         ))
    6463            0 :     }
    6464              : 
    6465            0 :     pub(crate) async fn tenant_shard_migrate(
    6466            0 :         &self,
    6467            0 :         tenant_shard_id: TenantShardId,
    6468            0 :         migrate_req: TenantShardMigrateRequest,
    6469            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    6470              :         // Depending on whether the migration is a change and whether it's graceful or immediate, we might
    6471              :         // get a different outcome to handle
    6472              :         enum MigrationOutcome {
    6473              :             Optimization(Option<ScheduleOptimization>),
    6474              :             Reconcile(Option<ReconcilerWaiter>),
    6475              :         }
    6476              : 
    6477            0 :         let outcome = {
    6478            0 :             let mut locked = self.inner.write().unwrap();
    6479            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6480              : 
    6481            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    6482            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6483            0 :                     "Node {} not found",
    6484            0 :                     migrate_req.node_id
    6485            0 :                 )));
    6486              :             };
    6487              : 
    6488              :             // Migration to an unavailable node requires the force flag
    6489            0 :             if !node.is_available() {
    6490            0 :                 if migrate_req.migration_config.override_scheduler {
    6491              :                     // Warn but proceed: the caller may intend to manually adjust the placement of
    6492              :                     // a shard even if the node is down, e.g. if intervening during an incident.
    6493            0 :                     tracing::warn!("Forcibly migrating to unavailable node {node}");
    6494              :                 } else {
    6495            0 :                     tracing::warn!("Node {node} is unavailable, refusing migration");
    6496            0 :                     return Err(ApiError::PreconditionFailed(
    6497            0 :                         format!("Node {node} is unavailable").into_boxed_str(),
    6498            0 :                     ));
    6499              :                 }
    6500            0 :             }
    6501              : 
    6502              :             // Calculate the ScheduleContext for this tenant
    6503            0 :             let mut schedule_context = ScheduleContext::default();
    6504            0 :             for (_shard_id, shard) in
    6505            0 :                 tenants.range(TenantShardId::tenant_range(tenant_shard_id.tenant_id))
    6506            0 :             {
    6507            0 :                 schedule_context.avoid(&shard.intent.all_pageservers());
    6508            0 :             }
    6509              : 
    6510              :             // Look up the specific shard we will migrate
    6511            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6512            0 :                 return Err(ApiError::NotFound(
    6513            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6514            0 :                 ));
    6515              :             };
    6516              : 
    6517              :             // Migration to a node with unfavorable scheduling score requires a force flag, because it might just
    6518              :             // be migrated back by the optimiser.
    6519            0 :             if let Some(better_node) = shard.find_better_location::<AttachedShardTag>(
    6520            0 :                 scheduler,
    6521            0 :                 &schedule_context,
    6522            0 :                 migrate_req.node_id,
    6523            0 :                 &[],
    6524            0 :             ) {
    6525            0 :                 if !migrate_req.migration_config.override_scheduler {
    6526            0 :                     return Err(ApiError::PreconditionFailed(
    6527            0 :                         "Migration to a worse-scoring node".into(),
    6528            0 :                     ));
    6529              :                 } else {
    6530            0 :                     tracing::info!(
    6531            0 :                         "Migrating to a worse-scoring node {} (optimiser would prefer {better_node})",
    6532              :                         migrate_req.node_id
    6533              :                     );
    6534              :                 }
    6535            0 :             }
    6536              : 
    6537            0 :             if let Some(origin_node_id) = migrate_req.origin_node_id {
    6538            0 :                 if shard.intent.get_attached() != &Some(origin_node_id) {
    6539            0 :                     return Err(ApiError::PreconditionFailed(
    6540            0 :                         format!(
    6541            0 :                             "Migration expected to originate from {} but shard is on {:?}",
    6542            0 :                             origin_node_id,
    6543            0 :                             shard.intent.get_attached()
    6544            0 :                         )
    6545            0 :                         .into(),
    6546            0 :                     ));
    6547            0 :                 }
    6548            0 :             }
    6549              : 
    6550            0 :             if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    6551              :                 // No-op case: we will still proceed to wait for reconciliation in case it is
    6552              :                 // incomplete from an earlier update to the intent.
    6553            0 :                 tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
    6554              : 
    6555              :                 // An instruction to migrate to the currently attached node should
    6556              :                 // cancel any pending graceful migration
    6557            0 :                 shard.set_preferred_node(None);
    6558              : 
    6559            0 :                 MigrationOutcome::Reconcile(self.maybe_configured_reconcile_shard(
    6560            0 :                     shard,
    6561            0 :                     nodes,
    6562            0 :                     (&migrate_req.migration_config).into(),
    6563            0 :                 ))
    6564            0 :             } else if migrate_req.migration_config.prewarm {
    6565            0 :                 MigrationOutcome::Optimization(self.tenant_shard_migrate_with_prewarm(
    6566            0 :                     &migrate_req,
    6567            0 :                     shard,
    6568            0 :                     scheduler,
    6569            0 :                     schedule_context,
    6570            0 :                 )?)
    6571              :             } else {
    6572            0 :                 MigrationOutcome::Reconcile(self.tenant_shard_migrate_immediate(
    6573            0 :                     &migrate_req,
    6574            0 :                     nodes,
    6575            0 :                     shard,
    6576            0 :                     scheduler,
    6577            0 :                 )?)
    6578              :             }
    6579              :         };
    6580              : 
    6581              :         // We may need to validate + apply an optimisation, or we may need to just retrieve a reconcile waiter
    6582            0 :         let waiter = match outcome {
    6583            0 :             MigrationOutcome::Optimization(Some(optimization)) => {
    6584              :                 // Validate and apply the optimization -- this would happen anyway in background reconcile loop, but
    6585              :                 // we might as well do it more promptly as this is a direct external request.
    6586            0 :                 let mut validated = self
    6587            0 :                     .optimize_all_validate(vec![(tenant_shard_id, optimization)])
    6588            0 :                     .await;
    6589            0 :                 if let Some((_shard_id, optimization)) = validated.pop() {
    6590            0 :                     let mut locked = self.inner.write().unwrap();
    6591            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    6592            0 :                     let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6593              :                         // Rare but possible: tenant is removed between generating optimisation and validating it.
    6594            0 :                         return Err(ApiError::NotFound(
    6595            0 :                             anyhow::anyhow!("Tenant shard not found").into(),
    6596            0 :                         ));
    6597              :                     };
    6598              : 
    6599            0 :                     if !shard.apply_optimization(scheduler, optimization) {
    6600              :                         // This can happen but is unusual enough to warn on: something else changed in the shard that made the optimisation stale
    6601              :                         // and therefore not applied.
    6602            0 :                         tracing::warn!(
    6603            0 :                             "Schedule optimisation generated during graceful migration was not applied, shard changed?"
    6604              :                         );
    6605            0 :                     }
    6606            0 :                     self.maybe_configured_reconcile_shard(
    6607            0 :                         shard,
    6608            0 :                         nodes,
    6609            0 :                         (&migrate_req.migration_config).into(),
    6610              :                     )
    6611              :                 } else {
    6612            0 :                     None
    6613              :                 }
    6614              :             }
    6615            0 :             MigrationOutcome::Optimization(None) => None,
    6616            0 :             MigrationOutcome::Reconcile(waiter) => waiter,
    6617              :         };
    6618              : 
    6619              :         // Finally, wait for any reconcile we started to complete.  In the case of immediate-mode migrations to cold
    6620              :         // locations, this has a good chance of timing out.
    6621            0 :         if let Some(waiter) = waiter {
    6622            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    6623              :         } else {
    6624            0 :             tracing::info!("Migration is a no-op");
    6625              :         }
    6626              : 
    6627            0 :         Ok(TenantShardMigrateResponse {})
    6628            0 :     }
    6629              : 
    6630            0 :     pub(crate) async fn tenant_shard_migrate_secondary(
    6631            0 :         &self,
    6632            0 :         tenant_shard_id: TenantShardId,
    6633            0 :         migrate_req: TenantShardMigrateRequest,
    6634            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    6635            0 :         let waiter = {
    6636            0 :             let mut locked = self.inner.write().unwrap();
    6637            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6638              : 
    6639            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    6640            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6641            0 :                     "Node {} not found",
    6642            0 :                     migrate_req.node_id
    6643            0 :                 )));
    6644              :             };
    6645              : 
    6646            0 :             if !node.is_available() {
    6647              :                 // Warn but proceed: the caller may intend to manually adjust the placement of
    6648              :                 // a shard even if the node is down, e.g. if intervening during an incident.
    6649            0 :                 tracing::warn!("Migrating to unavailable node {node}");
    6650            0 :             }
    6651              : 
    6652            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6653            0 :                 return Err(ApiError::NotFound(
    6654            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6655            0 :                 ));
    6656              :             };
    6657              : 
    6658            0 :             if shard.intent.get_secondary().len() == 1
    6659            0 :                 && shard.intent.get_secondary()[0] == migrate_req.node_id
    6660              :             {
    6661            0 :                 tracing::info!(
    6662            0 :                     "Migrating secondary to {node}: intent is unchanged {:?}",
    6663              :                     shard.intent
    6664              :                 );
    6665            0 :             } else if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    6666            0 :                 tracing::info!(
    6667            0 :                     "Migrating secondary to {node}: already attached where we were asked to create a secondary"
    6668              :                 );
    6669              :             } else {
    6670            0 :                 let old_secondaries = shard.intent.get_secondary().clone();
    6671            0 :                 for secondary in old_secondaries {
    6672            0 :                     shard.intent.remove_secondary(scheduler, secondary);
    6673            0 :                 }
    6674              : 
    6675            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    6676            0 :                 shard.sequence = shard.sequence.next();
    6677            0 :                 tracing::info!(
    6678            0 :                     "Migrating secondary to {node}: new intent {:?}",
    6679              :                     shard.intent
    6680              :                 );
    6681              :             }
    6682              : 
    6683            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    6684              :         };
    6685              : 
    6686            0 :         if let Some(waiter) = waiter {
    6687            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    6688              :         } else {
    6689            0 :             tracing::info!("Migration is a no-op");
    6690              :         }
    6691              : 
    6692            0 :         Ok(TenantShardMigrateResponse {})
    6693            0 :     }
    6694              : 
    6695              :     /// 'cancel' in this context means cancel any ongoing reconcile
    6696            0 :     pub(crate) async fn tenant_shard_cancel_reconcile(
    6697            0 :         &self,
    6698            0 :         tenant_shard_id: TenantShardId,
    6699            0 :     ) -> Result<(), ApiError> {
    6700              :         // Take state lock and fire the cancellation token, after which we drop lock and wait for any ongoing reconcile to complete
    6701            0 :         let waiter = {
    6702            0 :             let locked = self.inner.write().unwrap();
    6703            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    6704            0 :                 return Err(ApiError::NotFound(
    6705            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6706            0 :                 ));
    6707              :             };
    6708              : 
    6709            0 :             let waiter = shard.get_waiter();
    6710            0 :             match waiter {
    6711              :                 None => {
    6712            0 :                     tracing::info!("Shard does not have an ongoing Reconciler");
    6713            0 :                     return Ok(());
    6714              :                 }
    6715            0 :                 Some(waiter) => {
    6716            0 :                     tracing::info!("Cancelling Reconciler");
    6717            0 :                     shard.cancel_reconciler();
    6718            0 :                     waiter
    6719              :                 }
    6720              :             }
    6721              :         };
    6722              : 
    6723              :         // Cancellation should be prompt.  If this fails we have still done our job of firing the
    6724              :         // cancellation token, but by returning an ApiError we will indicate to the caller that
    6725              :         // the Reconciler is misbehaving and not respecting the cancellation token
    6726            0 :         self.await_waiters(vec![waiter], SHORT_RECONCILE_TIMEOUT)
    6727            0 :             .await?;
    6728              : 
    6729            0 :         Ok(())
    6730            0 :     }
    6731              : 
    6732              :     /// This is for debug/support only: we simply drop all state for a tenant, without
    6733              :     /// detaching or deleting it on pageservers.
    6734            0 :     pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
    6735            0 :         self.persistence.delete_tenant(tenant_id).await?;
    6736              : 
    6737            0 :         let mut locked = self.inner.write().unwrap();
    6738            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    6739            0 :         let mut shards = Vec::new();
    6740            0 :         for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
    6741            0 :             shards.push(*tenant_shard_id);
    6742            0 :         }
    6743              : 
    6744            0 :         for shard_id in shards {
    6745            0 :             if let Some(mut shard) = tenants.remove(&shard_id) {
    6746            0 :                 shard.intent.clear(scheduler);
    6747            0 :             }
    6748              :         }
    6749              : 
    6750            0 :         Ok(())
    6751            0 :     }
    6752              : 
    6753              :     /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
    6754              :     /// tenant with a very high generation number so that it will see the existing data.
    6755              :     /// It does not create timelines on safekeepers, because they might already exist on some
    6756              :     /// safekeeper set. So, the timelines are not storcon-managed after the import.
    6757            0 :     pub(crate) async fn tenant_import(
    6758            0 :         &self,
    6759            0 :         tenant_id: TenantId,
    6760            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    6761              :         // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
    6762            0 :         let maybe_node = {
    6763            0 :             self.inner
    6764            0 :                 .read()
    6765            0 :                 .unwrap()
    6766            0 :                 .nodes
    6767            0 :                 .values()
    6768            0 :                 .find(|n| n.is_available())
    6769            0 :                 .cloned()
    6770              :         };
    6771            0 :         let Some(node) = maybe_node else {
    6772            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
    6773              :         };
    6774              : 
    6775            0 :         let client = PageserverClient::new(
    6776            0 :             node.get_id(),
    6777            0 :             self.http_client.clone(),
    6778            0 :             node.base_url(),
    6779            0 :             self.config.pageserver_jwt_token.as_deref(),
    6780              :         );
    6781              : 
    6782            0 :         let scan_result = client
    6783            0 :             .tenant_scan_remote_storage(tenant_id)
    6784            0 :             .await
    6785            0 :             .map_err(|e| passthrough_api_error(&node, e))?;
    6786              : 
    6787              :         // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
    6788            0 :         let Some(shard_count) = scan_result
    6789            0 :             .shards
    6790            0 :             .iter()
    6791            0 :             .map(|s| s.tenant_shard_id.shard_count)
    6792            0 :             .max()
    6793              :         else {
    6794            0 :             return Err(ApiError::NotFound(
    6795            0 :                 anyhow::anyhow!("No shards found").into(),
    6796            0 :             ));
    6797              :         };
    6798              : 
    6799              :         // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
    6800              :         // to use the maximum generation observed across all shards.
    6801            0 :         let generation = scan_result
    6802            0 :             .shards
    6803            0 :             .iter()
    6804            0 :             .map(|s| s.generation)
    6805            0 :             .max()
    6806            0 :             .expect("We already validated >0 shards");
    6807              : 
    6808              :         // Find the tenant's stripe size. This wasn't always persisted in the tenant manifest, so
    6809              :         // fall back to the original default stripe size of 32768 (256 MB) if it's not specified.
    6810              :         const ORIGINAL_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(32768);
    6811            0 :         let stripe_size = scan_result
    6812            0 :             .shards
    6813            0 :             .iter()
    6814            0 :             .find(|s| s.tenant_shard_id.shard_count == shard_count && s.generation == generation)
    6815            0 :             .expect("we validated >0 shards above")
    6816              :             .stripe_size
    6817            0 :             .unwrap_or_else(|| {
    6818            0 :                 if shard_count.count() > 1 {
    6819            0 :                     warn!("unknown stripe size, assuming {ORIGINAL_STRIPE_SIZE}");
    6820            0 :                 }
    6821            0 :                 ORIGINAL_STRIPE_SIZE
    6822            0 :             });
    6823              : 
    6824            0 :         let (response, waiters) = self
    6825            0 :             .do_tenant_create(TenantCreateRequest {
    6826            0 :                 new_tenant_id: TenantShardId::unsharded(tenant_id),
    6827            0 :                 generation,
    6828            0 : 
    6829            0 :                 shard_parameters: ShardParameters {
    6830            0 :                     count: shard_count,
    6831            0 :                     stripe_size,
    6832            0 :                 },
    6833            0 :                 placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
    6834            0 :                 config: TenantConfig::default(),
    6835            0 :             })
    6836            0 :             .await?;
    6837              : 
    6838            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    6839              :             // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
    6840              :             // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
    6841              :             // reconcile, as reconciliation includes notifying compute.
    6842            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
    6843            0 :         }
    6844              : 
    6845            0 :         Ok(response)
    6846            0 :     }
    6847              : 
    6848              :     /// For debug/support: a full JSON dump of TenantShards.  Returns a response so that
    6849              :     /// we don't have to make TenantShard clonable in the return path.
    6850            0 :     pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    6851            0 :         let serialized = {
    6852            0 :             let locked = self.inner.read().unwrap();
    6853            0 :             let result = locked.tenants.values().collect::<Vec<_>>();
    6854            0 :             serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
    6855              :         };
    6856              : 
    6857            0 :         hyper::Response::builder()
    6858            0 :             .status(hyper::StatusCode::OK)
    6859            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    6860            0 :             .body(hyper::Body::from(serialized))
    6861            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    6862            0 :     }
    6863              : 
    6864              :     /// Check the consistency of in-memory state vs. persistent state, and check that the
    6865              :     /// scheduler's statistics are up to date.
    6866              :     ///
    6867              :     /// These consistency checks expect an **idle** system.  If changes are going on while
    6868              :     /// we run, then we can falsely indicate a consistency issue.  This is sufficient for end-of-test
    6869              :     /// checks, but not suitable for running continuously in the background in the field.
    6870            0 :     pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
    6871            0 :         let (mut expect_nodes, mut expect_shards) = {
    6872            0 :             let locked = self.inner.read().unwrap();
    6873              : 
    6874            0 :             locked
    6875            0 :                 .scheduler
    6876            0 :                 .consistency_check(locked.nodes.values(), locked.tenants.values())
    6877            0 :                 .context("Scheduler checks")
    6878            0 :                 .map_err(ApiError::InternalServerError)?;
    6879              : 
    6880            0 :             let expect_nodes = locked
    6881            0 :                 .nodes
    6882            0 :                 .values()
    6883            0 :                 .map(|n| n.to_persistent())
    6884            0 :                 .collect::<Vec<_>>();
    6885              : 
    6886            0 :             let expect_shards = locked
    6887            0 :                 .tenants
    6888            0 :                 .values()
    6889            0 :                 .map(|t| t.to_persistent())
    6890            0 :                 .collect::<Vec<_>>();
    6891              : 
    6892              :             // This method can only validate the state of an idle system: if a reconcile is in
    6893              :             // progress, fail out early to avoid giving false errors on state that won't match
    6894              :             // between database and memory until a ReconcileResult is processed.
    6895            0 :             for t in locked.tenants.values() {
    6896            0 :                 if t.reconciler.is_some() {
    6897            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6898            0 :                         "Shard {} reconciliation in progress",
    6899            0 :                         t.tenant_shard_id
    6900            0 :                     )));
    6901            0 :                 }
    6902              :             }
    6903              : 
    6904            0 :             (expect_nodes, expect_shards)
    6905              :         };
    6906              : 
    6907            0 :         let mut nodes = self.persistence.list_nodes().await?;
    6908            0 :         expect_nodes.sort_by_key(|n| n.node_id);
    6909            0 :         nodes.sort_by_key(|n| n.node_id);
    6910              : 
    6911              :         // Errors relating to nodes are deferred so that we don't skip the shard checks below if we have a node error
    6912            0 :         let node_result = if nodes != expect_nodes {
    6913            0 :             tracing::error!("Consistency check failed on nodes.");
    6914            0 :             tracing::error!(
    6915            0 :                 "Nodes in memory: {}",
    6916            0 :                 serde_json::to_string(&expect_nodes)
    6917            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6918              :             );
    6919            0 :             tracing::error!(
    6920            0 :                 "Nodes in database: {}",
    6921            0 :                 serde_json::to_string(&nodes)
    6922            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6923              :             );
    6924            0 :             Err(ApiError::InternalServerError(anyhow::anyhow!(
    6925            0 :                 "Node consistency failure"
    6926            0 :             )))
    6927              :         } else {
    6928            0 :             Ok(())
    6929              :         };
    6930              : 
    6931            0 :         let mut persistent_shards = self.persistence.load_active_tenant_shards().await?;
    6932            0 :         persistent_shards
    6933            0 :             .sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    6934              : 
    6935            0 :         expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    6936              : 
    6937              :         // Because JSON contents of persistent tenants might disagree with the fields in current `TenantConfig`
    6938              :         // definition, we will do an encode/decode cycle to ensure any legacy fields are dropped and any new
    6939              :         // fields are added, before doing a comparison.
    6940            0 :         for tsp in &mut persistent_shards {
    6941            0 :             let config: TenantConfig = serde_json::from_str(&tsp.config)
    6942            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?;
    6943            0 :             tsp.config = serde_json::to_string(&config).expect("Encoding config is infallible");
    6944              :         }
    6945              : 
    6946            0 :         if persistent_shards != expect_shards {
    6947            0 :             tracing::error!("Consistency check failed on shards.");
    6948              : 
    6949            0 :             tracing::error!(
    6950            0 :                 "Shards in memory: {}",
    6951            0 :                 serde_json::to_string(&expect_shards)
    6952            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6953              :             );
    6954            0 :             tracing::error!(
    6955            0 :                 "Shards in database: {}",
    6956            0 :                 serde_json::to_string(&persistent_shards)
    6957            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6958              :             );
    6959              : 
    6960              :             // The full dump log lines above are useful in testing, but in the field Grafana will
    6961              :             // usually just drop them because they're so large. So we also do some explicit logging
    6962              :             // of just the diffs.
    6963            0 :             let persistent_shards = persistent_shards
    6964            0 :                 .into_iter()
    6965            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    6966            0 :                 .collect::<HashMap<_, _>>();
    6967            0 :             let expect_shards = expect_shards
    6968            0 :                 .into_iter()
    6969            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    6970            0 :                 .collect::<HashMap<_, _>>();
    6971            0 :             for (tenant_shard_id, persistent_tsp) in &persistent_shards {
    6972            0 :                 match expect_shards.get(tenant_shard_id) {
    6973              :                     None => {
    6974            0 :                         tracing::error!(
    6975            0 :                             "Shard {} found in database but not in memory",
    6976              :                             tenant_shard_id
    6977              :                         );
    6978              :                     }
    6979            0 :                     Some(expect_tsp) => {
    6980            0 :                         if expect_tsp != persistent_tsp {
    6981            0 :                             tracing::error!(
    6982            0 :                                 "Shard {} is inconsistent.  In memory: {}, database has: {}",
    6983              :                                 tenant_shard_id,
    6984            0 :                                 serde_json::to_string(expect_tsp).unwrap(),
    6985            0 :                                 serde_json::to_string(&persistent_tsp).unwrap()
    6986              :                             );
    6987            0 :                         }
    6988              :                     }
    6989              :                 }
    6990              :             }
    6991              : 
    6992              :             // Having already logged any differences, log any shards that simply aren't present in the database
    6993            0 :             for (tenant_shard_id, memory_tsp) in &expect_shards {
    6994            0 :                 if !persistent_shards.contains_key(tenant_shard_id) {
    6995            0 :                     tracing::error!(
    6996            0 :                         "Shard {} found in memory but not in database: {}",
    6997              :                         tenant_shard_id,
    6998            0 :                         serde_json::to_string(memory_tsp)
    6999            0 :                             .map_err(|e| ApiError::InternalServerError(e.into()))?
    7000              :                     );
    7001            0 :                 }
    7002              :             }
    7003              : 
    7004            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7005            0 :                 "Shard consistency failure"
    7006            0 :             )));
    7007            0 :         }
    7008              : 
    7009            0 :         node_result
    7010            0 :     }
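
A minimal sketch of the config-normalization trick used in the shard comparison above: decoding the persisted JSON through the current struct definition and re-encoding it drops legacy fields and fills in defaults for new ones, so both sides of the comparison have the same shape. The `ExampleConfig` type and its field below are hypothetical stand-ins, not the real `TenantConfig`.

    // Hedged sketch: not the controller's code. Re-encode persisted JSON through the
    // current struct so unknown/legacy fields are dropped and missing fields get defaults.
    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct ExampleConfig {
        // Hypothetical field standing in for a real TenantConfig knob.
        #[serde(default)]
        pitr_interval_secs: u64,
    }

    fn normalize(raw: &str) -> serde_json::Result<String> {
        let cfg: ExampleConfig = serde_json::from_str(raw)?;
        serde_json::to_string(&cfg)
    }

    fn main() {
        // A legacy field disappears and the missing field gains its default value,
        // so the in-memory and persisted representations compare apples to apples.
        let normalized = normalize(r#"{"legacy_flag": true}"#).unwrap();
        assert_eq!(normalized, r#"{"pitr_interval_secs":0}"#);
    }
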
    7011              : 
    7012              :     /// For debug/support: a JSON dump of the [`Scheduler`].  Returns a response so that
    7013              :     /// we don't have to make TenantShard clonable in the return path.
    7014            0 :     pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    7015            0 :         let serialized = {
    7016            0 :             let locked = self.inner.read().unwrap();
    7017            0 :             serde_json::to_string(&locked.scheduler)
    7018            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?
    7019              :         };
    7020              : 
    7021            0 :         hyper::Response::builder()
    7022            0 :             .status(hyper::StatusCode::OK)
    7023            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    7024            0 :             .body(hyper::Body::from(serialized))
    7025            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    7026            0 :     }
    7027              : 
    7028              :     /// This is for debug/support only: we simply drop all state referencing the node, without
    7029              :     /// detaching or deleting anything on pageservers.  We do not try to re-schedule any
    7030              :     /// tenants that were on this node.
    7031            0 :     pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
    7032            0 :         self.persistence.set_tombstone(node_id).await?;
    7033              : 
    7034            0 :         let mut locked = self.inner.write().unwrap();
    7035              : 
    7036            0 :         for shard in locked.tenants.values_mut() {
    7037            0 :             shard.deref_node(node_id);
    7038            0 :             shard.observed.locations.remove(&node_id);
    7039            0 :         }
    7040              : 
    7041            0 :         let mut nodes = (*locked.nodes).clone();
    7042            0 :         nodes.remove(&node_id);
    7043            0 :         locked.nodes = Arc::new(nodes);
    7044            0 :         metrics::METRICS_REGISTRY
    7045            0 :             .metrics_group
    7046            0 :             .storage_controller_pageserver_nodes
    7047            0 :             .set(locked.nodes.len() as i64);
    7048            0 :         metrics::METRICS_REGISTRY
    7049            0 :             .metrics_group
    7050            0 :             .storage_controller_https_pageserver_nodes
    7051            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7052              : 
    7053            0 :         locked.scheduler.node_remove(node_id);
    7054              : 
    7055            0 :         Ok(())
    7056            0 :     }
    7057              : 
    7058              :     /// If a node has any work on it, that work will be rescheduled: this is "clean" in the sense
    7059              :     /// that we don't leave any bad state behind in the storage controller, but unclean
    7060              :     /// in the sense that we are not carefully draining the node.
    7061            0 :     pub(crate) async fn node_delete_old(&self, node_id: NodeId) -> Result<(), ApiError> {
    7062            0 :         let _node_lock =
    7063            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
    7064              : 
    7065              :         // 1. Atomically update in-memory state:
    7066              :         //    - set the scheduling state to Pause to make subsequent scheduling ops skip it
    7067              :         //    - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
    7068              :         //    - drop the node from the main nodes map, so that when running reconciles complete they do not
    7069              :         //      re-insert references to this node into the ObservedState of shards
    7070              :         //    - drop the node from the scheduler
    7071              :         {
    7072            0 :             let mut locked = self.inner.write().unwrap();
    7073            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    7074              : 
    7075              :             {
    7076            0 :                 let mut nodes_mut = (*nodes).deref().clone();
    7077            0 :                 match nodes_mut.get_mut(&node_id) {
    7078            0 :                     Some(node) => {
    7079            0 :                         // We do not bother setting this in the database, because we're about to delete the row anyway, and
    7080            0 :                         // if we crash it would not be desirable to leave the node paused after a restart.
    7081            0 :                         node.set_scheduling(NodeSchedulingPolicy::Pause);
    7082            0 :                     }
    7083              :                     None => {
    7084            0 :                         tracing::info!(
    7085            0 :                             "Node not found: presuming this is a retry and returning success"
    7086              :                         );
    7087            0 :                         return Ok(());
    7088              :                     }
    7089              :                 }
    7090              : 
    7091            0 :                 *nodes = Arc::new(nodes_mut);
    7092              :             }
    7093              : 
    7094            0 :             for (_tenant_id, mut schedule_context, shards) in
    7095            0 :                 TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal)
    7096              :             {
    7097            0 :                 for shard in shards {
    7098            0 :                     if shard.deref_node(node_id) {
    7099            0 :                         if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
    7100              :                             // TODO: implement force flag to remove a node even if we can't reschedule
    7101              :                             // a tenant
    7102            0 :                             tracing::error!(
    7103            0 :                                 "Refusing to delete node, shard {} can't be rescheduled: {e}",
    7104              :                                 shard.tenant_shard_id
    7105              :                             );
    7106            0 :                             return Err(e.into());
    7107              :                         } else {
    7108            0 :                             tracing::info!(
    7109            0 :                                 "Rescheduled shard {} away from node during deletion",
    7110              :                                 shard.tenant_shard_id
    7111              :                             )
    7112              :                         }
    7113              : 
    7114            0 :                         self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    7115            0 :                     }
    7116              : 
    7117              :                     // Here we remove an existing observed location for the node we're removing, and it will
    7118              :                     // not be re-added by a reconciler's completion because we filter out removed nodes in
    7119              :                     // process_result.
    7120              :                     //
    7121              :                     // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
    7122              :                     // means any reconciles we spawned will know about the node we're deleting, enabling them
    7123              :                     // to do live migrations if it's still online.
    7124            0 :                     shard.observed.locations.remove(&node_id);
    7125              :                 }
    7126              :             }
    7127              : 
    7128            0 :             scheduler.node_remove(node_id);
    7129              : 
    7130              :             {
    7131            0 :                 let mut nodes_mut = (**nodes).clone();
    7132            0 :                 if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    7133            0 :                     // Ensure that any reconciler holding an Arc<> to this node will
    7134            0 :                     // drop out when trying to RPC to it (setting Offline state sets the
    7135            0 :                     // cancellation token on the Node object).
    7136            0 :                     removed_node.set_availability(NodeAvailability::Offline);
    7137            0 :                 }
    7138            0 :                 *nodes = Arc::new(nodes_mut);
    7139            0 :                 metrics::METRICS_REGISTRY
    7140            0 :                     .metrics_group
    7141            0 :                     .storage_controller_pageserver_nodes
    7142            0 :                     .set(nodes.len() as i64);
    7143            0 :                 metrics::METRICS_REGISTRY
    7144            0 :                     .metrics_group
    7145            0 :                     .storage_controller_https_pageserver_nodes
    7146            0 :                     .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7147              :             }
    7148              :         }
    7149              : 
    7150              :         // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
    7151              :         // the removed node, as this column means "The pageserver to which this generation was issued", and
    7152              :         // their generations won't get updated until the reconcilers moving them away from this node complete.
    7153              :         // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
    7154              :         // that exists.
    7155              : 
    7156              :         // 2. Actually delete the node: set a tombstone in the database
    7157              :         // to prevent the node from registering again.
    7158            0 :         tracing::info!("Deleting node from database");
    7159            0 :         self.persistence.set_tombstone(node_id).await?;
    7160              : 
    7161            0 :         Ok(())
    7162            0 :     }
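
The removal path above never mutates the shared node map in place: it clones the map behind the `Arc`, edits the clone, and publishes a fresh `Arc`, so readers holding the old snapshot are undisturbed. A minimal, self-contained sketch of that copy-on-write pattern follows; the `NodeInfo` type is illustrative, not the controller's.

    // Hedged sketch of copy-on-write over Arc<HashMap<..>>.
    use std::collections::HashMap;
    use std::sync::Arc;

    #[derive(Clone, Debug)]
    struct NodeInfo {
        paused: bool,
    }

    fn main() {
        let mut nodes: Arc<HashMap<u64, NodeInfo>> =
            Arc::new(HashMap::from([(1, NodeInfo { paused: false })]));

        // A reader snapshot taken before the update keeps seeing the old map.
        let snapshot = Arc::clone(&nodes);

        // Writer: clone the inner map, mutate the clone, then publish a fresh Arc.
        let mut next = (*nodes).clone();
        if let Some(n) = next.get_mut(&1) {
            n.paused = true;
        }
        nodes = Arc::new(next);

        assert!(!snapshot.get(&1).unwrap().paused);
        assert!(nodes.get(&1).unwrap().paused);
    }
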
    7163              : 
    7164            0 :     pub(crate) async fn delete_node(
    7165            0 :         self: &Arc<Self>,
    7166            0 :         node_id: NodeId,
    7167            0 :         policy_on_start: NodeSchedulingPolicy,
    7168            0 :         cancel: CancellationToken,
    7169            0 :     ) -> Result<(), OperationError> {
    7170            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal).build();
    7171              : 
    7172            0 :         let mut waiters: Vec<ReconcilerWaiter> = Vec::new();
    7173            0 :         let mut tid_iter = create_shared_shard_iterator(self.clone());
    7174              : 
    7175            0 :         while !tid_iter.finished() {
    7176            0 :             if cancel.is_cancelled() {
    7177            0 :                 match self
    7178            0 :                     .node_configure(node_id, None, Some(policy_on_start))
    7179            0 :                     .await
    7180              :                 {
    7181            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    7182            0 :                     Err(err) => {
    7183            0 :                         return Err(OperationError::FinalizeError(
    7184            0 :                             format!(
    7185            0 :                                 "Failed to finalise delete cancel of {} by setting scheduling policy to {}: {}",
    7186            0 :                                 node_id, String::from(policy_on_start), err
    7187            0 :                             )
    7188            0 :                             .into(),
    7189            0 :                         ));
    7190              :                     }
    7191              :                 }
    7192            0 :             }
    7193              : 
    7194            0 :             operation_utils::validate_node_state(
    7195            0 :                 &node_id,
    7196            0 :                 self.inner.read().unwrap().nodes.clone(),
    7197            0 :                 NodeSchedulingPolicy::Deleting,
    7198            0 :             )?;
    7199              : 
    7200            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    7201            0 :                 let tid = match tid_iter.next() {
    7202            0 :                     Some(tid) => tid,
    7203              :                     None => {
    7204            0 :                         break;
    7205              :                     }
    7206              :                 };
    7207              : 
    7208            0 :                 let mut locked = self.inner.write().unwrap();
    7209            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    7210              : 
    7211              :                 // Calculate a schedule context here to avoid borrow checker issues.
    7212            0 :                 let mut schedule_context = ScheduleContext::default();
    7213            0 :                 for (_, shard) in tenants.range(TenantShardId::tenant_range(tid.tenant_id)) {
    7214            0 :                     schedule_context.avoid(&shard.intent.all_pageservers());
    7215            0 :                 }
    7216              : 
    7217            0 :                 let tenant_shard = match tenants.get_mut(&tid) {
    7218            0 :                     Some(tenant_shard) => tenant_shard,
    7219              :                     None => {
    7220              :                         // Tenant shard was deleted by another operation. Skip it.
    7221            0 :                         continue;
    7222              :                     }
    7223              :                 };
    7224              : 
    7225            0 :                 match tenant_shard.get_scheduling_policy() {
    7226            0 :                     ShardSchedulingPolicy::Active | ShardSchedulingPolicy::Essential => {
    7227            0 :                         // A migration during delete is classed as 'essential' because it is required to
    7228            0 :                         // uphold our availability goals for the tenant: this shard is eligible for migration.
    7229            0 :                     }
    7230              :                     ShardSchedulingPolicy::Pause | ShardSchedulingPolicy::Stop => {
    7231              :                         // If we have been asked to avoid rescheduling this shard, then do not migrate it during a deletion
    7232            0 :                         tracing::warn!(
    7233            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    7234            0 :                             "Skip migration during deletion because shard scheduling policy {:?} disallows it",
    7235            0 :                             tenant_shard.get_scheduling_policy(),
    7236              :                         );
    7237            0 :                         continue;
    7238              :                     }
    7239              :                 }
    7240              : 
    7241            0 :                 if tenant_shard.deref_node(node_id) {
    7242            0 :                     if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
    7243            0 :                         tracing::error!(
    7244            0 :                             "Refusing to delete node, shard {} can't be rescheduled: {e}",
    7245              :                             tenant_shard.tenant_shard_id
    7246              :                         );
    7247            0 :                         return Err(OperationError::ImpossibleConstraint(e.to_string().into()));
    7248              :                     } else {
    7249            0 :                         tracing::info!(
    7250            0 :                             "Rescheduled shard {} away from node during deletion",
    7251              :                             tenant_shard.tenant_shard_id
    7252              :                         )
    7253              :                     }
    7254              : 
    7255            0 :                     let waiter = self.maybe_configured_reconcile_shard(
    7256            0 :                         tenant_shard,
    7257            0 :                         nodes,
    7258            0 :                         reconciler_config,
    7259            0 :                     );
    7260            0 :                     if let Some(some) = waiter {
    7261            0 :                         waiters.push(some);
    7262            0 :                     }
    7263            0 :                 }
    7264              :             }
    7265              : 
    7266            0 :             waiters = self
    7267            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    7268            0 :                 .await;
    7269              : 
    7270            0 :             failpoint_support::sleep_millis_async!("sleepy-delete-loop", &cancel);
    7271              :         }
    7272              : 
    7273            0 :         while !waiters.is_empty() {
    7274            0 :             if cancel.is_cancelled() {
    7275            0 :                 match self
    7276            0 :                     .node_configure(node_id, None, Some(policy_on_start))
    7277            0 :                     .await
    7278              :                 {
    7279            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    7280            0 :                     Err(err) => {
    7281            0 :                         return Err(OperationError::FinalizeError(
    7282            0 :                             format!(
    7283            0 :                                 "Failed to finalise delete cancel of {} by setting scheduling policy to {}: {}",
    7284            0 :                                 node_id, String::from(policy_on_start), err
    7285            0 :                             )
    7286            0 :                             .into(),
    7287            0 :                         ));
    7288              :                     }
    7289              :                 }
    7290            0 :             }
    7291              : 
    7292            0 :             tracing::info!("Awaiting {} pending delete reconciliations", waiters.len());
    7293              : 
    7294            0 :             waiters = self
    7295            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    7296            0 :                 .await;
    7297              :         }
    7298              : 
    7299            0 :         self.persistence
    7300            0 :             .set_tombstone(node_id)
    7301            0 :             .await
    7302            0 :             .map_err(|e| OperationError::FinalizeError(e.to_string().into()))?;
    7303              : 
    7304              :         {
    7305            0 :             let mut locked = self.inner.write().unwrap();
    7306            0 :             let (nodes, _, scheduler) = locked.parts_mut();
    7307              : 
    7308            0 :             scheduler.node_remove(node_id);
    7309              : 
    7310            0 :             let mut nodes_mut = (**nodes).clone();
    7311            0 :             if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    7312            0 :                 // Ensure that any reconciler holding an Arc<> to this node will
    7313            0 :                 // drop out when trying to RPC to it (setting Offline state sets the
    7314            0 :                 // cancellation token on the Node object).
    7315            0 :                 removed_node.set_availability(NodeAvailability::Offline);
    7316            0 :             }
    7317            0 :             *nodes = Arc::new(nodes_mut);
    7318              : 
    7319            0 :             metrics::METRICS_REGISTRY
    7320            0 :                 .metrics_group
    7321            0 :                 .storage_controller_pageserver_nodes
    7322            0 :                 .set(nodes.len() as i64);
    7323            0 :             metrics::METRICS_REGISTRY
    7324            0 :                 .metrics_group
    7325            0 :                 .storage_controller_https_pageserver_nodes
    7326            0 :                 .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7327              :         }
    7328              : 
    7329            0 :         Ok(())
    7330            0 :     }
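
`delete_node` caps the number of in-flight reconciles per pass and then drains the remainder in short, bounded waits so cancellation can be re-checked between polls. A rough sketch of that shape using plain `FuturesUnordered` and `tokio::time::timeout`; the item type, delays, and cap are placeholders, not the controller's reconciler waiters.

    // Hedged sketch, not the controller's API: cap in-flight tasks, then drain the rest
    // with short timeouts so cancellation can be re-checked between polls.
    use futures::stream::{FuturesUnordered, StreamExt};
    use std::time::Duration;

    const MAX_IN_FLIGHT: usize = 4;

    #[tokio::main]
    async fn main() {
        let mut pending = FuturesUnordered::new();

        for item in 0u64..10 {
            // Keep at most MAX_IN_FLIGHT tasks outstanding before queueing more.
            while pending.len() >= MAX_IN_FLIGHT {
                pending.next().await;
            }
            pending.push(async move {
                tokio::time::sleep(Duration::from_millis(5 * item)).await;
                item
            });
        }

        // Drain the remainder; each wait is bounded so the loop can check for cancellation.
        while !pending.is_empty() {
            let _ = tokio::time::timeout(Duration::from_millis(50), pending.next()).await;
        }
    }
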
    7331              : 
    7332            0 :     pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
    7333            0 :         let nodes = {
    7334            0 :             self.inner
    7335            0 :                 .read()
    7336            0 :                 .unwrap()
    7337            0 :                 .nodes
    7338            0 :                 .values()
    7339            0 :                 .cloned()
    7340            0 :                 .collect::<Vec<_>>()
    7341              :         };
    7342              : 
    7343            0 :         Ok(nodes)
    7344            0 :     }
    7345              : 
    7346            0 :     pub(crate) async fn tombstone_list(&self) -> Result<Vec<Node>, ApiError> {
    7347            0 :         self.persistence
    7348            0 :             .list_tombstones()
    7349            0 :             .await?
    7350            0 :             .into_iter()
    7351            0 :             .map(|np| Node::from_persistent(np, false))
    7352            0 :             .collect::<Result<Vec<_>, _>>()
    7353            0 :             .map_err(ApiError::InternalServerError)
    7354            0 :     }
    7355              : 
    7356            0 :     pub(crate) async fn tombstone_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
    7357            0 :         let _node_lock = trace_exclusive_lock(
    7358            0 :             &self.node_op_locks,
    7359            0 :             node_id,
    7360            0 :             NodeOperations::DeleteTombstone,
    7361            0 :         )
    7362            0 :         .await;
    7363              : 
    7364            0 :         if matches!(self.get_node(node_id).await, Err(ApiError::NotFound(_))) {
    7365            0 :             self.persistence.delete_node(node_id).await?;
    7366            0 :             Ok(())
    7367              :         } else {
    7368            0 :             Err(ApiError::Conflict(format!(
    7369            0 :                 "Node {node_id} is in use, consider using tombstone API first"
    7370            0 :             )))
    7371              :         }
    7372            0 :     }
    7373              : 
    7374            0 :     pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
    7375            0 :         self.inner
    7376            0 :             .read()
    7377            0 :             .unwrap()
    7378            0 :             .nodes
    7379            0 :             .get(&node_id)
    7380            0 :             .cloned()
    7381            0 :             .ok_or(ApiError::NotFound(
    7382            0 :                 format!("Node {node_id} not registered").into(),
    7383            0 :             ))
    7384            0 :     }
    7385              : 
    7386            0 :     pub(crate) async fn get_node_shards(
    7387            0 :         &self,
    7388            0 :         node_id: NodeId,
    7389            0 :     ) -> Result<NodeShardResponse, ApiError> {
    7390            0 :         let locked = self.inner.read().unwrap();
    7391            0 :         let mut shards = Vec::new();
    7392            0 :         for (tid, tenant) in locked.tenants.iter() {
    7393            0 :             let is_intended_secondary = match (
    7394            0 :                 tenant.intent.get_attached() == &Some(node_id),
    7395            0 :                 tenant.intent.get_secondary().contains(&node_id),
    7396            0 :             ) {
    7397              :                 (true, true) => {
    7398            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7399            0 :                         "{} attached as primary+secondary on the same node",
    7400            0 :                         tid
    7401            0 :                     )));
    7402              :                 }
    7403            0 :                 (true, false) => Some(false),
    7404            0 :                 (false, true) => Some(true),
    7405            0 :                 (false, false) => None,
    7406              :             };
    7407            0 :             let is_observed_secondary = if let Some(ObservedStateLocation { conf: Some(conf) }) =
    7408            0 :                 tenant.observed.locations.get(&node_id)
    7409              :             {
    7410            0 :                 Some(conf.secondary_conf.is_some())
    7411              :             } else {
    7412            0 :                 None
    7413              :             };
    7414            0 :             if is_intended_secondary.is_some() || is_observed_secondary.is_some() {
    7415            0 :                 shards.push(NodeShard {
    7416            0 :                     tenant_shard_id: *tid,
    7417            0 :                     is_intended_secondary,
    7418            0 :                     is_observed_secondary,
    7419            0 :                 });
    7420            0 :             }
    7421              :         }
    7422            0 :         Ok(NodeShardResponse { node_id, shards })
    7423            0 :     }
    7424              : 
    7425            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    7426            0 :         self.persistence.get_leader().await
    7427            0 :     }
    7428              : 
    7429            0 :     pub(crate) async fn node_register(
    7430            0 :         &self,
    7431            0 :         register_req: NodeRegisterRequest,
    7432            0 :     ) -> Result<(), ApiError> {
    7433            0 :         let _node_lock = trace_exclusive_lock(
    7434            0 :             &self.node_op_locks,
    7435            0 :             register_req.node_id,
    7436            0 :             NodeOperations::Register,
    7437            0 :         )
    7438            0 :         .await;
    7439              : 
    7440              :         #[derive(PartialEq)]
    7441              :         enum RegistrationStatus {
    7442              :             UpToDate,
    7443              :             NeedUpdate,
    7444              :             Mismatched,
    7445              :             New,
    7446              :         }
    7447              : 
    7448            0 :         let registration_status = {
    7449            0 :             let locked = self.inner.read().unwrap();
    7450            0 :             if let Some(node) = locked.nodes.get(&register_req.node_id) {
    7451            0 :                 if node.registration_match(&register_req) {
    7452            0 :                     if node.need_update(&register_req) {
    7453            0 :                         RegistrationStatus::NeedUpdate
    7454              :                     } else {
    7455            0 :                         RegistrationStatus::UpToDate
    7456              :                     }
    7457              :                 } else {
    7458            0 :                     RegistrationStatus::Mismatched
    7459              :                 }
    7460              :             } else {
    7461            0 :                 RegistrationStatus::New
    7462              :             }
    7463              :         };
    7464              : 
    7465            0 :         match registration_status {
    7466              :             RegistrationStatus::UpToDate => {
    7467            0 :                 tracing::info!(
    7468            0 :                     "Node {} re-registered with matching address and is up to date",
    7469              :                     register_req.node_id
    7470              :                 );
    7471              : 
    7472            0 :                 return Ok(());
    7473              :             }
    7474              :             RegistrationStatus::Mismatched => {
    7475              :                 // TODO: decide if we want to allow modifying node addresses without removing and re-adding
    7476              :                 // the node.  Safest/simplest thing is to refuse it, and usually we deploy with
    7477              :                 // a fixed address through the lifetime of a node.
    7478            0 :                 tracing::warn!(
    7479            0 :                     "Node {} tried to register with different address",
    7480              :                     register_req.node_id
    7481              :                 );
    7482            0 :                 return Err(ApiError::Conflict(
    7483            0 :                     "Node is already registered with different address".to_string(),
    7484            0 :                 ));
    7485              :             }
    7486            0 :             RegistrationStatus::New | RegistrationStatus::NeedUpdate => {
    7487            0 :                 // fallthrough
    7488            0 :             }
    7489              :         }
    7490              : 
    7491              :         // We do not require that a node is actually online when registered (it will start life
    7492              :         // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
    7493              :         // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
    7494              :         // that register themselves with a broken DNS config.  We check only the HTTP hostname, because
    7495              :         // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
    7496            0 :         if tokio::net::lookup_host(format!(
    7497            0 :             "{}:{}",
    7498              :             register_req.listen_http_addr, register_req.listen_http_port
    7499              :         ))
    7500            0 :         .await
    7501            0 :         .is_err()
    7502              :         {
    7503              :             // If we have a transient DNS issue, it's up to the caller to retry their registration.  Because
    7504              :             // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
    7505              :             // we return a soft 503 error, to encourage callers to retry past transient issues.
    7506            0 :             return Err(ApiError::ResourceUnavailable(
    7507            0 :                 format!(
    7508            0 :                     "Node {} tried to register with unknown DNS name '{}'",
    7509            0 :                     register_req.node_id, register_req.listen_http_addr
    7510            0 :                 )
    7511            0 :                 .into(),
    7512            0 :             ));
    7513            0 :         }
    7514              : 
    7515            0 :         if self.config.use_https_pageserver_api && register_req.listen_https_port.is_none() {
    7516            0 :             return Err(ApiError::PreconditionFailed(
    7517            0 :                 format!(
    7518            0 :                     "Node {} has no https port, but use_https is enabled",
    7519            0 :                     register_req.node_id
    7520            0 :                 )
    7521            0 :                 .into(),
    7522            0 :             ));
    7523            0 :         }
    7524              : 
    7525            0 :         if register_req.listen_grpc_addr.is_some() != register_req.listen_grpc_port.is_some() {
    7526            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    7527            0 :                 "must specify both gRPC address and port"
    7528            0 :             )));
    7529            0 :         }
    7530              : 
    7531              :         // Ordering: we must persist the new node _before_ adding it to in-memory state.
    7532              :         // This ensures that before we use it for anything or expose it via any external
    7533              :         // API, it is guaranteed to be available after a restart.
    7534            0 :         let new_node = Node::new(
    7535            0 :             register_req.node_id,
    7536            0 :             register_req.listen_http_addr,
    7537            0 :             register_req.listen_http_port,
    7538            0 :             register_req.listen_https_port,
    7539            0 :             register_req.listen_pg_addr,
    7540            0 :             register_req.listen_pg_port,
    7541            0 :             register_req.listen_grpc_addr,
    7542            0 :             register_req.listen_grpc_port,
    7543            0 :             register_req.availability_zone_id.clone(),
    7544            0 :             self.config.use_https_pageserver_api,
    7545              :         );
    7546            0 :         let new_node = match new_node {
    7547            0 :             Ok(new_node) => new_node,
    7548            0 :             Err(error) => return Err(ApiError::InternalServerError(error)),
    7549              :         };
    7550              : 
    7551            0 :         match registration_status {
    7552              :             RegistrationStatus::New => {
    7553            0 :                 self.persistence.insert_node(&new_node).await.map_err(|e| {
    7554            0 :                     if matches!(
    7555            0 :                         e,
    7556              :                         crate::persistence::DatabaseError::Query(
    7557              :                             diesel::result::Error::DatabaseError(
    7558              :                                 diesel::result::DatabaseErrorKind::UniqueViolation,
    7559              :                                 _,
    7560              :                             )
    7561              :                         )
    7562              :                     ) {
    7563              :                         // A node can be deleted via the tombstone API and then no longer show up in the list of nodes.
    7564              :                         // If you see this error, check tombstones first.
    7565            0 :                         ApiError::Conflict(format!("Node {} already exists", new_node.get_id()))
    7566              :                     } else {
    7567            0 :                         ApiError::from(e)
    7568              :                     }
    7569            0 :                 })?;
    7570              :             }
    7571              :             RegistrationStatus::NeedUpdate => {
    7572            0 :                 self.persistence
    7573            0 :                     .update_node_on_registration(
    7574            0 :                         register_req.node_id,
    7575            0 :                         register_req.listen_https_port,
    7576            0 :                     )
    7577            0 :                     .await?
    7578              :             }
    7579            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    7580              :         }
    7581              : 
    7582            0 :         let mut locked = self.inner.write().unwrap();
    7583            0 :         let mut new_nodes = (*locked.nodes).clone();
    7584              : 
    7585            0 :         locked.scheduler.node_upsert(&new_node);
    7586            0 :         new_nodes.insert(register_req.node_id, new_node);
    7587              : 
    7588            0 :         locked.nodes = Arc::new(new_nodes);
    7589              : 
    7590            0 :         metrics::METRICS_REGISTRY
    7591            0 :             .metrics_group
    7592            0 :             .storage_controller_pageserver_nodes
    7593            0 :             .set(locked.nodes.len() as i64);
    7594            0 :         metrics::METRICS_REGISTRY
    7595            0 :             .metrics_group
    7596            0 :             .storage_controller_https_pageserver_nodes
    7597            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7598              : 
    7599            0 :         match registration_status {
    7600              :             RegistrationStatus::New => {
    7601            0 :                 tracing::info!(
    7602            0 :                     "Registered pageserver {} ({}), now have {} pageservers",
    7603              :                     register_req.node_id,
    7604              :                     register_req.availability_zone_id,
    7605            0 :                     locked.nodes.len()
    7606              :                 );
    7607              :             }
    7608              :             RegistrationStatus::NeedUpdate => {
    7609            0 :                 tracing::info!(
    7610            0 :                     "Re-registered and updated node {} ({})",
    7611              :                     register_req.node_id,
    7612              :                     register_req.availability_zone_id,
    7613              :                 );
    7614              :             }
    7615            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    7616              :         }
    7617            0 :         Ok(())
    7618            0 :     }
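
Registration only verifies that the advertised HTTP hostname resolves; a failure is surfaced as a retryable error rather than a hard rejection. A small sketch of that check under the same assumption, using `tokio::net::lookup_host`; the host and port values are placeholders.

    // Hedged sketch: verify a hostname resolves, treating failure as retryable.
    use std::io;

    async fn check_resolvable(host: &str, port: u16) -> io::Result<()> {
        // lookup_host yields the resolved socket addresses on success.
        let mut addrs = tokio::net::lookup_host(format!("{host}:{port}")).await?;
        match addrs.next() {
            Some(_) => Ok(()),
            None => Err(io::Error::new(io::ErrorKind::NotFound, "no addresses returned")),
        }
    }

    #[tokio::main]
    async fn main() {
        match check_resolvable("localhost", 9898).await {
            Ok(()) => println!("hostname resolves; registration can proceed"),
            // The service above maps this case to a retryable 503-style error.
            Err(e) => println!("treat as transient and ask the caller to retry: {e}"),
        }
    }
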
    7619              : 
    7620              :     /// Configure in-memory and persistent state of a node as requested
    7621              :     ///
    7622              :     /// Note that this function does not trigger any immediate side effects in response
    7623              :     /// to the changes. That part is handled by [`Self::handle_node_availability_transition`].
    7624            0 :     async fn node_state_configure(
    7625            0 :         &self,
    7626            0 :         node_id: NodeId,
    7627            0 :         availability: Option<NodeAvailability>,
    7628            0 :         scheduling: Option<NodeSchedulingPolicy>,
    7629            0 :         node_lock: &TracingExclusiveGuard<NodeOperations>,
    7630            0 :     ) -> Result<AvailabilityTransition, ApiError> {
    7631            0 :         if let Some(scheduling) = scheduling {
    7632              :             // Scheduling is a persistent part of Node: we must write updates to the database before
    7633              :             // applying them in memory
    7634            0 :             self.persistence
    7635            0 :                 .update_node_scheduling_policy(node_id, scheduling)
    7636            0 :                 .await?;
    7637            0 :         }
    7638              : 
    7639              :         // If we're activating a node, then before setting it active we must reconcile any shard locations
    7640              :         // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
    7641              :         // by calling [`Self::node_activate_reconcile`]
    7642              :         //
    7643              :         // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
    7644              :         // nothing else can mutate its availability while we run.
    7645            0 :         let availability_transition = if let Some(input_availability) = availability.as_ref() {
    7646            0 :             let (activate_node, availability_transition) = {
    7647            0 :                 let locked = self.inner.read().unwrap();
    7648            0 :                 let Some(node) = locked.nodes.get(&node_id) else {
    7649            0 :                     return Err(ApiError::NotFound(
    7650            0 :                         anyhow::anyhow!("Node {} not registered", node_id).into(),
    7651            0 :                     ));
    7652              :                 };
    7653              : 
    7654            0 :                 (
    7655            0 :                     node.clone(),
    7656            0 :                     node.get_availability_transition(input_availability),
    7657            0 :                 )
    7658              :             };
    7659              : 
    7660            0 :             if matches!(availability_transition, AvailabilityTransition::ToActive) {
    7661            0 :                 self.node_activate_reconcile(activate_node, node_lock)
    7662            0 :                     .await?;
    7663            0 :             }
    7664            0 :             availability_transition
    7665              :         } else {
    7666            0 :             AvailabilityTransition::Unchanged
    7667              :         };
    7668              : 
    7669              :         // Apply changes from the request to our in-memory state for the Node
    7670            0 :         let mut locked = self.inner.write().unwrap();
    7671            0 :         let (nodes, _tenants, scheduler) = locked.parts_mut();
    7672              : 
    7673            0 :         let mut new_nodes = (**nodes).clone();
    7674              : 
    7675            0 :         let Some(node) = new_nodes.get_mut(&node_id) else {
    7676            0 :             return Err(ApiError::NotFound(
    7677            0 :                 anyhow::anyhow!("Node not registered").into(),
    7678            0 :             ));
    7679              :         };
    7680              : 
    7681            0 :         if let Some(availability) = availability {
    7682            0 :             node.set_availability(availability);
    7683            0 :         }
    7684              : 
    7685            0 :         if let Some(scheduling) = scheduling {
    7686            0 :             node.set_scheduling(scheduling);
    7687            0 :         }
    7688              : 
    7689              :         // Update the scheduler, in case the eligibility of the node for new shards has changed
    7690            0 :         scheduler.node_upsert(node);
    7691              : 
    7692            0 :         let new_nodes = Arc::new(new_nodes);
    7693            0 :         locked.nodes = new_nodes;
    7694              : 
    7695            0 :         Ok(availability_transition)
    7696            0 :     }
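
`node_state_configure` writes the scheduling policy to the database before touching the in-memory node, so a crash between the two steps never leaves memory ahead of durable state. A toy sketch of that ordering with a stand-in store; all names here are hypothetical, not the controller's persistence layer.

    // Hedged sketch: persist the change first, then mutate memory, so a crash between
    // the steps never leaves memory referencing non-durable state.
    use std::collections::HashMap;

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum SchedulingPolicy {
        Active,
        Pause,
    }

    struct FakeStore {
        rows: HashMap<u64, SchedulingPolicy>,
    }

    impl FakeStore {
        fn update(&mut self, node: u64, policy: SchedulingPolicy) -> Result<(), String> {
            self.rows.insert(node, policy);
            Ok(())
        }
    }

    fn configure(
        store: &mut FakeStore,
        memory: &mut HashMap<u64, SchedulingPolicy>,
        node: u64,
        policy: SchedulingPolicy,
    ) -> Result<(), String> {
        // Database first: if this errors (or we crash right after), the in-memory view
        // still matches what is durable.
        store.update(node, policy)?;
        // Only then apply the change to memory.
        memory.insert(node, policy);
        Ok(())
    }

    fn main() {
        let mut store = FakeStore { rows: HashMap::new() };
        let mut memory = HashMap::from([(1u64, SchedulingPolicy::Active)]);
        configure(&mut store, &mut memory, 1, SchedulingPolicy::Pause).unwrap();
        assert_eq!(memory[&1], SchedulingPolicy::Pause);
        assert_eq!(store.rows[&1], SchedulingPolicy::Pause);
    }
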
    7697              : 
    7698              :     /// Handle availability transition of one node
    7699              :     ///
    7700              :     /// Note that you should first call [`Self::node_state_configure`] to update
    7701              :     /// the in-memory state referencing that node. If you need to handle more than one transition
    7702              :     /// consider using [`Self::handle_node_availability_transitions`].
    7703            0 :     async fn handle_node_availability_transition(
    7704            0 :         &self,
    7705            0 :         node_id: NodeId,
    7706            0 :         transition: AvailabilityTransition,
    7707            0 :         _node_lock: &TracingExclusiveGuard<NodeOperations>,
    7708            0 :     ) -> Result<(), ApiError> {
    7709              :         // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
    7710            0 :         match transition {
    7711              :             AvailabilityTransition::ToOffline => {
    7712            0 :                 tracing::info!("Node {} transition to offline", node_id);
    7713              : 
    7714            0 :                 let mut locked = self.inner.write().unwrap();
    7715            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    7716              : 
    7717            0 :                 let mut tenants_affected: usize = 0;
    7718              : 
    7719            0 :                 for (_tenant_id, mut schedule_context, shards) in
    7720            0 :                     TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal)
    7721              :                 {
    7722            0 :                     for tenant_shard in shards {
    7723            0 :                         let tenant_shard_id = tenant_shard.tenant_shard_id;
    7724            0 :                         if let Some(observed_loc) =
    7725            0 :                             tenant_shard.observed.locations.get_mut(&node_id)
    7726            0 :                         {
    7727            0 :                             // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
    7728            0 :                             // not assume our knowledge of the node's configuration is accurate until it comes back online
    7729            0 :                             observed_loc.conf = None;
    7730            0 :                         }
    7731              : 
    7732            0 :                         if nodes.len() == 1 {
    7733              :                             // Special case for single-node cluster: there is no point trying to reschedule
    7734              :                             // any tenant shards: avoid doing so, in order to avoid spewing warnings about
    7735              :                             // failures to schedule them.
    7736            0 :                             continue;
    7737            0 :                         }
    7738              : 
    7739            0 :                         if !nodes
    7740            0 :                             .values()
    7741            0 :                             .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    7742              :                         {
    7743              :                             // Special case for when all nodes are unavailable and/or unschedulable: there is no point
    7744              :                             // trying to reschedule since there's nowhere else to go. Without this
    7745              :                             // branch we incorrectly detach tenants in response to node unavailability.
    7746            0 :                             continue;
    7747            0 :                         }
    7748              : 
    7749            0 :                         if tenant_shard.intent.demote_attached(scheduler, node_id) {
    7750            0 :                             tenant_shard.sequence = tenant_shard.sequence.next();
    7751              : 
    7752            0 :                             match tenant_shard.schedule(scheduler, &mut schedule_context) {
    7753            0 :                                 Err(e) => {
    7754              :                                     // It is possible that some tenants will become unschedulable when too many pageservers
    7755              :                                     // go offline: in this case there isn't much we can do other than make the issue observable.
    7756              :                                     // TODO: give TenantShard a scheduling error attribute to be queried later.
    7757            0 :                                     tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
    7758              :                                 }
    7759              :                                 Ok(()) => {
    7760            0 :                                     if self
    7761            0 :                                         .maybe_reconcile_shard(
    7762            0 :                                             tenant_shard,
    7763            0 :                                             nodes,
    7764            0 :                                             ReconcilerPriority::Normal,
    7765            0 :                                         )
    7766            0 :                                         .is_some()
    7767            0 :                                     {
    7768            0 :                                         tenants_affected += 1;
    7769            0 :                                     };
    7770              :                                 }
    7771              :                             }
    7772            0 :                         }
    7773              :                     }
    7774              :                 }
    7775            0 :                 tracing::info!(
    7776            0 :                     "Launched {} reconciler tasks for tenants affected by node {} going offline",
    7777              :                     tenants_affected,
    7778              :                     node_id
    7779              :                 )
    7780              :             }
    7781              :             AvailabilityTransition::ToActive => {
    7782            0 :                 tracing::info!("Node {} transition to active", node_id);
    7783              : 
    7784            0 :                 let mut locked = self.inner.write().unwrap();
    7785            0 :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    7786              : 
    7787              :                 // When a node comes back online, we must reconcile any tenant that has a None observed
    7788              :                 // location on the node.
    7789            0 :                 for tenant_shard in tenants.values_mut() {
    7790              :                     // If a reconciliation is already in progress, rely on the previous scheduling
    7791              :                     // decision and skip triggering a new reconciliation.
    7792            0 :                     if tenant_shard.reconciler.is_some() {
    7793            0 :                         continue;
    7794            0 :                     }
    7795              : 
    7796            0 :                     if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
    7797            0 :                         if observed_loc.conf.is_none() {
    7798            0 :                             self.maybe_reconcile_shard(
    7799            0 :                                 tenant_shard,
    7800            0 :                                 nodes,
    7801            0 :                                 ReconcilerPriority::Normal,
    7802            0 :                             );
    7803            0 :                         }
    7804            0 :                     }
    7805              :                 }
    7806              : 
    7807              :                 // TODO: in the background, we should balance work back onto this pageserver
    7808              :             }
    7809              :             // No action required for the intermediate unavailable state.
    7810              :             // When we transition into active or offline from the unavailable state,
    7811              :             // the correct handling above will kick in.
    7812              :             AvailabilityTransition::ToWarmingUpFromActive => {
    7813            0 :                 tracing::info!("Node {} transition to unavailable from active", node_id);
    7814              :             }
    7815              :             AvailabilityTransition::ToWarmingUpFromOffline => {
    7816            0 :                 tracing::info!("Node {} transition to unavailable from offline", node_id);
    7817              :             }
    7818              :             AvailabilityTransition::Unchanged => {
    7819            0 :                 tracing::debug!("Node {} no availability change during config", node_id);
    7820              :             }
    7821              :         }
    7822              : 
    7823            0 :         Ok(())
    7824            0 :     }
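
A minimal standalone sketch (not part of service.rs) of the offline-transition rule handled above: observed state on the failed node is marked unknown, and rescheduling is skipped when there is no other schedulable node to move work to. All types and names here are hypothetical simplifications of the real Node/TenantShard structures.

use std::collections::HashMap;

struct ObservedLocation {
    conf: Option<String>, // None means "unknown until the node reports back"
}

struct Shard {
    observed: HashMap<u64, ObservedLocation>, // node id -> observed location
}

fn handle_node_offline(shards: &mut [Shard], offline_node: u64, other_schedulable: usize) -> usize {
    let mut rescheduled = 0;
    for shard in shards.iter_mut() {
        // Forget what we thought was running on the offline node.
        if let Some(loc) = shard.observed.get_mut(&offline_node) {
            loc.conf = None;
        }
        // Nowhere else to go: don't detach or reschedule, just wait for the node to return.
        if other_schedulable == 0 {
            continue;
        }
        rescheduled += 1;
    }
    rescheduled
}

fn main() {
    let mut shards = vec![Shard {
        observed: HashMap::from([(1, ObservedLocation { conf: Some("attached".into()) })]),
    }];
    assert_eq!(handle_node_offline(&mut shards, 1, 0), 0);
    assert!(shards[0].observed[&1].conf.is_none());
}
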
    7825              : 
    7826              :     /// Handle availability transition for multiple nodes
    7827              :     ///
    7828              :     /// Note that you should first call [`Self::node_state_configure`] for
    7829              :     /// all nodes handled here, so that the handling uses fresh in-memory state.
    7830            0 :     async fn handle_node_availability_transitions(
    7831            0 :         &self,
    7832            0 :         transitions: Vec<(
    7833            0 :             NodeId,
    7834            0 :             TracingExclusiveGuard<NodeOperations>,
    7835            0 :             AvailabilityTransition,
    7836            0 :         )>,
    7837            0 :     ) -> Result<(), Vec<(NodeId, ApiError)>> {
    7838            0 :         let mut errors = Vec::default();
    7839            0 :         for (node_id, node_lock, transition) in transitions {
    7840            0 :             let res = self
    7841            0 :                 .handle_node_availability_transition(node_id, transition, &node_lock)
    7842            0 :                 .await;
    7843            0 :             if let Err(err) = res {
    7844            0 :                 errors.push((node_id, err));
    7845            0 :             }
    7846              :         }
    7847              : 
    7848            0 :         if errors.is_empty() {
    7849            0 :             Ok(())
    7850              :         } else {
    7851            0 :             Err(errors)
    7852              :         }
    7853            0 :     }
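
A minimal standalone sketch (not part of service.rs) of the error-accumulation pattern above: process every item, collect per-item failures, and fail the whole call only if at least one item failed. `process` is a hypothetical stand-in for the per-node handler.

fn process(id: u64) -> Result<(), String> {
    if id % 2 == 0 { Ok(()) } else { Err(format!("node {id} failed")) }
}

fn process_all(ids: Vec<u64>) -> Result<(), Vec<(u64, String)>> {
    let mut errors = Vec::new();
    for id in ids {
        if let Err(e) = process(id) {
            errors.push((id, e));
        }
    }
    if errors.is_empty() { Ok(()) } else { Err(errors) }
}

fn main() {
    assert!(process_all(vec![2, 4]).is_ok());
    assert_eq!(process_all(vec![1, 2, 3]).unwrap_err().len(), 2);
}
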
    7854              : 
    7855            0 :     pub(crate) async fn node_configure(
    7856            0 :         &self,
    7857            0 :         node_id: NodeId,
    7858            0 :         availability: Option<NodeAvailability>,
    7859            0 :         scheduling: Option<NodeSchedulingPolicy>,
    7860            0 :     ) -> Result<(), ApiError> {
    7861            0 :         let node_lock =
    7862            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
    7863              : 
    7864            0 :         let transition = self
    7865            0 :             .node_state_configure(node_id, availability, scheduling, &node_lock)
    7866            0 :             .await?;
    7867            0 :         self.handle_node_availability_transition(node_id, transition, &node_lock)
    7868            0 :             .await
    7869            0 :     }
    7870              : 
    7871              :     /// Wrapper around [`Self::node_configure`] for the HTTP API: it only allows changes
    7872              :     /// while there is no ongoing background operation.
    7873            0 :     pub(crate) async fn external_node_configure(
    7874            0 :         &self,
    7875            0 :         node_id: NodeId,
    7876            0 :         availability: Option<NodeAvailability>,
    7877            0 :         scheduling: Option<NodeSchedulingPolicy>,
    7878            0 :     ) -> Result<(), ApiError> {
    7879              :         {
    7880            0 :             let locked = self.inner.read().unwrap();
    7881            0 :             if let Some(op) = locked.ongoing_operation.as_ref().map(|op| op.operation) {
    7882            0 :                 return Err(ApiError::PreconditionFailed(
    7883            0 :                     format!("Ongoing background operation forbids configuring: {op}").into(),
    7884            0 :                 ));
    7885            0 :             }
    7886              :         }
    7887              : 
    7888            0 :         self.node_configure(node_id, availability, scheduling).await
    7889            0 :     }
    7890              : 
    7891            0 :     pub(crate) async fn start_node_delete(
    7892            0 :         self: &Arc<Self>,
    7893            0 :         node_id: NodeId,
    7894            0 :     ) -> Result<(), ApiError> {
    7895            0 :         let (ongoing_op, node_policy, schedulable_nodes_count) = {
    7896            0 :             let locked = self.inner.read().unwrap();
    7897            0 :             let nodes = &locked.nodes;
    7898            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    7899            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    7900            0 :             ))?;
    7901            0 :             let schedulable_nodes_count = nodes
    7902            0 :                 .iter()
    7903            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    7904            0 :                 .count();
    7905              : 
    7906              :             (
    7907            0 :                 locked
    7908            0 :                     .ongoing_operation
    7909            0 :                     .as_ref()
    7910            0 :                     .map(|ongoing| ongoing.operation),
    7911            0 :                 node.get_scheduling(),
    7912            0 :                 schedulable_nodes_count,
    7913              :             )
    7914              :         };
    7915              : 
    7916            0 :         if let Some(ongoing) = ongoing_op {
    7917            0 :             return Err(ApiError::PreconditionFailed(
    7918            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    7919            0 :             ));
    7920            0 :         }
    7921              : 
    7922            0 :         if schedulable_nodes_count == 0 {
    7923            0 :             return Err(ApiError::PreconditionFailed(
    7924            0 :                 "No other schedulable nodes to move shards".into(),
    7925            0 :             ));
    7926            0 :         }
    7927              : 
    7928            0 :         match node_policy {
    7929              :             NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => {
    7930            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Deleting))
    7931            0 :                     .await?;
    7932              : 
    7933            0 :                 let cancel = self.cancel.child_token();
    7934            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    7935            0 :                 let policy_on_start = node_policy;
    7936              : 
    7937            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    7938            0 :                     operation: Operation::Delete(Delete { node_id }),
    7939            0 :                     cancel: cancel.clone(),
    7940            0 :                 });
    7941              : 
    7942            0 :                 let span = tracing::info_span!(parent: None, "delete_node", %node_id);
    7943              : 
    7944            0 :                 tokio::task::spawn(
    7945              :                     {
    7946            0 :                         let service = self.clone();
    7947            0 :                         let cancel = cancel.clone();
    7948            0 :                         async move {
    7949            0 :                             let _gate_guard = gate_guard;
    7950              : 
    7951            0 :                             scopeguard::defer! {
    7952              :                                 let prev = service.inner.write().unwrap().ongoing_operation.take();
    7953              : 
    7954              :                                 if let Some(Operation::Delete(removed_delete)) = prev.map(|h| h.operation) {
    7955              :                                     assert_eq!(removed_delete.node_id, node_id, "We always take the same operation");
    7956              :                                 } else {
    7957              :                                     panic!("We always remove the same operation")
    7958              :                                 }
    7959              :                             }
    7960              : 
    7961            0 :                             tracing::info!("Delete background operation starting");
    7962            0 :                             let res = service
    7963            0 :                                 .delete_node(node_id, policy_on_start, cancel)
    7964            0 :                                 .await;
    7965            0 :                             match res {
    7966              :                                 Ok(()) => {
    7967            0 :                                     tracing::info!(
    7968            0 :                                         "Delete background operation completed successfully"
    7969              :                                     );
    7970              :                                 }
    7971              :                                 Err(OperationError::Cancelled) => {
    7972            0 :                                     tracing::info!("Delete background operation was cancelled");
    7973              :                                 }
    7974            0 :                                 Err(err) => {
    7975            0 :                                     tracing::error!(
    7976            0 :                                         "Delete background operation encountered: {err}"
    7977              :                                     )
    7978              :                                 }
    7979              :                             }
    7980            0 :                         }
    7981              :                     }
    7982            0 :                     .instrument(span),
    7983              :                 );
    7984              :             }
    7985              :             NodeSchedulingPolicy::Deleting => {
    7986            0 :                 return Err(ApiError::Conflict(format!(
    7987            0 :                     "Node {node_id} has delete in progress"
    7988            0 :                 )));
    7989              :             }
    7990            0 :             policy => {
    7991            0 :                 return Err(ApiError::PreconditionFailed(
    7992            0 :                     format!("Node {node_id} cannot be deleted due to {policy:?} policy").into(),
    7993            0 :                 ));
    7994              :             }
    7995              :         }
    7996              : 
    7997            0 :         Ok(())
    7998            0 :     }
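
A minimal standalone sketch (not part of service.rs) of the scopeguard pattern used by the background delete/drain/fill tasks above: the "ongoing operation" marker is set before the work starts and is cleared by a scope guard when the task exits, whether it completed, was cancelled, or returned early. The `ONGOING` static is a hypothetical simplification; the `scopeguard` crate is assumed.

use std::sync::Mutex;

static ONGOING: Mutex<Option<&'static str>> = Mutex::new(None);

fn run_background_op(name: &'static str, work: impl FnOnce()) {
    *ONGOING.lock().unwrap() = Some(name);
    // Runs on every exit path out of this scope, including panics.
    scopeguard::defer! {
        let prev = ONGOING.lock().unwrap().take();
        assert_eq!(prev, Some(name), "we always clear the operation we started");
    }
    work();
}

fn main() {
    run_background_op("delete_node", || println!("deleting..."));
    assert!(ONGOING.lock().unwrap().is_none());
}
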
    7999              : 
    8000            0 :     pub(crate) async fn cancel_node_delete(
    8001            0 :         self: &Arc<Self>,
    8002            0 :         node_id: NodeId,
    8003            0 :     ) -> Result<(), ApiError> {
    8004              :         {
    8005            0 :             let locked = self.inner.read().unwrap();
    8006            0 :             let nodes = &locked.nodes;
    8007            0 :             nodes.get(&node_id).ok_or(ApiError::NotFound(
    8008            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8009            0 :             ))?;
    8010              :         }
    8011              : 
    8012            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8013            0 :             if let Operation::Delete(delete) = op_handler.operation {
    8014            0 :                 if delete.node_id == node_id {
    8015            0 :                     tracing::info!("Cancelling background delete operation for node {node_id}");
    8016            0 :                     op_handler.cancel.cancel();
    8017            0 :                     return Ok(());
    8018            0 :                 }
    8019            0 :             }
    8020            0 :         }
    8021              : 
    8022            0 :         Err(ApiError::PreconditionFailed(
    8023            0 :             format!("Node {node_id} has no delete in progress").into(),
    8024            0 :         ))
    8025            0 :     }
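
A minimal standalone sketch (not part of service.rs) of how cancellation reaches the background operation above: the handler stores a CancellationToken alongside the operation, and a cancel request simply calls `.cancel()` on it; the running task observes this via `.cancelled()`. Assumes the `tokio` and `tokio-util` crates.

use tokio_util::sync::CancellationToken;

#[tokio::main]
async fn main() {
    let cancel = CancellationToken::new();
    let task = tokio::spawn({
        let cancel = cancel.clone();
        async move {
            tokio::select! {
                _ = cancel.cancelled() => "cancelled",
                _ = tokio::time::sleep(std::time::Duration::from_secs(60)) => "completed",
            }
        }
    });

    // Equivalent of cancelling the ongoing operation: signal the running task to stop.
    cancel.cancel();
    assert_eq!(task.await.unwrap(), "cancelled");
}
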
    8026              : 
    8027            0 :     pub(crate) async fn start_node_drain(
    8028            0 :         self: &Arc<Self>,
    8029            0 :         node_id: NodeId,
    8030            0 :     ) -> Result<(), ApiError> {
    8031            0 :         let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
    8032            0 :             let locked = self.inner.read().unwrap();
    8033            0 :             let nodes = &locked.nodes;
    8034            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8035            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8036            0 :             ))?;
    8037            0 :             let schedulable_nodes_count = nodes
    8038            0 :                 .iter()
    8039            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    8040            0 :                 .count();
    8041              : 
    8042              :             (
    8043            0 :                 locked
    8044            0 :                     .ongoing_operation
    8045            0 :                     .as_ref()
    8046            0 :                     .map(|ongoing| ongoing.operation),
    8047            0 :                 node.is_available(),
    8048            0 :                 node.get_scheduling(),
    8049            0 :                 schedulable_nodes_count,
    8050              :             )
    8051              :         };
    8052              : 
    8053            0 :         if let Some(ongoing) = ongoing_op {
    8054            0 :             return Err(ApiError::PreconditionFailed(
    8055            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8056            0 :             ));
    8057            0 :         }
    8058              : 
    8059            0 :         if !node_available {
    8060            0 :             return Err(ApiError::ResourceUnavailable(
    8061            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8062            0 :             ));
    8063            0 :         }
    8064              : 
    8065            0 :         if schedulable_nodes_count == 0 {
    8066            0 :             return Err(ApiError::PreconditionFailed(
    8067            0 :                 "No other schedulable nodes to drain to".into(),
    8068            0 :             ));
    8069            0 :         }
    8070              : 
    8071            0 :         match node_policy {
    8072              :             NodeSchedulingPolicy::Active => {
    8073            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
    8074            0 :                     .await?;
    8075              : 
    8076            0 :                 let cancel = self.cancel.child_token();
    8077            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8078              : 
    8079            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8080            0 :                     operation: Operation::Drain(Drain { node_id }),
    8081            0 :                     cancel: cancel.clone(),
    8082            0 :                 });
    8083              : 
    8084            0 :                 let span = tracing::info_span!(parent: None, "drain_node", %node_id);
    8085              : 
    8086            0 :                 tokio::task::spawn({
    8087            0 :                     let service = self.clone();
    8088            0 :                     let cancel = cancel.clone();
    8089            0 :                     async move {
    8090            0 :                         let _gate_guard = gate_guard;
    8091              : 
    8092            0 :                         scopeguard::defer! {
    8093              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    8094              : 
    8095              :                             if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
    8096              :                                 assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
    8097              :                             } else {
    8098              :                                 panic!("We always remove the same operation")
    8099              :                             }
    8100              :                         }
    8101              : 
    8102            0 :                         tracing::info!("Drain background operation starting");
    8103            0 :                         let res = service.drain_node(node_id, cancel).await;
    8104            0 :                         match res {
    8105              :                             Ok(()) => {
    8106            0 :                                 tracing::info!("Drain background operation completed successfully");
    8107              :                             }
    8108              :                             Err(OperationError::Cancelled) => {
    8109            0 :                                 tracing::info!("Drain background operation was cancelled");
    8110              :                             }
    8111            0 :                             Err(err) => {
    8112            0 :                                 tracing::error!("Drain background operation encountered: {err}")
    8113              :                             }
    8114              :                         }
    8115            0 :                     }
    8116            0 :                 }.instrument(span));
    8117              :             }
    8118              :             NodeSchedulingPolicy::Draining => {
    8119            0 :                 return Err(ApiError::Conflict(format!(
    8120            0 :                     "Node {node_id} has drain in progress"
    8121            0 :                 )));
    8122              :             }
    8123            0 :             policy => {
    8124            0 :                 return Err(ApiError::PreconditionFailed(
    8125            0 :                     format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
    8126            0 :                 ));
    8127              :             }
    8128              :         }
    8129              : 
    8130            0 :         Ok(())
    8131            0 :     }
    8132              : 
    8133            0 :     pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
    8134            0 :         let node_available = {
    8135            0 :             let locked = self.inner.read().unwrap();
    8136            0 :             let nodes = &locked.nodes;
    8137            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8138            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8139            0 :             ))?;
    8140              : 
    8141            0 :             node.is_available()
    8142              :         };
    8143              : 
    8144            0 :         if !node_available {
    8145            0 :             return Err(ApiError::ResourceUnavailable(
    8146            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8147            0 :             ));
    8148            0 :         }
    8149              : 
    8150            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8151            0 :             if let Operation::Drain(drain) = op_handler.operation {
    8152            0 :                 if drain.node_id == node_id {
    8153            0 :                     tracing::info!("Cancelling background drain operation for node {node_id}");
    8154            0 :                     op_handler.cancel.cancel();
    8155            0 :                     return Ok(());
    8156            0 :                 }
    8157            0 :             }
    8158            0 :         }
    8159              : 
    8160            0 :         Err(ApiError::PreconditionFailed(
    8161            0 :             format!("Node {node_id} has no drain in progress").into(),
    8162            0 :         ))
    8163            0 :     }
    8164              : 
    8165            0 :     pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
    8166            0 :         let (ongoing_op, node_available, node_policy, total_nodes_count) = {
    8167            0 :             let locked = self.inner.read().unwrap();
    8168            0 :             let nodes = &locked.nodes;
    8169            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8170            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8171            0 :             ))?;
    8172              : 
    8173              :             (
    8174            0 :                 locked
    8175            0 :                     .ongoing_operation
    8176            0 :                     .as_ref()
    8177            0 :                     .map(|ongoing| ongoing.operation),
    8178            0 :                 node.is_available(),
    8179            0 :                 node.get_scheduling(),
    8180            0 :                 nodes.len(),
    8181              :             )
    8182              :         };
    8183              : 
    8184            0 :         if let Some(ongoing) = ongoing_op {
    8185            0 :             return Err(ApiError::PreconditionFailed(
    8186            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8187            0 :             ));
    8188            0 :         }
    8189              : 
    8190            0 :         if !node_available {
    8191            0 :             return Err(ApiError::ResourceUnavailable(
    8192            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8193            0 :             ));
    8194            0 :         }
    8195              : 
    8196            0 :         if total_nodes_count <= 1 {
    8197            0 :             return Err(ApiError::PreconditionFailed(
    8198            0 :                 "No other nodes to fill from".into(),
    8199            0 :             ));
    8200            0 :         }
    8201              : 
    8202            0 :         match node_policy {
    8203              :             NodeSchedulingPolicy::Active => {
    8204            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
    8205            0 :                     .await?;
    8206              : 
    8207            0 :                 let cancel = self.cancel.child_token();
    8208            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8209              : 
    8210            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8211            0 :                     operation: Operation::Fill(Fill { node_id }),
    8212            0 :                     cancel: cancel.clone(),
    8213            0 :                 });
    8214              : 
    8215            0 :                 let span = tracing::info_span!(parent: None, "fill_node", %node_id);
    8216              : 
    8217            0 :                 tokio::task::spawn({
    8218            0 :                     let service = self.clone();
    8219            0 :                     let cancel = cancel.clone();
    8220            0 :                     async move {
    8221            0 :                         let _gate_guard = gate_guard;
    8222              : 
    8223            0 :                         scopeguard::defer! {
    8224              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    8225              : 
    8226              :                             if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
    8227              :                                 assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
    8228              :                             } else {
    8229              :                                 panic!("We always remove the same operation")
    8230              :                             }
    8231              :                         }
    8232              : 
    8233            0 :                         tracing::info!("Fill background operation starting");
    8234            0 :                         let res = service.fill_node(node_id, cancel).await;
    8235            0 :                         match res {
    8236              :                             Ok(()) => {
    8237            0 :                                 tracing::info!("Fill background operation completed successfully");
    8238              :                             }
    8239              :                             Err(OperationError::Cancelled) => {
    8240            0 :                                 tracing::info!("Fill background operation was cancelled");
    8241              :                             }
    8242            0 :                             Err(err) => {
    8243            0 :                                 tracing::error!("Fill background operation encountered: {err}")
    8244              :                             }
    8245              :                         }
    8246            0 :                     }
    8247            0 :                 }.instrument(span));
    8248              :             }
    8249              :             NodeSchedulingPolicy::Filling => {
    8250            0 :                 return Err(ApiError::Conflict(format!(
    8251            0 :                     "Node {node_id} has fill in progress"
    8252            0 :                 )));
    8253              :             }
    8254            0 :             policy => {
    8255            0 :                 return Err(ApiError::PreconditionFailed(
    8256            0 :                     format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
    8257            0 :                 ));
    8258              :             }
    8259              :         }
    8260              : 
    8261            0 :         Ok(())
    8262            0 :     }
    8263              : 
    8264            0 :     pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
    8265            0 :         let node_available = {
    8266            0 :             let locked = self.inner.read().unwrap();
    8267            0 :             let nodes = &locked.nodes;
    8268            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8269            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8270            0 :             ))?;
    8271              : 
    8272            0 :             node.is_available()
    8273              :         };
    8274              : 
    8275            0 :         if !node_available {
    8276            0 :             return Err(ApiError::ResourceUnavailable(
    8277            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8278            0 :             ));
    8279            0 :         }
    8280              : 
    8281            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8282            0 :             if let Operation::Fill(fill) = op_handler.operation {
    8283            0 :                 if fill.node_id == node_id {
    8284            0 :                     tracing::info!("Cancelling background fill operation for node {node_id}");
    8285            0 :                     op_handler.cancel.cancel();
    8286            0 :                     return Ok(());
    8287            0 :                 }
    8288            0 :             }
    8289            0 :         }
    8290              : 
    8291            0 :         Err(ApiError::PreconditionFailed(
    8292            0 :             format!("Node {node_id} has no fill in progress").into(),
    8293            0 :         ))
    8294            0 :     }
    8295              : 
    8296              :     /// Like [`Self::maybe_configured_reconcile_shard`], but uses the default reconciler
    8297              :     /// configuration
    8298            0 :     fn maybe_reconcile_shard(
    8299            0 :         &self,
    8300            0 :         shard: &mut TenantShard,
    8301            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    8302            0 :         priority: ReconcilerPriority,
    8303            0 :     ) -> Option<ReconcilerWaiter> {
    8304            0 :         self.maybe_configured_reconcile_shard(shard, nodes, ReconcilerConfig::new(priority))
    8305            0 :     }
    8306              : 
    8307              :     /// Before constructing a Reconciler, acquire semaphore units from the appropriate concurrency limit (depends on priority)
    8308            0 :     fn get_reconciler_units(
    8309            0 :         &self,
    8310            0 :         priority: ReconcilerPriority,
    8311            0 :     ) -> Result<ReconcileUnits, TryAcquireError> {
    8312            0 :         let units = match priority {
    8313            0 :             ReconcilerPriority::Normal => self.reconciler_concurrency.clone().try_acquire_owned(),
    8314              :             ReconcilerPriority::High => {
    8315            0 :                 match self
    8316            0 :                     .priority_reconciler_concurrency
    8317            0 :                     .clone()
    8318            0 :                     .try_acquire_owned()
    8319              :                 {
    8320            0 :                     Ok(u) => Ok(u),
    8321              :                     Err(TryAcquireError::NoPermits) => {
    8322              :                         // If the high priority semaphore is exhausted, then high priority tasks may steal units from
    8323              :                         // the normal priority semaphore.
    8324            0 :                         self.reconciler_concurrency.clone().try_acquire_owned()
    8325              :                     }
    8326            0 :                     Err(e) => Err(e),
    8327              :                 }
    8328              :             }
    8329              :         };
    8330              : 
    8331            0 :         units.map(ReconcileUnits::new)
    8332            0 :     }
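
A minimal standalone sketch (not part of service.rs) of the two-pool permit acquisition above: high-priority work tries its dedicated semaphore first and, only when that pool is exhausted, falls back to stealing a permit from the normal pool. Assumes the `tokio` crate.

use std::sync::Arc;
use tokio::sync::{OwnedSemaphorePermit, Semaphore, TryAcquireError};

fn acquire(
    high_priority: bool,
    normal: &Arc<Semaphore>,
    high: &Arc<Semaphore>,
) -> Result<OwnedSemaphorePermit, TryAcquireError> {
    if high_priority {
        match high.clone().try_acquire_owned() {
            Ok(permit) => Ok(permit),
            // High-priority pool exhausted: steal from the normal pool instead.
            Err(TryAcquireError::NoPermits) => normal.clone().try_acquire_owned(),
            Err(e) => Err(e),
        }
    } else {
        normal.clone().try_acquire_owned()
    }
}

fn main() {
    let normal = Arc::new(Semaphore::new(1));
    let high = Arc::new(Semaphore::new(0));
    // The high pool is empty, so this permit is taken from the normal pool.
    let stolen = acquire(true, &normal, &high).unwrap();
    assert!(acquire(false, &normal, &high).is_err());
    drop(stolen);
    assert!(acquire(false, &normal, &high).is_ok());
}
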
    8333              : 
    8334              :     /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`].
    8335            0 :     fn maybe_configured_reconcile_shard(
    8336            0 :         &self,
    8337            0 :         shard: &mut TenantShard,
    8338            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    8339            0 :         reconciler_config: ReconcilerConfig,
    8340            0 :     ) -> Option<ReconcilerWaiter> {
    8341            0 :         let reconcile_needed = shard.get_reconcile_needed(nodes);
    8342              : 
    8343            0 :         let reconcile_reason = match reconcile_needed {
    8344            0 :             ReconcileNeeded::No => return None,
    8345            0 :             ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
    8346            0 :             ReconcileNeeded::Yes(reason) => {
    8347              :                 // Fall through to try and acquire units for spawning reconciler
    8348            0 :                 reason
    8349              :             }
    8350              :         };
    8351              : 
    8352            0 :         let units = match self.get_reconciler_units(reconciler_config.priority) {
    8353            0 :             Ok(u) => u,
    8354              :             Err(_) => {
    8355            0 :                 tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
    8356            0 :                     "Concurrency limited: enqueued for reconcile later");
    8357            0 :                 if !shard.delayed_reconcile {
    8358            0 :                     match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
    8359            0 :                         Err(TrySendError::Closed(_)) => {
    8360            0 :                             // Weird mid-shutdown case?
    8361            0 :                         }
    8362              :                         Err(TrySendError::Full(_)) => {
    8363              :                             // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
    8364            0 :                             tracing::warn!(
    8365            0 :                                 "Many shards are waiting to reconcile: delayed_reconcile queue is full"
    8366              :                             );
    8367              :                         }
    8368            0 :                         Ok(()) => {
    8369            0 :                             shard.delayed_reconcile = true;
    8370            0 :                         }
    8371              :                     }
    8372            0 :                 }
    8373              : 
    8374              :                 // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
    8375              :                 // number to advance.  When this function is eventually called again and succeeds in getting units,
    8376              :                 // it will spawn a reconciler that makes this waiter complete.
    8377            0 :                 return Some(shard.future_reconcile_waiter());
    8378              :             }
    8379              :         };
    8380              : 
    8381            0 :         let Ok(gate_guard) = self.reconcilers_gate.enter() else {
    8382              :             // Gate closed: we're shutting down, drop out.
    8383            0 :             return None;
    8384              :         };
    8385              : 
    8386            0 :         shard.spawn_reconciler(
    8387            0 :             reconcile_reason,
    8388            0 :             &self.result_tx,
    8389            0 :             nodes,
    8390            0 :             &self.compute_hook,
    8391            0 :             reconciler_config,
    8392            0 :             &self.config,
    8393            0 :             &self.persistence,
    8394            0 :             units,
    8395            0 :             gate_guard,
    8396            0 :             &self.reconcilers_cancel,
    8397            0 :             self.http_client.clone(),
    8398              :         )
    8399            0 :     }
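
A minimal standalone sketch (not part of service.rs) of the delayed-reconcile enqueueing above: when concurrency permits are exhausted the shard id is pushed onto a bounded channel with `try_send`, and a full queue is tolerated because the periodic background pass retries dirty shards anyway. Assumes the `tokio` crate.

use tokio::sync::mpsc::{self, error::TrySendError};

fn enqueue_delayed(tx: &mpsc::Sender<u64>, shard_id: u64) -> bool {
    match tx.try_send(shard_id) {
        Ok(()) => true,
        Err(TrySendError::Full(_)) => {
            // Queue is full: safe to drop, the background reconcile loop will retry later.
            false
        }
        Err(TrySendError::Closed(_)) => {
            // Receiver is gone, typically only during shutdown.
            false
        }
    }
}

fn main() {
    let (tx, mut rx) = mpsc::channel::<u64>(1);
    assert!(enqueue_delayed(&tx, 1));
    assert!(!enqueue_delayed(&tx, 2)); // capacity 1, so the second enqueue is dropped
    assert_eq!(rx.try_recv().unwrap(), 1);
}
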
    8400              : 
    8401              :     /// Check all tenants for pending reconciliation work, and reconcile those in need.
    8402              :     /// Additionally, reschedule tenants that require it.
    8403              :     ///
    8404              :     /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
    8405              :     /// spawned but some _would_ have been spawned if `reconciler_concurrency` units were
    8406              :     /// available.  A return value of 0 indicates that everything is fully reconciled already.
    8407            0 :     fn reconcile_all(&self) -> ReconcileAllResult {
    8408            0 :         let mut locked = self.inner.write().unwrap();
    8409            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    8410            0 :         let pageservers = nodes.clone();
    8411              : 
    8412              :         // This function is an efficient place to update lazy statistics, since we are walking
    8413              :         // all tenants.
    8414            0 :         let mut pending_reconciles = 0;
    8415            0 :         let mut keep_failing_reconciles = 0;
    8416            0 :         let mut az_violations = 0;
    8417              : 
    8418              :         // If we find any tenants to drop from memory, stash them to offload after
    8419              :         // we're done traversing the map of tenants.
    8420            0 :         let mut drop_detached_tenants = Vec::new();
    8421              : 
    8422            0 :         let mut spawned_reconciles = 0;
    8423            0 :         let mut has_delayed_reconciles = false;
    8424              : 
    8425            0 :         for shard in tenants.values_mut() {
    8426              :             // Accumulate scheduling statistics
    8427            0 :             if let (Some(attached), Some(preferred)) =
    8428            0 :                 (shard.intent.get_attached(), shard.preferred_az())
    8429              :             {
    8430            0 :                 let node_az = nodes
    8431            0 :                     .get(attached)
    8432            0 :                     .expect("Nodes exist if referenced")
    8433            0 :                     .get_availability_zone_id();
    8434            0 :                 if node_az != preferred {
    8435            0 :                     az_violations += 1;
    8436            0 :                 }
    8437            0 :             }
    8438              : 
    8439              :             // Skip checking if this shard is already enqueued for reconciliation
    8440            0 :             if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
    8441              :                 // If there is something delayed, then return a nonzero count so that
    8442              :                 // callers like reconcile_all_now do not incorrectly get the impression
    8443              :                 // that the system is in a quiescent state.
    8444            0 :                 has_delayed_reconciles = true;
    8445            0 :                 pending_reconciles += 1;
    8446            0 :                 continue;
    8447            0 :             }
    8448              : 
    8449              :             // Eventual consistency: if an earlier reconcile job failed, and the shard is still
    8450              :             // dirty, spawn another one
    8451            0 :             let consecutive_errors_count = shard.consecutive_errors_count;
    8452            0 :             if self
    8453            0 :                 .maybe_reconcile_shard(shard, &pageservers, ReconcilerPriority::Normal)
    8454            0 :                 .is_some()
    8455              :             {
    8456            0 :                 spawned_reconciles += 1;
    8457              : 
    8458              :                 // Count shards that are keep-failing. We still want to reconcile them
    8459              :                 // to avoid a situation where a shard is stuck.
    8460              :                 // But we don't want to consider them when deciding to run optimizations.
    8461            0 :                 if consecutive_errors_count >= MAX_CONSECUTIVE_RECONCILIATION_ERRORS {
    8462            0 :                     tracing::warn!(
    8463              :                         tenant_id=%shard.tenant_shard_id.tenant_id,
    8464            0 :                         shard_id=%shard.tenant_shard_id.shard_slug(),
    8465            0 :                         "Shard reconciliation is keep-failing: {} errors",
    8466              :                         consecutive_errors_count
    8467              :                     );
    8468            0 :                     keep_failing_reconciles += 1;
    8469            0 :                 }
    8470            0 :             } else if shard.delayed_reconcile {
    8471            0 :                 // Shard wanted to reconcile but for some reason couldn't.
    8472            0 :                 pending_reconciles += 1;
    8473            0 :             }
    8474              : 
    8475              :             // If this tenant is detached, try dropping it from memory. This is usually done
    8476              :             // proactively in [`Self::process_results`], but we do it here to handle the edge
    8477              :             // case where a reconcile completes while someone else is holding an op lock for the tenant.
    8478            0 :             if shard.tenant_shard_id.shard_number == ShardNumber(0)
    8479            0 :                 && shard.policy == PlacementPolicy::Detached
    8480              :             {
    8481            0 :                 if let Some(guard) = self.tenant_op_locks.try_exclusive(
    8482            0 :                     shard.tenant_shard_id.tenant_id,
    8483            0 :                     TenantOperations::DropDetached,
    8484            0 :                 ) {
    8485            0 :                     drop_detached_tenants.push((shard.tenant_shard_id.tenant_id, guard));
    8486            0 :                 }
    8487            0 :             }
    8488              :         }
    8489              : 
    8490              :         // Some metrics are calculated from SchedulerNode state, update these periodically
    8491            0 :         scheduler.update_metrics();
    8492              : 
    8493              :         // Process any deferred tenant drops
    8494            0 :         for (tenant_id, guard) in drop_detached_tenants {
    8495            0 :             self.maybe_drop_tenant(tenant_id, &mut locked, &guard);
    8496            0 :         }
    8497              : 
    8498            0 :         metrics::METRICS_REGISTRY
    8499            0 :             .metrics_group
    8500            0 :             .storage_controller_schedule_az_violation
    8501            0 :             .set(az_violations as i64);
    8502              : 
    8503            0 :         metrics::METRICS_REGISTRY
    8504            0 :             .metrics_group
    8505            0 :             .storage_controller_pending_reconciles
    8506            0 :             .set(pending_reconciles as i64);
    8507              : 
    8508            0 :         metrics::METRICS_REGISTRY
    8509            0 :             .metrics_group
    8510            0 :             .storage_controller_keep_failing_reconciles
    8511            0 :             .set(keep_failing_reconciles as i64);
    8512              : 
    8513            0 :         ReconcileAllResult::new(
    8514            0 :             spawned_reconciles,
    8515            0 :             keep_failing_reconciles,
    8516            0 :             has_delayed_reconciles,
    8517              :         )
    8518            0 :     }
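
A minimal standalone sketch (not part of service.rs) of the AZ-violation statistic gathered while walking all shards above: a violation is an attached location whose node sits in a different availability zone from the shard's preferred one. The types here are hypothetical simplifications.

use std::collections::HashMap;

fn count_az_violations(
    node_az: &HashMap<u64, &str>,           // node id -> availability zone
    shards: &[(Option<u64>, Option<&str>)], // (attached node, preferred AZ) per shard
) -> usize {
    shards
        .iter()
        .filter(|(attached, preferred)| match (attached, preferred) {
            (Some(node), Some(preferred)) => node_az.get(node) != Some(preferred),
            // Unattached shards or shards without a preference are never violations.
            _ => false,
        })
        .count()
}

fn main() {
    let node_az = HashMap::from([(1, "az-a"), (2, "az-b")]);
    let shards = [(Some(1), Some("az-a")), (Some(2), Some("az-a")), (None, Some("az-a"))];
    assert_eq!(count_az_violations(&node_az, &shards), 1);
}
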
    8519              : 
    8520              :     /// `optimize` in this context means identifying shards which have valid scheduled locations, but
    8521              :     /// could be scheduled somewhere better:
    8522              :     /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
    8523              :     ///    * e.g. after a node fails then recovers, to move some work back to it
    8524              :     /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
    8525              :     ///    * e.g. after a shard split, the initial attached locations will all be on the node where
    8526              :     ///      we did the split, but are probably better placed elsewhere.
    8527              :     /// - Creating new secondary locations if it improves the spreading of a sharded tenant
    8528              :     ///    * e.g. after a shard split, some locations will be on the same node (where the split
    8529              :     ///      happened), and will probably be better placed elsewhere.
    8530              :     ///
    8531              :     /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
    8532              :     /// the time of scheduling, this function looks for cases where a better-scoring location is available
    8533              :     /// according to those same soft constraints.
    8534            0 :     async fn optimize_all(&self) -> usize {
    8535              :         // Limit on how many shards' optimizations each call to this function will execute.  Combined
    8536              :         // with the frequency of background calls, this acts as an implicit rate limit that runs a small
    8537              :         // trickle of optimizations in the background, rather than executing a large number in parallel
    8538              :         // when a change occurs.
    8539              :         const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 16;
    8540              : 
    8541              :         // Synchronous prepare: scan shards for possible scheduling optimizations
    8542            0 :         let candidate_work = self.optimize_all_plan();
    8543            0 :         let candidate_work_len = candidate_work.len();
    8544              : 
    8545              :         // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply the optimizations
    8546            0 :         let validated_work = self.optimize_all_validate(candidate_work).await;
    8547              : 
    8548            0 :         let was_work_filtered = validated_work.len() != candidate_work_len;
    8549              : 
    8550              :         // Synchronous apply: update the shards' intent states according to validated optimisations
    8551            0 :         let mut reconciles_spawned = 0;
    8552            0 :         let mut optimizations_applied = 0;
    8553            0 :         let mut locked = self.inner.write().unwrap();
    8554            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    8555            0 :         for (tenant_shard_id, optimization) in validated_work {
    8556            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    8557              :                 // Shard was dropped between planning and execution;
    8558            0 :                 continue;
    8559              :             };
    8560            0 :             tracing::info!(tenant_shard_id=%tenant_shard_id, "Applying optimization: {optimization:?}");
    8561            0 :             if shard.apply_optimization(scheduler, optimization) {
    8562            0 :                 optimizations_applied += 1;
    8563            0 :                 if self
    8564            0 :                     .maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal)
    8565            0 :                     .is_some()
    8566            0 :                 {
    8567            0 :                     reconciles_spawned += 1;
    8568            0 :                 }
    8569            0 :             }
    8570              : 
    8571            0 :             if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
    8572            0 :                 break;
    8573            0 :             }
    8574              :         }
    8575              : 
    8576            0 :         if was_work_filtered {
    8577            0 :             // If we filtered any work out during validation, ensure we return a nonzero value to indicate
    8578            0 :             // to callers that the system is not in a truly quiet state, it's going to do some work as soon
    8579            0 :             // to callers that the system is not in a truly quiet state: it is going to do some work as soon
    8580            0 :             reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
    8581            0 :         }
    8582              : 
    8583            0 :         reconciles_spawned
    8584            0 :     }
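
A minimal standalone sketch (not part of service.rs) of the plan/validate/apply trickle above: plan more candidates than we intend to execute, drop the ones that fail validation, and apply at most a fixed number per pass so the periodic caller acts as an implicit rate limiter. Names and the constant value are illustrative only.

const MAX_EXEC_PER_PASS: usize = 16;

fn run_pass<T>(candidates: Vec<T>, validate: impl Fn(&T) -> bool, mut apply: impl FnMut(T)) -> usize {
    let mut applied = 0;
    for candidate in candidates.into_iter().filter(|c| validate(c)) {
        if applied >= MAX_EXEC_PER_PASS {
            break; // leave the rest for the next background pass
        }
        apply(candidate);
        applied += 1;
    }
    applied
}

fn main() {
    let applied = run_pass((0..100).collect(), |n: &i32| n % 2 == 0, |_n: i32| {});
    assert_eq!(applied, MAX_EXEC_PER_PASS);
}
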
    8585              : 
    8586            0 :     fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
    8587              :         // How many candidate optimizations we will generate, before evaluating them for readiness: setting
    8588              :         // this higher than the execution limit gives us a chance to execute some work even if the first
    8589              :         // few optimizations we find are not ready.
    8590              :         const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 64;
    8591              : 
    8592            0 :         let mut work = Vec::new();
    8593            0 :         let mut locked = self.inner.write().unwrap();
    8594            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    8595              : 
    8596              :         // We are going to plan a bunch of optimisations before applying any of them, so the
    8597              :         // utilisation stats on nodes will be effectively stale for the >1st optimisation we
    8598              :         // generate.  To avoid this causing unstable migrations/flapping, it's important that the
    8599              :         // code in TenantShard for finding optimisations uses [`NodeAttachmentSchedulingScore::disregard_utilization`]
    8600              :         // to ignore the utilisation component of the score.
    8601              : 
    8602            0 :         for (_tenant_id, schedule_context, shards) in
    8603            0 :             TenantShardExclusiveIterator::new(tenants, ScheduleMode::Speculative)
    8604              :         {
    8605            0 :             for shard in shards {
    8606            0 :                 if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
    8607            0 :                     break;
    8608            0 :                 }
    8609            0 :                 match shard.get_scheduling_policy() {
    8610            0 :                     ShardSchedulingPolicy::Active => {
    8611            0 :                         // Ok to do optimization
    8612            0 :                     }
    8613            0 :                     ShardSchedulingPolicy::Essential if shard.get_preferred_node().is_some() => {
    8614            0 :                         // Ok to do optimization: we are executing a graceful migration that
    8615            0 :                         // has set preferred_node
    8616            0 :                     }
    8617              :                     ShardSchedulingPolicy::Essential
    8618              :                     | ShardSchedulingPolicy::Pause
    8619              :                     | ShardSchedulingPolicy::Stop => {
    8620              :                         // Policy prevents optimizing this shard.
    8621            0 :                         continue;
    8622              :                     }
    8623              :                 }
    8624              : 
    8625            0 :                 if !matches!(shard.splitting, SplitState::Idle)
    8626            0 :                     || matches!(shard.policy, PlacementPolicy::Detached)
    8627            0 :                     || shard.reconciler.is_some()
    8628              :                 {
    8629              :                     // Do not start any optimizations while another change to the tenant is ongoing: this
    8630              :                     // is not necessary for correctness, but simplifies operations and implicitly throttles
    8631              :                     // optimization changes to happen in a "trickle" over time.
    8632            0 :                     continue;
    8633            0 :                 }
    8634              : 
    8635              :                 // Fast path: we may quickly identify shards that don't have any possible optimisations
    8636            0 :                 if !shard.maybe_optimizable(scheduler, &schedule_context) {
    8637            0 :                     if cfg!(feature = "testing") {
    8638              :                         // Check that maybe_optimizable doesn't disagree with the actual optimization functions.
    8639              :                         // Only do this in testing builds: it is not a correctness-critical check, so we shouldn't
    8640              :                         // panic in prod if we hit it, or spend cycles on it there.
    8641            0 :                         assert!(
    8642            0 :                             shard
    8643            0 :                                 .optimize_attachment(scheduler, &schedule_context)
    8644            0 :                                 .is_none()
    8645              :                         );
    8646            0 :                         assert!(
    8647            0 :                             shard
    8648            0 :                                 .optimize_secondary(scheduler, &schedule_context)
    8649            0 :                                 .is_none()
    8650              :                         );
    8651            0 :                     }
    8652            0 :                     continue;
    8653            0 :                 }
    8654              : 
    8655            0 :                 if let Some(optimization) =
    8656              :                     // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
    8657              :                     // its primary location based on soft constraints, cut it over.
    8658            0 :                     shard.optimize_attachment(scheduler, &schedule_context)
    8659              :                 {
    8660            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for attachment: {optimization:?}");
    8661            0 :                     work.push((shard.tenant_shard_id, optimization));
    8662            0 :                     break;
    8663            0 :                 } else if let Some(optimization) =
    8664              :                     // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
    8665              :                     // better placed on another node, based on ScheduleContext, then adjust it.  This
    8666              :                     // covers cases like after a shard split, where we might have too many shards
    8667              :                     // in the same tenant with secondary locations on the node where they originally split.
    8668            0 :                     shard.optimize_secondary(scheduler, &schedule_context)
    8669              :                 {
    8670            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for secondary: {optimization:?}");
    8671            0 :                     work.push((shard.tenant_shard_id, optimization));
    8672            0 :                     break;
    8673            0 :                 }
    8674              :             }
    8675              :         }
    8676              : 
    8677            0 :         work
    8678            0 :     }
    8679              : 
    8680            0 :     async fn optimize_all_validate(
    8681            0 :         &self,
    8682            0 :         candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
    8683            0 :     ) -> Vec<(TenantShardId, ScheduleOptimization)> {
    8684              :         // Take a clone of the node map to use outside the lock in async validation phase
    8685            0 :         let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
    8686              : 
    8687            0 :         let mut want_secondary_status = Vec::new();
    8688              : 
    8689              :         // Validate our plans: this is an async phase where we may do I/O to pageservers to
    8690              :         // check that the state of locations is acceptable to run the optimization, such as
    8691              :         // checking that a secondary location is sufficiently warmed-up to cleanly cut over
    8692              :         // in a live migration.
    8693            0 :         let mut validated_work = Vec::new();
    8694            0 :         for (tenant_shard_id, optimization) in candidate_work {
    8695            0 :             match optimization.action {
    8696              :                 ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    8697              :                     old_attached_node_id: _,
    8698            0 :                     new_attached_node_id,
    8699              :                 }) => {
    8700            0 :                     match validation_nodes.get(&new_attached_node_id) {
    8701            0 :                         None => {
    8702            0 :                             // Node was dropped between planning and validation
    8703            0 :                         }
    8704            0 :                         Some(node) => {
    8705            0 :                             if !node.is_available() {
    8706            0 :                                 tracing::info!(
    8707            0 :                                     "Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable"
    8708              :                                 );
    8709            0 :                             } else {
    8710            0 :                                 // Accumulate optimizations that require fetching secondary status, so that we can execute these
    8711            0 :                                 // remote API requests concurrently.
    8712            0 :                                 want_secondary_status.push((
    8713            0 :                                     tenant_shard_id,
    8714            0 :                                     node.clone(),
    8715            0 :                                     optimization,
    8716            0 :                                 ));
    8717            0 :                             }
    8718              :                         }
    8719              :                     }
    8720              :                 }
    8721              :                 ScheduleOptimizationAction::ReplaceSecondary(_)
    8722              :                 | ScheduleOptimizationAction::CreateSecondary(_)
    8723              :                 | ScheduleOptimizationAction::RemoveSecondary(_) => {
    8724              :                     // No extra checks needed to manage secondaries: this does not interrupt client access
    8725            0 :                     validated_work.push((tenant_shard_id, optimization))
    8726              :                 }
    8727              :             };
    8728              :         }
    8729              : 
    8730              :         // Call into pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth migration: we
    8731              :         // do this so that we avoid spawning a Reconciler that would have to wait minutes/hours for a destination to warm up: that reconciler
    8732              :         // would hold a precious reconcile semaphore unit the whole time it was waiting for the destination to warm up.
    8733            0 :         let results = self
    8734            0 :             .tenant_for_shards_api(
    8735            0 :                 want_secondary_status
    8736            0 :                     .iter()
    8737            0 :                     .map(|i| (i.0, i.1.clone()))
    8738            0 :                     .collect(),
    8739            0 :                 |tenant_shard_id, client| async move {
    8740            0 :                     client.tenant_secondary_status(tenant_shard_id).await
    8741            0 :                 },
    8742              :                 1,
    8743              :                 1,
    8744              :                 SHORT_RECONCILE_TIMEOUT,
    8745            0 :                 &self.cancel,
    8746              :             )
    8747            0 :             .await;
    8748              : 
    8749            0 :         for ((tenant_shard_id, node, optimization), secondary_status) in
    8750            0 :             want_secondary_status.into_iter().zip(results.into_iter())
    8751              :         {
    8752            0 :             match secondary_status {
    8753            0 :                 Err(e) => {
    8754            0 :                     tracing::info!(
    8755            0 :                         "Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}"
    8756              :                     );
    8757              :                 }
    8758            0 :                 Ok(progress) => {
    8759              :                     // We require secondary locations to have less than 10GiB of downloads pending before we will use
    8760              :                     // them in an optimization
    8761              :                     const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
    8762              : 
    8763            0 :                     if progress.heatmap_mtime.is_none()
    8764            0 :                         || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
    8765            0 :                             && progress.bytes_downloaded != progress.bytes_total
    8766            0 :                         || progress.bytes_total - progress.bytes_downloaded
    8767            0 :                             > DOWNLOAD_FRESHNESS_THRESHOLD
    8768              :                     {
    8769            0 :                         tracing::info!(
    8770            0 :                             "Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}"
    8771              :                         );
    8772              : 
    8773            0 :                         if progress.heatmap_mtime.is_none() {
    8774              :                             // No heatmap might mean the attached location has never uploaded one, or that
    8775              :                             // the secondary download hasn't happened yet.  This is relatively unusual in the field,
    8776              :                             // but fairly common in tests.
    8777            0 :                             self.kick_secondary_download(tenant_shard_id).await;
    8778            0 :                         }
    8779              :                     } else {
    8780              :                         // Location looks ready: proceed
    8781            0 :                         tracing::info!(
    8782            0 :                             "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
    8783              :                         );
    8784            0 :                         validated_work.push((tenant_shard_id, optimization))
    8785              :                     }
    8786              :                 }
    8787              :             }
    8788              :         }
    8789              : 
    8790            0 :         validated_work
    8791            0 :     }
    8792              : 
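    // The inline freshness rule above can be hard to read; the sketch below restates it as a
    // standalone predicate with the same 10GiB threshold. This is illustrative only: the helper
    // name is hypothetical, and the parameters mirror the fields this code reads from
    // `SecondaryProgress` (`heatmap_mtime`, `bytes_total`, `bytes_downloaded`).
    #[cfg(test)]
    fn secondary_is_warm_enough(has_heatmap: bool, bytes_total: u64, bytes_downloaded: u64) -> bool {
        const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
        if !has_heatmap {
            // Without a heatmap we cannot judge warmth, so treat the secondary as not ready.
            return false;
        }
        if bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD && bytes_downloaded != bytes_total {
            // Small secondaries must be fully downloaded before we consider cutting over.
            return false;
        }
        // Larger secondaries may cut over with at most 10GiB of downloads still outstanding.
        bytes_total - bytes_downloaded <= DOWNLOAD_FRESHNESS_THRESHOLD
    }
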
    8793              :     /// Some aspects of scheduling optimisation wait for secondary locations to be warm.  This
    8794              :     /// happens on multi-minute timescales in the field, which is fine because optimisation is meant
    8795              :     /// to be a lazy background thing. However, when testing, it is not practical to wait around, so
    8796              :     /// we have this helper to move things along faster.
    8797            0 :     async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) {
    8798            0 :         if !self.config.kick_secondary_downloads {
    8799              :             // No-op if the kick_secondary_downloads functionality is not configured
    8800            0 :             return;
    8801            0 :         }
    8802              : 
    8803            0 :         let (attached_node, secondaries) = {
    8804            0 :             let locked = self.inner.read().unwrap();
    8805            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    8806            0 :                 tracing::warn!(
    8807            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: not found"
    8808              :                 );
    8809            0 :                 return;
    8810              :             };
    8811              : 
    8812            0 :             let Some(attached) = shard.intent.get_attached() else {
    8813            0 :                 tracing::warn!(
    8814            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: no attached"
    8815              :                 );
    8816            0 :                 return;
    8817              :             };
    8818              : 
    8819            0 :             let secondaries = shard
    8820            0 :                 .intent
    8821            0 :                 .get_secondary()
    8822            0 :                 .iter()
    8823            0 :                 .map(|n| locked.nodes.get(n).unwrap().clone())
    8824            0 :                 .collect::<Vec<_>>();
    8825              : 
    8826            0 :             (locked.nodes.get(attached).unwrap().clone(), secondaries)
    8827              :         };
    8828              : 
    8829              :         // Make remote API calls to upload + download heatmaps: we ignore errors because this is just
    8830              :         // a 'kick' to let scheduling optimisation run more promptly.
    8831            0 :         match attached_node
    8832            0 :             .with_client_retries(
    8833            0 :                 |client| async move { client.tenant_heatmap_upload(tenant_shard_id).await },
    8834            0 :                 &self.http_client,
    8835            0 :                 &self.config.pageserver_jwt_token,
    8836              :                 3,
    8837              :                 10,
    8838              :                 SHORT_RECONCILE_TIMEOUT,
    8839            0 :                 &self.cancel,
    8840              :             )
    8841            0 :             .await
    8842              :         {
    8843            0 :             Some(Err(e)) => {
    8844            0 :                 tracing::info!(
    8845            0 :                     "Failed to upload heatmap from {attached_node} for {tenant_shard_id}: {e}"
    8846              :                 );
    8847              :             }
    8848              :             None => {
    8849            0 :                 tracing::info!(
    8850            0 :                     "Cancelled while uploading heatmap from {attached_node} for {tenant_shard_id}"
    8851              :                 );
    8852              :             }
    8853              :             Some(Ok(_)) => {
    8854            0 :                 tracing::info!(
    8855            0 :                     "Successfully uploaded heatmap from {attached_node} for {tenant_shard_id}"
    8856              :                 );
    8857              :             }
    8858              :         }
    8859              : 
    8860            0 :         for secondary_node in secondaries {
    8861            0 :             match secondary_node
    8862            0 :                 .with_client_retries(
    8863            0 :                     |client| async move {
    8864            0 :                         client
    8865            0 :                             .tenant_secondary_download(
    8866            0 :                                 tenant_shard_id,
    8867            0 :                                 Some(Duration::from_secs(1)),
    8868            0 :                             )
    8869            0 :                             .await
    8870            0 :                     },
    8871            0 :                     &self.http_client,
    8872            0 :                     &self.config.pageserver_jwt_token,
    8873              :                     3,
    8874              :                     10,
    8875              :                     SHORT_RECONCILE_TIMEOUT,
    8876            0 :                     &self.cancel,
    8877              :                 )
    8878            0 :                 .await
    8879              :             {
    8880            0 :                 Some(Err(e)) => {
    8881            0 :                     tracing::info!(
    8882            0 :                         "Failed to download heatmap from {secondary_node} for {tenant_shard_id}: {e}"
    8883              :                     );
    8884              :                 }
    8885              :                 None => {
    8886            0 :                     tracing::info!(
    8887            0 :                         "Cancelled while downloading heatmap from {secondary_node} for {tenant_shard_id}"
    8888              :                     );
    8889              :                 }
    8890            0 :                 Some(Ok(progress)) => {
    8891            0 :                     tracing::info!(
    8892            0 :                         "Successfully downloaded heatmap from {secondary_node} for {tenant_shard_id}: {progress:?}"
    8893              :                     );
    8894              :                 }
    8895              :             }
    8896              :         }
    8897            0 :     }
    8898              : 
    8899              :     /// Asynchronously split a tenant that's eligible for automatic splits. At most one tenant will
    8900              :     /// be split per call.
    8901              :     ///
    8902              :     /// Two sets of criteria are used: initial splits and size-based splits (in that order).
    8903              :     /// Initial splits are used to eagerly split unsharded tenants that may be performing initial
    8904              :     /// ingestion, since sharded tenants have significantly better ingestion throughput. Size-based
    8905              :     /// splits are used to bound the maximum shard size and balance out load.
    8906              :     ///
    8907              :     /// Splits are based on max_logical_size, i.e. the logical size of the largest timeline in a
    8908              :     /// tenant. We use this instead of the total logical size because branches will duplicate
    8909              :     /// logical size without actually using more storage. We could also use visible physical size,
    8910              :     /// but this might overestimate tenants that frequently churn branches.
    8911              :     ///
    8912              :     /// Initial splits (initial_split_threshold):
    8913              :     /// * Applies to tenants with 1 shard.
    8914              :     /// * The largest timeline (max_logical_size) exceeds initial_split_threshold.
    8915              :     /// * Splits into initial_split_shards.
    8916              :     ///
    8917              :     /// Size-based splits (split_threshold):
    8918              :     /// * Applies to all tenants.
    8919              :     /// * The largest timeline (max_logical_size) divided by shard count exceeds split_threshold.
    8920              :     /// * Splits such that max_logical_size / shard_count <= split_threshold, in powers of 2.
    8921              :     ///
    8922              :     /// Tenant shards are ordered by descending max_logical_size, first initial split candidates
    8923              :     /// then size-based split candidates. The first matching candidate is split.
    8924              :     ///
    8925              :     /// The shard count is clamped to max_split_shards. If a candidate is eligible for both initial
    8926              :     /// and size-based splits, the largest shard count will be used.
    8927              :     ///
    8928              :     /// An unsharded tenant will get DEFAULT_STRIPE_SIZE, regardless of what its ShardIdentity says.
    8929              :     /// A sharded tenant will retain its stripe size, as splits do not allow changing it.
    8930              :     ///
    8931              :     /// TODO: consider spawning multiple splits in parallel: this is only called once every 20
    8932              :     /// seconds, so a large backlog can take a long time, and if a tenant fails to split it will
    8933              :     /// block all other splits.
    8934            0 :     async fn autosplit_tenants(self: &Arc<Self>) {
    8935              :         // If max_split_shards is set to 0 or 1, we can't split.
    8936            0 :         let max_split_shards = self.config.max_split_shards;
    8937            0 :         if max_split_shards <= 1 {
    8938            0 :             return;
    8939            0 :         }
    8940              : 
    8941              :         // If initial_split_shards is set to 0 or 1, disable initial splits.
    8942            0 :         let mut initial_split_threshold = self.config.initial_split_threshold.unwrap_or(0);
    8943            0 :         let initial_split_shards = self.config.initial_split_shards;
    8944            0 :         if initial_split_shards <= 1 {
    8945            0 :             initial_split_threshold = 0;
    8946            0 :         }
    8947              : 
    8948              :         // If neither split_threshold nor initial_split_threshold is set, disable autosplits.
    8949            0 :         let split_threshold = self.config.split_threshold.unwrap_or(0);
    8950            0 :         if split_threshold == 0 && initial_split_threshold == 0 {
    8951            0 :             return;
    8952            0 :         }
    8953              : 
    8954              :         // Fetch split candidates in prioritized order.
    8955              :         //
    8956              :         // If initial splits are enabled, fetch eligible tenants first. We prioritize initial splits
    8957              :         // over size-based splits, since these are often performing initial ingestion and rely on
    8958              :         // splits to improve ingest throughput.
    8959            0 :         let mut candidates = Vec::new();
    8960              : 
    8961            0 :         if initial_split_threshold > 0 {
    8962              :             // Initial splits: fetch tenants with 1 shard where the logical size of the largest
    8963              :             // timeline exceeds the initial split threshold.
    8964            0 :             let initial_candidates = self
    8965            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    8966            0 :                     order_by: TenantSorting::MaxLogicalSize,
    8967            0 :                     limit: 10,
    8968            0 :                     where_shards_lt: Some(ShardCount(2)),
    8969            0 :                     where_gt: Some(initial_split_threshold),
    8970            0 :                 })
    8971            0 :                 .await;
    8972            0 :             candidates.extend(initial_candidates);
    8973            0 :         }
    8974              : 
    8975            0 :         if split_threshold > 0 {
    8976              :             // Size-based splits: fetch tenants where the logical size of the largest timeline
    8977              :             // divided by shard count exceeds the split threshold.
    8978              :             //
    8979              :             // max_logical_size is only tracked on shard 0, and contains the total logical size
    8980              :             // across all shards. We have to order and filter by MaxLogicalSizePerShard, i.e.
    8981              :             // max_logical_size / shard_count, such that we only receive tenants that are actually
    8982              :             // eligible for splits. But we still use max_logical_size for later split calculations.
    8983            0 :             let size_candidates = self
    8984            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    8985            0 :                     order_by: TenantSorting::MaxLogicalSizePerShard,
    8986            0 :                     limit: 10,
    8987            0 :                     where_shards_lt: Some(ShardCount(max_split_shards)),
    8988            0 :                     where_gt: Some(split_threshold),
    8989            0 :                 })
    8990            0 :                 .await;
    8991              :             #[cfg(feature = "testing")]
    8992            0 :             assert!(
    8993            0 :                 size_candidates.iter().all(|c| c.id.is_shard_zero()),
    8994            0 :                 "MaxLogicalSizePerShard returned non-zero shard: {size_candidates:?}",
    8995              :             );
    8996            0 :             candidates.extend(size_candidates);
    8997            0 :         }
    8998              : 
    8999              :         // Filter out tenants whose scheduling policy prohibits splits,
    9000              :         // and tenants with an ongoing import.
    9001              :         //
    9002              :         // Note that the import check here is opportunistic. An import might start
    9003              :         // after the check, before we actually update [`TenantShard::splitting`].
    9004              :         // [`Self::tenant_shard_split`] checks the database whilst holding the exclusive
    9005              :         // tenant lock. Imports might take a long time, so the check here allows us
    9006              :         // to split something else instead of trying the same shard over and over.
    9007              :         {
    9008            0 :             let state = self.inner.read().unwrap();
    9009            0 :             candidates.retain(|i| {
    9010            0 :                 let shard = state.tenants.get(&i.id);
    9011            0 :                 match shard {
    9012            0 :                     Some(t) => {
    9013            0 :                         t.get_scheduling_policy() == ShardSchedulingPolicy::Active
    9014            0 :                             && t.importing == TimelineImportState::Idle
    9015              :                     }
    9016            0 :                     None => false,
    9017              :                 }
    9018            0 :             });
    9019              :         }
    9020              : 
    9021              :         // Pick the first candidate to split. This will generally be the first one in
    9022              :         // candidates, but we defensively skip candidates that end up not actually splitting.
    9023            0 :         let Some((candidate, new_shard_count)) = candidates
    9024            0 :             .into_iter()
    9025            0 :             .filter_map(|candidate| {
    9026            0 :                 let new_shard_count = Self::compute_split_shards(ShardSplitInputs {
    9027            0 :                     shard_count: candidate.id.shard_count,
    9028            0 :                     max_logical_size: candidate.max_logical_size,
    9029            0 :                     split_threshold,
    9030            0 :                     max_split_shards,
    9031            0 :                     initial_split_threshold,
    9032            0 :                     initial_split_shards,
    9033            0 :                 });
    9034            0 :                 new_shard_count.map(|shards| (candidate, shards.count()))
    9035            0 :             })
    9036            0 :             .next()
    9037              :         else {
    9038            0 :             debug!("no split-eligible tenants found");
    9039            0 :             return;
    9040              :         };
    9041              : 
    9042              :         // Retain the stripe size of sharded tenants, as splits don't allow changing it. Otherwise,
    9043              :         // use DEFAULT_STRIPE_SIZE for unsharded tenants -- their stripe size doesn't really matter,
    9044              :         // and if we change the default stripe size we want to use the new default rather than an
    9045              :         // old, persisted stripe size.
    9046            0 :         let new_stripe_size = match candidate.id.shard_count.count() {
    9047            0 :             0 => panic!("invalid shard count 0"),
    9048            0 :             1 => Some(DEFAULT_STRIPE_SIZE),
    9049            0 :             2.. => None,
    9050              :         };
    9051              : 
    9052              :         // We spawn a task to run this, so it's exactly like some external API client requesting
    9053              :         // it.  We don't want to block the background reconcile loop on this.
    9054            0 :         let old_shard_count = candidate.id.shard_count.count();
    9055            0 :         info!(
    9056            0 :             "auto-splitting tenant {old_shard_count} → {new_shard_count} shards, \
    9057            0 :                 current size {candidate:?} (split_threshold={split_threshold} \
    9058            0 :                 initial_split_threshold={initial_split_threshold})"
    9059              :         );
    9060              : 
    9061            0 :         let this = self.clone();
    9062            0 :         tokio::spawn(
    9063            0 :             async move {
    9064            0 :                 match this
    9065            0 :                     .tenant_shard_split(
    9066            0 :                         candidate.id.tenant_id,
    9067            0 :                         TenantShardSplitRequest {
    9068            0 :                             new_shard_count,
    9069            0 :                             new_stripe_size,
    9070            0 :                         },
    9071            0 :                     )
    9072            0 :                     .await
    9073              :                 {
    9074              :                     Ok(_) => {
    9075            0 :                         info!("successful auto-split {old_shard_count} → {new_shard_count} shards")
    9076              :                     }
    9077            0 :                     Err(err) => error!("auto-split failed: {err}"),
    9078              :                 }
    9079            0 :             }
    9080            0 :             .instrument(info_span!("auto_split", tenant_id=%candidate.id.tenant_id)),
    9081              :         );
    9082            0 :     }
    9083              : 
    9084              :     /// Returns the number of shards to split a tenant into, or None if the tenant shouldn't split,
    9085              :     /// based on the total logical size of the largest timeline summed across all shards. Uses the
    9086              :     /// larger of size-based and initial splits, clamped to max_split_shards.
    9087              :     ///
    9088              :     /// NB: the thresholds are exclusive, since TopTenantShardsRequest uses where_gt.
    9089           25 :     fn compute_split_shards(inputs: ShardSplitInputs) -> Option<ShardCount> {
    9090              :         let ShardSplitInputs {
    9091           25 :             shard_count,
    9092           25 :             max_logical_size,
    9093           25 :             split_threshold,
    9094           25 :             max_split_shards,
    9095           25 :             initial_split_threshold,
    9096           25 :             initial_split_shards,
    9097           25 :         } = inputs;
    9098              : 
    9099           25 :         let mut new_shard_count: u8 = shard_count.count();
    9100              : 
    9101              :         // Size-based splits. Ensures max_logical_size / new_shard_count <= split_threshold, using
    9102              :         // power-of-two shard counts.
    9103              :         //
    9104              :         // If the current shard count is not a power of two, and does not exceed split_threshold,
    9105              :         // then we leave it alone rather than forcing a power-of-two split.
    9106           25 :         if split_threshold > 0
    9107           18 :             && max_logical_size.div_ceil(split_threshold) > shard_count.count() as u64
    9108           12 :         {
    9109           12 :             new_shard_count = max_logical_size
    9110           12 :                 .div_ceil(split_threshold)
    9111           12 :                 .checked_next_power_of_two()
    9112           12 :                 .unwrap_or(u8::MAX as u64)
    9113           12 :                 .try_into()
    9114           12 :                 .unwrap_or(u8::MAX);
    9115           13 :         }
    9116              : 
    9117              :         // Initial splits. Use the larger of size-based and initial split shard counts. This only
    9118              :         // applies to unsharded tenants, i.e. changes to initial_split_threshold or
    9119              :         // initial_split_shards are not retroactive for sharded tenants.
    9120           25 :         if initial_split_threshold > 0
    9121           14 :             && shard_count.count() <= 1
    9122           11 :             && max_logical_size > initial_split_threshold
    9123            8 :         {
    9124            8 :             new_shard_count = new_shard_count.max(initial_split_shards);
    9125           17 :         }
    9126              : 
    9127              :         // Clamp to max shards.
    9128           25 :         new_shard_count = new_shard_count.min(max_split_shards);
    9129              : 
    9130              :         // Don't split if we're not increasing the shard count.
    9131           25 :         if new_shard_count <= shard_count.count() {
    9132           10 :             return None;
    9133           15 :         }
    9134              : 
    9135           15 :         Some(ShardCount(new_shard_count))
    9136           25 :     }
    9137              : 
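    // A worked example of the rule above (a sketch: the helper below is hypothetical and assumes
    // the `ShardSplitInputs` fields shown in the destructuring, with sizes in bytes). An unsharded
    // tenant whose largest timeline is 500 GiB, with split_threshold = 64 GiB, needs
    // ceil(500 / 64) = 8 shards; 8 is already a power of two, so the size-based count is 8. The
    // initial-split count (4) also applies because the tenant is unsharded and over 100 GiB, but
    // the larger of the two wins, and max_split_shards = 16 does not clamp it: we split 1 -> 8.
    #[cfg(test)]
    fn compute_split_shards_example() {
        const GIB: u64 = 1024 * 1024 * 1024;
        let result = Self::compute_split_shards(ShardSplitInputs {
            shard_count: ShardCount(1),
            max_logical_size: 500 * GIB,
            split_threshold: 64 * GIB,
            max_split_shards: 16,
            initial_split_threshold: 100 * GIB,
            initial_split_shards: 4,
        });
        assert_eq!(result.map(|sc| sc.count()), Some(8));
    }
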
    9138              :     /// Fetches the top tenant shards from every available node, in descending order of
    9139              :     /// max logical size. Offline nodes are skipped, and any errors from available nodes
    9140              :     /// will be logged and ignored.
    9141            0 :     async fn get_top_tenant_shards(
    9142            0 :         &self,
    9143            0 :         request: &TopTenantShardsRequest,
    9144            0 :     ) -> Vec<TopTenantShardItem> {
    9145            0 :         let nodes = self
    9146            0 :             .inner
    9147            0 :             .read()
    9148            0 :             .unwrap()
    9149            0 :             .nodes
    9150            0 :             .values()
    9151            0 :             .filter(|node| node.is_available())
    9152            0 :             .cloned()
    9153            0 :             .collect_vec();
    9154              : 
    9155            0 :         let mut futures = FuturesUnordered::new();
    9156            0 :         for node in nodes {
    9157            0 :             futures.push(async move {
    9158            0 :                 node.with_client_retries(
    9159            0 :                     |client| async move { client.top_tenant_shards(request.clone()).await },
    9160            0 :                     &self.http_client,
    9161            0 :                     &self.config.pageserver_jwt_token,
    9162              :                     3,
    9163              :                     3,
    9164            0 :                     Duration::from_secs(5),
    9165            0 :                     &self.cancel,
    9166              :                 )
    9167            0 :                 .await
    9168            0 :             });
    9169              :         }
    9170              : 
    9171            0 :         let mut top = Vec::new();
    9172            0 :         while let Some(output) = futures.next().await {
    9173            0 :             match output {
    9174            0 :                 Some(Ok(response)) => top.extend(response.shards),
    9175            0 :                 Some(Err(mgmt_api::Error::Cancelled)) => {}
    9176            0 :                 Some(Err(err)) => warn!("failed to fetch top tenants: {err}"),
    9177            0 :                 None => {} // node is shutting down
    9178              :             }
    9179              :         }
    9180              : 
    9181            0 :         top.sort_by_key(|i| i.max_logical_size);
    9182            0 :         top.reverse();
    9183            0 :         top
    9184            0 :     }
    9185              : 
    9186              :     /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
    9187              :     /// also wait for any generated Reconcilers to complete.  Calling this until it returns zero should
    9188              :     /// put the system into a quiescent state where future background reconciliations won't do anything.
    9189            0 :     pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
    9190            0 :         let reconcile_all_result = self.reconcile_all();
    9191            0 :         let mut spawned_reconciles = reconcile_all_result.spawned_reconciles;
    9192            0 :         if reconcile_all_result.can_run_optimizations() {
    9193              :             // Only optimize when we are otherwise idle
    9194            0 :             let optimization_reconciles = self.optimize_all().await;
    9195            0 :             spawned_reconciles += optimization_reconciles;
    9196            0 :         }
    9197              : 
    9198            0 :         let waiters = {
    9199            0 :             let mut waiters = Vec::new();
    9200            0 :             let locked = self.inner.read().unwrap();
    9201            0 :             for (_tenant_shard_id, shard) in locked.tenants.iter() {
    9202            0 :                 if let Some(waiter) = shard.get_waiter() {
    9203            0 :                     waiters.push(waiter);
    9204            0 :                 }
    9205              :             }
    9206            0 :             waiters
    9207              :         };
    9208              : 
    9209            0 :         let waiter_count = waiters.len();
    9210            0 :         match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    9211            0 :             Ok(()) => {}
    9212            0 :             Err(e) => {
    9213            0 :                 if let ReconcileWaitError::Failed(_, reconcile_error) = &e {
    9214            0 :                     match **reconcile_error {
    9215              :                         ReconcileError::Cancel
    9216            0 :                         | ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    9217            0 :                             // Ignore reconciler cancel errors: this reconciler might have shut down
    9218            0 :                             // because some other change superseded it.  We will return a nonzero number,
    9219            0 :                             // so the caller knows they might have to call again to quiesce the system.
    9220            0 :                         }
    9221              :                         _ => {
    9222            0 :                             return Err(e);
    9223              :                         }
    9224              :                     }
    9225              :                 } else {
    9226            0 :                     return Err(e);
    9227              :                 }
    9228              :             }
    9229              :         };
    9230              : 
    9231            0 :         tracing::info!(
    9232            0 :             "{} reconciles in reconcile_all, {} waiters",
    9233              :             spawned_reconciles,
    9234              :             waiter_count
    9235              :         );
    9236              : 
    9237            0 :         Ok(std::cmp::max(waiter_count, spawned_reconciles))
    9238            0 :     }
    9239              : 
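    // A minimal sketch of how a test harness might drive the helper above until the system is
    // quiescent (the function name and iteration bound are illustrative assumptions, not part of
    // this service):
    #[cfg(test)]
    async fn quiesce_for_test(&self) -> Result<(), ReconcileWaitError> {
        // Keep reconciling until a pass reports zero spawned reconciles and waiters, with a
        // bounded number of iterations so a livelocked system fails fast instead of hanging.
        for _ in 0..32 {
            if self.reconcile_all_now().await? == 0 {
                return Ok(());
            }
        }
        panic!("system did not quiesce after 32 reconcile passes");
    }
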
    9240            0 :     async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
    9241              :         // Cancel all on-going reconciles and wait for them to exit the gate.
    9242            0 :         tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
    9243            0 :         self.reconcilers_cancel.cancel();
    9244            0 :         self.reconcilers_gate.close().await;
    9245              : 
    9246              :         // Signal the background loop in [`Service::process_results`] to exit once
    9247              :         // it has processed the results from all the reconciles we cancelled earlier.
    9248            0 :         tracing::info!("{reason}: processing results from previously in-flight reconciles");
    9249            0 :         self.result_tx.send(ReconcileResultRequest::Stop).ok();
    9250            0 :         self.result_tx.closed().await;
    9251            0 :     }
    9252              : 
    9253            0 :     pub async fn shutdown(&self) {
    9254            0 :         self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
    9255            0 :             .await;
    9256              : 
    9257              :         // Background tasks hold gate guards: this notifies them of the cancellation and
    9258              :         // waits for them all to complete.
    9259            0 :         tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
    9260            0 :         self.cancel.cancel();
    9261            0 :         self.gate.close().await;
    9262            0 :     }
    9263              : 
    9264              :     /// Spot check the download lag for a secondary location of a shard.
    9265              :     /// Should be used as a heuristic, since it's not always precise: the
    9266              :     /// secondary might not have downloaded the new heatmap yet and, hence,
    9267              :     /// is not aware of the lag.
    9268              :     ///
    9269              :     /// Returns:
    9270              :     /// * Ok(None) if the lag could not be determined from the status,
    9271              :     /// * Ok(Some(_)) if the lag could be determined,
    9272              :     /// * Err on failures to query the pageserver.
    9273            0 :     async fn secondary_lag(
    9274            0 :         &self,
    9275            0 :         secondary: &NodeId,
    9276            0 :         tenant_shard_id: TenantShardId,
    9277            0 :     ) -> Result<Option<u64>, mgmt_api::Error> {
    9278            0 :         let nodes = self.inner.read().unwrap().nodes.clone();
    9279            0 :         let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
    9280            0 :             StatusCode::NOT_FOUND,
    9281            0 :             format!("Node with id {secondary} not found"),
    9282            0 :         ))?;
    9283              : 
    9284            0 :         match node
    9285            0 :             .with_client_retries(
    9286            0 :                 |client| async move { client.tenant_secondary_status(tenant_shard_id).await },
    9287            0 :                 &self.http_client,
    9288            0 :                 &self.config.pageserver_jwt_token,
    9289              :                 1,
    9290              :                 3,
    9291            0 :                 Duration::from_millis(250),
    9292            0 :                 &self.cancel,
    9293              :             )
    9294            0 :             .await
    9295              :         {
    9296            0 :             Some(Ok(status)) => match status.heatmap_mtime {
    9297            0 :                 Some(_) => Ok(Some(status.bytes_total - status.bytes_downloaded)),
    9298            0 :                 None => Ok(None),
    9299              :             },
    9300            0 :             Some(Err(e)) => Err(e),
    9301            0 :             None => Err(mgmt_api::Error::Cancelled),
    9302              :         }
    9303            0 :     }
    9304              : 
    9305              :     /// Drain a node by moving the shards attached to it as primaries.
    9306              :     /// This is a long running operation and it should run as a separate Tokio task.
    9307            0 :     pub(crate) async fn drain_node(
    9308            0 :         self: &Arc<Self>,
    9309            0 :         node_id: NodeId,
    9310            0 :         cancel: CancellationToken,
    9311            0 :     ) -> Result<(), OperationError> {
    9312              :         const MAX_SECONDARY_LAG_BYTES_DEFAULT: u64 = 256 * 1024 * 1024;
    9313            0 :         let max_secondary_lag_bytes = self
    9314            0 :             .config
    9315            0 :             .max_secondary_lag_bytes
    9316            0 :             .unwrap_or(MAX_SECONDARY_LAG_BYTES_DEFAULT);
    9317              : 
    9318              :         // By default, live migrations are generous about the wait time for getting
    9319              :         // the secondary location up to speed. When draining, give up earlier in order
    9320              :         // to not stall the operation when a cold secondary is encountered.
    9321              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(30);
    9322              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    9323            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    9324            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    9325            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    9326            0 :             .build();
    9327              : 
    9328            0 :         let mut waiters = Vec::new();
    9329              : 
    9330            0 :         let mut tid_iter = create_shared_shard_iterator(self.clone());
    9331              : 
    9332            0 :         while !tid_iter.finished() {
    9333            0 :             if cancel.is_cancelled() {
    9334            0 :                 match self
    9335            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9336            0 :                     .await
    9337              :                 {
    9338            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9339            0 :                     Err(err) => {
    9340            0 :                         return Err(OperationError::FinalizeError(
    9341            0 :                             format!(
    9342            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9343            0 :                             )
    9344            0 :                             .into(),
    9345            0 :                         ));
    9346              :                     }
    9347              :                 }
    9348            0 :             }
    9349              : 
    9350            0 :             operation_utils::validate_node_state(
    9351            0 :                 &node_id,
    9352            0 :                 self.inner.read().unwrap().nodes.clone(),
    9353            0 :                 NodeSchedulingPolicy::Draining,
    9354            0 :             )?;
    9355              : 
    9356            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    9357            0 :                 let tid = match tid_iter.next() {
    9358            0 :                     Some(tid) => tid,
    9359              :                     None => {
    9360            0 :                         break;
    9361              :                     }
    9362              :                 };
    9363              : 
    9364            0 :                 let tid_drain = TenantShardDrain {
    9365            0 :                     drained_node: node_id,
    9366            0 :                     tenant_shard_id: tid,
    9367            0 :                 };
    9368              : 
    9369            0 :                 let dest_node_id = {
    9370            0 :                     let locked = self.inner.read().unwrap();
    9371              : 
    9372            0 :                     match tid_drain
    9373            0 :                         .tenant_shard_eligible_for_drain(&locked.tenants, &locked.scheduler)
    9374              :                     {
    9375            0 :                         Some(node_id) => node_id,
    9376              :                         None => {
    9377            0 :                             continue;
    9378              :                         }
    9379              :                     }
    9380              :                 };
    9381              : 
    9382            0 :                 match self.secondary_lag(&dest_node_id, tid).await {
    9383            0 :                     Ok(Some(lag)) if lag <= max_secondary_lag_bytes => {
    9384            0 :                         // The secondary is reasonably up to date.
    9385            0 :                         // Migrate to it
    9386            0 :                     }
    9387            0 :                     Ok(Some(lag)) => {
    9388            0 :                         tracing::info!(
    9389            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9390            0 :                             "Secondary on node {dest_node_id} is lagging by {lag}. Skipping reconcile."
    9391              :                         );
    9392            0 :                         continue;
    9393              :                     }
    9394              :                     Ok(None) => {
    9395            0 :                         tracing::info!(
    9396            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9397            0 :                             "Could not determine lag for secondary on node {dest_node_id}. Skipping reconcile."
    9398              :                         );
    9399            0 :                         continue;
    9400              :                     }
    9401            0 :                     Err(err) => {
    9402            0 :                         tracing::warn!(
    9403            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9404            0 :                             "Failed to get secondary lag from node {dest_node_id}. Skipping reconcile: {err}"
    9405              :                         );
    9406            0 :                         continue;
    9407              :                     }
    9408              :                 }
    9409              : 
    9410              :                 {
    9411            0 :                     let mut locked = self.inner.write().unwrap();
    9412            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    9413            0 :                     let rescheduled = tid_drain.reschedule_to_secondary(
    9414            0 :                         dest_node_id,
    9415            0 :                         tenants,
    9416            0 :                         scheduler,
    9417            0 :                         nodes,
    9418            0 :                     )?;
    9419              : 
    9420            0 :                     if let Some(tenant_shard) = rescheduled {
    9421            0 :                         let waiter = self.maybe_configured_reconcile_shard(
    9422            0 :                             tenant_shard,
    9423            0 :                             nodes,
    9424            0 :                             reconciler_config,
    9425            0 :                         );
    9426            0 :                         if let Some(some) = waiter {
    9427            0 :                             waiters.push(some);
    9428            0 :                         }
    9429            0 :                     }
    9430              :                 }
    9431              :             }
    9432              : 
    9433            0 :             waiters = self
    9434            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    9435            0 :                 .await;
    9436              : 
    9437            0 :             failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel);
    9438              :         }
    9439              : 
    9440            0 :         while !waiters.is_empty() {
    9441            0 :             if cancel.is_cancelled() {
    9442            0 :                 match self
    9443            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9444            0 :                     .await
    9445              :                 {
    9446            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9447            0 :                     Err(err) => {
    9448            0 :                         return Err(OperationError::FinalizeError(
    9449            0 :                             format!(
    9450            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9451            0 :                             )
    9452            0 :                             .into(),
    9453            0 :                         ));
    9454              :                     }
    9455              :                 }
    9456            0 :             }
    9457              : 
    9458            0 :             tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
    9459              : 
    9460            0 :             waiters = self
    9461            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    9462            0 :                 .await;
    9463              :         }
    9464              : 
    9465              :         // At this point we have done the best we could to drain shards from this node.
    9466              :         // Set the node scheduling policy to [`NodeSchedulingPolicy::PauseForRestart`]
    9467              :         // to complete the drain.
    9468            0 :         if let Err(err) = self
    9469            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
    9470            0 :             .await
    9471              :         {
    9472              :             // This is not fatal. Anything that is polling the node scheduling policy to detect
    9473              :             // the end of the drain operations will hang, but all such places should enforce an
    9474              :             // overall timeout. The scheduling policy will be updated upon node re-attach and/or
    9475              :             // by the counterpart fill operation.
    9476            0 :             return Err(OperationError::FinalizeError(
    9477            0 :                 format!(
    9478            0 :                     "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
    9479            0 :                 )
    9480            0 :                 .into(),
    9481            0 :             ));
    9482            0 :         }
    9483              : 
    9484            0 :         Ok(())
    9485            0 :     }
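        // Editorial summary (not part of the source file): within this section, a successful
        // drain ends by setting the node's scheduling policy to PauseForRestart, while a
        // cancelled drain resets it to Active. The counterpart fill below refuses to run unless
        // the policy is Filling and, whether it completes or is cancelled, finishes by setting
        // the policy back to Active.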
    9486              : 
    9487              :     /// Create a node fill plan (pick secondaries to promote), based on:
    9488              :     /// 1. Shards that have a secondary on this node, whose home AZ matches this node's AZ, and that are currently attached to a node
    9489              :     ///    outside their home AZ should be migrated back here.
    9490              :     /// 2. If after step 1 we have not migrated enough shards for this node to have its fair share of
    9491              :     ///    attached shards, we will promote more shards from the nodes with the most attached shards, unless
    9492              :     ///    those shards have a home AZ that doesn't match the AZ of the node we're filling.
    9493            0 :     fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
    9494            0 :         let mut locked = self.inner.write().unwrap();
    9495            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    9496              : 
    9497            0 :         let node_az = nodes
    9498            0 :             .get(&node_id)
    9499            0 :             .expect("Node must exist")
    9500            0 :             .get_availability_zone_id()
    9501            0 :             .clone();
    9502              : 
    9503              :         // The tenant shard IDs that we plan to promote from secondary to attached on this node
    9504            0 :         let mut plan = Vec::new();
    9505              : 
    9506              :         // Collect shards which do not have a preferred AZ and are eligible for moving in step 2
    9507            0 :         let mut free_tids_by_node: HashMap<NodeId, Vec<TenantShardId>> = HashMap::new();
    9508              : 
    9509              :         // Don't respect AZ preferences if there is only one AZ.  This comes up in tests, but it could
    9510              :         // conceivably come up in real life if deploying a single-AZ region intentionally.
    9511            0 :         let respect_azs = nodes
    9512            0 :             .values()
    9513            0 :             .map(|n| n.get_availability_zone_id())
    9514            0 :             .unique()
    9515            0 :             .count()
    9516              :             > 1;
    9517              : 
    9518              :         // Step 1: collect all shards that we are required to migrate back to this node because their AZ preference
    9519              :         // requires it.
    9520            0 :         for (tsid, tenant_shard) in tenants {
    9521            0 :             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    9522              :                 // Shard doesn't have a secondary on this node, ignore it.
    9523            0 :                 continue;
    9524            0 :             }
    9525              : 
    9526              :             // AZ check: when filling nodes after a restart, our intent is to move _back_ the
    9527              :             // shards which belong on this node, not to promote shards whose scheduling preference
    9528              :             // would be on their currently attached node. So we will avoid promoting shards whose
    9529              :             // home AZ doesn't match the AZ of the node we're filling.
    9530            0 :             match tenant_shard.preferred_az() {
    9531            0 :                 _ if !respect_azs => {
    9532            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9533            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    9534            0 :                     }
    9535              :                 }
    9536              :                 None => {
    9537              :                     // Shard doesn't have an AZ preference: it is eligible to be moved, but we
    9538              :                     // will only do so if our target shard count requires it.
    9539            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9540            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    9541            0 :                     }
    9542              :                 }
    9543            0 :                 Some(az) if az == &node_az => {
    9544              :                     // This shard's home AZ matches the AZ of the node we're filling: it should
    9545              :                     // be moved back to this node as part of filling, unless its currently
    9546              :                     // attached location is also in its home AZ.
    9547            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9548            0 :                         if nodes
    9549            0 :                             .get(primary)
    9550            0 :                             .expect("referenced node must exist")
    9551            0 :                             .get_availability_zone_id()
    9552            0 :                             != tenant_shard
    9553            0 :                                 .preferred_az()
    9554            0 :                                 .expect("tenant must have an AZ preference")
    9555              :                         {
    9556            0 :                             plan.push(*tsid)
    9557            0 :                         }
    9558              :                     } else {
    9559            0 :                         plan.push(*tsid)
    9560              :                     }
    9561              :                 }
    9562            0 :                 Some(_) => {
    9563            0 :                     // This shard's home AZ is somewhere other than the node we're filling,
    9564            0 :                     // This shard's home AZ is somewhere other than the AZ of the node we're filling,
    9565            0 :                     // so it may not be moved back to this node as part of filling. Ignore it.
    9566              :             }
    9567              :         }
    9568              : 
    9569              :         // Step 2: also promote any AZ-agnostic shards as required to achieve the target number of attachments
    9570            0 :         let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
    9571              : 
    9572            0 :         let expected_attached = locked.scheduler.expected_attached_shard_count();
    9573            0 :         let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
    9574              : 
    9575            0 :         let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
    9576              : 
    9577            0 :         for (node_id, attached) in nodes_by_load {
    9578            0 :             let available = locked.nodes.get(&node_id).is_some_and(|n| n.is_available());
    9579            0 :             if !available {
    9580            0 :                 continue;
    9581            0 :             }
    9582              : 
    9583            0 :             if plan.len() >= fill_requirement
    9584            0 :                 || free_tids_by_node.is_empty()
    9585            0 :                 || attached <= expected_attached
    9586              :             {
    9587            0 :                 break;
    9588            0 :             }
    9589              : 
    9590            0 :             let can_take = attached - expected_attached;
    9591            0 :             let needed = fill_requirement - plan.len();
    9592            0 :             let mut take = std::cmp::min(can_take, needed);
    9593              : 
    9594            0 :             let mut remove_node = false;
    9595            0 :             while take > 0 {
    9596            0 :                 match free_tids_by_node.get_mut(&node_id) {
    9597            0 :                     Some(tids) => match tids.pop() {
    9598            0 :                         Some(tid) => {
    9599            0 :                             let max_promote_for_tenant = std::cmp::max(
    9600            0 :                                 tid.shard_count.count() as usize / locked.nodes.len(),
    9601              :                                 1,
    9602              :                             );
    9603            0 :                             let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
    9604            0 :                             if *promoted < max_promote_for_tenant {
    9605            0 :                                 plan.push(tid);
    9606            0 :                                 *promoted += 1;
    9607            0 :                                 take -= 1;
    9608            0 :                             }
    9609              :                         }
    9610              :                         None => {
    9611            0 :                             remove_node = true;
    9612            0 :                             break;
    9613              :                         }
    9614              :                     },
    9615              :                     None => {
    9616            0 :                         break;
    9617              :                     }
    9618              :                 }
    9619              :             }
    9620              : 
    9621            0 :             if remove_node {
    9622            0 :                 free_tids_by_node.remove(&node_id);
    9623            0 :             }
    9624              :         }
    9625              : 
    9626            0 :         plan
    9627            0 :     }
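    // Editorial sketch (not part of the source file): step 2 above caps per-tenant promotions
    // onto the node being filled with
    //     max_promote_for_tenant = max(shard_count / node_count, 1)
    // so, for example, an 8-shard tenant on a 3-node cluster may contribute at most 8 / 3 = 2
    // promotions, while every tenant is always allowed at least one.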
    9628              : 
    9629              :     /// Fill a node by promoting its secondaries until the cluster is balanced
    9630              :     /// with regard to attached shard counts. Note that this operation only
    9631              :     /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
    9632              :     /// This is a long running operation and it should run as a separate Tokio task.
    9633            0 :     pub(crate) async fn fill_node(
    9634            0 :         &self,
    9635            0 :         node_id: NodeId,
    9636            0 :         cancel: CancellationToken,
    9637            0 :     ) -> Result<(), OperationError> {
    9638              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(30);
    9639              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    9640            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    9641            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    9642            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    9643            0 :             .build();
    9644              : 
    9645            0 :         let mut tids_to_promote = self.fill_node_plan(node_id);
    9646            0 :         let mut waiters = Vec::new();
    9647              : 
    9648              :         // Execute the plan we've composed above. Before applying each move from the plan,
    9649              :         // we validate to ensure that it has not gone stale in the meantime.
    9650            0 :         while !tids_to_promote.is_empty() {
    9651            0 :             if cancel.is_cancelled() {
    9652            0 :                 match self
    9653            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9654            0 :                     .await
    9655              :                 {
    9656            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9657            0 :                     Err(err) => {
    9658            0 :                         return Err(OperationError::FinalizeError(
    9659            0 :                             format!(
    9660            0 :                                 "Failed to finalise fill cancel of {node_id} by setting scheduling policy to Active: {err}"
    9661            0 :                             )
    9662            0 :                             .into(),
    9663            0 :                         ));
    9664              :                     }
    9665              :                 }
    9666            0 :             }
    9667              : 
    9668              :             {
    9669            0 :                 let mut locked = self.inner.write().unwrap();
    9670            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    9671              : 
    9672            0 :                 let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
    9673            0 :                     format!("node {node_id} was removed").into(),
    9674            0 :                 ))?;
    9675              : 
    9676            0 :                 let current_policy = node.get_scheduling();
    9677            0 :                 if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
    9678              :                     // TODO(vlad): maybe cancel pending reconciles before erroring out. Need to think
    9679              :                     // about it.
    9680            0 :                     return Err(OperationError::NodeStateChanged(
    9681            0 :                         format!("node {node_id} changed state to {current_policy:?}").into(),
    9682            0 :                     ));
    9683            0 :                 }
    9684              : 
    9685            0 :                 while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    9686            0 :                     if let Some(tid) = tids_to_promote.pop() {
    9687            0 :                         if let Some(tenant_shard) = tenants.get_mut(&tid) {
    9688              :                             // If the node being filled no longer holds a secondary for this shard,
    9689              :                             // skip the promotion.
    9690            0 :                             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    9691            0 :                                 continue;
    9692            0 :                             }
    9693              : 
    9694            0 :                             let previously_attached_to = *tenant_shard.intent.get_attached();
    9695            0 :                             match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
    9696            0 :                                 Err(e) => {
    9697            0 :                                     tracing::warn!(
    9698            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9699            0 :                                         "Scheduling error when filling pageserver {} : {e}", node_id
    9700              :                                     );
    9701              :                                 }
    9702              :                                 Ok(()) => {
    9703            0 :                                     tracing::info!(
    9704            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9705            0 :                                         "Rescheduled shard while filling node {}: {:?} -> {}",
    9706              :                                         node_id,
    9707              :                                         previously_attached_to,
    9708              :                                         node_id
    9709              :                                     );
    9710              : 
    9711            0 :                                     if let Some(waiter) = self.maybe_configured_reconcile_shard(
    9712            0 :                                         tenant_shard,
    9713            0 :                                         nodes,
    9714            0 :                                         reconciler_config,
    9715            0 :                                     ) {
    9716            0 :                                         waiters.push(waiter);
    9717            0 :                                     }
    9718              :                                 }
    9719              :                             }
    9720            0 :                         }
    9721              :                     } else {
    9722            0 :                         break;
    9723              :                     }
    9724              :                 }
    9725              :             }
    9726              : 
    9727            0 :             waiters = self
    9728            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    9729            0 :                 .await;
    9730              :         }
    9731              : 
    9732            0 :         while !waiters.is_empty() {
    9733            0 :             if cancel.is_cancelled() {
    9734            0 :                 match self
    9735            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9736            0 :                     .await
    9737              :                 {
    9738            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9739            0 :                     Err(err) => {
    9740            0 :                         return Err(OperationError::FinalizeError(
    9741            0 :                             format!(
    9742            0 :                                 "Failed to finalise fill cancel of {node_id} by setting scheduling policy to Active: {err}"
    9743            0 :                             )
    9744            0 :                             .into(),
    9745            0 :                         ));
    9746              :                     }
    9747              :                 }
    9748            0 :             }
    9749              : 
    9750            0 :             tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
    9751              : 
    9752            0 :             waiters = self
    9753            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    9754            0 :                 .await;
    9755              :         }
    9756              : 
    9757            0 :         if let Err(err) = self
    9758            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9759            0 :             .await
    9760              :         {
    9761              :             // This isn't a huge issue since the filling process starts upon request. However, it
    9762              :             // will prevent the next drain from starting. The only case in which this can fail
    9763              :             // is database unavailability. Such a case will require manual intervention.
    9764            0 :             return Err(OperationError::FinalizeError(
    9765            0 :                 format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
    9766            0 :                     .into(),
    9767            0 :             ));
    9768            0 :         }
    9769              : 
    9770            0 :         Ok(())
    9771            0 :     }
    9772              : 
    9773              :     /// Updates scrubber metadata health check results.
    9774            0 :     pub(crate) async fn metadata_health_update(
    9775            0 :         &self,
    9776            0 :         update_req: MetadataHealthUpdateRequest,
    9777            0 :     ) -> Result<(), ApiError> {
    9778            0 :         let now = chrono::offset::Utc::now();
    9779            0 :         let (healthy_records, unhealthy_records) = {
    9780            0 :             let locked = self.inner.read().unwrap();
    9781            0 :             let healthy_records = update_req
    9782            0 :                 .healthy_tenant_shards
    9783            0 :                 .into_iter()
    9784              :                 // Retain only health records associated with tenant shards managed by the storage controller.
    9785            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    9786            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
    9787            0 :                 .collect();
    9788            0 :             let unhealthy_records = update_req
    9789            0 :                 .unhealthy_tenant_shards
    9790            0 :                 .into_iter()
    9791            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    9792            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
    9793            0 :                 .collect();
    9794              : 
    9795            0 :             (healthy_records, unhealthy_records)
    9796              :         };
    9797              : 
    9798            0 :         self.persistence
    9799            0 :             .update_metadata_health_records(healthy_records, unhealthy_records, now)
    9800            0 :             .await?;
    9801            0 :         Ok(())
    9802            0 :     }
    9803              : 
    9804              :     /// Lists the tenant shards that have unhealthy metadata status.
    9805            0 :     pub(crate) async fn metadata_health_list_unhealthy(
    9806            0 :         &self,
    9807            0 :     ) -> Result<Vec<TenantShardId>, ApiError> {
    9808            0 :         let result = self
    9809            0 :             .persistence
    9810            0 :             .list_unhealthy_metadata_health_records()
    9811            0 :             .await?
    9812            0 :             .iter()
    9813            0 :             .map(|p| p.get_tenant_shard_id().unwrap())
    9814            0 :             .collect();
    9815              : 
    9816            0 :         Ok(result)
    9817            0 :     }
    9818              : 
    9819              :     /// Lists the tenant shards that have not been scrubbed for some duration.
    9820            0 :     pub(crate) async fn metadata_health_list_outdated(
    9821            0 :         &self,
    9822            0 :         not_scrubbed_for: Duration,
    9823            0 :     ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
    9824            0 :         let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
    9825            0 :         let result = self
    9826            0 :             .persistence
    9827            0 :             .list_outdated_metadata_health_records(earlier)
    9828            0 :             .await?
    9829            0 :             .into_iter()
    9830            0 :             .map(|record| record.into())
    9831            0 :             .collect();
    9832            0 :         Ok(result)
    9833            0 :     }
    9834              : 
    9835            0 :     pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
    9836            0 :         self.inner.read().unwrap().get_leadership_status()
    9837            0 :     }
    9838              : 
    9839              :     /// Handler for step down requests
    9840              :     ///
    9841              :     /// Step down runs in a separate task since, once it's called, it should
    9842              :     /// be driven to completion. Subsequent requests will wait on the same
    9843              :     /// step down task.
    9844            0 :     pub(crate) async fn step_down(self: &Arc<Self>) -> GlobalObservedState {
    9845            0 :         let handle = self.step_down_barrier.get_or_init(|| {
    9846            0 :             let step_down_self = self.clone();
    9847            0 :             let (tx, rx) = tokio::sync::watch::channel::<Option<GlobalObservedState>>(None);
    9848            0 :             tokio::spawn(async move {
    9849            0 :                 let state = step_down_self.step_down_task().await;
    9850            0 :                 tx.send(Some(state))
    9851            0 :                     .expect("Task Arc<Service> keeps receiver alive");
    9852            0 :             });
    9853              : 
    9854            0 :             rx
    9855            0 :         });
    9856              : 
    9857            0 :         handle
    9858            0 :             .clone()
    9859            0 :             .wait_for(|observed_state| observed_state.is_some())
    9860            0 :             .await
    9861            0 :             .expect("Task Arc<Service> keeps sender alive")
    9862            0 :             .deref()
    9863            0 :             .clone()
    9864            0 :             .expect("Checked above")
    9865            0 :     }
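    // Editorial note (not part of the source file): the OnceLock<watch::Receiver<_>> barrier
    // above ensures step_down_task() is spawned at most once per Service instance; every caller,
    // including the first, awaits the same watch channel and observes the same
    // GlobalObservedState.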
    9866              : 
    9867            0 :     async fn step_down_task(&self) -> GlobalObservedState {
    9868            0 :         tracing::info!("Received step down request from peer");
    9869            0 :         failpoint_support::sleep_millis_async!("sleep-on-step-down-handling");
    9870              : 
    9871            0 :         self.inner.write().unwrap().step_down();
    9872              : 
    9873            0 :         let stop_reconciliations =
    9874            0 :             self.stop_reconciliations(StopReconciliationsReason::SteppingDown);
    9875            0 :         let mut stop_reconciliations = std::pin::pin!(stop_reconciliations);
    9876              : 
    9877            0 :         let started_at = Instant::now();
    9878              : 
    9879              :         // Wait for reconciliations to stop and warn if that's taking a long time
    9880              :         loop {
    9881            0 :             tokio::select! {
    9882            0 :                 _ = &mut stop_reconciliations => {
    9883            0 :                     tracing::info!("Reconciliations stopped, proceeding with step down");
    9884            0 :                     break;
    9885              :                 }
    9886            0 :                 _ = tokio::time::sleep(Duration::from_secs(10)) => {
    9887            0 :                     tracing::warn!(
    9888            0 :                         elapsed_sec=%started_at.elapsed().as_secs(),
    9889            0 :                         "Stopping reconciliations during step down is taking too long"
    9890              :                     );
    9891              :                 }
    9892              :             }
    9893              :         }
    9894              : 
    9895            0 :         let mut global_observed = GlobalObservedState::default();
    9896            0 :         let locked = self.inner.read().unwrap();
    9897            0 :         for (tid, tenant_shard) in locked.tenants.iter() {
    9898            0 :             global_observed
    9899            0 :                 .0
    9900            0 :                 .insert(*tid, tenant_shard.observed.clone());
    9901            0 :         }
    9902              : 
    9903            0 :         global_observed
    9904            0 :     }
    9905              : 
    9906            0 :     pub(crate) async fn update_shards_preferred_azs(
    9907            0 :         &self,
    9908            0 :         req: ShardsPreferredAzsRequest,
    9909            0 :     ) -> Result<ShardsPreferredAzsResponse, ApiError> {
    9910            0 :         let preferred_azs = req.preferred_az_ids.into_iter().collect::<Vec<_>>();
    9911            0 :         let updated = self
    9912            0 :             .persistence
    9913            0 :             .set_tenant_shard_preferred_azs(preferred_azs)
    9914            0 :             .await
    9915            0 :             .map_err(|err| {
    9916            0 :                 ApiError::InternalServerError(anyhow::anyhow!(
    9917            0 :                     "Failed to persist preferred AZs: {err}"
    9918            0 :                 ))
    9919            0 :             })?;
    9920              : 
    9921            0 :         let mut updated_in_mem_and_db = Vec::default();
    9922              : 
    9923            0 :         let mut locked = self.inner.write().unwrap();
    9924            0 :         let state = locked.deref_mut();
    9925            0 :         for (tid, az_id) in updated {
    9926            0 :             let shard = state.tenants.get_mut(&tid);
    9927            0 :             if let Some(shard) = shard {
    9928            0 :                 shard.set_preferred_az(&mut state.scheduler, az_id);
    9929            0 :                 updated_in_mem_and_db.push(tid);
    9930            0 :             }
    9931              :         }
    9932              : 
    9933            0 :         Ok(ShardsPreferredAzsResponse {
    9934            0 :             updated: updated_in_mem_and_db,
    9935            0 :         })
    9936            0 :     }
    9937              : }
    9938              : 
    9939              : #[cfg(test)]
    9940              : mod tests {
    9941              :     use super::*;
    9942              : 
    9943              :     /// Tests Service::compute_split_shards. For readability, this specifies sizes in GBs rather
    9944              :     /// than bytes. Note that max_logical_size is the total logical size of the largest timeline
    9945              :     /// summed across all shards.
    9946              :     #[test]
    9947            1 :     fn compute_split_shards() {
    9948              :         // Size-based split: two shards have a 500 GB timeline, which need to split into 8 shards
    9949              :         // that are <= 64 GB,
    9950            1 :         assert_eq!(
    9951            1 :             Service::compute_split_shards(ShardSplitInputs {
    9952            1 :                 shard_count: ShardCount(2),
    9953            1 :                 max_logical_size: 500,
    9954            1 :                 split_threshold: 64,
    9955            1 :                 max_split_shards: 16,
    9956            1 :                 initial_split_threshold: 0,
    9957            1 :                 initial_split_shards: 0,
    9958            1 :             }),
    9959              :             Some(ShardCount(8))
    9960              :         );
    9961              : 
    9962              :         // Size-based split: noop at or below threshold, fires above.
    9963            1 :         assert_eq!(
    9964            1 :             Service::compute_split_shards(ShardSplitInputs {
    9965            1 :                 shard_count: ShardCount(2),
    9966            1 :                 max_logical_size: 127,
    9967            1 :                 split_threshold: 64,
    9968            1 :                 max_split_shards: 16,
    9969            1 :                 initial_split_threshold: 0,
    9970            1 :                 initial_split_shards: 0,
    9971            1 :             }),
    9972              :             None,
    9973              :         );
    9974            1 :         assert_eq!(
    9975            1 :             Service::compute_split_shards(ShardSplitInputs {
    9976            1 :                 shard_count: ShardCount(2),
    9977            1 :                 max_logical_size: 128,
    9978            1 :                 split_threshold: 64,
    9979            1 :                 max_split_shards: 16,
    9980            1 :                 initial_split_threshold: 0,
    9981            1 :                 initial_split_shards: 0,
    9982            1 :             }),
    9983              :             None,
    9984              :         );
    9985            1 :         assert_eq!(
    9986            1 :             Service::compute_split_shards(ShardSplitInputs {
    9987            1 :                 shard_count: ShardCount(2),
    9988            1 :                 max_logical_size: 129,
    9989            1 :                 split_threshold: 64,
    9990            1 :                 max_split_shards: 16,
    9991            1 :                 initial_split_threshold: 0,
    9992            1 :                 initial_split_shards: 0,
    9993            1 :             }),
    9994              :             Some(ShardCount(4)),
    9995              :         );
    9996              : 
    9997              :         // Size-based split: clamped to max_split_shards.
    9998            1 :         assert_eq!(
    9999            1 :             Service::compute_split_shards(ShardSplitInputs {
   10000            1 :                 shard_count: ShardCount(2),
   10001            1 :                 max_logical_size: 10000,
   10002            1 :                 split_threshold: 64,
   10003            1 :                 max_split_shards: 16,
   10004            1 :                 initial_split_threshold: 0,
   10005            1 :                 initial_split_shards: 0,
   10006            1 :             }),
   10007              :             Some(ShardCount(16))
   10008              :         );
   10009              : 
   10010              :         // Size-based split: tenant already at or beyond max_split_shards is not split.
   10011            1 :         assert_eq!(
   10012            1 :             Service::compute_split_shards(ShardSplitInputs {
   10013            1 :                 shard_count: ShardCount(16),
   10014            1 :                 max_logical_size: 10000,
   10015            1 :                 split_threshold: 64,
   10016            1 :                 max_split_shards: 16,
   10017            1 :                 initial_split_threshold: 0,
   10018            1 :                 initial_split_shards: 0,
   10019            1 :             }),
   10020              :             None
   10021              :         );
   10022              : 
   10023            1 :         assert_eq!(
   10024            1 :             Service::compute_split_shards(ShardSplitInputs {
   10025            1 :                 shard_count: ShardCount(32),
   10026            1 :                 max_logical_size: 10000,
   10027            1 :                 split_threshold: 64,
   10028            1 :                 max_split_shards: 16,
   10029            1 :                 initial_split_threshold: 0,
   10030            1 :                 initial_split_shards: 0,
   10031            1 :             }),
   10032              :             None
   10033              :         );
   10034              : 
   10035              :         // Size-based split: a non-power-of-2 shard count is normalized to power-of-2 if it
   10036              :         // exceeds split_threshold (i.e. a 3-shard tenant splits into 8, not 6).
   10037            1 :         assert_eq!(
   10038            1 :             Service::compute_split_shards(ShardSplitInputs {
   10039            1 :                 shard_count: ShardCount(3),
   10040            1 :                 max_logical_size: 320,
   10041            1 :                 split_threshold: 64,
   10042            1 :                 max_split_shards: 16,
   10043            1 :                 initial_split_threshold: 0,
   10044            1 :                 initial_split_shards: 0,
   10045            1 :             }),
   10046              :             Some(ShardCount(8))
   10047              :         );
   10048              : 
   10049              :         // Size-based split: a non-power-of-2 shard count is not normalized to power-of-2 if the
   10050              :         // existing shards are below or at split_threshold, but splits into 4 if it exceeds it.
   10051            1 :         assert_eq!(
   10052            1 :             Service::compute_split_shards(ShardSplitInputs {
   10053            1 :                 shard_count: ShardCount(3),
   10054            1 :                 max_logical_size: 191,
   10055            1 :                 split_threshold: 64,
   10056            1 :                 max_split_shards: 16,
   10057            1 :                 initial_split_threshold: 0,
   10058            1 :                 initial_split_shards: 0,
   10059            1 :             }),
   10060              :             None
   10061              :         );
   10062            1 :         assert_eq!(
   10063            1 :             Service::compute_split_shards(ShardSplitInputs {
   10064            1 :                 shard_count: ShardCount(3),
   10065            1 :                 max_logical_size: 192,
   10066            1 :                 split_threshold: 64,
   10067            1 :                 max_split_shards: 16,
   10068            1 :                 initial_split_threshold: 0,
   10069            1 :                 initial_split_shards: 0,
   10070            1 :             }),
   10071              :             None
   10072              :         );
   10073            1 :         assert_eq!(
   10074            1 :             Service::compute_split_shards(ShardSplitInputs {
   10075            1 :                 shard_count: ShardCount(3),
   10076            1 :                 max_logical_size: 193,
   10077            1 :                 split_threshold: 64,
   10078            1 :                 max_split_shards: 16,
   10079            1 :                 initial_split_threshold: 0,
   10080            1 :                 initial_split_shards: 0,
   10081            1 :             }),
   10082              :             Some(ShardCount(4))
   10083              :         );
   10084              : 
   10085              :         // Initial split: tenant has a 10 GB timeline, split into 4 shards.
   10086            1 :         assert_eq!(
   10087            1 :             Service::compute_split_shards(ShardSplitInputs {
   10088            1 :                 shard_count: ShardCount(1),
   10089            1 :                 max_logical_size: 10,
   10090            1 :                 split_threshold: 0,
   10091            1 :                 max_split_shards: 16,
   10092            1 :                 initial_split_threshold: 8,
   10093            1 :                 initial_split_shards: 4,
   10094            1 :             }),
   10095              :             Some(ShardCount(4))
   10096              :         );
   10097              : 
   10098              :         // Initial split: 0 ShardCount is equivalent to 1.
   10099            1 :         assert_eq!(
   10100            1 :             Service::compute_split_shards(ShardSplitInputs {
   10101            1 :                 shard_count: ShardCount(0),
   10102            1 :                 max_logical_size: 10,
   10103            1 :                 split_threshold: 0,
   10104            1 :                 max_split_shards: 16,
   10105            1 :                 initial_split_threshold: 8,
   10106            1 :                 initial_split_shards: 4,
   10107            1 :             }),
   10108              :             Some(ShardCount(4))
   10109              :         );
   10110              : 
   10111              :         // Initial split: at or below threshold is noop.
   10112            1 :         assert_eq!(
   10113            1 :             Service::compute_split_shards(ShardSplitInputs {
   10114            1 :                 shard_count: ShardCount(1),
   10115            1 :                 max_logical_size: 7,
   10116            1 :                 split_threshold: 0,
   10117            1 :                 max_split_shards: 16,
   10118            1 :                 initial_split_threshold: 8,
   10119            1 :                 initial_split_shards: 4,
   10120            1 :             }),
   10121              :             None,
   10122              :         );
   10123            1 :         assert_eq!(
   10124            1 :             Service::compute_split_shards(ShardSplitInputs {
   10125            1 :                 shard_count: ShardCount(1),
   10126            1 :                 max_logical_size: 8,
   10127            1 :                 split_threshold: 0,
   10128            1 :                 max_split_shards: 16,
   10129            1 :                 initial_split_threshold: 8,
   10130            1 :                 initial_split_shards: 4,
   10131            1 :             }),
   10132              :             None,
   10133              :         );
   10134            1 :         assert_eq!(
   10135            1 :             Service::compute_split_shards(ShardSplitInputs {
   10136            1 :                 shard_count: ShardCount(1),
   10137            1 :                 max_logical_size: 9,
   10138            1 :                 split_threshold: 0,
   10139            1 :                 max_split_shards: 16,
   10140            1 :                 initial_split_threshold: 8,
   10141            1 :                 initial_split_shards: 4,
   10142            1 :             }),
   10143              :             Some(ShardCount(4))
   10144              :         );
   10145              : 
   10146              :         // Initial split: already sharded tenant is not affected, even if above threshold and below
   10147              :         // shard count.
   10148            1 :         assert_eq!(
   10149            1 :             Service::compute_split_shards(ShardSplitInputs {
   10150            1 :                 shard_count: ShardCount(2),
   10151            1 :                 max_logical_size: 20,
   10152            1 :                 split_threshold: 0,
   10153            1 :                 max_split_shards: 16,
   10154            1 :                 initial_split_threshold: 8,
   10155            1 :                 initial_split_shards: 4,
   10156            1 :             }),
   10157              :             None,
   10158              :         );
   10159              : 
   10160              :         // Initial split: clamped to max_shards.
   10161            1 :         assert_eq!(
   10162            1 :             Service::compute_split_shards(ShardSplitInputs {
   10163            1 :                 shard_count: ShardCount(1),
   10164            1 :                 max_logical_size: 10,
   10165            1 :                 split_threshold: 0,
   10166            1 :                 max_split_shards: 3,
   10167            1 :                 initial_split_threshold: 8,
   10168            1 :                 initial_split_shards: 4,
   10169            1 :             }),
   10170              :             Some(ShardCount(3)),
   10171              :         );
   10172              : 
   10173              :         // Initial+size split: tenant eligible for both will use the larger shard count.
   10174            1 :         assert_eq!(
   10175            1 :             Service::compute_split_shards(ShardSplitInputs {
   10176            1 :                 shard_count: ShardCount(1),
   10177            1 :                 max_logical_size: 10,
   10178            1 :                 split_threshold: 64,
   10179            1 :                 max_split_shards: 16,
   10180            1 :                 initial_split_threshold: 8,
   10181            1 :                 initial_split_shards: 4,
   10182            1 :             }),
   10183              :             Some(ShardCount(4)),
   10184              :         );
   10185            1 :         assert_eq!(
   10186            1 :             Service::compute_split_shards(ShardSplitInputs {
   10187            1 :                 shard_count: ShardCount(1),
   10188            1 :                 max_logical_size: 500,
   10189            1 :                 split_threshold: 64,
   10190            1 :                 max_split_shards: 16,
   10191            1 :                 initial_split_threshold: 8,
   10192            1 :                 initial_split_shards: 4,
   10193            1 :             }),
   10194              :             Some(ShardCount(8)),
   10195              :         );
   10196              : 
   10197              :         // Initial+size split: sharded tenant is only eligible for size-based split.
   10198            1 :         assert_eq!(
   10199            1 :             Service::compute_split_shards(ShardSplitInputs {
   10200            1 :                 shard_count: ShardCount(2),
   10201            1 :                 max_logical_size: 200,
   10202            1 :                 split_threshold: 64,
   10203            1 :                 max_split_shards: 16,
   10204            1 :                 initial_split_threshold: 8,
   10205            1 :                 initial_split_shards: 8,
   10206            1 :             }),
   10207              :             Some(ShardCount(4)),
   10208              :         );
   10209              : 
   10210              :         // Initial+size split: uses the larger shard count even with initial_split_threshold above
   10211              :         // split_threshold.
   10212            1 :         assert_eq!(
   10213            1 :             Service::compute_split_shards(ShardSplitInputs {
   10214            1 :                 shard_count: ShardCount(1),
   10215            1 :                 max_logical_size: 10,
   10216            1 :                 split_threshold: 4,
   10217            1 :                 max_split_shards: 16,
   10218            1 :                 initial_split_threshold: 8,
   10219            1 :                 initial_split_shards: 8,
   10220            1 :             }),
   10221              :             Some(ShardCount(8)),
   10222              :         );
   10223              : 
   10224              :         // Test backwards compatibility with production settings when initial/size-based splits were
   10225              :         // rolled out: a single split into 8 shards at 64 GB. Any already sharded tenants with <8
   10226              :         // shards will split according to split_threshold.
   10227            1 :         assert_eq!(
   10228            1 :             Service::compute_split_shards(ShardSplitInputs {
   10229            1 :                 shard_count: ShardCount(1),
   10230            1 :                 max_logical_size: 65,
   10231            1 :                 split_threshold: 64,
   10232            1 :                 max_split_shards: 8,
   10233            1 :                 initial_split_threshold: 64,
   10234            1 :                 initial_split_shards: 8,
   10235            1 :             }),
   10236              :             Some(ShardCount(8)),
   10237              :         );
   10238              : 
   10239            1 :         assert_eq!(
   10240            1 :             Service::compute_split_shards(ShardSplitInputs {
   10241            1 :                 shard_count: ShardCount(1),
   10242            1 :                 max_logical_size: 64,
   10243            1 :                 split_threshold: 64,
   10244            1 :                 max_split_shards: 8,
   10245            1 :                 initial_split_threshold: 64,
   10246            1 :                 initial_split_shards: 8,
   10247            1 :             }),
   10248              :             None,
   10249              :         );
   10250              : 
   10251            1 :         assert_eq!(
   10252            1 :             Service::compute_split_shards(ShardSplitInputs {
   10253            1 :                 shard_count: ShardCount(2),
   10254            1 :                 max_logical_size: 129,
   10255            1 :                 split_threshold: 64,
   10256            1 :                 max_split_shards: 8,
   10257            1 :                 initial_split_threshold: 64,
   10258            1 :                 initial_split_shards: 8,
   10259            1 :             }),
   10260              :             Some(ShardCount(4)),
   10261              :         );
   10262            1 :     }
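    // Editorial note, inferred from the cases above rather than stated in the source: the
    // size-based target appears to be the smallest power-of-two shard count whose per-shard size
    // is at or below split_threshold; the initial-split target applies only to unsharded tenants
    // above initial_split_threshold; the final result is the larger of the two, clamped to
    // max_split_shards, and returned only if it exceeds the current shard count.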
   10263              : }
        

Generated by: LCOV version 2.1-beta