LCOV - code coverage report
Current view: top level - storage_controller/src - service.rs (source / functions)
Test: 98683a8629f0f7f0031d02e04512998d589d76ea.info
Test Date: 2025-04-11 16:58:57

             Coverage      Hit / Total
Lines:       5.7 %         321 / 5618
Functions:   0.4 %         2 / 489

            Line data    Source code
       1              : pub mod chaos_injector;
       2              : mod context_iterator;
       3              : pub(crate) mod safekeeper_reconciler;
       4              : mod safekeeper_service;
       5              : 
       6              : use std::borrow::Cow;
       7              : use std::cmp::Ordering;
       8              : use std::collections::{BTreeMap, HashMap, HashSet};
       9              : use std::error::Error;
      10              : use std::num::NonZeroU32;
      11              : use std::ops::{Deref, DerefMut};
      12              : use std::path::PathBuf;
      13              : use std::str::FromStr;
      14              : use std::sync::Arc;
      15              : use std::time::{Duration, Instant, SystemTime};
      16              : 
      17              : use anyhow::Context;
      18              : use context_iterator::TenantShardContextIterator;
      19              : use control_plane::storage_controller::{
      20              :     AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
      21              : };
      22              : use diesel::result::DatabaseErrorKind;
      23              : use futures::StreamExt;
      24              : use futures::stream::FuturesUnordered;
      25              : use http_utils::error::ApiError;
      26              : use hyper::Uri;
      27              : use itertools::Itertools;
      28              : use pageserver_api::controller_api::{
      29              :     AvailabilityZone, MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability,
      30              :     NodeRegisterRequest, NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy,
      31              :     ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse,
      32              :     TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse,
      33              :     TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
      34              :     TenantShardMigrateRequest, TenantShardMigrateResponse,
      35              : };
      36              : use pageserver_api::models::{
      37              :     self, DetachBehavior, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
      38              :     PageserverUtilization, SecondaryProgress, ShardParameters, TenantConfig,
      39              :     TenantConfigPatchRequest, TenantConfigRequest, TenantLocationConfigRequest,
      40              :     TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
      41              :     TenantShardSplitResponse, TenantSorting, TenantTimeTravelRequest,
      42              :     TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateResponseStorcon,
      43              :     TimelineInfo, TopTenantShardItem, TopTenantShardsRequest,
      44              : };
      45              : use pageserver_api::shard::{
      46              :     DEFAULT_STRIPE_SIZE, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      47              : };
      48              : use pageserver_api::upcall_api::{
      49              :     ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest, ValidateResponse,
      50              :     ValidateResponseTenant,
      51              : };
      52              : use pageserver_client::{BlockUnblock, mgmt_api};
      53              : use reqwest::{Certificate, StatusCode};
      54              : use safekeeper_api::models::SafekeeperUtilization;
      55              : use safekeeper_reconciler::SafekeeperReconcilers;
      56              : use tokio::sync::TryAcquireError;
      57              : use tokio::sync::mpsc::error::TrySendError;
      58              : use tokio_util::sync::CancellationToken;
      59              : use tracing::{Instrument, debug, error, info, info_span, instrument, warn};
      60              : use utils::completion::Barrier;
      61              : use utils::generation::Generation;
      62              : use utils::id::{NodeId, TenantId, TimelineId};
      63              : use utils::lsn::Lsn;
      64              : use utils::sync::gate::{Gate, GateGuard};
      65              : use utils::{failpoint_support, pausable_failpoint};
      66              : 
      67              : use crate::background_node_operations::{
      68              :     Drain, Fill, MAX_RECONCILES_PER_OPERATION, Operation, OperationError, OperationHandler,
      69              : };
      70              : use crate::compute_hook::{self, ComputeHook, NotifyError};
      71              : use crate::drain_utils::{self, TenantShardDrain, TenantShardIterator};
      72              : use crate::heartbeater::{Heartbeater, PageserverState, SafekeeperState};
      73              : use crate::id_lock_map::{
      74              :     IdLockMap, TracingExclusiveGuard, trace_exclusive_lock, trace_shared_lock,
      75              : };
      76              : use crate::leadership::Leadership;
      77              : use crate::metrics;
      78              : use crate::node::{AvailabilityTransition, Node};
      79              : use crate::pageserver_client::PageserverClient;
      80              : use crate::peer_client::GlobalObservedState;
      81              : use crate::persistence::split_state::SplitState;
      82              : use crate::persistence::{
      83              :     AbortShardSplitStatus, ControllerPersistence, DatabaseError, DatabaseResult,
      84              :     MetadataHealthPersistence, Persistence, ShardGenerationState, TenantFilter,
      85              :     TenantShardPersistence,
      86              : };
      87              : use crate::reconciler::{
      88              :     ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder, ReconcilerPriority,
      89              :     attached_location_conf,
      90              : };
      91              : use crate::safekeeper::Safekeeper;
      92              : use crate::scheduler::{
      93              :     AttachedShardTag, MaySchedule, ScheduleContext, ScheduleError, ScheduleMode, Scheduler,
      94              : };
      95              : use crate::tenant_shard::{
      96              :     IntentState, MigrateAttachment, ObservedState, ObservedStateDelta, ObservedStateLocation,
      97              :     ReconcileNeeded, ReconcileResult, ReconcileWaitError, ReconcilerStatus, ReconcilerWaiter,
      98              :     ScheduleOptimization, ScheduleOptimizationAction, TenantShard,
      99              : };
     100              : 
     101              : const WAITER_FILL_DRAIN_POLL_TIMEOUT: Duration = Duration::from_millis(500);
     102              : 
     103              : // For operations that should be quick, like attaching a new tenant
     104              : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
     105              : 
     106              : // For operations that might be slow, like migrating a tenant with
     107              : // some data in it.
     108              : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     109              : 
     110              : // If we receive a call using Secondary mode initially, it will omit generation.  We will initialize
     111              : // tenant shards into this generation, and as long as it remains in this generation, we will accept
     112              : // input generation from future requests as authoritative.
     113              : const INITIAL_GENERATION: Generation = Generation::new(0);
     114              : 
     115              : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
     116              : /// up on unresponsive pageservers and proceed.
     117              : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     118              : 
     119              : /// How long a node may be unresponsive to heartbeats before we declare it offline.
     120              : /// This must be long enough to cover node restarts as well as normal operations.
     121              : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
     122              : 
     123              : /// How long a node may be unresponsive to heartbeats during start up before we declare it
     124              : /// offline.
     125              : ///
     126              : /// This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
     127              : /// handling of the re-attach response may take a long time and blocks heartbeats from
     128              : /// being handled on the pageserver side.
     129              : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
     130              : 
     131              : /// How often to send heartbeats to registered nodes?
     132              : pub const HEARTBEAT_INTERVAL_DEFAULT: Duration = Duration::from_secs(5);
     133              : 
     134              : /// How long is too long for a reconciliation?
     135              : pub const LONG_RECONCILE_THRESHOLD_DEFAULT: Duration = Duration::from_secs(120);
     136              : 
     137              : #[derive(Clone, strum_macros::Display)]
     138              : enum TenantOperations {
     139              :     Create,
     140              :     LocationConfig,
     141              :     ConfigSet,
     142              :     ConfigPatch,
     143              :     TimeTravelRemoteStorage,
     144              :     Delete,
     145              :     UpdatePolicy,
     146              :     ShardSplit,
     147              :     SecondaryDownload,
     148              :     TimelineCreate,
     149              :     TimelineDelete,
     150              :     AttachHook,
     151              :     TimelineArchivalConfig,
     152              :     TimelineDetachAncestor,
     153              :     TimelineGcBlockUnblock,
     154              :     DropDetached,
     155              :     DownloadHeatmapLayers,
     156              :     TimelineLsnLease,
     157              : }
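                      : 
                      : // Editorial sketch (not part of the original source): with no `serialize_all`
                      : // attribute on this enum, strum's derived `Display` renders the bare variant
                      : // name, which is presumably the label recorded by the tenant operation lock
                      : // tracing below:
                      : //
                      : //     assert_eq!(TenantOperations::ShardSplit.to_string(), "ShardSplit");
                      : //     assert_eq!(TenantOperations::TimelineCreate.to_string(), "TimelineCreate");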
     158              : 
     159              : #[derive(Clone, strum_macros::Display)]
     160              : enum NodeOperations {
     161              :     Register,
     162              :     Configure,
     163              :     Delete,
     164              : }
     165              : 
     166              : /// The leadership status for the storage controller process.
     167              : /// Allowed transitions are:
     168              : /// 1. Leader -> SteppedDown
     169              : /// 2. Candidate -> Leader
     170              : #[derive(
     171              :     Eq,
     172              :     PartialEq,
     173              :     Copy,
     174              :     Clone,
     175              :     strum_macros::Display,
     176            0 :     strum_macros::EnumIter,
     177              :     measured::FixedCardinalityLabel,
     178              : )]
     179              : #[strum(serialize_all = "snake_case")]
     180              : pub(crate) enum LeadershipStatus {
     181              :     /// This is the steady state where the storage controller can produce
     182              :     /// side effects in the cluster.
     183              :     Leader,
     184              :     /// We've been notified to step down by another candidate. No reconciliations
     185              :     /// take place in this state.
     186              :     SteppedDown,
     187              :     /// Initial state for a new storage controller instance. Will attempt to assume leadership.
     188              :     #[allow(unused)]
     189              :     Candidate,
     190              : }
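                      : 
                      : // Editorial sketch (hypothetical helper, not in the original source): the two
                      : // allowed transitions documented above can be encoded as a pure predicate.
                      : //
                      : //     fn is_allowed_transition(from: LeadershipStatus, to: LeadershipStatus) -> bool {
                      : //         use LeadershipStatus::*;
                      : //         matches!((from, to), (Leader, SteppedDown) | (Candidate, Leader))
                      : //     }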
     191              : 
     192              : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
     193              : pub const PRIORITY_RECONCILER_CONCURRENCY_DEFAULT: usize = 256;
     194              : 
     195              : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
     196              : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
     197              : // than they're being pushed onto the queue.
     198              : const MAX_DELAYED_RECONCILES: usize = 10000;
     199              : 
     200              : // Top level state available to all HTTP handlers
     201              : struct ServiceState {
     202              :     leadership_status: LeadershipStatus,
     203              : 
     204              :     tenants: BTreeMap<TenantShardId, TenantShard>,
     205              : 
     206              :     nodes: Arc<HashMap<NodeId, Node>>,
     207              : 
     208              :     safekeepers: Arc<HashMap<NodeId, Safekeeper>>,
     209              : 
     210              :     safekeeper_reconcilers: SafekeeperReconcilers,
     211              : 
     212              :     scheduler: Scheduler,
     213              : 
     214              :     /// Ongoing background operation on the cluster if any is running.
     215              :     /// Note that only one such operation may run at any given time,
     216              :     /// hence the type choice.
     217              :     ongoing_operation: Option<OperationHandler>,
     218              : 
     219              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     220              :     delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     221              : }
     222              : 
     223              : /// Transform an error from a pageserver into an error to return to callers of a storage
     224              : /// controller API.
     225            0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
     226            0 :     match e {
     227            0 :         mgmt_api::Error::SendRequest(e) => {
     228            0 :             // Presume errors sending requests are connectivity/availability issues
     229            0 :             ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
     230              :         }
     231            0 :         mgmt_api::Error::ReceiveErrorBody(str) => {
     232            0 :             // Presume errors receiving body are connectivity/availability issues
     233            0 :             ApiError::ResourceUnavailable(
     234            0 :                 format!("{node} error receiving error body: {str}").into(),
     235            0 :             )
     236              :         }
     237            0 :         mgmt_api::Error::ReceiveBody(err) if err.is_decode() => {
     238            0 :             // Return 500 for decoding errors.
     239            0 :             ApiError::InternalServerError(anyhow::Error::from(err).context("error decoding body"))
     240              :         }
     241            0 :         mgmt_api::Error::ReceiveBody(err) => {
     242            0 :             // Presume errors receiving body are connectivity/availability issues except for decoding errors
     243            0 :             let src_str = err.source().map(|e| e.to_string()).unwrap_or_default();
     244            0 :             ApiError::ResourceUnavailable(
     245            0 :                 format!("{node} error receiving error body: {err} {}", src_str).into(),
     246            0 :             )
     247              :         }
     248            0 :         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
     249            0 :             ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
     250              :         }
     251            0 :         mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
     252            0 :             ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
     253              :         }
     254            0 :         mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
     255            0 :         | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
     256              :             // Auth errors talking to a pageserver are not auth errors for the caller: they are
     257              :             // internal server errors, showing that something is wrong with the pageserver or
     258              :             // storage controller's auth configuration.
     259            0 :             ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
     260              :         }
     261            0 :         mgmt_api::Error::ApiError(status @ StatusCode::TOO_MANY_REQUESTS, msg) => {
     262            0 :             // Pass through 429 errors: if pageserver is asking us to wait + retry, we in
     263            0 :             // turn ask our clients to wait + retry
     264            0 :             ApiError::Conflict(format!("{node} {status}: {msg}"))
     265              :         }
     266            0 :         mgmt_api::Error::ApiError(status, msg) => {
     267            0 :             // Presume general case of pageserver API errors is that we tried to do something
     268            0 :             // that can't be done right now.
     269            0 :             ApiError::Conflict(format!("{node} {status}: {msg}"))
     270              :         }
     271            0 :         mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
     272            0 :         mgmt_api::Error::Timeout(e) => ApiError::Timeout(e.into()),
     273              :     }
     274            0 : }
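                      : 
                      : // Editorial sketch (hypothetical usage, not in the original source): an HTTP
                      : // handler that proxies a request to a pageserver can map client failures through
                      : // this function; e.g. a cancelled client call surfaces as a shutdown error:
                      : //
                      : //     let api_err = passthrough_api_error(&node, mgmt_api::Error::Cancelled);
                      : //     assert!(matches!(api_err, ApiError::ShuttingDown));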
     275              : 
     276              : impl ServiceState {
     277            0 :     fn new(
     278            0 :         nodes: HashMap<NodeId, Node>,
     279            0 :         safekeepers: HashMap<NodeId, Safekeeper>,
     280            0 :         tenants: BTreeMap<TenantShardId, TenantShard>,
     281            0 :         scheduler: Scheduler,
     282            0 :         delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     283            0 :         initial_leadership_status: LeadershipStatus,
     284            0 :         reconcilers_cancel: CancellationToken,
     285            0 :     ) -> Self {
     286            0 :         metrics::update_leadership_status(initial_leadership_status);
     287            0 : 
     288            0 :         Self {
     289            0 :             leadership_status: initial_leadership_status,
     290            0 :             tenants,
     291            0 :             nodes: Arc::new(nodes),
     292            0 :             safekeepers: Arc::new(safekeepers),
     293            0 :             safekeeper_reconcilers: SafekeeperReconcilers::new(reconcilers_cancel),
     294            0 :             scheduler,
     295            0 :             ongoing_operation: None,
     296            0 :             delayed_reconcile_rx,
     297            0 :         }
     298            0 :     }
     299              : 
     300            0 :     fn parts_mut(
     301            0 :         &mut self,
     302            0 :     ) -> (
     303            0 :         &mut Arc<HashMap<NodeId, Node>>,
     304            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     305            0 :         &mut Scheduler,
     306            0 :     ) {
     307            0 :         (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
     308            0 :     }
     309              : 
     310              :     #[allow(clippy::type_complexity)]
     311            0 :     fn parts_mut_sk(
     312            0 :         &mut self,
     313            0 :     ) -> (
     314            0 :         &mut Arc<HashMap<NodeId, Node>>,
     315            0 :         &mut Arc<HashMap<NodeId, Safekeeper>>,
     316            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     317            0 :         &mut Scheduler,
     318            0 :     ) {
     319            0 :         (
     320            0 :             &mut self.nodes,
     321            0 :             &mut self.safekeepers,
     322            0 :             &mut self.tenants,
     323            0 :             &mut self.scheduler,
     324            0 :         )
     325            0 :     }
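                      : 
                      : // Editorial note: `parts_mut`/`parts_mut_sk` split one `&mut ServiceState` into
                      : // disjoint mutable borrows, so a caller can mutate tenants and consult the
                      : // scheduler at the same time without fighting the borrow checker. A hypothetical
                      : // caller:
                      : //
                      : //     let (_nodes, tenants, scheduler) = locked.parts_mut();
                      : //     for (_id, shard) in tenants.iter_mut() {
                      : //         let _ = shard.schedule(scheduler, &mut ScheduleContext::default());
                      : //     }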
     326              : 
     327            0 :     fn get_leadership_status(&self) -> LeadershipStatus {
     328            0 :         self.leadership_status
     329            0 :     }
     330              : 
     331            0 :     fn step_down(&mut self) {
     332            0 :         self.leadership_status = LeadershipStatus::SteppedDown;
     333            0 :         metrics::update_leadership_status(self.leadership_status);
     334            0 :     }
     335              : 
     336            0 :     fn become_leader(&mut self) {
     337            0 :         self.leadership_status = LeadershipStatus::Leader;
     338            0 :         metrics::update_leadership_status(self.leadership_status);
     339            0 :     }
     340              : }
     341              : 
     342              : #[derive(Clone)]
     343              : pub struct Config {
     344              :     // All pageservers managed by one instance of this service must have
     345              :     // the same public key.  This JWT token will be used to authenticate
     346              :     // this service to the pageservers it manages.
     347              :     pub pageserver_jwt_token: Option<String>,
     348              : 
     349              :     // All safekeepers managed by one instance of this service must have
     350              :     // the same public key. This JWT token will be used to authenticate
     351              :     // this service to the safekeepers it manages.
     352              :     pub safekeeper_jwt_token: Option<String>,
     353              : 
     354              :     // This JWT token will be used to authenticate this service to the control plane.
     355              :     pub control_plane_jwt_token: Option<String>,
     356              : 
     357              :     // This JWT token will be used to authenticate with other storage controller instances
     358              :     pub peer_jwt_token: Option<String>,
     359              : 
     360              :     /// Where the compute hook should send notifications of pageserver attachment locations
     361              :     /// (this URL points to the control plane in prod). If this is None, the compute hook will
     362              :     /// assume it is running in a test environment and try to update neon_local.
     363              :     pub compute_hook_url: Option<String>,
     364              : 
     365              :     /// Prefix for storage API endpoints of the control plane. We use this prefix to compute
     366              :     /// URLs that we use to send pageserver and safekeeper attachment locations.
     367              :     /// If this is None, the compute hook will assume it is running in a test environment
     368              :     /// and try to invoke neon_local instead.
     369              :     ///
     370              :     /// For now, there is also `compute_hook_url` which allows configuration of the pageserver
     371              :     /// specific endpoint, but it is in the process of being phased out.
     372              :     pub control_plane_url: Option<String>,
     373              : 
     374              :     /// Grace period during which a pageserver may fail to respond to heartbeats while still
     375              :     /// being considered active. Once the grace period elapses, the next heartbeat failure will
     376              :     /// mark the pageserver offline.
     377              :     pub max_offline_interval: Duration,
     378              : 
     379              :     /// Extended grace period within which pageserver may not respond to heartbeats.
     380              :     /// This extended grace period kicks in after the node has been drained for restart
     381              :     /// and/or upon handling the re-attach request from a node.
     382              :     pub max_warming_up_interval: Duration,
     383              : 
     384              :     /// How many normal-priority Reconcilers may be spawned concurrently
     385              :     pub reconciler_concurrency: usize,
     386              : 
     387              :     /// How many high-priority Reconcilers may be spawned concurrently
     388              :     pub priority_reconciler_concurrency: usize,
     389              : 
     390              :     /// How many API requests per second to allow per tenant, across all
     391              :     /// tenant-scoped API endpoints. Further API requests queue until ready.
     392              :     pub tenant_rate_limit: NonZeroU32,
     393              : 
     394              :     /// If a tenant shard's largest timeline (max_logical_size) exceeds this value, all tenant
     395              :     /// shards will be split in 2 until they fall below split_threshold (up to max_split_shards).
     396              :     ///
     397              :     /// This will greedily split into as many shards as necessary to fall below split_threshold, as
     398              :     /// powers of 2: if a tenant shard is 7 times larger than split_threshold, it will split into 8
     399              :     /// immediately, rather than first 2 then 4 then 8.
     400              :     ///
     401              :     /// None or 0 disables auto-splitting.
     402              :     ///
     403              :     /// TODO: consider using total logical size of all timelines instead.
     404              :     pub split_threshold: Option<u64>,
     405              : 
     406              :     /// The maximum number of shards a tenant can be split into during autosplits. Does not affect
     407              :     /// manual split requests. 0 or 1 disables autosplits, as we already have 1 shard.
     408              :     pub max_split_shards: u8,
     409              : 
     410              :     /// The size at which an unsharded tenant should initially split. Ingestion is significantly
     411              :     /// faster with multiple shards, so eagerly splitting below split_threshold will typically speed
     412              :     /// up initial ingestion of large tenants.
     413              :     ///
     414              :     /// This should be below split_threshold, but it is not required. If both split_threshold and
     415              :     /// initial_split_threshold qualify, the largest number of target shards will be used.
     416              :     ///
     417              :     /// Does not apply to already sharded tenants: changing initial_split_threshold or
     418              :     /// initial_split_shards is not retroactive for already-sharded tenants.
     419              :     ///
     420              :     /// None or 0 disables initial splits.
     421              :     pub initial_split_threshold: Option<u64>,
     422              : 
     423              :     /// The number of shards to split into when reaching initial_split_threshold. Will
     424              :     /// be clamped to max_split_shards.
     425              :     ///
     426              :     /// 0 or 1 disables initial splits. Has no effect if initial_split_threshold is disabled.
     427              :     pub initial_split_shards: u8,
     428              : 
     429              :     // TODO: make this cfg(feature = "testing")
     430              :     pub neon_local_repo_dir: Option<PathBuf>,
     431              : 
     432              :     // Maximum acceptable download lag for the secondary location
     433              :     // while draining a node. If the secondary location is lagging
     434              :     // by more than the configured amount, then the secondary is not
     435              :     // upgraded to primary.
     436              :     pub max_secondary_lag_bytes: Option<u64>,
     437              : 
     438              :     pub heartbeat_interval: Duration,
     439              : 
     440              :     pub address_for_peers: Option<Uri>,
     441              : 
     442              :     pub start_as_candidate: bool,
     443              : 
     444              :     pub long_reconcile_threshold: Duration,
     445              : 
     446              :     pub use_https_pageserver_api: bool,
     447              : 
     448              :     pub use_https_safekeeper_api: bool,
     449              : 
     450              :     pub ssl_ca_certs: Vec<Certificate>,
     451              : 
     452              :     pub timelines_onto_safekeepers: bool,
     453              : 
     454              :     pub use_local_compute_notifications: bool,
     455              : }
     456              : 
     457              : impl From<DatabaseError> for ApiError {
     458            0 :     fn from(err: DatabaseError) -> ApiError {
     459            0 :         match err {
     460            0 :             DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
     461              :             // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
     462              :             DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
     463            0 :                 ApiError::ShuttingDown
     464              :             }
     465            0 :             DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => {
     466            0 :                 ApiError::InternalServerError(anyhow::anyhow!(reason))
     467              :             }
     468              :         }
     469            0 :     }
     470              : }
     471              : 
     472              : enum InitialShardScheduleOutcome {
     473              :     Scheduled(TenantCreateResponseShard),
     474              :     NotScheduled,
     475              :     ShardScheduleError(ScheduleError),
     476              : }
     477              : 
     478              : pub struct Service {
     479              :     inner: Arc<std::sync::RwLock<ServiceState>>,
     480              :     config: Config,
     481              :     persistence: Arc<Persistence>,
     482              :     compute_hook: Arc<ComputeHook>,
     483              :     result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
     484              : 
     485              :     heartbeater_ps: Heartbeater<Node, PageserverState>,
     486              :     heartbeater_sk: Heartbeater<Safekeeper, SafekeeperState>,
     487              : 
     488              :     // Channel for background cleanup from failed operations that require cleanup, such as shard split
     489              :     abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
     490              : 
     491              :     // Locking on a tenant granularity (covers all shards in the tenant):
     492              :     // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
     493              :     // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
     494              :     tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
     495              : 
     496              :     // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
     497              :     // that transition it to/from Active.
     498              :     node_op_locks: IdLockMap<NodeId, NodeOperations>,
     499              : 
     500              :     // Limit how many Reconcilers we will spawn concurrently for normal-priority tasks such as background reconciliations
     501              :     // and reconciliation on startup.
     502              :     reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     503              : 
     504              :     // Limit how many Reconcilers we will spawn concurrently for high-priority tasks such as tenant/timeline CRUD, which
     505              :     // a human user might be waiting for.
     506              :     priority_reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     507              : 
     508              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     509              :     /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
     510              :     ///
     511              :     /// Note that this state logically lives inside ServiceState, but carrying Sender here makes the code simpler
     512              :     /// by avoiding the need for a &mut ref to something inside the ServiceState.  This could be optimized to
     513              :     /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
     514              :     delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
     515              : 
     516              :     // Process shutdown will fire this token
     517              :     cancel: CancellationToken,
     518              : 
     519              :     // Child token of [`Service::cancel`] used by reconcilers
     520              :     reconcilers_cancel: CancellationToken,
     521              : 
     522              :     // Background tasks will hold this gate
     523              :     gate: Gate,
     524              : 
     525              :     // Reconcilers background tasks will hold this gate
     526              :     reconcilers_gate: Gate,
     527              : 
     528              :     /// This waits for initial reconciliation with pageservers to complete.  Until this barrier
     529              :     /// passes, it isn't safe to do any actions that mutate tenants.
     530              :     pub(crate) startup_complete: Barrier,
     531              : 
     532              :     /// HTTP client with proper CA certs.
     533              :     http_client: reqwest::Client,
     534              : }
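                      : 
                      : // Editorial sketch (hypothetical, not in the original source): because
                      : // `delayed_reconcile_tx` is a bounded channel (capacity MAX_DELAYED_RECONCILES),
                      : // enqueueing a shard must tolerate a full queue rather than block or buffer
                      : // without limit:
                      : //
                      : //     match self.delayed_reconcile_tx.try_send(tenant_shard_id) {
                      : //         Ok(()) => {}                       // queued for a later reconcile pass
                      : //         Err(TrySendError::Full(_)) => {}   // queue full: a later reconcile pass picks it up
                      : //         Err(TrySendError::Closed(_)) => {} // shutting down
                      : //     }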
     535              : 
     536              : impl From<ReconcileWaitError> for ApiError {
     537            0 :     fn from(value: ReconcileWaitError) -> Self {
     538            0 :         match value {
     539            0 :             ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
     540            0 :             e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
     541            0 :             e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
     542              :         }
     543            0 :     }
     544              : }
     545              : 
     546              : impl From<OperationError> for ApiError {
     547            0 :     fn from(value: OperationError) -> Self {
     548            0 :         match value {
     549            0 :             OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => {
     550            0 :                 ApiError::InternalServerError(anyhow::anyhow!(err))
     551              :             }
     552            0 :             OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
     553              :         }
     554            0 :     }
     555              : }
     556              : 
     557              : #[allow(clippy::large_enum_variant)]
     558              : enum TenantCreateOrUpdate {
     559              :     Create(TenantCreateRequest),
     560              :     Update(Vec<ShardUpdate>),
     561              : }
     562              : 
     563              : struct ShardSplitParams {
     564              :     old_shard_count: ShardCount,
     565              :     new_shard_count: ShardCount,
     566              :     new_stripe_size: Option<ShardStripeSize>,
     567              :     targets: Vec<ShardSplitTarget>,
     568              :     policy: PlacementPolicy,
     569              :     config: TenantConfig,
     570              :     shard_ident: ShardIdentity,
     571              :     preferred_az_id: Option<AvailabilityZone>,
     572              : }
     573              : 
     574              : // When preparing for a shard split, we may either choose to proceed with the split,
     575              : // or find that the work is already done and return NoOp.
     576              : enum ShardSplitAction {
     577              :     Split(Box<ShardSplitParams>),
     578              :     NoOp(TenantShardSplitResponse),
     579              : }
     580              : 
     581              : // A parent shard which will be split
     582              : struct ShardSplitTarget {
     583              :     parent_id: TenantShardId,
     584              :     node: Node,
     585              :     child_ids: Vec<TenantShardId>,
     586              : }
     587              : 
     588              : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
     589              : /// might not be available.  We therefore use a queue of abort operations processed in the background.
     590              : struct TenantShardSplitAbort {
     591              :     tenant_id: TenantId,
     592              :     /// The target values from the request that failed
     593              :     new_shard_count: ShardCount,
     594              :     new_stripe_size: Option<ShardStripeSize>,
     595              :     /// Until this abort op is complete, no other operations may be done on the tenant
     596              :     _tenant_lock: TracingExclusiveGuard<TenantOperations>,
     597              :     /// The reconciler gate for the duration of the split operation, and any included abort.
     598              :     _gate: GateGuard,
     599              : }
     600              : 
     601              : #[derive(thiserror::Error, Debug)]
     602              : enum TenantShardSplitAbortError {
     603              :     #[error(transparent)]
     604              :     Database(#[from] DatabaseError),
     605              :     #[error(transparent)]
     606              :     Remote(#[from] mgmt_api::Error),
     607              :     #[error("Unavailable")]
     608              :     Unavailable,
     609              : }
     610              : 
     611              : /// Inputs for computing a target shard count for a tenant.
     612              : struct ShardSplitInputs {
     613              :     /// Current shard count.
     614              :     shard_count: ShardCount,
     615              :     /// Total size of largest timeline summed across all shards.
     616              :     max_logical_size: u64,
     617              :     /// Size-based split threshold. Zero if size-based splits are disabled.
     618              :     split_threshold: u64,
     619              :     /// Upper bound on target shards. 0 or 1 disables splits.
     620              :     max_split_shards: u8,
     621              :     /// Initial split threshold. Zero if initial splits are disabled.
     622              :     initial_split_threshold: u64,
     623              :     /// Number of shards for initial splits. 0 or 1 disables initial splits.
     624              :     initial_split_shards: u8,
     625              : }
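                      : 
                      : // Editorial sketch (simplified, not the actual algorithm in this file): per the
                      : // `Config` docs above, a size-based split targets the next power of two that
                      : // brings each shard under `split_threshold`, clamped to `max_split_shards`; e.g.
                      : // max_logical_size = 7 * split_threshold yields 8 target shards.
                      : //
                      : //     fn size_based_target(max_logical_size: u64, split_threshold: u64, max_split_shards: u8) -> u64 {
                      : //         if split_threshold == 0 || max_split_shards <= 1 {
                      : //             return 1; // size-based splits disabled
                      : //         }
                      : //         let needed = max_logical_size.div_ceil(split_threshold).next_power_of_two();
                      : //         needed.clamp(1, max_split_shards as u64)
                      : //     }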
     626              : 
     627              : struct ShardUpdate {
     628              :     tenant_shard_id: TenantShardId,
     629              :     placement_policy: PlacementPolicy,
     630              :     tenant_config: TenantConfig,
     631              : 
     632              :     /// If this is None, generation is not updated.
     633              :     generation: Option<Generation>,
     634              : 
     635              :     /// If this is None, scheduling policy is not updated.
     636              :     scheduling_policy: Option<ShardSchedulingPolicy>,
     637              : }
     638              : 
     639              : enum StopReconciliationsReason {
     640              :     ShuttingDown,
     641              :     SteppingDown,
     642              : }
     643              : 
     644              : impl std::fmt::Display for StopReconciliationsReason {
     645            0 :     fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
     646            0 :         let s = match self {
     647            0 :             Self::ShuttingDown => "Shutting down",
     648            0 :             Self::SteppingDown => "Stepping down",
     649              :         };
     650            0 :         write!(writer, "{}", s)
     651            0 :     }
     652              : }
     653              : 
     654              : pub(crate) enum ReconcileResultRequest {
     655              :     ReconcileResult(ReconcileResult),
     656              :     Stop,
     657              : }
     658              : 
     659              : #[derive(Clone)]
     660              : struct MutationLocation {
     661              :     node: Node,
     662              :     generation: Generation,
     663              : }
     664              : 
     665              : #[derive(Clone)]
     666              : struct ShardMutationLocations {
     667              :     latest: MutationLocation,
     668              :     other: Vec<MutationLocation>,
     669              : }
     670              : 
     671              : #[derive(Default, Clone)]
     672              : struct TenantMutationLocations(BTreeMap<TenantShardId, ShardMutationLocations>);
     673              : 
     674              : impl Service {
     675            0 :     pub fn get_config(&self) -> &Config {
     676            0 :         &self.config
     677            0 :     }
     678              : 
     679            0 :     pub fn get_http_client(&self) -> &reqwest::Client {
     680            0 :         &self.http_client
     681            0 :     }
     682              : 
     683              :     /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
     684              :     /// view of the world, and determine which pageservers are responsive.
     685              :     #[instrument(skip_all)]
     686              :     async fn startup_reconcile(
     687              :         self: &Arc<Service>,
     688              :         current_leader: Option<ControllerPersistence>,
     689              :         leader_step_down_state: Option<GlobalObservedState>,
     690              :         bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
     691              :             Result<(), (TenantShardId, NotifyError)>,
     692              :         >,
     693              :     ) {
     694              :         // Startup reconciliation does I/O to other services: whether they
     695              :         // are responsive or not, we should aim to finish within our deadline, because:
     696              :         // - If we don't, a k8s readiness hook watching /ready will kill us.
     697              :         // - While we're waiting for startup reconciliation, we are not fully
     698              :         //   available for end user operations like creating/deleting tenants and timelines.
     699              :         //
     700              :         // We set multiple deadlines to break up the time available between the phases of work: this is
     701              :         // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
     702              :         let start_at = Instant::now();
     703              :         let node_scan_deadline = start_at
     704              :             .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
     705              :             .expect("Reconcile timeout is a modest constant");
     706              : 
     707              :         let observed = if let Some(state) = leader_step_down_state {
     708              :             tracing::info!(
     709              :                 "Using observed state received from leader at {}",
     710              :                 current_leader.as_ref().unwrap().address
     711              :             );
     712              : 
     713              :             state
     714              :         } else {
     715              :             self.build_global_observed_state(node_scan_deadline).await
     716              :         };
     717              : 
     718              :         // Accumulate a list of any tenant locations that ought to be detached
     719              :         let mut cleanup = Vec::new();
     720              : 
     721              :         // Send initial heartbeat requests to all nodes loaded from the database
     722              :         let all_nodes = {
     723              :             let locked = self.inner.read().unwrap();
     724              :             locked.nodes.clone()
     725              :         };
     726              :         let (mut nodes_online, mut sks_online) =
     727              :             self.initial_heartbeat_round(all_nodes.keys()).await;
     728              : 
     729              :         // List of tenants for which we will attempt to notify compute of their location at startup
     730              :         let mut compute_notifications = Vec::new();
     731              : 
     732              :         // Populate intent and observed states for all tenants, based on reported state on pageservers
     733              :         tracing::info!("Populating tenant shards' states from initial pageserver scan...");
     734              :         let shard_count = {
     735              :             let mut locked = self.inner.write().unwrap();
     736              :             let (nodes, safekeepers, tenants, scheduler) = locked.parts_mut_sk();
     737              : 
     738              :             // Mark nodes online if they responded to us: nodes are offline by default after a restart.
     739              :             let mut new_nodes = (**nodes).clone();
     740              :             for (node_id, node) in new_nodes.iter_mut() {
     741              :                 if let Some(utilization) = nodes_online.remove(node_id) {
     742              :                     node.set_availability(NodeAvailability::Active(utilization));
     743              :                     scheduler.node_upsert(node);
     744              :                 }
     745              :             }
     746              :             *nodes = Arc::new(new_nodes);
     747              : 
     748              :             let mut new_sks = (**safekeepers).clone();
     749              :             for (node_id, node) in new_sks.iter_mut() {
     750              :                 if let Some((utilization, last_seen_at)) = sks_online.remove(node_id) {
     751              :                     node.set_availability(SafekeeperState::Available {
     752              :                         utilization,
     753              :                         last_seen_at,
     754              :                     });
     755              :                 }
     756              :             }
     757              :             *safekeepers = Arc::new(new_sks);
     758              : 
     759              :             for (tenant_shard_id, observed_state) in observed.0 {
     760              :                 let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
     761              :                     for node_id in observed_state.locations.keys() {
     762              :                         cleanup.push((tenant_shard_id, *node_id));
     763              :                     }
     764              : 
     765              :                     continue;
     766              :                 };
     767              : 
     768              :                 tenant_shard.observed = observed_state;
     769              :             }
     770              : 
     771              :             // Populate each tenant's intent state
     772              :             let mut schedule_context = ScheduleContext::default();
     773              :             for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
     774              :                 if tenant_shard_id.shard_number == ShardNumber(0) {
     775              :                     // Reset scheduling context each time we advance to the next Tenant
     776              :                     schedule_context = ScheduleContext::default();
     777              :                 }
     778              : 
     779              :                 tenant_shard.intent_from_observed(scheduler);
     780              :                 if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
     781              :                     // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
     782              :                     // not enough pageservers are available.  The tenant may well still be available
     783              :                     // to clients.
     784              :                     tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
     785              :                 } else {
     786              :                     // If we're both intending and observed to be attached at a particular node, we will
     787              :                     // emit a compute notification for this. In the case where our observed state does not
     788              :                     // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
     789              :                     if let Some(attached_at) = tenant_shard.stably_attached() {
     790              :                         compute_notifications.push(compute_hook::ShardUpdate {
     791              :                             tenant_shard_id: *tenant_shard_id,
     792              :                             node_id: attached_at,
     793              :                             stripe_size: tenant_shard.shard.stripe_size,
     794              :                             preferred_az: tenant_shard
     795              :                                 .preferred_az()
     796            0 :                                 .map(|az| Cow::Owned(az.clone())),
     797              :                         });
     798              :                     }
     799              :                 }
     800              :             }
     801              : 
     802              :             tenants.len()
     803              :         };
     804              : 
     805              :         // Before making any observable changes to the cluster, persist self
     806              :         // as leader in database and memory.
     807              :         let leadership = Leadership::new(
     808              :             self.persistence.clone(),
     809              :             self.config.clone(),
     810              :             self.cancel.child_token(),
     811              :         );
     812              : 
     813              :         if let Err(e) = leadership.become_leader(current_leader).await {
     814              :             tracing::error!("Failed to persist self as leader: {e}. Aborting start-up ...");
     815              :             std::process::exit(1);
     816              :         }
     817              : 
     818              :         let safekeepers = self.inner.read().unwrap().safekeepers.clone();
     819              :         let sk_schedule_requests =
     820              :             match safekeeper_reconciler::load_schedule_requests(self, &safekeepers).await {
     821              :                 Ok(v) => v,
     822              :                 Err(e) => {
     823              :                     tracing::warn!(
     824              :                         "Failed to load safekeeper pending ops at startup: {e}." // Don't abort for now: " Aborting start-up..."
     825              :                     );
     826              :                     // std::process::exit(1);
     827              :                     Vec::new()
     828              :                 }
     829              :             };
     830              : 
     831              :         {
     832              :             let mut locked = self.inner.write().unwrap();
     833              :             locked.become_leader();
     834              : 
     835              :             locked
     836              :                 .safekeeper_reconcilers
     837              :                 .schedule_request_vec(self, sk_schedule_requests);
     838              :         }
     839              : 
     840              :         // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
     841              :         // generation_pageserver in the database.
     842              : 
     843              :         // Emit compute hook notifications for all tenants which are already stably attached.  Other tenants
     844              :         // will emit compute hook notifications when they reconcile.
     845              :         //
     846              :         // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later
     847              :         // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
     848              :         // calls will be correctly ordered wrt these.
     849              :         //
     850              :         // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them
     851              :         // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore
     852              :         // unit and start doing I/O.
     853              :         tracing::info!(
     854              :             "Sending {} compute notifications",
     855              :             compute_notifications.len()
     856              :         );
     857              :         self.compute_hook.notify_background(
     858              :             compute_notifications,
     859              :             bg_compute_notify_result_tx.clone(),
     860              :             &self.cancel,
     861              :         );
     862              : 
     863              :         // Finally, now that the service is up and running, launch reconcile operations for any tenants
     864              :         // which require it: under normal circumstances this should only include tenants that were in some
     865              :         // transient state before we restarted, or any tenants whose compute hooks failed above.
     866              :         tracing::info!("Checking for shards in need of reconciliation...");
     867              :         let reconcile_tasks = self.reconcile_all();
     868              :         // We will not wait for these reconciliation tasks to run here: we're now done with startup and
     869              :         // normal operations may proceed.
     870              : 
     871              :         // Clean up any tenants that were found on pageservers but are not known to us.  Do this in the
     872              :         // background because it does not need to complete in order to proceed with other work.
     873              :         if !cleanup.is_empty() {
     874              :             tracing::info!("Cleaning up {} locations in the background", cleanup.len());
     875              :             tokio::task::spawn({
     876              :                 let cleanup_self = self.clone();
     877            0 :                 async move { cleanup_self.cleanup_locations(cleanup).await }
     878              :             });
     879              :         }
     880              : 
     881              :         tracing::info!(
     882              :             "Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)"
     883              :         );
     884              :     }
     885              : 
     886            0 :     async fn initial_heartbeat_round<'a>(
     887            0 :         &self,
     888            0 :         node_ids: impl Iterator<Item = &'a NodeId>,
     889            0 :     ) -> (
     890            0 :         HashMap<NodeId, PageserverUtilization>,
     891            0 :         HashMap<NodeId, (SafekeeperUtilization, Instant)>,
     892            0 :     ) {
     893            0 :         assert!(!self.startup_complete.is_ready());
     894              : 
     895            0 :         let all_nodes = {
     896            0 :             let locked = self.inner.read().unwrap();
     897            0 :             locked.nodes.clone()
     898            0 :         };
     899            0 : 
     900            0 :         let mut nodes_to_heartbeat = HashMap::new();
     901            0 :         for node_id in node_ids {
     902            0 :             match all_nodes.get(node_id) {
     903            0 :                 Some(node) => {
     904            0 :                     nodes_to_heartbeat.insert(*node_id, node.clone());
     905            0 :                 }
     906              :                 None => {
     907            0 :                     tracing::warn!("Node {node_id} was removed during start-up");
     908              :                 }
     909              :             }
     910              :         }
     911              : 
     912            0 :         let all_sks = {
     913            0 :             let locked = self.inner.read().unwrap();
     914            0 :             locked.safekeepers.clone()
     915            0 :         };
     916            0 : 
     917            0 :         tracing::info!("Sending initial heartbeats...");
     918            0 :         let (res_ps, res_sk) = tokio::join!(
     919            0 :             self.heartbeater_ps.heartbeat(Arc::new(nodes_to_heartbeat)),
     920            0 :             self.heartbeater_sk.heartbeat(all_sks)
     921            0 :         );
     922              : 
     923            0 :         let mut online_nodes = HashMap::new();
     924            0 :         if let Ok(deltas) = res_ps {
     925            0 :             for (node_id, status) in deltas.0 {
     926            0 :                 match status {
     927            0 :                     PageserverState::Available { utilization, .. } => {
     928            0 :                         online_nodes.insert(node_id, utilization);
     929            0 :                     }
     930            0 :                     PageserverState::Offline => {}
     931              :                     PageserverState::WarmingUp { .. } => {
     932            0 :                         unreachable!("Nodes are never marked warming-up during startup reconcile")
     933              :                     }
     934              :                 }
     935              :             }
     936            0 :         }
     937              : 
     938            0 :         let mut online_sks = HashMap::new();
     939            0 :         if let Ok(deltas) = res_sk {
     940            0 :             for (node_id, status) in deltas.0 {
     941            0 :                 match status {
     942              :                     SafekeeperState::Available {
     943            0 :                         utilization,
     944            0 :                         last_seen_at,
     945            0 :                     } => {
     946            0 :                         online_sks.insert(node_id, (utilization, last_seen_at));
     947            0 :                     }
     948            0 :                     SafekeeperState::Offline => {}
     949              :                 }
     950              :             }
     951            0 :         }
     952              : 
     953            0 :         (online_nodes, online_sks)
     954            0 :     }
     955              : 
     956              :     /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
     957              :     ///
     958              :     /// The result includes only nodes which responded within the deadline.
     959            0 :     async fn scan_node_locations(
     960            0 :         &self,
     961            0 :         deadline: Instant,
     962            0 :     ) -> HashMap<NodeId, LocationConfigListResponse> {
     963            0 :         let nodes = {
     964            0 :             let locked = self.inner.read().unwrap();
     965            0 :             locked.nodes.clone()
     966            0 :         };
     967            0 : 
     968            0 :         let mut node_results = HashMap::new();
     969            0 : 
     970            0 :         let mut node_list_futs = FuturesUnordered::new();
     971            0 : 
     972            0 :         tracing::info!("Scanning shards on {} nodes...", nodes.len());
     973            0 :         for node in nodes.values() {
     974            0 :             node_list_futs.push({
     975            0 :                 async move {
     976            0 :                     tracing::info!("Scanning shards on node {node}...");
     977            0 :                     let timeout = Duration::from_secs(5);
     978            0 :                     let response = node
     979            0 :                         .with_client_retries(
     980            0 :                             |client| async move { client.list_location_config().await },
     981            0 :                             &self.http_client,
     982            0 :                             &self.config.pageserver_jwt_token,
     983            0 :                             1,
     984            0 :                             5,
     985            0 :                             timeout,
     986            0 :                             &self.cancel,
     987            0 :                         )
     988            0 :                         .await;
     989            0 :                     (node.get_id(), response)
     990            0 :                 }
     991            0 :             });
     992            0 :         }
     993              : 
     994              :         loop {
     995            0 :             let (node_id, result) = tokio::select! {
     996            0 :                 next = node_list_futs.next() => {
     997            0 :                     match next {
     998            0 :                         Some(result) => result,
     999              :                         None => {
    1000              :                             // We got results for all our nodes
    1001            0 :                             break;
    1002              :                         }
    1003              : 
    1004              :                     }
    1005              :                 },
    1006            0 :                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
    1007              :                     // Give up waiting for anyone who hasn't responded: we will yield the results that we have
    1008            0 :                     tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
    1009            0 :                     break;
    1010              :                 }
    1011              :             };
    1012              : 
    1013            0 :             let Some(list_response) = result else {
    1014            0 :                 tracing::info!("Shutdown during startup_reconcile");
    1015            0 :                 break;
    1016              :             };
    1017              : 
    1018            0 :             match list_response {
    1019            0 :                 Err(e) => {
    1020            0 :                     tracing::warn!("Could not scan node {} ({e})", node_id);
    1021              :                 }
    1022            0 :                 Ok(listing) => {
    1023            0 :                     node_results.insert(node_id, listing);
    1024            0 :                 }
    1025              :             }
    1026              :         }
    1027              : 
    1028            0 :         node_results
    1029            0 :     }
    1030              : 
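The function above is an instance of a general fan-out-with-deadline pattern: push one future per node into a `FuturesUnordered`, then drain it until either the stream is exhausted or the deadline fires, dropping stragglers. A self-contained sketch, with a stub sleep standing in for the pageserver API call (all names hypothetical):

use std::collections::HashMap;
use std::time::Duration;

use futures::stream::{FuturesUnordered, StreamExt};
use tokio::time::Instant;

// Hypothetical sketch: one request per node, results collected until the
// deadline; nodes that haven't answered by then are simply omitted.
async fn fan_out_with_deadline(node_ids: Vec<u64>, deadline: Instant) -> HashMap<u64, String> {
    let mut futs = FuturesUnordered::new();
    for id in node_ids {
        futs.push(async move {
            // Stand-in for a per-node HTTP request such as list_location_config.
            tokio::time::sleep(Duration::from_millis(10 * id)).await;
            (id, format!("response from node {id}"))
        });
    }

    let mut results = HashMap::new();
    loop {
        tokio::select! {
            next = futs.next() => match next {
                Some((id, resp)) => { results.insert(id, resp); }
                None => break, // every node answered
            },
            _ = tokio::time::sleep_until(deadline) => break, // drop stragglers
        }
    }
    results
}
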
    1031            0 :     async fn build_global_observed_state(&self, deadline: Instant) -> GlobalObservedState {
    1032            0 :         let node_listings = self.scan_node_locations(deadline).await;
    1033            0 :         let mut observed = GlobalObservedState::default();
    1034              : 
    1035            0 :         for (node_id, location_confs) in node_listings {
    1036            0 :             tracing::info!(
    1037            0 :                 "Received {} shard statuses from pageserver {}",
    1038            0 :                 location_confs.tenant_shards.len(),
    1039              :                 node_id
    1040              :             );
    1041              : 
    1042            0 :             for (tid, location_conf) in location_confs.tenant_shards {
    1043            0 :                 let entry = observed.0.entry(tid).or_default();
    1044            0 :                 entry.locations.insert(
    1045            0 :                     node_id,
    1046            0 :                     ObservedStateLocation {
    1047            0 :                         conf: location_conf,
    1048            0 :                     },
    1049            0 :                 );
    1050            0 :             }
    1051              :         }
    1052              : 
    1053            0 :         observed
    1054            0 :     }
    1055              : 
    1056              :     /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
    1057              :     ///
    1058              :     /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
    1059              :     /// tenants, then it is probably something that was incompletely deleted earlier: we will not fight
    1060              :     /// with any other task trying to attach it.
    1061              :     #[instrument(skip_all)]
    1062              :     async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
    1063              :         let nodes = self.inner.read().unwrap().nodes.clone();
    1064              : 
    1065              :         for (tenant_shard_id, node_id) in cleanup {
    1066              :             // A node reported a tenant_shard_id which is unknown to us: detach it.
    1067              :             let Some(node) = nodes.get(&node_id) else {
    1068              :                 // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
    1069              :                 // a location to clean up on a node that has since been removed.
    1070              :                 tracing::info!(
    1071              :                     "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
    1072              :                 );
    1073              :                 continue;
    1074              :             };
    1075              : 
    1076              :             if self.cancel.is_cancelled() {
    1077              :                 break;
    1078              :             }
    1079              : 
    1080              :             let client = PageserverClient::new(
    1081              :                 node.get_id(),
    1082              :                 self.http_client.clone(),
    1083              :                 node.base_url(),
    1084              :                 self.config.pageserver_jwt_token.as_deref(),
    1085              :             );
    1086              :             match client
    1087              :                 .location_config(
    1088              :                     tenant_shard_id,
    1089              :                     LocationConfig {
    1090              :                         mode: LocationConfigMode::Detached,
    1091              :                         generation: None,
    1092              :                         secondary_conf: None,
    1093              :                         shard_number: tenant_shard_id.shard_number.0,
    1094              :                         shard_count: tenant_shard_id.shard_count.literal(),
    1095              :                         shard_stripe_size: 0,
    1096              :                         tenant_conf: models::TenantConfig::default(),
    1097              :                     },
    1098              :                     None,
    1099              :                     false,
    1100              :                 )
    1101              :                 .await
    1102              :             {
    1103              :                 Ok(()) => {
    1104              :                     tracing::info!(
    1105              :                         "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
    1106              :                     );
    1107              :                 }
    1108              :                 Err(e) => {
    1109              :                     // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
    1110              :                     // break anything.
    1111              :                     tracing::error!(
    1112              :                         "Failed to detach unknown shard {tenant_shard_id} on pageserver {node_id}: {e}"
    1113              :                     );
    1114              :                 }
    1115              :             }
    1116              :         }
    1117              :     }
    1118              : 
    1119              :     /// Long running background task that periodically wakes up and looks for shards that need
    1120              :     /// reconciliation.  Reconciliation is fallible, so any reconciliation tasks that fail during
    1121              :     /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
    1122              :     /// for those retries.
    1123              :     #[instrument(skip_all)]
    1124              :     async fn background_reconcile(self: &Arc<Self>) {
    1125              :         self.startup_complete.clone().wait().await;
    1126              : 
    1127              :         const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
    1128              :         let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
    1129              :         while !self.reconcilers_cancel.is_cancelled() {
    1130              :             tokio::select! {
    1131              :               _ = interval.tick() => {
    1132              :                 let reconciles_spawned = self.reconcile_all();
    1133              :                 if reconciles_spawned == 0 {
    1134              :                     // Run optimizer only when we didn't find any other work to do
    1135              :                     self.optimize_all().await;
    1136              :                 }
    1137              :                 // Always attempt autosplits. Sharding is crucial for bulk ingest performance, so we
    1138              :                 // must be responsive when new projects begin ingesting and reach the threshold.
    1139              :                 self.autosplit_tenants().await;
    1140              :             }
    1141              :               _ = self.reconcilers_cancel.cancelled() => return
    1142              :             }
    1143              :         }
    1144              :     }
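
Stripped of the reconcile-specific work, the loop above has a reusable tick-or-cancel shape. A sketch with the work body elided (function name and period are hypothetical):

use std::time::Duration;
use tokio_util::sync::CancellationToken;

// Hypothetical sketch of the tick-or-cancel loop shape: one round of work
// per tick, returning promptly once the token is cancelled.
async fn periodic_driver(cancel: CancellationToken) {
    let mut interval = tokio::time::interval(Duration::from_secs(20));
    while !cancel.is_cancelled() {
        tokio::select! {
            _ = interval.tick() => {
                // ...one round of background work goes here...
            }
            _ = cancel.cancelled() => return,
        }
    }
}
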
    1145              :     /// Heartbeat all storage nodes once in a while.
    1146              :     #[instrument(skip_all)]
    1147              :     async fn spawn_heartbeat_driver(&self) {
    1148              :         self.startup_complete.clone().wait().await;
    1149              : 
    1150              :         let mut interval = tokio::time::interval(self.config.heartbeat_interval);
    1151              :         while !self.cancel.is_cancelled() {
    1152              :             tokio::select! {
    1153              :               _ = interval.tick() => { }
    1154              :               _ = self.cancel.cancelled() => return
    1155              :             };
    1156              : 
    1157              :             let nodes = {
    1158              :                 let locked = self.inner.read().unwrap();
    1159              :                 locked.nodes.clone()
    1160              :             };
    1161              : 
    1162              :             let safekeepers = {
    1163              :                 let locked = self.inner.read().unwrap();
    1164              :                 locked.safekeepers.clone()
    1165              :             };
    1166              : 
    1167              :             let (res_ps, res_sk) = tokio::join!(
    1168              :                 self.heartbeater_ps.heartbeat(nodes),
    1169              :                 self.heartbeater_sk.heartbeat(safekeepers)
    1170              :             );
    1171              : 
    1172              :             if let Ok(deltas) = res_ps {
    1173              :                 let mut to_handle = Vec::default();
    1174              : 
    1175              :                 for (node_id, state) in deltas.0 {
    1176              :                     let new_availability = match state {
    1177              :                         PageserverState::Available { utilization, .. } => {
    1178              :                             NodeAvailability::Active(utilization)
    1179              :                         }
    1180              :                         PageserverState::WarmingUp { started_at } => {
    1181              :                             NodeAvailability::WarmingUp(started_at)
    1182              :                         }
    1183              :                         PageserverState::Offline => {
    1184              :                             // The node might have been placed in the WarmingUp state
    1185              :                             // while the heartbeat round was ongoing. Hence, filter out
    1186              :                             // offline transitions for WarmingUp nodes that are still within
    1187              :                             // their grace period.
    1188              :                             if let Ok(NodeAvailability::WarmingUp(started_at)) = self
    1189              :                                 .get_node(node_id)
    1190              :                                 .await
    1191              :                                 .as_ref()
    1192            0 :                                 .map(|n| n.get_availability())
    1193              :                             {
    1194              :                                 let now = Instant::now();
    1195              :                                 if now - *started_at >= self.config.max_warming_up_interval {
    1196              :                                     NodeAvailability::Offline
    1197              :                                 } else {
    1198              :                                     NodeAvailability::WarmingUp(*started_at)
    1199              :                                 }
    1200              :                             } else {
    1201              :                                 NodeAvailability::Offline
    1202              :                             }
    1203              :                         }
    1204              :                     };
    1205              : 
    1206              :                     let node_lock = trace_exclusive_lock(
    1207              :                         &self.node_op_locks,
    1208              :                         node_id,
    1209              :                         NodeOperations::Configure,
    1210              :                     )
    1211              :                     .await;
    1212              : 
    1213              :                     pausable_failpoint!("heartbeat-pre-node-state-configure");
    1214              : 
    1215              :                     // This is the code path for genuine availability transitions (i.e. node
    1216              :                     // goes unavailable and/or comes back online).
    1217              :                     let res = self
    1218              :                         .node_state_configure(node_id, Some(new_availability), None, &node_lock)
    1219              :                         .await;
    1220              : 
    1221              :                     match res {
    1222              :                         Ok(transition) => {
    1223              :                             // Keep hold of the lock until the availability transitions
    1224              :                             // have been handled in
    1225              :                             // [`Service::handle_node_availability_transitions`] in order to avoid
    1226              :                             // racing with [`Service::external_node_configure`].
    1227              :                             to_handle.push((node_id, node_lock, transition));
    1228              :                         }
    1229              :                         Err(ApiError::NotFound(_)) => {
    1230              :                             // This should be rare, but legitimate since the heartbeats are done
    1231              :                             // on a snapshot of the nodes.
    1232              :                             tracing::info!("Node {} was not found after heartbeat round", node_id);
    1233              :                         }
    1234              :                         Err(ApiError::ShuttingDown) => {
    1235              :                             // No-op: we're shutting down, no need to try and update any nodes' statuses
    1236              :                         }
    1237              :                         Err(err) => {
    1238              :                             // Transition to active involves reconciling: if a node responds to a heartbeat then
    1239              :                             // becomes unavailable again, we may get an error here.
    1240              :                             tracing::error!(
    1241              :                                 "Failed to update node state {} after heartbeat round: {}",
    1242              :                                 node_id,
    1243              :                                 err
    1244              :                             );
    1245              :                         }
    1246              :                     }
    1247              :                 }
    1248              : 
    1249              :                 // We collected all the transitions above and now we handle them.
    1250              :                 let res = self.handle_node_availability_transitions(to_handle).await;
    1251              :                 if let Err(errs) = res {
    1252              :                     for (node_id, err) in errs {
    1253              :                         match err {
    1254              :                             ApiError::NotFound(_) => {
    1255              :                                 // This should be rare, but legitimate since the heartbeats are done
    1256              :                                 // on a snapshot of the nodes.
    1257              :                                 tracing::info!(
    1258              :                                     "Node {} was not found after heartbeat round",
    1259              :                                     node_id
    1260              :                                 );
    1261              :                             }
    1262              :                             err => {
    1263              :                                 tracing::error!(
    1264              :                                     "Failed to handle availability transition for {} after heartbeat round: {}",
    1265              :                                     node_id,
    1266              :                                     err
    1267              :                                 );
    1268              :                             }
    1269              :                         }
    1270              :                     }
    1271              :                 }
    1272              :             }
    1273              :             if let Ok(deltas) = res_sk {
    1274              :                 let mut locked = self.inner.write().unwrap();
    1275              :                 let mut safekeepers = (*locked.safekeepers).clone();
    1276              :                 for (id, state) in deltas.0 {
    1277              :                     let Some(sk) = safekeepers.get_mut(&id) else {
    1278              :                         tracing::info!(
    1279              :                             "Couldn't update safekeeper state for id {id} from heartbeat={state:?}"
    1280              :                         );
    1281              :                         continue;
    1282              :                     };
    1283              :                     sk.set_availability(state);
    1284              :                 }
    1285              :                 locked.safekeepers = Arc::new(safekeepers);
    1286              :             }
    1287              :         }
    1288              :     }
    1289              : 
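The WarmingUp handling in the heartbeat driver above boils down to: only believe an "offline" heartbeat for a warming-up node once its grace period has elapsed. A sketch of that decision in isolation (the `Availability` enum and function are hypothetical stand-ins for `NodeAvailability` and the inline match):

use std::time::{Duration, Instant};

// Hypothetical stand-in for NodeAvailability.
#[derive(Debug)]
enum Availability {
    WarmingUp(Instant),
    Offline,
}

// An "offline" heartbeat is only believed once the node has been warming
// up for longer than the grace period; otherwise the WarmingUp state (and
// its original start time) is preserved.
fn apply_offline_heartbeat(current: &Availability, grace: Duration) -> Availability {
    match current {
        Availability::WarmingUp(started_at) if started_at.elapsed() < grace => {
            Availability::WarmingUp(*started_at)
        }
        _ => Availability::Offline,
    }
}
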
    1290              :     /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
    1291              :     /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
    1292              :     /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
    1293              :     /// will indicate that reconciliation is not needed.
    1294              :     #[instrument(skip_all, fields(
    1295              :         seq=%result.sequence,
    1296              :         tenant_id=%result.tenant_shard_id.tenant_id,
    1297              :         shard_id=%result.tenant_shard_id.shard_slug(),
    1298              :     ))]
    1299              :     fn process_result(&self, result: ReconcileResult) {
    1300              :         let mut locked = self.inner.write().unwrap();
    1301              :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    1302              :         let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
    1303              :             // A reconciliation result might race with removing a tenant: drop results for
    1304              :             // tenants that aren't in our map.
    1305              :             return;
    1306              :         };
    1307              : 
    1308              :         // Usually the generation should only be updated via this path, so the max() isn't
    1309              :         // needed, but it is used to handle out-of-band updates, e.g. via a test hook.
    1310              :         tenant.generation = std::cmp::max(tenant.generation, result.generation);
    1311              : 
    1312              :         // If the reconciler signals that it failed to notify compute, set this state on
    1313              :         // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
    1314              :         tenant.pending_compute_notification = result.pending_compute_notification;
    1315              : 
    1316              :         // Let the TenantShard know it is idle.
    1317              :         tenant.reconcile_complete(result.sequence);
    1318              : 
    1319              :         // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
    1320              :         // make to the tenant
    1321            0 :         let deltas = result.observed_deltas.into_iter().flat_map(|delta| {
    1324            0 :             let node = nodes.get(delta.node_id())?;
    1325              : 
    1326            0 :             if node.is_available() {
    1327            0 :                 return Some(delta);
    1328            0 :             }
    1329            0 : 
    1330            0 :             // In case a node became unavailable concurrently with the reconcile, observed
    1331            0 :             // locations on it are now uncertain. By convention, set them to None in order
    1332            0 :             // for them to get refreshed when the node comes back online.
    1333            0 :             Some(ObservedStateDelta::Upsert(Box::new((
    1334            0 :                 node.get_id(),
    1335            0 :                 ObservedStateLocation { conf: None },
    1336            0 :             ))))
    1337            0 :         });
    1338              : 
    1339              :         match result.result {
    1340              :             Ok(()) => {
    1341              :                 tenant.apply_observed_deltas(deltas);
    1342              :                 tenant.waiter.advance(result.sequence);
    1343              :             }
    1344              :             Err(e) => {
    1345              :                 match e {
    1346              :                     ReconcileError::Cancel => {
    1347              :                         tracing::info!("Reconciler was cancelled");
    1348              :                     }
    1349              :                     ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    1350              :                         // This might be due to the reconciler getting cancelled, or it might
    1351              :                         // be due to the `Node` being marked offline.
    1352              :                         tracing::info!("Reconciler cancelled during pageserver API call");
    1353              :                     }
    1354              :                     _ => {
    1355              :                         tracing::warn!("Reconcile error: {}", e);
    1356              :                     }
    1357              :                 }
    1358              : 
    1359              :                 // Ordering: populate last_error before advancing error_seq,
    1360              :                 // so that waiters will see the correct error after waiting.
    1361              :                 tenant.set_last_error(result.sequence, e);
    1362              : 
    1363              :                 // Skip deletions on reconcile failures
    1364              :                 let upsert_deltas =
    1365            0 :                     deltas.filter(|delta| matches!(delta, ObservedStateDelta::Upsert(_)));
    1366              :                 tenant.apply_observed_deltas(upsert_deltas);
    1367              :             }
    1368              :         }
    1369              : 
    1370              :         // If we just finished detaching all shards for a tenant, it might be time to drop it from memory.
    1371              :         if tenant.policy == PlacementPolicy::Detached {
    1372              :             // We may only drop a tenant from memory while holding the exclusive lock on the tenant ID: this protects us
    1373              :             // from concurrent execution wrt a request handler that might expect the tenant to remain in memory for the
    1374              :             // duration of the request.
    1375              :             let guard = self.tenant_op_locks.try_exclusive(
    1376              :                 tenant.tenant_shard_id.tenant_id,
    1377              :                 TenantOperations::DropDetached,
    1378              :             );
    1379              :             if let Some(guard) = guard {
    1380              :                 self.maybe_drop_tenant(tenant.tenant_shard_id.tenant_id, &mut locked, &guard);
    1381              :             }
    1382              :         }
    1383              : 
    1384              :         // Maybe some other work can proceed now that this job finished.
    1385              :         //
    1386              :         // Only bother with this if we have some semaphore units available in the normal-priority semaphore (these
    1387              :         // reconciles are scheduled at [`ReconcilerPriority::Normal`]).
    1388              :         if self.reconciler_concurrency.available_permits() > 0 {
    1389              :             while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
    1390              :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    1391              :                 if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
    1392              :                     shard.delayed_reconcile = false;
    1393              :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    1394              :                 }
    1395              : 
    1396              :                 if self.reconciler_concurrency.available_permits() == 0 {
    1397              :                     break;
    1398              :                 }
    1399              :             }
    1400              :         }
    1401              :     }
    1402              : 
    1403            0 :     async fn process_results(
    1404            0 :         &self,
    1405            0 :         mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
    1406            0 :         mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
    1407            0 :             Result<(), (TenantShardId, NotifyError)>,
    1408            0 :         >,
    1409            0 :     ) {
    1410              :         loop {
    1411              :             // Wait for the next result, or for cancellation
    1412            0 :             tokio::select! {
    1413            0 :                 _ = async {
    1414            0 :                     match r {
    1415            0 :                         Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
    1416            0 :                         None | Some(ReconcileResultRequest::Stop) => {break;}
    1417              :                     }
    1418              :                 }
    1419            0 :                 _ = async{
    1420            0 :                     match bg_compute_hook_result_rx.recv().await {
    1421            0 :                         Some(result) => {
    1422            0 :                             if let Err((tenant_shard_id, notify_error)) = result {
    1423            0 :                                 tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
    1424            0 :                                 let mut locked = self.inner.write().unwrap();
    1425            0 :                                 if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
    1426            0 :                                     shard.pending_compute_notification = true;
    1427            0 :                                 }
    1428              : 
    1429            0 :                             }
    1430              :                         },
    1431              :                         None => {
    1432              :                             // This channel is dead, but we don't want to terminate the outer loop: just wait for shutdown
    1433            0 :                             self.cancel.cancelled().await;
    1434              :                         }
    1435              :                     }
    1436            0 :                 } => {},
    1437            0 :                 _ = self.cancel.cancelled() => {
    1438            0 :                     break;
    1439              :                 }
    1440              :             };
    1441              :         }
    1442            0 :     }
    1443              : 
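The `async` arm above uses a small trick worth calling out: when one input channel closes, the arm awaits cancellation instead of returning, so the `select!` keeps servicing the other arms rather than spinning or exiting. A minimal sketch with two hypothetical channels:

use tokio_util::sync::CancellationToken;

// Hypothetical sketch: two input channels; the secondary channel's arm
// parks on cancellation once the channel is dead, so the loop keeps
// draining the primary channel instead of terminating.
async fn drain_two(
    mut primary: tokio::sync::mpsc::UnboundedReceiver<u32>,
    mut secondary: tokio::sync::mpsc::Receiver<u32>,
    cancel: CancellationToken,
) {
    loop {
        tokio::select! {
            r = primary.recv() => match r {
                Some(v) => println!("primary: {v}"),
                None => break, // primary closing ends the loop
            },
            _ = async {
                match secondary.recv().await {
                    Some(v) => println!("secondary: {v}"),
                    // Channel dead: wait forever (until shutdown) so this
                    // arm never completes again.
                    None => cancel.cancelled().await,
                }
            } => {},
            _ = cancel.cancelled() => break,
        }
    }
}
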
    1444            0 :     async fn process_aborts(
    1445            0 :         &self,
    1446            0 :         mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
    1447            0 :     ) {
    1448              :         loop {
    1449              :             // Wait for the next result, or for cancellation
    1450            0 :             let op = tokio::select! {
    1451            0 :                 r = abort_rx.recv() => {
    1452            0 :                     match r {
    1453            0 :                         Some(op) => {op},
    1454            0 :                         None => {break;}
    1455              :                     }
    1456              :                 }
    1457            0 :                 _ = self.cancel.cancelled() => {
    1458            0 :                     break;
    1459              :                 }
    1460              :             };
    1461              : 
    1462              :             // Retry until shutdown: we must keep this request object alive until it is properly
    1463              :             // processed, as it holds a lock guard that prevents other operations from doing things
    1464              :             // to the tenant while it is in a weird part-split state.
    1465            0 :             while !self.reconcilers_cancel.is_cancelled() {
    1466            0 :                 match self.abort_tenant_shard_split(&op).await {
    1467            0 :                     Ok(_) => break,
    1468            0 :                     Err(e) => {
    1469            0 :                         tracing::warn!(
    1470            0 :                             "Failed to abort shard split on {}, will retry: {e}",
    1471              :                             op.tenant_id
    1472              :                         );
    1473              : 
    1474              :                         // If a node is unavailable, we hope that it has been properly marked Offline
    1475              :                         // when we retry, so that the abort op will succeed.  If the abort op is failing
    1476              :                         // for some other reason, we will keep retrying forever, or until a human notices
    1477              :                         // and does something about it (either fixing a pageserver or restarting the controller).
    1478            0 :                         tokio::time::timeout(
    1479            0 :                             Duration::from_secs(5),
    1480            0 :                             self.reconcilers_cancel.cancelled(),
    1481            0 :                         )
    1482            0 :                         .await
    1483            0 :                         .ok();
    1484              :                     }
    1485              :                 }
    1486              :             }
    1487              :         }
    1488            0 :     }
    1489              : 
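The retry loop above sleeps between attempts by racing a fixed timeout against the cancellation token, giving a backoff that aborts early on shutdown. A sketch of the same shape with a hypothetical fallible operation:

use std::time::Duration;
use tokio_util::sync::CancellationToken;

// Hypothetical sketch: retry an operation until it succeeds or shutdown is
// requested, backing off 5s between attempts. The timeout *elapsing* means
// "slept the full backoff"; it *completing* means "cancelled", after which
// the while condition exits the loop.
async fn retry_until_shutdown<F, Fut>(cancel: CancellationToken, mut op: F)
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<(), String>>,
{
    while !cancel.is_cancelled() {
        match op().await {
            Ok(()) => break,
            Err(e) => {
                eprintln!("operation failed, will retry: {e}");
                // Sleep up to 5s, but wake immediately on cancellation.
                tokio::time::timeout(Duration::from_secs(5), cancel.cancelled())
                    .await
                    .ok();
            }
        }
    }
}
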
    1490            0 :     pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
    1491            0 :         let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
    1492            0 :         let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
    1493            0 : 
    1494            0 :         let leadership_cancel = CancellationToken::new();
    1495            0 :         let leadership = Leadership::new(persistence.clone(), config.clone(), leadership_cancel);
    1496            0 :         let (leader, leader_step_down_state) = leadership.step_down_current_leader().await?;
    1497              : 
    1498              :         // Apply the migrations **after** the current leader has stepped down
    1499              :         // (or we've given up waiting for it), but **before** reading from the
    1500              :         // database. The only exception is reading the current leader before
    1501              :         // migrating.
    1502            0 :         persistence.migration_run().await?;
    1503              : 
    1504            0 :         tracing::info!("Loading nodes from database...");
    1505            0 :         let nodes = persistence
    1506            0 :             .list_nodes()
    1507            0 :             .await?
    1508            0 :             .into_iter()
    1509            0 :             .map(|x| Node::from_persistent(x, config.use_https_pageserver_api))
    1510            0 :             .collect::<anyhow::Result<Vec<Node>>>()?;
    1511            0 :         let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
    1512            0 :         tracing::info!("Loaded {} nodes from database.", nodes.len());
    1513            0 :         metrics::METRICS_REGISTRY
    1514            0 :             .metrics_group
    1515            0 :             .storage_controller_pageserver_nodes
    1516            0 :             .set(nodes.len() as i64);
    1517            0 :         metrics::METRICS_REGISTRY
    1518            0 :             .metrics_group
    1519            0 :             .storage_controller_https_pageserver_nodes
    1520            0 :             .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    1521            0 : 
    1522            0 :         tracing::info!("Loading safekeepers from database...");
    1523            0 :         let safekeepers = persistence
    1524            0 :             .list_safekeepers()
    1525            0 :             .await?
    1526            0 :             .into_iter()
    1527            0 :             .map(|skp| {
    1528            0 :                 Safekeeper::from_persistence(
    1529            0 :                     skp,
    1530            0 :                     CancellationToken::new(),
    1531            0 :                     config.use_https_safekeeper_api,
    1532            0 :                 )
    1533            0 :             })
    1534            0 :             .collect::<anyhow::Result<Vec<_>>>()?;
    1535            0 :         let safekeepers: HashMap<NodeId, Safekeeper> =
    1536            0 :             safekeepers.into_iter().map(|n| (n.get_id(), n)).collect();
    1537            0 :         tracing::info!("Loaded {} safekeepers from database.", safekeepers.len());
    1538            0 :         metrics::METRICS_REGISTRY
    1539            0 :             .metrics_group
    1540            0 :             .storage_controller_safekeeper_nodes
    1541            0 :             .set(safekeepers.len() as i64);
    1542            0 :         metrics::METRICS_REGISTRY
    1543            0 :             .metrics_group
    1544            0 :             .storage_controller_https_safekeeper_nodes
    1545            0 :             .set(safekeepers.values().filter(|s| s.has_https_port()).count() as i64);
    1546            0 : 
    1547            0 :         tracing::info!("Loading shards from database...");
    1548            0 :         let mut tenant_shard_persistence = persistence.load_active_tenant_shards().await?;
    1549            0 :         tracing::info!(
    1550            0 :             "Loaded {} shards from database.",
    1551            0 :             tenant_shard_persistence.len()
    1552              :         );
    1553              : 
    1554              :         // If any shard splits were in progress, reset the database state to abort them
    1555            0 :         let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
    1556            0 :             HashMap::new();
    1557            0 :         for tsp in &mut tenant_shard_persistence {
    1558            0 :             let shard = tsp.get_shard_identity()?;
    1559            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1560            0 :             let entry = tenant_shard_count_min_max
    1561            0 :                 .entry(tenant_shard_id.tenant_id)
    1562            0 :                 .or_insert_with(|| (shard.count, shard.count));
    1563            0 :             entry.0 = std::cmp::min(entry.0, shard.count);
    1564            0 :             entry.1 = std::cmp::max(entry.1, shard.count);
    1565            0 :         }
    1566              : 
    1567            0 :         for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
    1568            0 :             if count_min != count_max {
    1569              :                 // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
    1570              :                 // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
    1571              :                 // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
    1572            0 :                 tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
    1573            0 :                 let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
    1574              : 
    1575              :                 // We may never see the Complete status here: if the split was complete, we wouldn't have
    1576              :                 // identified this tenant as having mismatching min/max counts.
    1577            0 :                 assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
    1578              : 
    1579              :                 // Clear the splitting status in-memory, to reflect that we just aborted in the database
    1580            0 :                 tenant_shard_persistence.iter_mut().for_each(|tsp| {
    1581            0 :                     // Set idle split state on those shards that we will retain.
    1582            0 :                     let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
    1583            0 :                     if tsp_tenant_id == tenant_id
    1584            0 :                         && tsp.get_shard_identity().unwrap().count == count_min
    1585            0 :                     {
    1586            0 :                         tsp.splitting = SplitState::Idle;
    1587            0 :                     } else if tsp_tenant_id == tenant_id {
    1588              :                         // Leave the splitting state on the child shards: this will be used next to
    1589              :                         // drop them.
    1590            0 :                         tracing::info!(
    1591            0 :                             "Shard {tsp_tenant_id} will be dropped after shard split abort",
    1592              :                         );
    1593            0 :                     }
    1594            0 :                 });
    1595            0 : 
    1596            0 :                 // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
    1597            0 :                 tenant_shard_persistence.retain(|tsp| {
    1598            0 :                     TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
    1599            0 :                         || tsp.splitting == SplitState::Idle
    1600            0 :                 });
    1601            0 :             }
    1602              :         }
    1603              : 
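The detection above relies on an invariant: a completed split leaves every shard of a tenant at the same ShardCount, so observing two different counts means a split was interrupted mid-way. A self-contained sketch of that fold (types simplified, all names hypothetical):

use std::collections::HashMap;

// Hypothetical sketch: fold (tenant_id, shard_count) pairs into per-tenant
// (min, max) counts; a mismatch means a split was interrupted mid-way.
fn tenants_with_incomplete_splits(shards: &[(u64, u8)]) -> Vec<u64> {
    let mut min_max: HashMap<u64, (u8, u8)> = HashMap::new();
    for &(tenant, count) in shards {
        let entry = min_max.entry(tenant).or_insert((count, count));
        entry.0 = entry.0.min(count);
        entry.1 = entry.1.max(count);
    }
    min_max
        .into_iter()
        .filter(|(_, (lo, hi))| lo != hi)
        .map(|(tenant, _)| tenant)
        .collect()
}

// E.g. shards [(7, 2), (7, 2), (7, 4)] yield (min 2, max 4) for tenant 7:
// count-4 children of an unfinished split coexist with count-2 parents,
// so tenant 7 is reported for abort.
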
    1604            0 :         let mut tenants = BTreeMap::new();
    1605            0 : 
    1606            0 :         let mut scheduler = Scheduler::new(nodes.values());
    1607              : 
    1608              :         #[cfg(feature = "testing")]
    1609              :         {
    1610              :             use pageserver_api::controller_api::AvailabilityZone;
    1611              : 
    1612              :             // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
    1613              :             // tests only store the shards, not the nodes.  The nodes will be loaded shortly
    1614              :             // afterwards, when pageservers start up and register.
    1615            0 :             let mut node_ids = HashSet::new();
    1616            0 :             for tsp in &tenant_shard_persistence {
    1617            0 :                 if let Some(node_id) = tsp.generation_pageserver {
    1618            0 :                     node_ids.insert(node_id);
    1619            0 :                 }
    1620              :             }
    1621            0 :             for node_id in node_ids {
    1622            0 :                 tracing::info!("Creating node {} in scheduler for tests", node_id);
    1623            0 :                 let node = Node::new(
    1624            0 :                     NodeId(node_id as u64),
    1625            0 :                     "".to_string(),
    1626            0 :                     123,
    1627            0 :                     None,
    1628            0 :                     "".to_string(),
    1629            0 :                     123,
    1630            0 :                     AvailabilityZone("test_az".to_string()),
    1631            0 :                     false,
    1632            0 :                 )
    1633            0 :                 .unwrap();
    1634            0 : 
    1635            0 :                 scheduler.node_upsert(&node);
    1636              :             }
    1637              :         }
    1638            0 :         for tsp in tenant_shard_persistence {
    1639            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1640              : 
    1641              :             // We will populate intent properly later in [`Self::startup_reconcile`]; initially, populate
    1642              :             // it with what we can infer: the node for which a generation was most recently issued.
    1643            0 :             let mut intent = IntentState::new(
    1644            0 :                 tsp.preferred_az_id
    1645            0 :                     .as_ref()
    1646            0 :                     .map(|az| AvailabilityZone(az.clone())),
    1647            0 :             );
    1648            0 :             if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
    1649              :             {
    1650            0 :                 if nodes.contains_key(&generation_pageserver) {
    1651            0 :                     intent.set_attached(&mut scheduler, Some(generation_pageserver));
    1652            0 :                 } else {
    1653              :                     // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
    1654              :                     // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
    1655              :                     // on different pageservers.
    1656            0 :                     tracing::warn!(
    1657            0 :                         "Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled"
    1658              :                     );
    1659              :                 }
    1660            0 :             }
    1661            0 :             let new_tenant = TenantShard::from_persistent(tsp, intent)?;
    1662              : 
    1663            0 :             tenants.insert(tenant_shard_id, new_tenant);
    1664              :         }
    1665              : 
    1666            0 :         let (startup_completion, startup_complete) = utils::completion::channel();
    1667            0 : 
    1668            0 :         // This channel is continuously consumed by process_results, so doesn't need to be very large.
    1669            0 :         let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
    1670            0 :             tokio::sync::mpsc::channel(512);
    1671            0 : 
    1672            0 :         let (delayed_reconcile_tx, delayed_reconcile_rx) =
    1673            0 :             tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
    1674            0 : 
    1675            0 :         let cancel = CancellationToken::new();
    1676            0 :         let reconcilers_cancel = cancel.child_token();
    1677            0 : 
    1678            0 :         let mut http_client = reqwest::Client::builder();
    1679            0 :         // We intentionally disable the connection pool, so every request will create its own TCP connection.
    1680            0 :         // This is especially important for heartbeaters, which must notice network problems promptly.
    1681            0 :         //
    1682            0 :         // TODO: It makes sense to use this client only in heartbeaters and create a second one with
    1683            0 :         // connection pooling for everything else. But reqwest::Client may create a connection without
    1684            0 :         // ever using it (it uses hyper's Client under the hood):
    1685            0 :         // https://github.com/hyperium/hyper-util/blob/d51318df3461d40e5f5e5ca163cb3905ac960209/src/client/legacy/client.rs#L415
    1686            0 :         //
    1687            0 :         // Because of a bug in hyper0::Connection::graceful_shutdown such connections hang during
    1688            0 :         // graceful server shutdown: https://github.com/hyperium/hyper/issues/2730
    1689            0 :         //
    1690            0 :         // The bug has been fixed in hyper v1, so keep-alive may be enabled only after we migrate to hyper v1.
    1691            0 :         http_client = http_client.pool_max_idle_per_host(0);
    1692            0 :         for ssl_ca_cert in &config.ssl_ca_certs {
    1693            0 :             http_client = http_client.add_root_certificate(ssl_ca_cert.clone());
    1694            0 :         }
    1695            0 :         let http_client = http_client.build()?;
    1696              : 
    1697            0 :         let heartbeater_ps = Heartbeater::new(
    1698            0 :             http_client.clone(),
    1699            0 :             config.pageserver_jwt_token.clone(),
    1700            0 :             config.max_offline_interval,
    1701            0 :             config.max_warming_up_interval,
    1702            0 :             cancel.clone(),
    1703            0 :         );
    1704            0 : 
    1705            0 :         let heartbeater_sk = Heartbeater::new(
    1706            0 :             http_client.clone(),
    1707            0 :             config.safekeeper_jwt_token.clone(),
    1708            0 :             config.max_offline_interval,
    1709            0 :             config.max_warming_up_interval,
    1710            0 :             cancel.clone(),
    1711            0 :         );
    1712              : 
    1713            0 :         let initial_leadership_status = if config.start_as_candidate {
    1714            0 :             LeadershipStatus::Candidate
    1715              :         } else {
    1716            0 :             LeadershipStatus::Leader
    1717              :         };
    1718              : 
    1719            0 :         let this = Arc::new(Self {
    1720            0 :             inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
    1721            0 :                 nodes,
    1722            0 :                 safekeepers,
    1723            0 :                 tenants,
    1724            0 :                 scheduler,
    1725            0 :                 delayed_reconcile_rx,
    1726            0 :                 initial_leadership_status,
    1727            0 :                 reconcilers_cancel.clone(),
    1728            0 :             ))),
    1729            0 :             config: config.clone(),
    1730            0 :             persistence,
    1731            0 :             compute_hook: Arc::new(ComputeHook::new(config.clone())?),
    1732            0 :             result_tx,
    1733            0 :             heartbeater_ps,
    1734            0 :             heartbeater_sk,
    1735            0 :             reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1736            0 :                 config.reconciler_concurrency,
    1737            0 :             )),
    1738            0 :             priority_reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1739            0 :                 config.priority_reconciler_concurrency,
    1740            0 :             )),
    1741            0 :             delayed_reconcile_tx,
    1742            0 :             abort_tx,
    1743            0 :             startup_complete: startup_complete.clone(),
    1744            0 :             cancel,
    1745            0 :             reconcilers_cancel,
    1746            0 :             gate: Gate::default(),
    1747            0 :             reconcilers_gate: Gate::default(),
    1748            0 :             tenant_op_locks: Default::default(),
    1749            0 :             node_op_locks: Default::default(),
    1750            0 :             http_client,
    1751            0 :         });
    1752            0 : 
    1753            0 :         let result_task_this = this.clone();
    1754            0 :         tokio::task::spawn(async move {
    1755              :             // Block shutdown until we're done (we must respect self.cancel)
    1756            0 :             if let Ok(_gate) = result_task_this.gate.enter() {
    1757            0 :                 result_task_this
    1758            0 :                     .process_results(result_rx, bg_compute_notify_result_rx)
    1759            0 :                     .await
    1760            0 :             }
    1761            0 :         });
    1762            0 : 
    1763            0 :         tokio::task::spawn({
    1764            0 :             let this = this.clone();
    1765            0 :             async move {
    1766              :                 // Block shutdown until we're done (we must respect self.cancel)
    1767            0 :                 if let Ok(_gate) = this.gate.enter() {
    1768            0 :                     this.process_aborts(abort_rx).await
    1769            0 :                 }
    1770            0 :             }
    1771            0 :         });
    1772            0 : 
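                      :         // Background task: run housekeeping on the tenant operation lock map every 60
                      :         // seconds, until the service-wide cancellation token fires.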
    1773            0 :         tokio::task::spawn({
    1774            0 :             let this = this.clone();
    1775            0 :             async move {
    1776            0 :                 if let Ok(_gate) = this.gate.enter() {
    1777              :                     loop {
    1778            0 :                         tokio::select! {
    1779            0 :                             _ = this.cancel.cancelled() => {
    1780            0 :                                 break;
    1781              :                             },
    1782            0 :                             _ = tokio::time::sleep(Duration::from_secs(60)) => {}
    1783            0 :                         };
    1784            0 :                         this.tenant_op_locks.housekeeping();
    1785              :                     }
    1786            0 :                 }
    1787            0 :             }
    1788            0 :         });
    1789            0 : 
    1790            0 :         tokio::task::spawn({
    1791            0 :             let this = this.clone();
    1792            0 :             // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
    1793            0 :             // is done.
    1794            0 :             let startup_completion = startup_completion.clone();
    1795            0 :             async move {
    1796              :                 // Block shutdown until we're done (we must respect self.cancel)
    1797            0 :                 let Ok(_gate) = this.gate.enter() else {
    1798            0 :                     return;
    1799              :                 };
    1800              : 
    1801            0 :                 this.startup_reconcile(leader, leader_step_down_state, bg_compute_notify_result_tx)
    1802            0 :                     .await;
    1803              : 
    1804            0 :                 drop(startup_completion);
    1805            0 :             }
    1806            0 :         });
    1807            0 : 
    1808            0 :         tokio::task::spawn({
    1809            0 :             let this = this.clone();
    1810            0 :             let startup_complete = startup_complete.clone();
    1811            0 :             async move {
    1812            0 :                 startup_complete.wait().await;
    1813            0 :                 this.background_reconcile().await;
    1814            0 :             }
    1815            0 :         });
    1816            0 : 
    1817            0 :         tokio::task::spawn({
    1818            0 :             let this = this.clone();
    1819            0 :             let startup_complete = startup_complete.clone();
    1820            0 :             async move {
    1821            0 :                 startup_complete.wait().await;
    1822            0 :                 this.spawn_heartbeat_driver().await;
    1823            0 :             }
    1824            0 :         });
    1825            0 : 
    1826            0 :         Ok(this)
    1827            0 :     }
    1828              : 
    1829            0 :     pub(crate) async fn attach_hook(
    1830            0 :         &self,
    1831            0 :         attach_req: AttachHookRequest,
    1832            0 :     ) -> anyhow::Result<AttachHookResponse> {
    1833            0 :         let _tenant_lock = trace_exclusive_lock(
    1834            0 :             &self.tenant_op_locks,
    1835            0 :             attach_req.tenant_shard_id.tenant_id,
    1836            0 :             TenantOperations::AttachHook,
    1837            0 :         )
    1838            0 :         .await;
    1839              : 
    1840              :         // This is a test hook.  To enable using it on tenants that were created directly with
    1841              :         // the pageserver API (not via this service), we will auto-create any missing tenant
    1842              :         // shards with default state.
    1843            0 :         let insert = {
    1844            0 :             match self
    1845            0 :                 .maybe_load_tenant(attach_req.tenant_shard_id.tenant_id, &_tenant_lock)
    1846            0 :                 .await
    1847              :             {
    1848            0 :                 Ok(_) => false,
    1849            0 :                 Err(ApiError::NotFound(_)) => true,
    1850            0 :                 Err(e) => return Err(e.into()),
    1851              :             }
    1852              :         };
    1853              : 
    1854            0 :         if insert {
    1855            0 :             let config = attach_req.config.clone().unwrap_or_default();
    1856            0 :             let tsp = TenantShardPersistence {
    1857            0 :                 tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
    1858            0 :                 shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
    1859            0 :                 shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
    1860            0 :                 shard_stripe_size: 0,
    1861            0 :                 generation: attach_req.generation_override.or(Some(0)),
    1862            0 :                 generation_pageserver: None,
    1863            0 :                 placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
    1864            0 :                 config: serde_json::to_string(&config).unwrap(),
    1865            0 :                 splitting: SplitState::default(),
    1866            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    1867            0 :                     .unwrap(),
    1868            0 :                 preferred_az_id: None,
    1869            0 :             };
    1870            0 : 
    1871            0 :             match self.persistence.insert_tenant_shards(vec![tsp]).await {
    1872            0 :                 Err(e) => match e {
    1873              :                     DatabaseError::Query(diesel::result::Error::DatabaseError(
    1874              :                         DatabaseErrorKind::UniqueViolation,
    1875              :                         _,
    1876              :                     )) => {
    1877            0 :                         tracing::info!(
    1878            0 :                             "Raced with another request to insert tenant {}",
    1879              :                             attach_req.tenant_shard_id
    1880              :                         )
    1881              :                     }
    1882            0 :                     _ => return Err(e.into()),
    1883              :                 },
    1884              :                 Ok(()) => {
    1885            0 :                     tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
    1886              : 
    1887            0 :                     let mut shard = TenantShard::new(
    1888            0 :                         attach_req.tenant_shard_id,
    1889            0 :                         ShardIdentity::unsharded(),
    1890            0 :                         PlacementPolicy::Attached(0),
    1891            0 :                         None,
    1892            0 :                     );
    1893            0 :                     shard.config = config;
    1894            0 : 
    1895            0 :                     let mut locked = self.inner.write().unwrap();
    1896            0 :                     locked.tenants.insert(attach_req.tenant_shard_id, shard);
    1897            0 :                     tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
    1898              :                 }
    1899              :             }
    1900            0 :         }
    1901              : 
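                      :         // A node_id in the request means "attach to this node"; a None node_id is a
                      :         // detach notification.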
    1902            0 :         let new_generation = if let Some(req_node_id) = attach_req.node_id {
    1903            0 :             let maybe_tenant_conf = {
    1904            0 :                 let locked = self.inner.write().unwrap();
    1905            0 :                 locked
    1906            0 :                     .tenants
    1907            0 :                     .get(&attach_req.tenant_shard_id)
    1908            0 :                     .map(|t| t.config.clone())
    1909            0 :             };
    1910            0 : 
    1911            0 :             match maybe_tenant_conf {
    1912            0 :                 Some(conf) => {
    1913            0 :                     let new_generation = self
    1914            0 :                         .persistence
    1915            0 :                         .increment_generation(attach_req.tenant_shard_id, req_node_id)
    1916            0 :                         .await?;
    1917              : 
    1918              :                     // Persist the placement policy update. This is required
    1919              :                     // when reattaching a detached tenant.
    1920            0 :                     self.persistence
    1921            0 :                         .update_tenant_shard(
    1922            0 :                             TenantFilter::Shard(attach_req.tenant_shard_id),
    1923            0 :                             Some(PlacementPolicy::Attached(0)),
    1924            0 :                             Some(conf),
    1925            0 :                             None,
    1926            0 :                             None,
    1927            0 :                         )
    1928            0 :                         .await?;
    1929            0 :                     Some(new_generation)
    1930              :                 }
    1931              :                 None => {
    1932            0 :                     anyhow::bail!("Attach hook handling raced with tenant removal")
    1933              :                 }
    1934              :             }
    1935              :         } else {
    1936            0 :             self.persistence.detach(attach_req.tenant_shard_id).await?;
    1937            0 :             None
    1938              :         };
    1939              : 
    1940            0 :         let mut locked = self.inner.write().unwrap();
    1941            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    1942            0 : 
    1943            0 :         let tenant_shard = tenants
    1944            0 :             .get_mut(&attach_req.tenant_shard_id)
    1945            0 :             .expect("Checked for existence above");
    1946              : 
    1947            0 :         if let Some(new_generation) = new_generation {
    1948            0 :             tenant_shard.generation = Some(new_generation);
    1949            0 :             tenant_shard.policy = PlacementPolicy::Attached(0);
    1950            0 :         } else {
    1951              :             // This is a detach notification.  We must update placement policy to avoid re-attaching
    1952              :             // during background scheduling/reconciliation, or during storage controller restart.
    1953            0 :             assert!(attach_req.node_id.is_none());
    1954            0 :             tenant_shard.policy = PlacementPolicy::Detached;
    1955              :         }
    1956              : 
    1957            0 :         if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
    1958            0 :             tracing::info!(
    1959              :                 tenant_id = %attach_req.tenant_shard_id,
    1960              :                 ps_id = %attaching_pageserver,
    1961              :                 generation = ?tenant_shard.generation,
    1962            0 :                 "issuing",
    1963              :             );
    1964            0 :         } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
    1965            0 :             tracing::info!(
    1966              :                 tenant_id = %attach_req.tenant_shard_id,
    1967              :                 %ps_id,
    1968              :                 generation = ?tenant_shard.generation,
    1969            0 :                 "dropping",
    1970              :             );
    1971              :         } else {
    1972            0 :             tracing::info!(
    1973              :                 tenant_id = %attach_req.tenant_shard_id,
    1974            0 :                 "no-op: tenant already has no pageserver");
    1975              :         }
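                      :         // Apply the request to the intent state: attach to the given node, or detach if
                      :         // node_id is None.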
    1976            0 :         tenant_shard
    1977            0 :             .intent
    1978            0 :             .set_attached(scheduler, attach_req.node_id);
    1979            0 : 
    1980            0 :         tracing::info!(
    1981            0 :             "attach_hook: tenant {} set generation {:?}, pageserver {}, config {:?}",
    1982            0 :             attach_req.tenant_shard_id,
    1983            0 :             tenant_shard.generation,
    1984            0 :             // TODO: this is an odd number of 0xf's
    1985            0 :             attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff)),
    1986              :             attach_req.config,
    1987              :         );
    1988              : 
    1989              :         // Trick the reconciler into not doing anything for this tenant: this helps
    1990              :         // tests that manually configure a tenant on the pageserver, and then call this
    1991              :         // attach hook: they don't want background reconciliation to modify what they
    1992              :         // did to the pageserver.
    1993              :         #[cfg(feature = "testing")]
    1994              :         {
    1995            0 :             if let Some(node_id) = attach_req.node_id {
    1996            0 :                 tenant_shard.observed.locations = HashMap::from([(
    1997            0 :                     node_id,
    1998            0 :                     ObservedStateLocation {
    1999            0 :                         conf: Some(attached_location_conf(
    2000            0 :                             tenant_shard.generation.unwrap(),
    2001            0 :                             &tenant_shard.shard,
    2002            0 :                             &tenant_shard.config,
    2003            0 :                             &PlacementPolicy::Attached(0),
    2004            0 :                         )),
    2005            0 :                     },
    2006            0 :                 )]);
    2007            0 :             } else {
    2008            0 :                 tenant_shard.observed.locations.clear();
    2009            0 :             }
    2010              :         }
    2011              : 
    2012            0 :         Ok(AttachHookResponse {
    2013            0 :             generation: attach_req
    2014            0 :                 .node_id
    2015            0 :                 .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
    2016            0 :         })
    2017            0 :     }
    2018              : 
    2019            0 :     pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
    2020            0 :         let locked = self.inner.read().unwrap();
    2021            0 : 
    2022            0 :         let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
    2023            0 : 
    2024            0 :         InspectResponse {
    2025            0 :             attachment: tenant_shard.and_then(|s| {
    2026            0 :                 s.intent
    2027            0 :                     .get_attached()
    2028            0 :                     .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
    2029            0 :             }),
    2030            0 :         }
    2031            0 :     }
    2032              : 
    2033              :     // When the availability state of a node transitions to active, we must do a full reconciliation
    2034              :     // of LocationConfigs on that node.  This is because while a node was offline:
    2035              :     // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
    2036              :     // - aborting a tenant shard split might have left rogue child shards behind on this node.
    2037              :     //
    2038              :     // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
    2039              :     // Reconcilers might communicate with the node, and these must not overlap with the work we do in
    2040              :     // this function.
    2041              :     //
    2042              :     // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
    2043              :     // is written for a single node rather than as a batch job for all nodes.
    2044              :     #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
    2045              :     async fn node_activate_reconcile(
    2046              :         &self,
    2047              :         mut node: Node,
    2048              :         _lock: &TracingExclusiveGuard<NodeOperations>,
    2049              :     ) -> Result<(), ApiError> {
    2050              :         // This Node is a mutable local copy: we will set it active so that we can use its
    2051              :         // API client to reconcile with the node.  The Node in [`Self::nodes`] will get updated
    2052              :         // later.
    2053              :         node.set_availability(NodeAvailability::Active(PageserverUtilization::full()));
    2054              : 
    2055              :         let configs = match node
    2056              :             .with_client_retries(
    2057            0 :                 |client| async move { client.list_location_config().await },
    2058              :                 &self.http_client,
    2059              :                 &self.config.pageserver_jwt_token,
    2060              :                 1,
    2061              :                 5,
    2062              :                 SHORT_RECONCILE_TIMEOUT,
    2063              :                 &self.cancel,
    2064              :             )
    2065              :             .await
    2066              :         {
    2067              :             None => {
    2068              :                 // We're shutting down (the Node's cancellation token can't have fired, because
    2069              :                 // we're the only scope that has a reference to it, and we didn't fire it).
    2070              :                 return Err(ApiError::ShuttingDown);
    2071              :             }
    2072              :             Some(Err(e)) => {
    2073              :                 // This node didn't succeed listing its locations: it may not proceed to active state
    2074              :                 // as it is apparently unavailable.
    2075              :                 return Err(ApiError::PreconditionFailed(
    2076              :                     format!("Failed to query node location configs, cannot activate ({e})").into(),
    2077              :                 ));
    2078              :             }
    2079              :             Some(Ok(configs)) => configs,
    2080              :         };
    2081              :         tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
    2082              : 
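                      :         // Locations the node reports but we don't know about are queued for detach;
                      :         // known locations whose reported config differs from our observed state have
                      :         // that observed state reset to None (uncertain) below.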
    2083              :         let mut cleanup = Vec::new();
    2084              :         let mut mismatched_locations = 0;
    2085              :         {
    2086              :             let mut locked = self.inner.write().unwrap();
    2087              : 
    2088              :             for (tenant_shard_id, reported) in configs.tenant_shards {
    2089              :                 let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
    2090              :                     cleanup.push(tenant_shard_id);
    2091              :                     continue;
    2092              :                 };
    2093              : 
    2094              :                 let on_record = &mut tenant_shard
    2095              :                     .observed
    2096              :                     .locations
    2097              :                     .entry(node.get_id())
    2098            0 :                     .or_insert_with(|| ObservedStateLocation { conf: None })
    2099              :                     .conf;
    2100              : 
    2101              :                 // If the location reported by the node does not match our observed state,
    2102              :                 // then we mark it as uncertain and let the background reconciliation loop
    2103              :                 // deal with it.
    2104              :                 //
    2105              :                 // Note that this also covers net new locations reported by the node.
    2106              :                 if *on_record != reported {
    2107              :                     mismatched_locations += 1;
    2108              :                     *on_record = None;
    2109              :                 }
    2110              :             }
    2111              :         }
    2112              : 
    2113              :         if mismatched_locations > 0 {
    2114              :             tracing::info!(
    2115              :                 "Set observed state to None for {mismatched_locations} mismatched locations"
    2116              :             );
    2117              :         }
    2118              : 
    2119              :         for tenant_shard_id in cleanup {
    2120              :             tracing::info!("Detaching {tenant_shard_id}");
    2121              :             match node
    2122              :                 .with_client_retries(
    2123            0 :                     |client| async move {
    2124            0 :                         let config = LocationConfig {
    2125            0 :                             mode: LocationConfigMode::Detached,
    2126            0 :                             generation: None,
    2127            0 :                             secondary_conf: None,
    2128            0 :                             shard_number: tenant_shard_id.shard_number.0,
    2129            0 :                             shard_count: tenant_shard_id.shard_count.literal(),
    2130            0 :                             shard_stripe_size: 0,
    2131            0 :                             tenant_conf: models::TenantConfig::default(),
    2132            0 :                         };
    2133            0 :                         client
    2134            0 :                             .location_config(tenant_shard_id, config, None, false)
    2135            0 :                             .await
    2136            0 :                     },
    2137              :                     &self.http_client,
    2138              :                     &self.config.pageserver_jwt_token,
    2139              :                     1,
    2140              :                     5,
    2141              :                     SHORT_RECONCILE_TIMEOUT,
    2142              :                     &self.cancel,
    2143              :                 )
    2144              :                 .await
    2145              :             {
    2146              :                 None => {
    2147              :                     // We're shutting down (the Node's cancellation token can't have fired, because
    2148              :                     // we're the only scope that has a reference to it, and we didn't fire it).
    2149              :                     return Err(ApiError::ShuttingDown);
    2150              :                 }
    2151              :                 Some(Err(e)) => {
    2152              :                     // Do not let the node proceed to Active state if it is not responsive to requests
    2153              :                     // to detach.  This could happen if e.g. a shutdown bug in the pageserver is preventing
    2154              :                     // detach completing: we should not let this node back into the set of nodes considered
    2155              :                     // okay for scheduling.
    2156              :                     return Err(ApiError::Conflict(format!(
    2157              :                         "Node {node} failed to detach {tenant_shard_id}: {e}"
    2158              :                     )));
    2159              :                 }
    2160              :                 Some(Ok(_)) => {}
    2161              :             };
    2162              :         }
    2163              : 
    2164              :         Ok(())
    2165              :     }
    2166              : 
    2167            0 :     pub(crate) async fn re_attach(
    2168            0 :         &self,
    2169            0 :         reattach_req: ReAttachRequest,
    2170            0 :     ) -> Result<ReAttachResponse, ApiError> {
    2171            0 :         if let Some(register_req) = reattach_req.register {
    2172            0 :             self.node_register(register_req).await?;
    2173            0 :         }
    2174              : 
    2175              :         // Ordering: we must persist generation number updates before making them visible in the in-memory state
    2176            0 :         let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
    2177              : 
    2178            0 :         tracing::info!(
    2179              :             node_id=%reattach_req.node_id,
    2180            0 :             "Incremented {} tenant shards' generations",
    2181            0 :             incremented_generations.len()
    2182              :         );
    2183              : 
    2184              :         // Apply the updated generation to our in-memory state, and
    2185              :         // discover secondary locations.
    2186            0 :         let mut locked = self.inner.write().unwrap();
    2187            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    2188            0 : 
    2189            0 :         let mut response = ReAttachResponse {
    2190            0 :             tenants: Vec::new(),
    2191            0 :         };
    2192              : 
    2193              :         // TODO: cancel/restart any running reconciliation for this tenant: it might be trying
    2194              :         // to call location_conf API with an old generation.  Wait for cancellation to complete
    2195              :         // before responding to this request.  Requires well implemented CancellationToken logic
    2196              :         // all the way to where we call location_conf.  Even then, there can still be a location_conf
    2197              :         // request in flight over the network: TODO handle that by making location_conf API refuse
    2198              :         // to go backward in generations.
    2199              : 
    2200              :         // Scan through all shards, applying updates for ones where we updated generation
    2201              :         // and identifying shards that intend to have a secondary location on this node.
    2202            0 :         for (tenant_shard_id, shard) in tenants {
    2203            0 :             if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
    2204            0 :                 let new_gen = *new_gen;
    2205            0 :                 response.tenants.push(ReAttachResponseTenant {
    2206            0 :                     id: *tenant_shard_id,
    2207            0 :                     r#gen: Some(new_gen.into().unwrap()),
    2208            0 :                     // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
    2209            0 :                     // execution.  If a pageserver is restarted during that process, then the reconcile pass will
    2210            0 :                     // fail, and start from scratch, so it doesn't make sense for us to try and preserve
    2211            0 :                     // the stale/multi states at this point.
    2212            0 :                     mode: LocationConfigMode::AttachedSingle,
    2213            0 :                 });
    2214            0 : 
    2215            0 :                 shard.generation = std::cmp::max(shard.generation, Some(new_gen));
    2216            0 :                 if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
    2217              :                     // Why can we update `observed` even though we're not sure our response will be received
    2218              :                     // by the pageserver?  Because the pageserver will not proceed with startup until
    2219              :                     // it has processed the response: if it loses it, we'll see another request and increment
    2220              :                     // the generation again, avoiding any uncertainty about the dirtiness of the tenant's state.
    2221            0 :                     if let Some(conf) = observed.conf.as_mut() {
    2222            0 :                         conf.generation = new_gen.into();
    2223            0 :                     }
    2224            0 :                 } else {
    2225            0 :                     // This node has no observed state for the shard: perhaps it was offline
    2226            0 :                     // when the pageserver restarted.  Insert a None, so that the Reconciler
    2227            0 :                     // will be prompted to learn the location's state before it makes changes.
    2228            0 :                     shard
    2229            0 :                         .observed
    2230            0 :                         .locations
    2231            0 :                         .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
    2232            0 :                 }
    2233            0 :             } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
    2234            0 :                 // Ordering: pageserver will not accept /location_config requests until it has
    2235            0 :                 // finished processing the response from re-attach.  So we can update our in-memory state
    2236            0 :                 // now, and be confident that we are not stamping on the result of some later location config.
    2237            0 :                 // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
    2238            0 :                 // so we might update observed state here, and then get over-written by some racing
    2239            0 :                 // ReconcileResult.  The impact is low, however, since the state we have set on the
    2240            0 :                 // pageserver matches the intent, so the worst case if we race is a spurious reconcile.
    2241            0 : 
    2242            0 :                 response.tenants.push(ReAttachResponseTenant {
    2243            0 :                     id: *tenant_shard_id,
    2244            0 :                     r#gen: None,
    2245            0 :                     mode: LocationConfigMode::Secondary,
    2246            0 :                 });
    2247            0 : 
    2248            0 :                 // We must not update observed, because we have no guarantee that our
    2249            0 :                 // response will be received by the pageserver. This could leave it
    2250            0 :                 // falsely dirty, but the resulting reconcile should be idempotent.
    2251            0 :             }
    2252              :         }
    2253              : 
    2254              :         // We consider a node Active once we have composed a re-attach response, but we
    2255              :         // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
    2256              :         // implicitly synchronizes the LocationConfigs on the node.
    2257              :         //
    2258              :         // Setting a node active unblocks any Reconcilers that might write to the location config API,
    2259              :         // but those requests will not be accepted by the node until it has finished processing
    2260              :         // the re-attach response.
    2261              :         //
    2262              :         // Additionally, reset the node's scheduling policy to match the conditional update done
    2263              :         // in [`Persistence::re_attach`].
    2264            0 :         if let Some(node) = nodes.get(&reattach_req.node_id) {
    2265            0 :             let reset_scheduling = matches!(
    2266            0 :                 node.get_scheduling(),
    2267              :                 NodeSchedulingPolicy::PauseForRestart
    2268              :                     | NodeSchedulingPolicy::Draining
    2269              :                     | NodeSchedulingPolicy::Filling
    2270              :             );
    2271              : 
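                      :             // Copy-on-write update of the shared node map: clone the map behind the Arc,
                      :             // mutate the copy, then swap it in.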
    2272            0 :             let mut new_nodes = (**nodes).clone();
    2273            0 :             if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
    2274            0 :                 if reset_scheduling {
    2275            0 :                     node.set_scheduling(NodeSchedulingPolicy::Active);
    2276            0 :                 }
    2277              : 
    2278            0 :                 tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
    2279            0 :                 node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
    2280            0 : 
    2281            0 :                 scheduler.node_upsert(node);
    2282            0 :                 let new_nodes = Arc::new(new_nodes);
    2283            0 :                 *nodes = new_nodes;
    2284              :             } else {
    2285            0 :                 tracing::error!(
    2286            0 :                     "Reattaching node {} was removed while processing the request",
    2287              :                     reattach_req.node_id
    2288              :                 );
    2289              :             }
    2290            0 :         }
    2291              : 
    2292            0 :         Ok(response)
    2293            0 :     }
    2294              : 
    2295            0 :     pub(crate) async fn validate(
    2296            0 :         &self,
    2297            0 :         validate_req: ValidateRequest,
    2298            0 :     ) -> Result<ValidateResponse, DatabaseError> {
    2299              :         // Fast in-memory check: we may reject validation on anything that doesn't match our
    2300              :         // in-memory generation for a shard
    2301            0 :         let in_memory_result = {
    2302            0 :             let mut in_memory_result = Vec::new();
    2303            0 :             let locked = self.inner.read().unwrap();
    2304            0 :             for req_tenant in validate_req.tenants {
    2305            0 :                 if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
    2306            0 :                     let valid = tenant_shard.generation == Some(Generation::new(req_tenant.r#gen));
    2307            0 :                     tracing::info!(
    2308            0 :                         "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
    2309              :                         req_tenant.id,
    2310              :                         req_tenant.r#gen,
    2311              :                         tenant_shard.generation
    2312              :                     );
    2313              : 
    2314            0 :                     in_memory_result.push((
    2315            0 :                         req_tenant.id,
    2316            0 :                         Generation::new(req_tenant.r#gen),
    2317            0 :                         valid,
    2318            0 :                     ));
    2319              :                 } else {
    2320              :                     // This is legal: for example during a shard split the pageserver may still
    2321              :                     // have deletions in its queue from the old pre-split shard, or after deletion
    2322              :                     // of a tenant that was busy with compaction/gc while being deleted.
    2323            0 :                     tracing::info!(
    2324            0 :                         "Refusing deletion validation for missing shard {}",
    2325              :                         req_tenant.id
    2326              :                     );
    2327              :                 }
    2328              :             }
    2329              : 
    2330            0 :             in_memory_result
    2331              :         };
    2332              : 
    2333              :         // Database calls to confirm validity for anything that passed the in-memory check.  We must do this
    2334              :         // in case of controller split-brain, where some other controller process might have incremented the generation.
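                      :         // (Each in-memory result is (tenant_shard_id, generation, valid); only shards
                      :         // whose in-memory check passed are queried.)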
    2335            0 :         let db_generations = self
    2336            0 :             .persistence
    2337            0 :             .shard_generations(
    2338            0 :                 in_memory_result
    2339            0 :                     .iter()
    2340            0 :                     .filter_map(|i| if i.2 { Some(&i.0) } else { None }),
    2341            0 :             )
    2342            0 :             .await?;
    2343            0 :         let db_generations = db_generations.into_iter().collect::<HashMap<_, _>>();
    2344            0 : 
    2345            0 :         let mut response = ValidateResponse {
    2346            0 :             tenants: Vec::new(),
    2347            0 :         };
    2348            0 :         for (tenant_shard_id, validate_generation, valid) in in_memory_result.into_iter() {
    2349            0 :             let valid = if valid {
    2350            0 :                 let db_generation = db_generations.get(&tenant_shard_id);
    2351            0 :                 db_generation == Some(&Some(validate_generation))
    2352              :             } else {
    2353              :                 // If in-memory state says it's invalid, trust that.  It's always safe to fail a validation, at worst
    2354              :                 // this prevents a pageserver from cleaning up an object in S3.
    2355            0 :                 false
    2356              :             };
    2357              : 
    2358            0 :             response.tenants.push(ValidateResponseTenant {
    2359            0 :                 id: tenant_shard_id,
    2360            0 :                 valid,
    2361            0 :             })
    2362              :         }
    2363              : 
    2364            0 :         Ok(response)
    2365            0 :     }
    2366              : 
    2367            0 :     pub(crate) async fn tenant_create(
    2368            0 :         &self,
    2369            0 :         create_req: TenantCreateRequest,
    2370            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    2371            0 :         let tenant_id = create_req.new_tenant_id.tenant_id;
    2372              : 
    2373              :         // Exclude any concurrent attempts to create/access the same tenant ID
    2374            0 :         let _tenant_lock = trace_exclusive_lock(
    2375            0 :             &self.tenant_op_locks,
    2376            0 :             create_req.new_tenant_id.tenant_id,
    2377            0 :             TenantOperations::Create,
    2378            0 :         )
    2379            0 :         .await;
    2380            0 :         let (response, waiters) = self.do_tenant_create(create_req).await?;
    2381              : 
    2382            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    2383              :             // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
    2384              :             // accept compute notifications while the tenant is still being created.  Reconciliation will
    2385              :             // be retried in the background.
    2386            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
    2387            0 :         }
    2388            0 :         Ok(response)
    2389            0 :     }
    2390              : 
    2391            0 :     pub(crate) async fn do_tenant_create(
    2392            0 :         &self,
    2393            0 :         create_req: TenantCreateRequest,
    2394            0 :     ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
    2395            0 :         let placement_policy = create_req
    2396            0 :             .placement_policy
    2397            0 :             .clone()
    2398            0 :             // As a default, zero secondaries is convenient for tests that don't choose a policy.
    2399            0 :             .unwrap_or(PlacementPolicy::Attached(0));
    2400              : 
    2401              :         // This service expects to handle sharding itself: it is an error to try and directly create
    2402              :         // a particular shard here.
    2403            0 :         let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
    2404            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2405            0 :                 "Attempted to create a specific shard, this API is for creating the whole tenant"
    2406            0 :             )));
    2407              :         } else {
    2408            0 :             create_req.new_tenant_id.tenant_id
    2409            0 :         };
    2410            0 : 
    2411            0 :         tracing::info!(
    2412            0 :             "Creating tenant {}, shard_count={:?}",
    2413              :             create_req.new_tenant_id,
    2414              :             create_req.shard_parameters.count,
    2415              :         );
    2416              : 
    2417            0 :         let create_ids = (0..create_req.shard_parameters.count.count())
    2418            0 :             .map(|i| TenantShardId {
    2419            0 :                 tenant_id,
    2420            0 :                 shard_number: ShardNumber(i),
    2421            0 :                 shard_count: create_req.shard_parameters.count,
    2422            0 :             })
    2423            0 :             .collect::<Vec<_>>();
    2424              : 
    2425              :         // If the caller specifies a None generation, it means "start from default".  This is different
    2426              :         // to [`Self::tenant_location_config`], where a None generation is used to represent
    2427              :         // an incompletely-onboarded tenant.
    2428            0 :         let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
    2429            0 :             tracing::info!(
    2430            0 :                 "tenant_create: secondary mode, generation is_some={}",
    2431            0 :                 create_req.generation.is_some()
    2432              :             );
    2433            0 :             create_req.generation.map(Generation::new)
    2434              :         } else {
    2435            0 :             tracing::info!(
    2436            0 :                 "tenant_create: not secondary mode, generation is_some={}",
    2437            0 :                 create_req.generation.is_some()
    2438              :             );
    2439            0 :             Some(
    2440            0 :                 create_req
    2441            0 :                     .generation
    2442            0 :                     .map(Generation::new)
    2443            0 :                     .unwrap_or(INITIAL_GENERATION),
    2444            0 :             )
    2445              :         };
    2446              : 
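                      :         // A single preferred AZ is chosen up front so that every shard of the tenant
                      :         // carries the same AZ preference.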
    2447            0 :         let preferred_az_id = {
    2448            0 :             let locked = self.inner.read().unwrap();
    2449              :             // Idempotency: take the existing value if the tenant already exists
    2450            0 :             if let Some(shard) = locked.tenants.get(create_ids.first().unwrap()) {
    2451            0 :                 shard.preferred_az().cloned()
    2452              :             } else {
    2453            0 :                 locked.scheduler.get_az_for_new_tenant()
    2454              :             }
    2455              :         };
    2456              : 
    2457              :         // Ordering: we persist tenant shards before creating them on the pageserver.  This enables a caller
    2458              :         // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
    2459              :         // during the creation, rather than risking leaving orphan objects in S3.
    2460            0 :         let persist_tenant_shards = create_ids
    2461            0 :             .iter()
    2462            0 :             .map(|tenant_shard_id| TenantShardPersistence {
    2463            0 :                 tenant_id: tenant_shard_id.tenant_id.to_string(),
    2464            0 :                 shard_number: tenant_shard_id.shard_number.0 as i32,
    2465            0 :                 shard_count: tenant_shard_id.shard_count.literal() as i32,
    2466            0 :                 shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
    2467            0 :                 generation: initial_generation.map(|g| g.into().unwrap() as i32),
    2468            0 :                 // The pageserver is not known until scheduling happens: we will set this column when
    2469            0 :                 // incrementing the generation the first time we attach to a pageserver.
    2470            0 :                 generation_pageserver: None,
    2471            0 :                 placement_policy: serde_json::to_string(&placement_policy).unwrap(),
    2472            0 :                 config: serde_json::to_string(&create_req.config).unwrap(),
    2473            0 :                 splitting: SplitState::default(),
    2474            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2475            0 :                     .unwrap(),
    2476            0 :                 preferred_az_id: preferred_az_id.as_ref().map(|az| az.to_string()),
    2477            0 :             })
    2478            0 :             .collect();
    2479            0 : 
    2480            0 :         match self
    2481            0 :             .persistence
    2482            0 :             .insert_tenant_shards(persist_tenant_shards)
    2483            0 :             .await
    2484              :         {
    2485            0 :             Ok(_) => {}
    2486              :             Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
    2487              :                 DatabaseErrorKind::UniqueViolation,
    2488              :                 _,
    2489              :             ))) => {
    2490              :                 // Unique key violation: this is probably a retry.  Because the shard count is part of the unique key,
    2491              :                 // if we see a unique key violation it means that the creation request's shard count matches the previous
    2492              :                 // creation's shard count.
    2493            0 :                 tracing::info!(
    2494            0 :                     "Tenant shards already present in database, proceeding with idempotent creation..."
    2495              :                 );
    2496              :             }
    2497              :             // Any other database error is unexpected and a bug.
    2498            0 :             Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
    2499              :         };
    2500              : 
    2501            0 :         let mut schedule_context = ScheduleContext::default();
    2502            0 :         let mut schedule_error = None;
    2503            0 :         let mut response_shards = Vec::new();
    2504            0 :         for tenant_shard_id in create_ids {
    2505            0 :             tracing::info!("Creating shard {tenant_shard_id}...");
    2506              : 
    2507            0 :             let outcome = self
    2508            0 :                 .do_initial_shard_scheduling(
    2509            0 :                     tenant_shard_id,
    2510            0 :                     initial_generation,
    2511            0 :                     &create_req.shard_parameters,
    2512            0 :                     create_req.config.clone(),
    2513            0 :                     placement_policy.clone(),
    2514            0 :                     preferred_az_id.as_ref(),
    2515            0 :                     &mut schedule_context,
    2516            0 :                 )
    2517            0 :                 .await;
    2518              : 
    2519            0 :             match outcome {
    2520            0 :                 InitialShardScheduleOutcome::Scheduled(resp) => response_shards.push(resp),
    2521            0 :                 InitialShardScheduleOutcome::NotScheduled => {}
    2522            0 :                 InitialShardScheduleOutcome::ShardScheduleError(err) => {
    2523            0 :                     schedule_error = Some(err);
    2524            0 :                 }
    2525              :             }
    2526              :         }
    2527              : 
    2528              :         // If we failed to schedule shards, then they are still created in the controller,
    2529              :         // but we return an error to the requester to avoid a silent failure when someone
    2530              :         // tries to e.g. create a tenant whose placement policy requires more nodes than
    2531              :         // are present in the system.  We do this here rather than in the above loop, to
    2532              :         // avoid situations where we only create a subset of shards in the tenant.
    2533            0 :         if let Some(e) = schedule_error {
    2534            0 :             return Err(ApiError::Conflict(format!(
    2535            0 :                 "Failed to schedule shard(s): {e}"
    2536            0 :             )));
    2537            0 :         }
    2538            0 : 
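                      :         // Kick off a high-priority reconcile for every shard of the new tenant; the
                      :         // returned waiters let the caller await these initial reconciliations.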
    2539            0 :         let waiters = {
    2540            0 :             let mut locked = self.inner.write().unwrap();
    2541            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    2542            0 :             let config = ReconcilerConfigBuilder::new(ReconcilerPriority::High)
    2543            0 :                 .tenant_creation_hint(true)
    2544            0 :                 .build();
    2545            0 :             tenants
    2546            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    2547            0 :                 .filter_map(|(_shard_id, shard)| {
    2548            0 :                     self.maybe_configured_reconcile_shard(shard, nodes, config)
    2549            0 :                 })
    2550            0 :                 .collect::<Vec<_>>()
    2551            0 :         };
    2552            0 : 
    2553            0 :         Ok((
    2554            0 :             TenantCreateResponse {
    2555            0 :                 shards: response_shards,
    2556            0 :             },
    2557            0 :             waiters,
    2558            0 :         ))
    2559            0 :     }
    2560              : 
    2561              :     /// Helper for tenant creation that does the scheduling for an individual shard. Covers both the
    2562              :     /// case of a new tenant and a pre-existing one.
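                      :     ///
                      :     /// Note on idempotency (a summary of the Occupied/Vacant arms below, not a
                      :     /// guarantee beyond them): re-running creation for an already-existing shard
                      :     /// re-schedules it and reports its current attachment rather than failing.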
    2563              :     #[allow(clippy::too_many_arguments)]
    2564            0 :     async fn do_initial_shard_scheduling(
    2565            0 :         &self,
    2566            0 :         tenant_shard_id: TenantShardId,
    2567            0 :         initial_generation: Option<Generation>,
    2568            0 :         shard_params: &ShardParameters,
    2569            0 :         config: TenantConfig,
    2570            0 :         placement_policy: PlacementPolicy,
    2571            0 :         preferred_az_id: Option<&AvailabilityZone>,
    2572            0 :         schedule_context: &mut ScheduleContext,
    2573            0 :     ) -> InitialShardScheduleOutcome {
    2574            0 :         let mut locked = self.inner.write().unwrap();
    2575            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2576              : 
    2577              :         use std::collections::btree_map::Entry;
    2578            0 :         match tenants.entry(tenant_shard_id) {
    2579            0 :             Entry::Occupied(mut entry) => {
    2580            0 :                 tracing::info!("Tenant shard {tenant_shard_id} already exists while creating");
    2581              : 
    2582            0 :                 if let Err(err) = entry.get_mut().schedule(scheduler, schedule_context) {
    2583            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(err);
    2584            0 :                 }
    2585              : 
    2586            0 :                 if let Some(node_id) = entry.get().intent.get_attached() {
    2587            0 :                     let generation = entry
    2588            0 :                         .get()
    2589            0 :                         .generation
    2590            0 :                         .expect("Generation is set when in attached mode");
    2591            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2592            0 :                         shard_id: tenant_shard_id,
    2593            0 :                         node_id: *node_id,
    2594            0 :                         generation: generation.into().unwrap(),
    2595            0 :                     })
    2596              :                 } else {
    2597            0 :                     InitialShardScheduleOutcome::NotScheduled
    2598              :                 }
    2599              :             }
    2600            0 :             Entry::Vacant(entry) => {
    2601            0 :                 let state = entry.insert(TenantShard::new(
    2602            0 :                     tenant_shard_id,
    2603            0 :                     ShardIdentity::from_params(tenant_shard_id.shard_number, shard_params),
    2604            0 :                     placement_policy,
    2605            0 :                     preferred_az_id.cloned(),
    2606            0 :                 ));
    2607            0 : 
    2608            0 :                 state.generation = initial_generation;
    2609            0 :                 state.config = config;
    2610            0 :                 if let Err(e) = state.schedule(scheduler, schedule_context) {
    2611            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(e);
    2612            0 :                 }
    2613              : 
    2614              :                 // Only include shards in result if we are attaching: the purpose
    2615              :                 // of the response is to tell the caller where the shards are attached.
    2616            0 :                 if let Some(node_id) = state.intent.get_attached() {
    2617            0 :                     let generation = state
    2618            0 :                         .generation
    2619            0 :                         .expect("Generation is set when in attached mode");
    2620            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2621            0 :                         shard_id: tenant_shard_id,
    2622            0 :                         node_id: *node_id,
    2623            0 :                         generation: generation.into().unwrap(),
    2624            0 :                     })
    2625              :                 } else {
    2626            0 :                     InitialShardScheduleOutcome::NotScheduled
    2627              :                 }
    2628              :             }
    2629              :         }
    2630            0 :     }
    2631              : 
    2632              :     /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
    2633              :     /// wait for reconciliation to complete before responding.
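                      :     ///
                      :     /// A sketch of the shared-deadline pattern used here (and in
                      :     /// [`Self::await_waiters_remainder`]): all waiters draw down one overall
                      :     /// budget rather than each receiving its own timeout:
                      :     ///
                      :     /// ```ignore
                      :     /// let deadline = Instant::now() + timeout;
                      :     /// for waiter in waiters {
                      :     ///     // Later waiters see only whatever budget remains (saturating at zero).
                      :     ///     waiter.wait_timeout(deadline.duration_since(Instant::now())).await?;
                      :     /// }
                      :     /// ```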
    2634            0 :     async fn await_waiters(
    2635            0 :         &self,
    2636            0 :         waiters: Vec<ReconcilerWaiter>,
    2637            0 :         timeout: Duration,
    2638            0 :     ) -> Result<(), ReconcileWaitError> {
    2639            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2640            0 :         for waiter in waiters {
    2641            0 :             let timeout = deadline.duration_since(Instant::now());
    2642            0 :             waiter.wait_timeout(timeout).await?;
    2643              :         }
    2644              : 
    2645            0 :         Ok(())
    2646            0 :     }
    2647              : 
    2648              :     /// Same as [`Service::await_waiters`], but returns the waiters which are still
    2649              :     /// in progress.
    2650            0 :     async fn await_waiters_remainder(
    2651            0 :         &self,
    2652            0 :         waiters: Vec<ReconcilerWaiter>,
    2653            0 :         timeout: Duration,
    2654            0 :     ) -> Vec<ReconcilerWaiter> {
    2655            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2656            0 :         for waiter in waiters.iter() {
    2657            0 :             let timeout = deadline.duration_since(Instant::now());
    2658            0 :             let _ = waiter.wait_timeout(timeout).await;
    2659              :         }
    2660              : 
    2661            0 :         waiters
    2662            0 :             .into_iter()
    2663            0 :             .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
    2664            0 :             .collect::<Vec<_>>()
    2665            0 :     }
    2666              : 
    2667              :     /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
    2668              :     /// and transform it into either a tenant creation or a series of shard updates.
    2669              :     ///
    2670              :     /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
    2671              :     /// still be returned.
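                      :     ///
                      :     /// The incoming mode maps onto a placement policy roughly as follows (see the
                      :     /// body for the single-pageserver dev/test special case):
                      :     ///
                      :     /// ```text
                      :     /// Detached                                       => PlacementPolicy::Detached
                      :     /// Secondary                                      => PlacementPolicy::Secondary
                      :     /// AttachedSingle | AttachedMulti | AttachedStale => PlacementPolicy::Attached(1)
                      :     /// ```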
    2672            0 :     fn tenant_location_config_prepare(
    2673            0 :         &self,
    2674            0 :         tenant_id: TenantId,
    2675            0 :         req: TenantLocationConfigRequest,
    2676            0 :     ) -> TenantCreateOrUpdate {
    2677            0 :         let mut updates = Vec::new();
    2678            0 :         let mut locked = self.inner.write().unwrap();
    2679            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    2680            0 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2681              : 
    2682              :         // Use location config mode as an indicator of policy.
    2683            0 :         let placement_policy = match req.config.mode {
    2684            0 :             LocationConfigMode::Detached => PlacementPolicy::Detached,
    2685            0 :             LocationConfigMode::Secondary => PlacementPolicy::Secondary,
    2686              :             LocationConfigMode::AttachedMulti
    2687              :             | LocationConfigMode::AttachedSingle
    2688              :             | LocationConfigMode::AttachedStale => {
    2689            0 :                 if nodes.len() > 1 {
    2690            0 :                     PlacementPolicy::Attached(1)
    2691              :                 } else {
    2692              :                     // Convenience for dev/test: if we just have one pageserver, import
    2693              :                     // tenants into non-HA mode so that scheduling will succeed.
    2694            0 :                     PlacementPolicy::Attached(0)
    2695              :                 }
    2696              :             }
    2697              :         };
    2698              : 
    2699              :         // Ordinarily we do not update scheduling policy, but when making major changes
    2700              :         // like detaching or demoting to secondary-only, we need to force the scheduling
    2701              :         // mode to Active, or the caller's expected outcome (detach it) will not happen.
    2702            0 :         let scheduling_policy = match req.config.mode {
    2703              :             LocationConfigMode::Detached | LocationConfigMode::Secondary => {
    2704              :                 // Special case: force the scheduling mode to Active so that the requested
    2705              :                 // detach or demotion actually takes effect.
    2706            0 :                 Some(ShardSchedulingPolicy::Active)
    2707              :             }
    2708              :             LocationConfigMode::AttachedMulti
    2709              :             | LocationConfigMode::AttachedSingle
    2710              :             | LocationConfigMode::AttachedStale => {
    2711              :                 // While attached, continue to respect whatever the existing scheduling mode is.
    2712            0 :                 None
    2713              :             }
    2714              :         };
    2715              : 
    2716            0 :         let mut create = true;
    2717            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2718              :             // Saw an existing shard: this is not a creation
    2719            0 :             create = false;
    2720              : 
    2721              :             // Shards may have initially been created by a Secondary request, where we
    2722              :             // would have left generation as None.
    2723              :             //
    2724              :             // We only update generation the first time we see an attached-mode request,
    2725              :             // and if there is no existing generation set. The caller is responsible for
    2726              :             // ensuring that no non-storage-controller pageserver ever uses a higher
    2727              :             // generation than they passed in here.
    2728              :             use LocationConfigMode::*;
    2729            0 :             let set_generation = match req.config.mode {
    2730            0 :                 AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
    2731            0 :                     req.config.generation.map(Generation::new)
    2732              :                 }
    2733            0 :                 _ => None,
    2734              :             };
    2735              : 
    2736            0 :             updates.push(ShardUpdate {
    2737            0 :                 tenant_shard_id: *shard_id,
    2738            0 :                 placement_policy: placement_policy.clone(),
    2739            0 :                 tenant_config: req.config.tenant_conf.clone(),
    2740            0 :                 generation: set_generation,
    2741            0 :                 scheduling_policy,
    2742            0 :             });
    2743              :         }
    2744              : 
    2745            0 :         if create {
    2746              :             use LocationConfigMode::*;
    2747            0 :             let generation = match req.config.mode {
    2748            0 :                 AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
    2749              :                 // If a caller provided a generation in a non-attached request, ignore it
    2750              :                 // and leave our generation as None: this enables a subsequent update to set
    2751              :                 // the generation when setting an attached mode for the first time.
    2752            0 :                 _ => None,
    2753              :             };
    2754              : 
    2755            0 :             TenantCreateOrUpdate::Create(
    2756            0 :                 // Synthesize a creation request
    2757            0 :                 TenantCreateRequest {
    2758            0 :                     new_tenant_id: tenant_shard_id,
    2759            0 :                     generation,
    2760            0 :                     shard_parameters: ShardParameters {
    2761            0 :                         count: tenant_shard_id.shard_count,
    2762            0 :                         // We only import un-sharded or single-sharded tenants, so stripe
    2763            0 :                         // size can be made up arbitrarily here.
    2764            0 :                         stripe_size: DEFAULT_STRIPE_SIZE,
    2765            0 :                     },
    2766            0 :                     placement_policy: Some(placement_policy),
    2767            0 :                     config: req.config.tenant_conf,
    2768            0 :                 },
    2769            0 :             )
    2770              :         } else {
    2771            0 :             assert!(!updates.is_empty());
    2772            0 :             TenantCreateOrUpdate::Update(updates)
    2773              :         }
    2774            0 :     }
    2775              : 
    2776              :     /// For APIs that might act on tenants with [`PlacementPolicy::Detached`], first check if
    2777              :     /// the tenant is present in memory. If not, load it from the database.  If it is found
    2778              :     /// in neither location, return a NotFound error.
    2779              :     ///
    2780              :     /// Caller must demonstrate they hold a lock guard, as otherwise two callers might try to load
    2781              :     /// it at the same time, or we might race with [`Self::maybe_drop_tenant`].
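                      :     ///
                      :     /// Expected calling pattern (a sketch; `op` is whatever [`TenantOperations`]
                      :     /// variant the caller is performing):
                      :     ///
                      :     /// ```ignore
                      :     /// let _guard = trace_exclusive_lock(&self.tenant_op_locks, tenant_id, op).await;
                      :     /// self.maybe_load_tenant(tenant_id, &_guard).await?;
                      :     /// // ... act on the now-resident tenant while still holding _guard ...
                      :     /// ```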
    2782            0 :     async fn maybe_load_tenant(
    2783            0 :         &self,
    2784            0 :         tenant_id: TenantId,
    2785            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    2786            0 :     ) -> Result<(), ApiError> {
    2787              :         // Check if the tenant is present in memory, and select an AZ to use when loading
    2788              :         // if we will load it.
    2789            0 :         let load_in_az = {
    2790            0 :             let locked = self.inner.read().unwrap();
    2791            0 :             let existing = locked
    2792            0 :                 .tenants
    2793            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    2794            0 :                 .next();
    2795            0 : 
    2796            0 :             // If the tenant is not present in memory, we expect to load it from database,
    2797            0 :             // so let's figure out what AZ to load it into while we have self.inner locked.
    2798            0 :             if existing.is_none() {
    2799            0 :                 locked
    2800            0 :                     .scheduler
    2801            0 :                     .get_az_for_new_tenant()
    2802            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    2803            0 :                         "No AZ with nodes found to load tenant"
    2804            0 :                     )))?
    2805              :             } else {
    2806              :                 // We already have this tenant in memory
    2807            0 :                 return Ok(());
    2808              :             }
    2809              :         };
    2810              : 
    2811            0 :         let tenant_shards = self.persistence.load_tenant(tenant_id).await?;
    2812            0 :         if tenant_shards.is_empty() {
    2813            0 :             return Err(ApiError::NotFound(
    2814            0 :                 anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    2815            0 :             ));
    2816            0 :         }
    2817            0 : 
    2818            0 :         // Update the persistent shards with the AZ that we are about to apply to in-memory state
    2819            0 :         self.persistence
    2820            0 :             .set_tenant_shard_preferred_azs(
    2821            0 :                 tenant_shards
    2822            0 :                     .iter()
    2823            0 :                     .map(|t| {
    2824            0 :                         (
    2825            0 :                             t.get_tenant_shard_id().expect("Corrupt shard in database"),
    2826            0 :                             Some(load_in_az.clone()),
    2827            0 :                         )
    2828            0 :                     })
    2829            0 :                     .collect(),
    2830            0 :             )
    2831            0 :             .await?;
    2832              : 
    2833            0 :         let mut locked = self.inner.write().unwrap();
    2834            0 :         tracing::info!(
    2835            0 :             "Loaded {} shards for tenant {}",
    2836            0 :             tenant_shards.len(),
    2837              :             tenant_id
    2838              :         );
    2839              : 
    2840            0 :         locked.tenants.extend(tenant_shards.into_iter().map(|p| {
    2841            0 :             let intent = IntentState::new(Some(load_in_az.clone()));
    2842            0 :             let shard =
    2843            0 :                 TenantShard::from_persistent(p, intent).expect("Corrupt shard row in database");
    2844            0 : 
    2845            0 :             // Sanity check: when loading on-demand, we should only ever be loading something Detached
    2846            0 :             debug_assert!(shard.policy == PlacementPolicy::Detached);
    2847            0 :             if shard.policy != PlacementPolicy::Detached {
    2848            0 :                 tracing::error!(
    2849            0 :                     "Tenant shard {} loaded on-demand, but has non-Detached policy {:?}",
    2850              :                     shard.tenant_shard_id,
    2851              :                     shard.policy
    2852              :                 );
    2853            0 :             }
    2854              : 
    2855            0 :             (shard.tenant_shard_id, shard)
    2856            0 :         }));
    2857            0 : 
    2858            0 :         Ok(())
    2859            0 :     }
    2860              : 
    2861              :     /// If all shards for a tenant are detached, and in a fully quiescent state (no observed locations on pageservers),
    2862              :     /// and have no reconciler running, then we can drop the tenant from memory.  It will be reloaded on-demand
    2863              :     /// if we are asked to attach it again (see [`Self::maybe_load_tenant`]).
    2864              :     ///
    2865              :     /// Caller must demonstrate they hold a lock guard, as otherwise it is unsafe to drop a tenant from
    2866              :     /// memory: some other function might assume it continues to exist without holding the lock on Self::inner.
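                      :     ///
                      :     /// "Fully quiescent" is the conjunction checked below, i.e. for every shard:
                      :     ///
                      :     /// ```ignore
                      :     /// shard.policy == PlacementPolicy::Detached
                      :     ///     && shard.reconciler.is_none()
                      :     ///     && shard.observed.is_empty()
                      :     /// ```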
    2867            0 :     fn maybe_drop_tenant(
    2868            0 :         &self,
    2869            0 :         tenant_id: TenantId,
    2870            0 :         locked: &mut std::sync::RwLockWriteGuard<ServiceState>,
    2871            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    2872            0 :     ) {
    2873            0 :         let mut tenant_shards = locked.tenants.range(TenantShardId::tenant_range(tenant_id));
    2874            0 :         if tenant_shards.all(|(_id, shard)| {
    2875            0 :             shard.policy == PlacementPolicy::Detached
    2876            0 :                 && shard.reconciler.is_none()
    2877            0 :                 && shard.observed.is_empty()
    2878            0 :         }) {
    2879            0 :             let keys = locked
    2880            0 :                 .tenants
    2881            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    2882            0 :                 .map(|(id, _)| id)
    2883            0 :                 .copied()
    2884            0 :                 .collect::<Vec<_>>();
    2885            0 :             for key in keys {
    2886            0 :                 tracing::info!("Dropping detached tenant shard {} from memory", key);
    2887            0 :                 locked.tenants.remove(&key);
    2888              :             }
    2889            0 :         }
    2890            0 :     }
    2891              : 
    2892              :     /// This API is used by the cloud control plane to migrate unsharded tenants that it created
    2893              :     /// directly with pageservers into this service.
    2894              :     ///
    2895              :     /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
    2896              :     /// has attempted to call this API. Failure to comply with this rule may lead to S3 corruption.
    2897              :     /// Think of the first attempt to call this API as a transfer of absolute authority over the
    2898              :     /// tenant's source of generation numbers.
    2899              :     ///
    2900              :     /// The mode in this request gives coarse-grained control of tenants:
    2901              :     /// - Call with mode Attached* to upsert the tenant.
    2902              :     /// - Call with mode Secondary to either onboard a tenant without attaching it, or
    2903              :     ///   to set an existing tenant to PolicyMode::Secondary
    2904              :     /// - Call with mode Detached to switch to PolicyMode::Detached
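                      :     ///
                      :     /// An illustrative call for the attach case (a sketch only; other
                      :     /// `LocationConfig` fields are elided):
                      :     ///
                      :     /// ```ignore
                      :     /// let req = TenantLocationConfigRequest {
                      :     ///     config: LocationConfig {
                      :     ///         mode: LocationConfigMode::AttachedSingle,
                      :     ///         generation: Some(42), // used only if no generation is set yet
                      :     ///         // ...
                      :     ///     },
                      :     /// };
                      :     /// service
                      :     ///     .tenant_location_config(TenantShardId::unsharded(tenant_id), req)
                      :     ///     .await?;
                      :     /// ```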
    2905            0 :     pub(crate) async fn tenant_location_config(
    2906            0 :         &self,
    2907            0 :         tenant_shard_id: TenantShardId,
    2908            0 :         req: TenantLocationConfigRequest,
    2909            0 :     ) -> Result<TenantLocationConfigResponse, ApiError> {
    2910              :         // We require an exclusive lock, because we are updating both persistent and in-memory state
    2911            0 :         let _tenant_lock = trace_exclusive_lock(
    2912            0 :             &self.tenant_op_locks,
    2913            0 :             tenant_shard_id.tenant_id,
    2914            0 :             TenantOperations::LocationConfig,
    2915            0 :         )
    2916            0 :         .await;
    2917              : 
    2918            0 :         let tenant_id = if !tenant_shard_id.is_unsharded() {
    2919            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2920            0 :                 "This API is for importing single-sharded or unsharded tenants"
    2921            0 :             )));
    2922              :         } else {
    2923            0 :             tenant_shard_id.tenant_id
    2924            0 :         };
    2925            0 : 
    2926            0 :         // In case we are waking up a Detached tenant
    2927            0 :         match self.maybe_load_tenant(tenant_id, &_tenant_lock).await {
    2928            0 :             Ok(()) | Err(ApiError::NotFound(_)) => {
    2929            0 :                 // This is a creation or an update
    2930            0 :             }
    2931            0 :             Err(e) => {
    2932            0 :                 return Err(e);
    2933              :             }
    2934              :         };
    2935              : 
    2936              :         // First check if this is a creation or an update
    2937            0 :         let create_or_update = self.tenant_location_config_prepare(tenant_id, req);
    2938            0 : 
    2939            0 :         let mut result = TenantLocationConfigResponse {
    2940            0 :             shards: Vec::new(),
    2941            0 :             stripe_size: None,
    2942            0 :         };
    2943            0 :         let waiters = match create_or_update {
    2944            0 :             TenantCreateOrUpdate::Create(create_req) => {
    2945            0 :                 let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
    2946            0 :                 result.shards = create_resp
    2947            0 :                     .shards
    2948            0 :                     .into_iter()
    2949            0 :                     .map(|s| TenantShardLocation {
    2950            0 :                         node_id: s.node_id,
    2951            0 :                         shard_id: s.shard_id,
    2952            0 :                     })
    2953            0 :                     .collect();
    2954            0 :                 waiters
    2955              :             }
    2956            0 :             TenantCreateOrUpdate::Update(updates) => {
    2957            0 :                 // Persist updates
    2958            0 :                 // Ordering: write to the database before applying changes in-memory, so that
    2959            0 :                 // we will not appear time-travel backwards on a restart.
    2960            0 : 
    2961            0 :                 let mut schedule_context = ScheduleContext::default();
    2962              :                 for ShardUpdate {
    2963            0 :                     tenant_shard_id,
    2964            0 :                     placement_policy,
    2965            0 :                     tenant_config,
    2966            0 :                     generation,
    2967            0 :                     scheduling_policy,
    2968            0 :                 } in &updates
    2969              :                 {
    2970            0 :                     self.persistence
    2971            0 :                         .update_tenant_shard(
    2972            0 :                             TenantFilter::Shard(*tenant_shard_id),
    2973            0 :                             Some(placement_policy.clone()),
    2974            0 :                             Some(tenant_config.clone()),
    2975            0 :                             *generation,
    2976            0 :                             *scheduling_policy,
    2977            0 :                         )
    2978            0 :                         .await?;
    2979              :                 }
    2980              : 
    2981              :                 // Apply updates in-memory
    2982            0 :                 let mut waiters = Vec::new();
    2983            0 :                 {
    2984            0 :                     let mut locked = self.inner.write().unwrap();
    2985            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    2986              : 
    2987              :                     for ShardUpdate {
    2988            0 :                         tenant_shard_id,
    2989            0 :                         placement_policy,
    2990            0 :                         tenant_config,
    2991            0 :                         generation: update_generation,
    2992            0 :                         scheduling_policy,
    2993            0 :                     } in updates
    2994              :                     {
    2995            0 :                         let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    2996            0 :                             tracing::warn!("Shard {tenant_shard_id} removed while updating");
    2997            0 :                             continue;
    2998              :                         };
    2999              : 
    3000              :                         // Update stripe size
    3001            0 :                         if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
    3002            0 :                             result.stripe_size = Some(shard.shard.stripe_size);
    3003            0 :                         }
    3004              : 
    3005            0 :                         shard.policy = placement_policy;
    3006            0 :                         shard.config = tenant_config;
    3007            0 :                         if let Some(generation) = update_generation {
    3008            0 :                             shard.generation = Some(generation);
    3009            0 :                         }
    3010              : 
    3011            0 :                         if let Some(scheduling_policy) = scheduling_policy {
    3012            0 :                             shard.set_scheduling_policy(scheduling_policy);
    3013            0 :                         }
    3014              : 
    3015            0 :                         shard.schedule(scheduler, &mut schedule_context)?;
    3016              : 
    3017            0 :                         let maybe_waiter =
    3018            0 :                             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3019            0 :                         if let Some(waiter) = maybe_waiter {
    3020            0 :                             waiters.push(waiter);
    3021            0 :                         }
    3022              : 
    3023            0 :                         if let Some(node_id) = shard.intent.get_attached() {
    3024            0 :                             result.shards.push(TenantShardLocation {
    3025            0 :                                 shard_id: tenant_shard_id,
    3026            0 :                                 node_id: *node_id,
    3027            0 :                             })
    3028            0 :                         }
    3029              :                     }
    3030              :                 }
    3031            0 :                 waiters
    3032              :             }
    3033              :         };
    3034              : 
    3035            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3036              :             // Do not treat a reconcile error as fatal: we have already applied any requested
    3037              :             // Intent changes, and the reconcile can fail for external reasons like unavailable
    3038              :             // compute notification API.  In these cases, it is important that we do not
    3039              :             // cause the cloud control plane to retry forever on this API.
    3040            0 :             tracing::warn!(
    3041            0 :                 "Failed to reconcile after /location_config: {e}, returning success anyway"
    3042              :             );
    3043            0 :         }
    3044              : 
    3045              :         // Logging the full result is useful because it lets us cross-check what the cloud control
    3046              :         // plane's tenant_shards table should contain.
    3047            0 :         tracing::info!("Complete, returning {result:?}");
    3048              : 
    3049            0 :         Ok(result)
    3050            0 :     }
    3051              : 
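                      :     /// Patch a tenant's config: the patch is applied on top of the current config
                      :     /// (which must be identical across all shards), then persisted and reconciled
                      :     /// via [`Self::set_tenant_config_and_reconcile`].
                      :     ///
                      :     /// Illustrative semantics, assuming merge-patch-like behaviour for
                      :     /// `apply_patch` (which is defined elsewhere; this is a sketch only):
                      :     ///
                      :     /// ```ignore
                      :     /// // base:   { "a": 1, "b": 2 }
                      :     /// // patch:  { "b": null, "c": 3 }
                      :     /// // result: { "a": 1, "c": 3 }   // null unsets; absent keys are kept
                      :     /// ```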
    3052            0 :     pub(crate) async fn tenant_config_patch(
    3053            0 :         &self,
    3054            0 :         req: TenantConfigPatchRequest,
    3055            0 :     ) -> Result<(), ApiError> {
    3056            0 :         let _tenant_lock = trace_exclusive_lock(
    3057            0 :             &self.tenant_op_locks,
    3058            0 :             req.tenant_id,
    3059            0 :             TenantOperations::ConfigPatch,
    3060            0 :         )
    3061            0 :         .await;
    3062              : 
    3063            0 :         let tenant_id = req.tenant_id;
    3064            0 :         let patch = req.config;
    3065            0 : 
    3066            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3067              : 
    3068            0 :         let base = {
    3069            0 :             let locked = self.inner.read().unwrap();
    3070            0 :             let shards = locked
    3071            0 :                 .tenants
    3072            0 :                 .range(TenantShardId::tenant_range(req.tenant_id));
    3073            0 : 
    3074            0 :             let mut configs = shards.map(|(_sid, shard)| &shard.config).peekable();
    3075              : 
    3076            0 :             let first = match configs.peek() {
    3077            0 :                 Some(first) => (*first).clone(),
    3078              :                 None => {
    3079            0 :                     return Err(ApiError::NotFound(
    3080            0 :                         anyhow::anyhow!("Tenant {} not found", req.tenant_id).into(),
    3081            0 :                     ));
    3082              :                 }
    3083              :             };
    3084              : 
    3085            0 :             if !configs.all_equal() {
    3086            0 :                 tracing::error!("Tenant configs for {} are mismatched.", req.tenant_id);
    3087              :                 // This can't happen because we atomically update the database records
    3088              :                 // of all shards to the new value in [`Self::set_tenant_config_and_reconcile`].
    3089            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3090            0 :                     "Tenant configs for {} are mismatched",
    3091            0 :                     req.tenant_id
    3092            0 :                 )));
    3093            0 :             }
    3094            0 : 
    3095            0 :             first
    3096              :         };
    3097              : 
    3098            0 :         let updated_config = base
    3099            0 :             .apply_patch(patch)
    3100            0 :             .map_err(|err| ApiError::BadRequest(anyhow::anyhow!(err)))?;
    3101            0 :         self.set_tenant_config_and_reconcile(tenant_id, updated_config)
    3102            0 :             .await
    3103            0 :     }
    3104              : 
    3105            0 :     pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
    3106              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3107            0 :         let _tenant_lock = trace_exclusive_lock(
    3108            0 :             &self.tenant_op_locks,
    3109            0 :             req.tenant_id,
    3110            0 :             TenantOperations::ConfigSet,
    3111            0 :         )
    3112            0 :         .await;
    3113              : 
    3114            0 :         self.maybe_load_tenant(req.tenant_id, &_tenant_lock).await?;
    3115              : 
    3116            0 :         self.set_tenant_config_and_reconcile(req.tenant_id, req.config)
    3117            0 :             .await
    3118            0 :     }
    3119              : 
    3120            0 :     async fn set_tenant_config_and_reconcile(
    3121            0 :         &self,
    3122            0 :         tenant_id: TenantId,
    3123            0 :         config: TenantConfig,
    3124            0 :     ) -> Result<(), ApiError> {
    3125            0 :         self.persistence
    3126            0 :             .update_tenant_shard(
    3127            0 :                 TenantFilter::Tenant(tenant_id),
    3128            0 :                 None,
    3129            0 :                 Some(config.clone()),
    3130            0 :                 None,
    3131            0 :                 None,
    3132            0 :             )
    3133            0 :             .await?;
    3134              : 
    3135            0 :         let waiters = {
    3136            0 :             let mut waiters = Vec::new();
    3137            0 :             let mut locked = self.inner.write().unwrap();
    3138            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    3139            0 :             for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3140            0 :                 shard.config = config.clone();
    3141            0 :                 if let Some(waiter) =
    3142            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3143            0 :                 {
    3144            0 :                     waiters.push(waiter);
    3145            0 :                 }
    3146              :             }
    3147            0 :             waiters
    3148              :         };
    3149              : 
    3150            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3151              :             // Treat this as success because we have stored the configuration.  If e.g.
    3152              :             // a node was unavailable at this time, it should not stop us accepting a
    3153              :             // configuration change.
    3154            0 :             tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
    3155            0 :         }
    3156              : 
    3157            0 :         Ok(())
    3158            0 :     }
    3159              : 
    3160            0 :     pub(crate) fn tenant_config_get(
    3161            0 :         &self,
    3162            0 :         tenant_id: TenantId,
    3163            0 :     ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
    3164            0 :         let config = {
    3165            0 :             let locked = self.inner.read().unwrap();
    3166            0 : 
    3167            0 :             match locked
    3168            0 :                 .tenants
    3169            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3170            0 :                 .next()
    3171              :             {
    3172            0 :                 Some((_tenant_shard_id, shard)) => shard.config.clone(),
    3173              :                 None => {
    3174            0 :                     return Err(ApiError::NotFound(
    3175            0 :                         anyhow::anyhow!("Tenant not found").into(),
    3176            0 :                     ));
    3177              :                 }
    3178              :             }
    3179              :         };
    3180              : 
    3181              :         // Unlike the pageserver, we do not have a set of global defaults: the config is
    3182              :         // entirely per-tenant.  Therefore the distinction between `tenant_specific_overrides`
    3183              :         // and `effective_config` in the response is meaningless, but we retain that syntax
    3184              :         // in order to remain compatible with the pageserver API.
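                      :         //
                      :         // Illustrative response shape (both keys carry the same serialized config):
                      :         //
                      :         //   {
                      :         //     "tenant_specific_overrides": { ... },
                      :         //     "effective_config": { ... }
                      :         //   }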
    3185              : 
    3186            0 :         let response = HashMap::from([
    3187              :             (
    3188              :                 "tenant_specific_overrides",
    3189            0 :                 serde_json::to_value(&config)
    3190            0 :                     .context("serializing tenant specific overrides")
    3191            0 :                     .map_err(ApiError::InternalServerError)?,
    3192              :             ),
    3193              :             (
    3194            0 :                 "effective_config",
    3195            0 :                 serde_json::to_value(&config)
    3196            0 :                     .context("serializing effective config")
    3197            0 :                     .map_err(ApiError::InternalServerError)?,
    3198              :             ),
    3199              :         ]);
    3200              : 
    3201            0 :         Ok(response)
    3202            0 :     }
    3203              : 
    3204            0 :     pub(crate) async fn tenant_time_travel_remote_storage(
    3205            0 :         &self,
    3206            0 :         time_travel_req: &TenantTimeTravelRequest,
    3207            0 :         tenant_id: TenantId,
    3208            0 :         timestamp: Cow<'_, str>,
    3209            0 :         done_if_after: Cow<'_, str>,
    3210            0 :     ) -> Result<(), ApiError> {
    3211            0 :         let _tenant_lock = trace_exclusive_lock(
    3212            0 :             &self.tenant_op_locks,
    3213            0 :             tenant_id,
    3214            0 :             TenantOperations::TimeTravelRemoteStorage,
    3215            0 :         )
    3216            0 :         .await;
    3217              : 
    3218            0 :         let node = {
    3219            0 :             let mut locked = self.inner.write().unwrap();
    3220              :             // Just a sanity check to prevent misuse: the API expects the tenant to be fully
    3221              :             // detached everywhere, so that nothing is writing to S3 storage. We verify that
    3222              :             // here, but only at the start of the process, so it is really just a guard
    3223              :             // against operator mistakes.
    3224            0 :             for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
    3225            0 :                 if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
    3226              :                 {
    3227            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3228            0 :                         "We want tenant to be detached in shard with tenant_shard_id={shard_id}"
    3229            0 :                     )));
    3230            0 :                 }
    3231            0 :                 let maybe_attached = shard
    3232            0 :                     .observed
    3233            0 :                     .locations
    3234            0 :                     .iter()
    3235            0 :                     .filter_map(|(node_id, observed_location)| {
    3236            0 :                         observed_location
    3237            0 :                             .conf
    3238            0 :                             .as_ref()
    3239            0 :                             .map(|loc| (node_id, observed_location, loc.mode))
    3240            0 :                     })
    3241            0 :                     .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
    3242            0 :                 if let Some((node_id, _observed_location, mode)) = maybe_attached {
    3243            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3244            0 :                         "We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}"
    3245            0 :                     )));
    3246            0 :                 }
    3247              :             }
    3248            0 :             let scheduler = &mut locked.scheduler;
    3249              :             // Right now we only perform the operation on a single node without parallelization
    3250              :             // TODO fan out the operation to multiple nodes for better performance
    3251            0 :             let node_id = scheduler.any_available_node()?;
    3252            0 :             let node = locked
    3253            0 :                 .nodes
    3254            0 :                 .get(&node_id)
    3255            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3256            0 :             node.clone()
    3257            0 :         };
    3258            0 : 
    3259            0 :         // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
    3260            0 :         let mut counts = time_travel_req
    3261            0 :             .shard_counts
    3262            0 :             .iter()
    3263            0 :             .copied()
    3264            0 :             .collect::<HashSet<_>>()
    3265            0 :             .into_iter()
    3266            0 :             .collect::<Vec<_>>();
    3267            0 :         counts.sort_unstable();
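                      :         // e.g. requested shard_counts [4, 1, 4] dedups and sorts to [1, 4]; each
                      :         // count then expands to its own set of shard IDs (and hence remote paths).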
    3268              : 
    3269            0 :         for count in counts {
    3270            0 :             let shard_ids = (0..count.count())
    3271            0 :                 .map(|i| TenantShardId {
    3272            0 :                     tenant_id,
    3273            0 :                     shard_number: ShardNumber(i),
    3274            0 :                     shard_count: count,
    3275            0 :                 })
    3276            0 :                 .collect::<Vec<_>>();
    3277            0 :             for tenant_shard_id in shard_ids {
    3278            0 :                 let client = PageserverClient::new(
    3279            0 :                     node.get_id(),
    3280            0 :                     self.http_client.clone(),
    3281            0 :                     node.base_url(),
    3282            0 :                     self.config.pageserver_jwt_token.as_deref(),
    3283            0 :                 );
    3284            0 : 
    3285            0 :                 tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
    3286              : 
    3287            0 :                 client
    3288            0 :                     .tenant_time_travel_remote_storage(
    3289            0 :                         tenant_shard_id,
    3290            0 :                         &timestamp,
    3291            0 :                         &done_if_after,
    3292            0 :                     )
    3293            0 :                     .await
    3294            0 :                     .map_err(|e| {
    3295            0 :                         ApiError::InternalServerError(anyhow::anyhow!(
    3296            0 :                             "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
    3297            0 :                             node
    3298            0 :                         ))
    3299            0 :                     })?;
    3300              :             }
    3301              :         }
    3302            0 :         Ok(())
    3303            0 :     }
    3304              : 
    3305            0 :     pub(crate) async fn tenant_secondary_download(
    3306            0 :         &self,
    3307            0 :         tenant_id: TenantId,
    3308            0 :         wait: Option<Duration>,
    3309            0 :     ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
    3310            0 :         let _tenant_lock = trace_shared_lock(
    3311            0 :             &self.tenant_op_locks,
    3312            0 :             tenant_id,
    3313            0 :             TenantOperations::SecondaryDownload,
    3314            0 :         )
    3315            0 :         .await;
    3316              : 
    3317              :         // Acquire lock and yield the collection of shard-node tuples which we will send requests onward to
    3318            0 :         let targets = {
    3319            0 :             let locked = self.inner.read().unwrap();
    3320            0 :             let mut targets = Vec::new();
    3321              : 
    3322            0 :             for (tenant_shard_id, shard) in
    3323            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3324              :             {
    3325            0 :                 for node_id in shard.intent.get_secondary() {
    3326            0 :                     let node = locked
    3327            0 :                         .nodes
    3328            0 :                         .get(node_id)
    3329            0 :                         .expect("Pageservers may not be deleted while referenced");
    3330            0 : 
    3331            0 :                     targets.push((*tenant_shard_id, node.clone()));
    3332            0 :                 }
    3333              :             }
    3334            0 :             targets
    3335            0 :         };
    3336            0 : 
    3337            0 :         // Issue concurrent requests to all shards' locations
    3338            0 :         let mut futs = FuturesUnordered::new();
    3339            0 :         for (tenant_shard_id, node) in targets {
    3340            0 :             let client = PageserverClient::new(
    3341            0 :                 node.get_id(),
    3342            0 :                 self.http_client.clone(),
    3343            0 :                 node.base_url(),
    3344            0 :                 self.config.pageserver_jwt_token.as_deref(),
    3345            0 :             );
    3346            0 :             futs.push(async move {
    3347            0 :                 let result = client
    3348            0 :                     .tenant_secondary_download(tenant_shard_id, wait)
    3349            0 :                     .await;
    3350            0 :                 (result, node, tenant_shard_id)
    3351            0 :             })
    3352              :         }
    3353              : 
    3354              :         // Handle any errors returned by pageservers.  This includes cases like this request racing with
    3355              :         // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
    3356              :         // well as more general cases like 503s, 500s, or timeouts.
    3357            0 :         let mut aggregate_progress = SecondaryProgress::default();
    3358            0 :         let mut aggregate_status: Option<StatusCode> = None;
    3359            0 :         let mut error: Option<mgmt_api::Error> = None;
    3360            0 :         while let Some((result, node, tenant_shard_id)) = futs.next().await {
    3361            0 :             match result {
    3362            0 :                 Err(e) => {
    3363            0 :                     // Secondary downloads are always advisory: if something fails, we nevertheless report success, so that whoever
    3364            0 :                     // is calling us will proceed with whatever migration they're doing, albeit with a slightly less warm cache
    3365            0 :                     // than they had hoped for.
    3366            0 :                     tracing::warn!("Secondary download error from pageserver {node}: {e}",);
    3367            0 :                     error = Some(e)
    3368              :                 }
    3369            0 :                 Ok((status_code, progress)) => {
    3370            0 :                     tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
    3371            0 :                     aggregate_progress.layers_downloaded += progress.layers_downloaded;
    3372            0 :                     aggregate_progress.layers_total += progress.layers_total;
    3373            0 :                     aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
    3374            0 :                     aggregate_progress.bytes_total += progress.bytes_total;
    3375            0 :                     aggregate_progress.heatmap_mtime =
    3376            0 :                         std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
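                      :                     // Merge rule: OK is the weakest success; once any shard reports a
                      :                     // non-OK success (e.g. 202 Accepted, download still running), keep it.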
    3377            0 :                     aggregate_status = match aggregate_status {
    3378            0 :                         None => Some(status_code),
    3379            0 :                         Some(StatusCode::OK) => Some(status_code),
    3380            0 :                         Some(cur) => {
    3381            0 :                             // Other status codes (e.g. 202) -- do not overwrite.
    3382            0 :                             Some(cur)
    3383              :                         }
    3384              :                     };
    3385              :                 }
    3386              :             }
    3387              :         }
    3388              : 
    3389              :         // If any of the shards return 202, indicate our result as 202.
    3390            0 :         match aggregate_status {
    3391              :             None => {
    3392            0 :                 match error {
    3393            0 :                     Some(e) => {
    3394            0 :                         // No successes, and an error: surface it
    3395            0 :                         Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
    3396              :                     }
    3397              :                     None => {
    3398              :                         // No shards found
    3399            0 :                         Err(ApiError::NotFound(
    3400            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    3401            0 :                         ))
    3402              :                     }
    3403              :                 }
    3404              :             }
    3405            0 :             Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
    3406              :         }
    3407            0 :     }
    3408              : 
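                      :     /// Delete a tenant entirely: detach all shards (which also removes local
                      :     /// pageserver shard data), then delete the tenant's remote storage via an
                      :     /// arbitrary available node, as the steps below implement.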
    3409            0 :     pub(crate) async fn tenant_delete(
    3410            0 :         self: &Arc<Self>,
    3411            0 :         tenant_id: TenantId,
    3412            0 :     ) -> Result<StatusCode, ApiError> {
    3413            0 :         let _tenant_lock =
    3414            0 :             trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
    3415              : 
    3416            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3417              : 
    3418              :         // Detach all shards. This also deletes local pageserver shard data.
    3419            0 :         let (detach_waiters, node) = {
    3420            0 :             let mut detach_waiters = Vec::new();
    3421            0 :             let mut locked = self.inner.write().unwrap();
    3422            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3423            0 :             for (_, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3424              :                 // Update the tenant's intent to remove all attachments
    3425            0 :                 shard.policy = PlacementPolicy::Detached;
    3426            0 :                 shard
    3427            0 :                     .schedule(scheduler, &mut ScheduleContext::default())
    3428            0 :                     .expect("De-scheduling is infallible");
    3429            0 :                 debug_assert!(shard.intent.get_attached().is_none());
    3430            0 :                 debug_assert!(shard.intent.get_secondary().is_empty());
    3431              : 
    3432            0 :                 if let Some(waiter) =
    3433            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3434            0 :                 {
    3435            0 :                     detach_waiters.push(waiter);
    3436            0 :                 }
    3437              :             }
    3438              : 
    3439              :             // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
    3440              :             // was attached, just has to be able to see the S3 content)
    3441            0 :             let node_id = scheduler.any_available_node()?;
    3442            0 :             let node = nodes
    3443            0 :                 .get(&node_id)
    3444            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3445            0 :             (detach_waiters, node.clone())
    3446            0 :         };
    3447            0 : 
    3448            0 :         // This reconcile wait can fail in a few ways:
    3449            0 :         //  A: there is a very long queue for the reconciler semaphore
    3450            0 :         //  B: some pageserver is failing to handle a detach promptly
    3451            0 :         //  C: some pageserver goes offline right at the moment we send it a request.
    3452            0 :         //
    3453            0 :         // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
    3454            0 :         // the next attempt to reconcile will silently skip detaches for an offline node and succeed.  If B happens,
    3455            0 :         // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
    3456            0 :         // deleting the underlying data).
    3457            0 :         self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
    3458            0 :             .await?;
    3459              : 
    3460              :         // Delete the entire tenant (all shards) from remote storage via a random pageserver.
    3461              :         // Passing an unsharded tenant ID will cause the pageserver to remove all remote paths with
    3462              :         // the tenant ID prefix, including all shards (even possibly stale ones).
    3463            0 :         match node
    3464            0 :             .with_client_retries(
    3465            0 :                 |client| async move {
    3466            0 :                     client
    3467            0 :                         .tenant_delete(TenantShardId::unsharded(tenant_id))
    3468            0 :                         .await
    3469            0 :                 },
    3470            0 :                 &self.http_client,
    3471            0 :                 &self.config.pageserver_jwt_token,
    3472            0 :                 1,
    3473            0 :                 3,
    3474            0 :                 RECONCILE_TIMEOUT,
    3475            0 :                 &self.cancel,
    3476            0 :             )
    3477            0 :             .await
    3478            0 :             .unwrap_or(Err(mgmt_api::Error::Cancelled))
    3479              :         {
    3480            0 :             Ok(_) => {}
    3481              :             Err(mgmt_api::Error::Cancelled) => {
    3482            0 :                 return Err(ApiError::ShuttingDown);
    3483              :             }
    3484            0 :             Err(e) => {
    3485            0 :                 // This is unexpected: remote deletion should be infallible, unless the object store
    3486            0 :                 // at large is unavailable.
    3487            0 :                 tracing::error!("Error deleting via node {node}: {e}");
    3488            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    3489              :             }
    3490              :         }
    3491              : 
    3492              :         // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
    3493              :         // our in-memory state and database state.
    3494              : 
    3495              :         // Ordering: we delete persistent state first: if we crash before dropping the
    3496              :         // in-memory state, that state is lost on restart anyway, so the tenant stays deleted.
    3497              : 
    3498              :         // Drop persistent state.
    3499            0 :         self.persistence.delete_tenant(tenant_id).await?;
    3500              : 
    3501              :         // Drop in-memory state
    3502              :         {
    3503            0 :             let mut locked = self.inner.write().unwrap();
    3504            0 :             let (_nodes, tenants, scheduler) = locked.parts_mut();
    3505              : 
    3506              :             // Clear the shards' references into the Scheduler before dropping them
    3507            0 :             for (_tenant_shard_id, shard) in
    3508            0 :                 tenants.range_mut(TenantShardId::tenant_range(tenant_id))
    3509            0 :             {
    3510            0 :                 shard.intent.clear(scheduler);
    3511            0 :             }
    3512              : 
    3513            0 :             tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
    3514            0 :             tracing::info!(
    3515            0 :                 "Deleted tenant {tenant_id}, now have {} tenants",
    3516            0 :                 locked.tenants.len()
    3517              :             );
    3518              :         };
    3519              : 
    3520              :         // Delete the tenant from safekeepers (if needed)
    3521            0 :         self.tenant_delete_safekeepers(tenant_id)
    3522            0 :             .instrument(tracing::info_span!("tenant_delete_safekeepers", %tenant_id))
    3523            0 :             .await?;
    3524              : 
    3525              :         // Success is represented as 404, to imitate the existing pageserver deletion API
    3526            0 :         Ok(StatusCode::NOT_FOUND)
    3527            0 :     }
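                      : 
                      :     // Minimal standalone sketch of the ordering argument above (types and names
                      :     // hypothetical): persistent state is the source of truth that in-memory
                      :     // state is rebuilt from at startup, so deleting the database record first
                      :     // means a crash between the two steps cannot resurrect the tenant.
                      :     struct SketchStore {
                      :         durable: std::collections::HashMap<String, String>,   // stands in for the database
                      :         in_memory: std::collections::HashMap<String, String>, // rebuilt from `durable` on restart
                      :     }
                      : 
                      :     impl SketchStore {
                      :         fn delete(&mut self, key: &str) {
                      :             self.durable.remove(key); // 1. drop persistent state first
                      :             // a crash here is safe: restart() rebuilds in_memory from durable,
                      :             // which no longer contains the key
                      :             self.in_memory.remove(key); // 2. then drop in-memory state
                      :         }
                      : 
                      :         fn restart(&mut self) {
                      :             self.in_memory = self.durable.clone();
                      :         }
                      :     }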
    3528              : 
    3529              :     /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] sets the `TenantConfig`
    3530              :     /// for a tenant.  The `TenantConfig` is passed through to pageservers, whereas this function modifies
    3531              :     /// the tenant's policies (configuration) within the storage controller.
    3532            0 :     pub(crate) async fn tenant_update_policy(
    3533            0 :         &self,
    3534            0 :         tenant_id: TenantId,
    3535            0 :         req: TenantPolicyRequest,
    3536            0 :     ) -> Result<(), ApiError> {
    3537              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3538            0 :         let _tenant_lock = trace_exclusive_lock(
    3539            0 :             &self.tenant_op_locks,
    3540            0 :             tenant_id,
    3541            0 :             TenantOperations::UpdatePolicy,
    3542            0 :         )
    3543            0 :         .await;
    3544              : 
    3545            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3546              : 
    3547            0 :         failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
    3548              : 
    3549              :         let TenantPolicyRequest {
    3550            0 :             placement,
    3551            0 :             mut scheduling,
    3552            0 :         } = req;
    3553              : 
    3554            0 :         if let Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) = placement {
    3555              :             // When someone configures a tenant to detach, we force the scheduling policy to Active
    3556              :             // so that the change can take effect.
    3557            0 :             if scheduling.is_none() {
    3558            0 :                 scheduling = Some(ShardSchedulingPolicy::Active);
    3559            0 :             }
    3560            0 :         }
    3561              : 
    3562            0 :         self.persistence
    3563            0 :             .update_tenant_shard(
    3564            0 :                 TenantFilter::Tenant(tenant_id),
    3565            0 :                 placement.clone(),
    3566            0 :                 None,
    3567            0 :                 None,
    3568            0 :                 scheduling,
    3569            0 :             )
    3570            0 :             .await?;
    3571              : 
    3572            0 :         let mut schedule_context = ScheduleContext::default();
    3573            0 :         let mut locked = self.inner.write().unwrap();
    3574            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    3575            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3576            0 :             if let Some(placement) = &placement {
    3577            0 :                 shard.policy = placement.clone();
    3578            0 : 
    3579            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3580            0 :                                "Updated placement policy to {placement:?}");
    3581            0 :             }
    3582              : 
    3583            0 :             if let Some(scheduling) = &scheduling {
    3584            0 :                 shard.set_scheduling_policy(*scheduling);
    3585            0 : 
    3586            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3587            0 :                                "Updated scheduling policy to {scheduling:?}");
    3588            0 :             }
    3589              : 
    3590              :             // In case scheduling is being switched back on, try it now.
    3591            0 :             shard.schedule(scheduler, &mut schedule_context).ok();
    3592            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3593              :         }
    3594              : 
    3595            0 :         Ok(())
    3596            0 :     }
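                      : 
                      :     // Standalone sketch of the defaulting rule above: when a policy update asks
                      :     // for Detached or Secondary placement without specifying a scheduling policy,
                      :     // scheduling is forced to Active so the placement change can be enacted.
                      :     // These enums mirror only the variants used here; names are illustrative.
                      :     #[derive(Clone, Copy, Debug, PartialEq)]
                      :     enum PlacementSketch { Attached, Secondary, Detached }
                      :     #[derive(Clone, Copy, Debug, PartialEq)]
                      :     enum SchedulingSketch { Active, Paused }
                      : 
                      :     fn effective_scheduling(
                      :         placement: Option<PlacementSketch>,
                      :         scheduling: Option<SchedulingSketch>,
                      :     ) -> Option<SchedulingSketch> {
                      :         match (placement, scheduling) {
                      :             (Some(PlacementSketch::Detached | PlacementSketch::Secondary), None) => {
                      :                 Some(SchedulingSketch::Active)
                      :             }
                      :             (_, s) => s,
                      :         }
                      :     }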
    3597              : 
    3598            0 :     pub(crate) async fn tenant_timeline_create_pageservers(
    3599            0 :         &self,
    3600            0 :         tenant_id: TenantId,
    3601            0 :         mut create_req: TimelineCreateRequest,
    3602            0 :     ) -> Result<TimelineInfo, ApiError> {
    3603            0 :         tracing::info!(
    3604            0 :             "Creating timeline {}/{}",
    3605              :             tenant_id,
    3606              :             create_req.new_timeline_id,
    3607              :         );
    3608              : 
    3609            0 :         self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    3610            0 :             if targets.0.is_empty() {
    3611            0 :                 return Err(ApiError::NotFound(
    3612            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3613            0 :                 ));
    3614            0 :             };
    3615            0 : 
    3616            0 :             let (shard_zero_tid, shard_zero_locations) =
    3617            0 :                 targets.0.pop_first().expect("Must have at least one shard");
    3618            0 :             assert!(shard_zero_tid.is_shard_zero());
    3619              : 
    3620            0 :             async fn create_one(
    3621            0 :                 tenant_shard_id: TenantShardId,
    3622            0 :                 locations: ShardMutationLocations,
    3623            0 :                 http_client: reqwest::Client,
    3624            0 :                 jwt: Option<String>,
    3625            0 :                 create_req: TimelineCreateRequest,
    3626            0 :             ) -> Result<TimelineInfo, ApiError> {
    3627            0 :                 let latest = locations.latest.node;
    3628            0 : 
    3629            0 :                 tracing::info!(
    3630            0 :                     "Creating timeline on shard {}/{}, attached to node {latest} in generation {:?}",
    3631              :                     tenant_shard_id,
    3632              :                     create_req.new_timeline_id,
    3633              :                     locations.latest.generation
    3634              :                 );
    3635              : 
    3636            0 :                 let client =
    3637            0 :                     PageserverClient::new(latest.get_id(), http_client.clone(), latest.base_url(), jwt.as_deref());
    3638              : 
    3639            0 :                 let timeline_info = client
    3640            0 :                     .timeline_create(tenant_shard_id, &create_req)
    3641            0 :                     .await
    3642            0 :                     .map_err(|e| passthrough_api_error(&latest, e))?;
    3643              : 
    3644              :                 // We propagate timeline creations to all attached locations such that a compute
    3645              :                 // for the new timeline is able to start regardless of the current state of the
    3646              :                 // tenant shard reconciliation.
    3647            0 :                 for location in locations.other {
    3648            0 :                     tracing::info!(
    3649            0 :                         "Creating timeline on shard {}/{}, stale attached to node {} in generation {:?}",
    3650              :                         tenant_shard_id,
    3651              :                         create_req.new_timeline_id,
    3652              :                         location.node,
    3653              :                         location.generation
    3654              :                     );
    3655              : 
    3656            0 :                     let client = PageserverClient::new(
    3657            0 :                         location.node.get_id(),
    3658            0 :                         http_client.clone(),
    3659            0 :                         location.node.base_url(),
    3660            0 :                         jwt.as_deref(),
    3661            0 :                     );
    3662              : 
    3663            0 :                     let res = client
    3664            0 :                         .timeline_create(tenant_shard_id, &create_req)
    3665            0 :                         .await;
    3666              : 
    3667            0 :                     if let Err(e) = res {
    3668            0 :                         match e {
    3669            0 :                             mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _) => {
    3670            0 :                                 // Tenant might have been detached from the stale location,
    3671            0 :                                 // so ignore 404s.
    3672            0 :                             },
    3673              :                             _ => {
    3674            0 :                                 return Err(passthrough_api_error(&location.node, e));
    3675              :                             }
    3676              :                         }
    3677            0 :                     }
    3678              :                 }
    3679              : 
    3680            0 :                 Ok(timeline_info)
    3681            0 :             }
    3682              : 
    3683              :             // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
    3684              :             // use whatever LSN that shard picked when creating on subsequent shards.  We arbitrarily use shard zero as the shard
    3685              :             // that will get the first creation request, and propagate the LSN to all the >0 shards.
    3686            0 :             let timeline_info = create_one(
    3687            0 :                 shard_zero_tid,
    3688            0 :                 shard_zero_locations,
    3689            0 :                 self.http_client.clone(),
    3690            0 :                 self.config.pageserver_jwt_token.clone(),
    3691            0 :                 create_req.clone(),
    3692            0 :             )
    3693            0 :             .await?;
    3694              : 
    3695              :             // Propagate the LSN that shard zero picked, if the caller didn't provide one.
    3696            0 :             match &mut create_req.mode {
    3697            0 :                 models::TimelineCreateRequestMode::Branch { ancestor_start_lsn, .. } if ancestor_start_lsn.is_none() => {
    3698            0 :                     *ancestor_start_lsn = timeline_info.ancestor_lsn;
    3699            0 :                 },
    3700            0 :                 _ => {}
    3701              :             }
    3702              : 
    3703              :             // Create timeline on remaining shards with number >0
    3704            0 :             if !targets.0.is_empty() {
    3705              :                 // If we had multiple shards, issue requests for the remainder now.
    3706            0 :                 let jwt = &self.config.pageserver_jwt_token;
    3707            0 :                 self.tenant_for_shards(
    3708            0 :                     targets
    3709            0 :                         .0
    3710            0 :                         .iter()
    3711            0 :                         .map(|t| (*t.0, t.1.latest.node.clone()))
    3712            0 :                         .collect(),
    3713            0 :                     |tenant_shard_id: TenantShardId, _node: Node| {
    3714            0 :                         let create_req = create_req.clone();
    3715            0 :                         let mutation_locations = targets.0.remove(&tenant_shard_id).unwrap();
    3716            0 :                         Box::pin(create_one(
    3717            0 :                             tenant_shard_id,
    3718            0 :                             mutation_locations,
    3719            0 :                             self.http_client.clone(),
    3720            0 :                             jwt.clone(),
    3721            0 :                             create_req,
    3722            0 :                         ))
    3723            0 :                     },
    3724            0 :                 )
    3725            0 :                 .await?;
    3726            0 :             }
    3727              : 
    3728            0 :             Ok(timeline_info)
    3729            0 :         })
    3730            0 :         .await?
    3731            0 :     }
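                      : 
                      :     // Minimal sketch of the LSN propagation above (types hypothetical): shard
                      :     // zero creates the branch first; if the caller left the start LSN unset,
                      :     // the LSN shard zero picked is copied into the requests sent to the
                      :     // remaining shards, so all shards branch at the same point.
                      :     #[derive(Clone)]
                      :     struct BranchRequestSketch { ancestor_start_lsn: Option<u64> }
                      : 
                      :     fn propagate_shard_zero_lsn(req: &mut BranchRequestSketch, shard_zero_lsn: u64) {
                      :         if req.ancestor_start_lsn.is_none() {
                      :             req.ancestor_start_lsn = Some(shard_zero_lsn);
                      :         }
                      :     }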
    3732              : 
    3733            0 :     pub(crate) async fn tenant_timeline_create(
    3734            0 :         self: &Arc<Self>,
    3735            0 :         tenant_id: TenantId,
    3736            0 :         create_req: TimelineCreateRequest,
    3737            0 :     ) -> Result<TimelineCreateResponseStorcon, ApiError> {
    3738            0 :         let safekeepers = self.config.timelines_onto_safekeepers;
    3739            0 :         tracing::info!(
    3740              :             %safekeepers,
    3741            0 :             "Creating timeline {}/{}",
    3742              :             tenant_id,
    3743              :             create_req.new_timeline_id,
    3744              :         );
    3745              : 
    3746            0 :         let _tenant_lock = trace_shared_lock(
    3747            0 :             &self.tenant_op_locks,
    3748            0 :             tenant_id,
    3749            0 :             TenantOperations::TimelineCreate,
    3750            0 :         )
    3751            0 :         .await;
    3752            0 :         failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
    3753            0 :         let create_mode = create_req.mode.clone();
    3754              : 
    3755            0 :         let timeline_info = self
    3756            0 :             .tenant_timeline_create_pageservers(tenant_id, create_req)
    3757            0 :             .await?;
    3758              : 
    3759            0 :         let safekeepers = if safekeepers {
    3760            0 :             let res = self
    3761            0 :                 .tenant_timeline_create_safekeepers(tenant_id, &timeline_info, create_mode)
    3762            0 :                 .instrument(tracing::info_span!("timeline_create_safekeepers", %tenant_id, timeline_id=%timeline_info.timeline_id))
    3763            0 :                 .await?;
    3764            0 :             Some(res)
    3765              :         } else {
    3766            0 :             None
    3767              :         };
    3768              : 
    3769            0 :         Ok(TimelineCreateResponseStorcon {
    3770            0 :             timeline_info,
    3771            0 :             safekeepers,
    3772            0 :         })
    3773            0 :     }
    3774              : 
    3775            0 :     pub(crate) async fn tenant_timeline_archival_config(
    3776            0 :         &self,
    3777            0 :         tenant_id: TenantId,
    3778            0 :         timeline_id: TimelineId,
    3779            0 :         req: TimelineArchivalConfigRequest,
    3780            0 :     ) -> Result<(), ApiError> {
    3781            0 :         tracing::info!(
    3782            0 :             "Setting archival config of timeline {tenant_id}/{timeline_id} to '{:?}'",
    3783              :             req.state
    3784              :         );
    3785              : 
    3786            0 :         let _tenant_lock = trace_shared_lock(
    3787            0 :             &self.tenant_op_locks,
    3788            0 :             tenant_id,
    3789            0 :             TenantOperations::TimelineArchivalConfig,
    3790            0 :         )
    3791            0 :         .await;
    3792              : 
    3793            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    3794            0 :             if targets.0.is_empty() {
    3795            0 :                 return Err(ApiError::NotFound(
    3796            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3797            0 :                 ));
    3798            0 :             }
    3799            0 :             async fn config_one(
    3800            0 :                 tenant_shard_id: TenantShardId,
    3801            0 :                 timeline_id: TimelineId,
    3802            0 :                 node: Node,
    3803            0 :                 http_client: reqwest::Client,
    3804            0 :                 jwt: Option<String>,
    3805            0 :                 req: TimelineArchivalConfigRequest,
    3806            0 :             ) -> Result<(), ApiError> {
    3807            0 :                 tracing::info!(
    3808            0 :                     "Setting archival config of timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    3809              :                 );
    3810              : 
    3811            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    3812            0 : 
    3813            0 :                 client
    3814            0 :                     .timeline_archival_config(tenant_shard_id, timeline_id, &req)
    3815            0 :                     .await
    3816            0 :                     .map_err(|e| match e {
    3817            0 :                         mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg) => {
    3818            0 :                             ApiError::PreconditionFailed(msg.into_boxed_str())
    3819              :                         }
    3820            0 :                         _ => passthrough_api_error(&node, e),
    3821            0 :                     })
    3822            0 :             }
    3823              : 
    3824              :             // no shard needs to go first/last; the operation should be idempotent
    3825              :             // TODO: it would be great to ensure that all shards return the same error
    3826            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    3827            0 :             let results = self
    3828            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    3829            0 :                     futures::FutureExt::boxed(config_one(
    3830            0 :                         tenant_shard_id,
    3831            0 :                         timeline_id,
    3832            0 :                         node,
    3833            0 :                         self.http_client.clone(),
    3834            0 :                         self.config.pageserver_jwt_token.clone(),
    3835            0 :                         req.clone(),
    3836            0 :                     ))
    3837            0 :                 })
    3838            0 :                 .await?;
    3839            0 :             assert!(!results.is_empty(), "must have at least one result");
    3840              : 
    3841            0 :             Ok(())
    3842            0 :         }).await?
    3843            0 :     }
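                      : 
                      :     // Standalone sketch of the error mapping above (names hypothetical): a 412
                      :     // from the pageserver is surfaced as a distinct precondition failure so
                      :     // callers can tell it apart from generic pass-through errors.
                      :     enum UpstreamErrorSketch { Status(u16, String) }
                      :     enum ApiErrorSketch { PreconditionFailed(String), Passthrough(String) }
                      : 
                      :     fn map_archival_error(e: UpstreamErrorSketch) -> ApiErrorSketch {
                      :         match e {
                      :             UpstreamErrorSketch::Status(412, msg) => ApiErrorSketch::PreconditionFailed(msg),
                      :             UpstreamErrorSketch::Status(code, msg) => {
                      :                 ApiErrorSketch::Passthrough(format!("status {code}: {msg}"))
                      :             }
                      :         }
                      :     }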
    3844              : 
    3845            0 :     pub(crate) async fn tenant_timeline_detach_ancestor(
    3846            0 :         &self,
    3847            0 :         tenant_id: TenantId,
    3848            0 :         timeline_id: TimelineId,
    3849            0 :         behavior: Option<DetachBehavior>,
    3850            0 :     ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
    3851            0 :         tracing::info!("Detaching timeline {tenant_id}/{timeline_id}",);
    3852              : 
    3853            0 :         let _tenant_lock = trace_shared_lock(
    3854            0 :             &self.tenant_op_locks,
    3855            0 :             tenant_id,
    3856            0 :             TenantOperations::TimelineDetachAncestor,
    3857            0 :         )
    3858            0 :         .await;
    3859              : 
    3860            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    3861            0 :             if targets.0.is_empty() {
    3862            0 :                 return Err(ApiError::NotFound(
    3863            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3864            0 :                 ));
    3865            0 :             }
    3866              : 
    3867            0 :             async fn detach_one(
    3868            0 :                 tenant_shard_id: TenantShardId,
    3869            0 :                 timeline_id: TimelineId,
    3870            0 :                 node: Node,
    3871            0 :                 http_client: reqwest::Client,
    3872            0 :                 jwt: Option<String>,
    3873            0 :                 behavior: Option<DetachBehavior>,
    3874            0 :             ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
    3875            0 :                 tracing::info!(
    3876            0 :                     "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    3877              :                 );
    3878              : 
    3879            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    3880            0 : 
    3881            0 :                 client
    3882            0 :                     .timeline_detach_ancestor(tenant_shard_id, timeline_id, behavior)
    3883            0 :                     .await
    3884            0 :                     .map_err(|e| {
    3885              :                         use mgmt_api::Error;
    3886              : 
    3887            0 :                         match e {
    3888              :                             // no ancestor (ever)
    3889            0 :                             Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
    3890            0 :                                 "{node}: {}",
    3891            0 :                                 msg.strip_prefix("Conflict: ").unwrap_or(&msg)
    3892            0 :                             )),
    3893              :                             // too many ancestors
    3894            0 :                             Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
    3895            0 :                                 ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
    3896              :                             }
    3897            0 :                             Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, msg) => {
    3898            0 :                                 // Avoid turning these into conflicts, to remain compatible with
    3899            0 :                                 // pageservers: 500 errors are, unfortunately, retryable for timeline
    3900            0 :                                 // ancestor detach.
    3901            0 :                                 ApiError::InternalServerError(anyhow::anyhow!("{node}: {msg}"))
    3902              :                             }
    3903              :                             // rest can be mapped as usual
    3904            0 :                             other => passthrough_api_error(&node, other),
    3905              :                         }
    3906            0 :                     })
    3907            0 :                     .map(|res| (tenant_shard_id.shard_number, res))
    3908            0 :             }
    3909              : 
    3910              :             // no shard needs to go first/last; the operation should be idempotent
    3911            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    3912            0 :             let mut results = self
    3913            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    3914            0 :                     futures::FutureExt::boxed(detach_one(
    3915            0 :                         tenant_shard_id,
    3916            0 :                         timeline_id,
    3917            0 :                         node,
    3918            0 :                         self.http_client.clone(),
    3919            0 :                         self.config.pageserver_jwt_token.clone(),
    3920            0 :                         behavior,
    3921            0 :                     ))
    3922            0 :                 })
    3923            0 :                 .await?;
    3924              : 
    3925            0 :             let any = results.pop().expect("we must have at least one response");
    3926            0 : 
    3927            0 :             let mismatching = results
    3928            0 :                 .iter()
    3929            0 :                 .filter(|(_, res)| res != &any.1)
    3930            0 :                 .collect::<Vec<_>>();
    3931            0 :             if !mismatching.is_empty() {
    3932              :                 // This can be hit by races, which should not happen thanks to the operation lock on the control plane.
    3933            0 :                 let matching = results.len() - mismatching.len();
    3934            0 :                 tracing::error!(
    3935              :                     matching,
    3936              :                     compared_against=?any,
    3937              :                     ?mismatching,
    3938            0 :                     "shards returned different results"
    3939              :                 );
    3940              : 
    3941            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!("pageservers returned mixed results for ancestor detach; manual intervention is required.")));
    3942            0 :             }
    3943            0 : 
    3944            0 :             Ok(any.1)
    3945            0 :         }).await?
    3946            0 :     }
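                      : 
                      :     // Standalone sketch of the consistency check above: all shards must agree
                      :     // on the detach result, and any divergence is surfaced as an error rather
                      :     // than papered over, since mixed results require manual repair.
                      :     fn all_shards_agree<T: PartialEq>(mut results: Vec<T>) -> Result<T, &'static str> {
                      :         let reference = results.pop().ok_or("no results")?;
                      :         if results.iter().all(|r| r == &reference) {
                      :             Ok(reference)
                      :         } else {
                      :             Err("shards returned different results")
                      :         }
                      :     }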
    3947              : 
    3948            0 :     pub(crate) async fn tenant_timeline_block_unblock_gc(
    3949            0 :         &self,
    3950            0 :         tenant_id: TenantId,
    3951            0 :         timeline_id: TimelineId,
    3952            0 :         dir: BlockUnblock,
    3953            0 :     ) -> Result<(), ApiError> {
    3954            0 :         let _tenant_lock = trace_shared_lock(
    3955            0 :             &self.tenant_op_locks,
    3956            0 :             tenant_id,
    3957            0 :             TenantOperations::TimelineGcBlockUnblock,
    3958            0 :         )
    3959            0 :         .await;
    3960              : 
    3961            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    3962            0 :             if targets.0.is_empty() {
    3963            0 :                 return Err(ApiError::NotFound(
    3964            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3965            0 :                 ));
    3966            0 :             }
    3967              : 
    3968            0 :             async fn do_one(
    3969            0 :                 tenant_shard_id: TenantShardId,
    3970            0 :                 timeline_id: TimelineId,
    3971            0 :                 node: Node,
    3972            0 :                 http_client: reqwest::Client,
    3973            0 :                 jwt: Option<String>,
    3974            0 :                 dir: BlockUnblock,
    3975            0 :             ) -> Result<(), ApiError> {
    3976            0 :                 let client = PageserverClient::new(
    3977            0 :                     node.get_id(),
    3978            0 :                     http_client,
    3979            0 :                     node.base_url(),
    3980            0 :                     jwt.as_deref(),
    3981            0 :                 );
    3982            0 : 
    3983            0 :                 client
    3984            0 :                     .timeline_block_unblock_gc(tenant_shard_id, timeline_id, dir)
    3985            0 :                     .await
    3986            0 :                     .map_err(|e| passthrough_api_error(&node, e))
    3987            0 :             }
    3988              : 
    3989              :             // no shard needs to go first/last; the operation should be idempotent
    3990            0 :             let locations = targets
    3991            0 :                 .0
    3992            0 :                 .iter()
    3993            0 :                 .map(|t| (*t.0, t.1.latest.node.clone()))
    3994            0 :                 .collect();
    3995            0 :             self.tenant_for_shards(locations, |tenant_shard_id, node| {
    3996            0 :                 futures::FutureExt::boxed(do_one(
    3997            0 :                     tenant_shard_id,
    3998            0 :                     timeline_id,
    3999            0 :                     node,
    4000            0 :                     self.http_client.clone(),
    4001            0 :                     self.config.pageserver_jwt_token.clone(),
    4002            0 :                     dir,
    4003            0 :                 ))
    4004            0 :             })
    4005            0 :             .await
    4006            0 :         })
    4007            0 :         .await??;
    4008            0 :         Ok(())
    4009            0 :     }
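                      : 
                      :     // Note on the `.await??` above, shown as a standalone sketch: per its
                      :     // signature, tenant_remote_mutation returns Result<R, ApiError> where R is
                      :     // itself the closure's Result, so the first `?` unwraps the
                      :     // lookup/generation-check layer and the second unwraps the closure's result.
                      :     fn double_question_mark() -> Result<(), String> {
                      :         fn nested() -> Result<Result<(), String>, String> { Ok(Ok(())) }
                      :         nested()??;
                      :         Ok(())
                      :     }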
    4010              : 
    4011            0 :     pub(crate) async fn tenant_timeline_lsn_lease(
    4012            0 :         &self,
    4013            0 :         tenant_id: TenantId,
    4014            0 :         timeline_id: TimelineId,
    4015            0 :         lsn: Lsn,
    4016            0 :     ) -> Result<LsnLease, ApiError> {
    4017            0 :         let _tenant_lock = trace_shared_lock(
    4018            0 :             &self.tenant_op_locks,
    4019            0 :             tenant_id,
    4020            0 :             TenantOperations::TimelineLsnLease,
    4021            0 :         )
    4022            0 :         .await;
    4023              : 
    4024            0 :         let targets = {
    4025            0 :             let locked = self.inner.read().unwrap();
    4026            0 :             let mut targets = Vec::new();
    4027            0 : 
    4028            0 :             // The lease must be held on every shard of the tenant, so target
    4029            0 :             // the full shard range for this tenant id.
    4030            0 :             let shards_range = TenantShardId::tenant_range(tenant_id);
    4031              : 
    4032            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4033            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4034            0 :                     let node = locked
    4035            0 :                         .nodes
    4036            0 :                         .get(node_id)
    4037            0 :                         .expect("Pageservers may not be deleted while referenced");
    4038            0 : 
    4039            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4040            0 :                 }
    4041              :             }
    4042            0 :             targets
    4043              :         };
    4044              : 
    4045            0 :         let res = self
    4046            0 :             .tenant_for_shards_api(
    4047            0 :                 targets,
    4048            0 :                 |tenant_shard_id, client| async move {
    4049            0 :                     client
    4050            0 :                         .timeline_lease_lsn(tenant_shard_id, timeline_id, lsn)
    4051            0 :                         .await
    4052            0 :                 },
    4053            0 :                 1,
    4054            0 :                 1,
    4055            0 :                 SHORT_RECONCILE_TIMEOUT,
    4056            0 :                 &self.cancel,
    4057            0 :             )
    4058            0 :             .await;
    4059              : 
    4060            0 :         let mut valid_until = None;
    4061            0 :         for r in res {
    4062            0 :             match r {
    4063            0 :                 Ok(lease) => {
    4064            0 :                     if let Some(ref mut valid_until) = valid_until {
    4065            0 :                         *valid_until = std::cmp::min(*valid_until, lease.valid_until);
    4066            0 :                     } else {
    4067            0 :                         valid_until = Some(lease.valid_until);
    4068            0 :                     }
    4069              :                 }
    4070            0 :                 Err(e) => {
    4071            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    4072              :                 }
    4073              :             }
    4074              :         }
    4075            0 :         Ok(LsnLease {
    4076            0 :             valid_until: valid_until.unwrap_or_else(SystemTime::now),
    4077            0 :         })
    4078            0 :     }
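                      : 
                      :     // Sketch of the aggregation above (standalone): the lease reported to the
                      :     // caller is the *shortest* per-shard lease, since it only protects the LSN
                      :     // while it holds on every shard; with no shards, "now" (an already-expired
                      :     // lease) is returned.
                      :     fn aggregate_lease_expiry(
                      :         leases: impl IntoIterator<Item = std::time::SystemTime>,
                      :     ) -> std::time::SystemTime {
                      :         leases
                      :             .into_iter()
                      :             .min()
                      :             .unwrap_or_else(std::time::SystemTime::now)
                      :     }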
    4079              : 
    4080            0 :     pub(crate) async fn tenant_timeline_download_heatmap_layers(
    4081            0 :         &self,
    4082            0 :         tenant_shard_id: TenantShardId,
    4083            0 :         timeline_id: TimelineId,
    4084            0 :         concurrency: Option<usize>,
    4085            0 :         recurse: bool,
    4086            0 :     ) -> Result<(), ApiError> {
    4087            0 :         let _tenant_lock = trace_shared_lock(
    4088            0 :             &self.tenant_op_locks,
    4089            0 :             tenant_shard_id.tenant_id,
    4090            0 :             TenantOperations::DownloadHeatmapLayers,
    4091            0 :         )
    4092            0 :         .await;
    4093              : 
    4094            0 :         let targets = {
    4095            0 :             let locked = self.inner.read().unwrap();
    4096            0 :             let mut targets = Vec::new();
    4097              : 
    4098              :             // If the request got an unsharded tenant id, then apply
    4099              :             // the operation to all shards. Otherwise, apply it to a specific shard.
    4100            0 :             let shards_range = if tenant_shard_id.is_unsharded() {
    4101            0 :                 TenantShardId::tenant_range(tenant_shard_id.tenant_id)
    4102              :             } else {
    4103            0 :                 tenant_shard_id.range()
    4104              :             };
    4105              : 
    4106            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4107            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4108            0 :                     let node = locked
    4109            0 :                         .nodes
    4110            0 :                         .get(node_id)
    4111            0 :                         .expect("Pageservers may not be deleted while referenced");
    4112            0 : 
    4113            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4114            0 :                 }
    4115              :             }
    4116            0 :             targets
    4117            0 :         };
    4118            0 : 
    4119            0 :         self.tenant_for_shards_api(
    4120            0 :             targets,
    4121            0 :             |tenant_shard_id, client| async move {
    4122            0 :                 client
    4123            0 :                     .timeline_download_heatmap_layers(
    4124            0 :                         tenant_shard_id,
    4125            0 :                         timeline_id,
    4126            0 :                         concurrency,
    4127            0 :                         recurse,
    4128            0 :                     )
    4129            0 :                     .await
    4130            0 :             },
    4131            0 :             1,
    4132            0 :             1,
    4133            0 :             SHORT_RECONCILE_TIMEOUT,
    4134            0 :             &self.cancel,
    4135            0 :         )
    4136            0 :         .await;
    4137              : 
    4138            0 :         Ok(())
    4139            0 :     }
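                      : 
                      :     // Standalone sketch of the range selection above: an unsharded tenant id
                      :     // fans out to every shard of the tenant, while a concrete shard id targets
                      :     // only itself. Shard ids are modeled as (tenant, shard_number) pairs here.
                      :     fn target_shards(
                      :         tenant: u32,
                      :         shard: Option<u8>, // None models an unsharded (tenant-wide) request
                      :         all: &[(u32, u8)],
                      :     ) -> Vec<(u32, u8)> {
                      :         all.iter()
                      :             .copied()
                      :             .filter(|(t, s)| *t == tenant && shard.map_or(true, |want| *s == want))
                      :             .collect()
                      :     }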
    4140              : 
    4141              :     /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
    4142              :     ///
    4143              :     /// On success, the returned vector contains exactly the same number of elements as the input `locations`
    4144              :     /// and the element at index `i` is the result of `req_fn(locations[i])`.
    4145            0 :     async fn tenant_for_shards<F, R>(
    4146            0 :         &self,
    4147            0 :         locations: Vec<(TenantShardId, Node)>,
    4148            0 :         mut req_fn: F,
    4149            0 :     ) -> Result<Vec<R>, ApiError>
    4150            0 :     where
    4151            0 :         F: FnMut(
    4152            0 :             TenantShardId,
    4153            0 :             Node,
    4154            0 :         )
    4155            0 :             -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
    4156            0 :     {
    4157            0 :         let mut futs = FuturesUnordered::new();
    4158            0 :         let mut results = Vec::with_capacity(locations.len());
    4159              : 
    4160            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4161            0 :             let fut = req_fn(tenant_shard_id, node);
    4162            0 :             futs.push(async move { (idx, fut.await) });
    4163            0 :         }
    4164              : 
    4165            0 :         while let Some((idx, r)) = futs.next().await {
    4166            0 :             results.push((idx, r?));
    4167              :         }
    4168              : 
    4169            0 :         results.sort_by_key(|(idx, _)| *idx);
    4170            0 :         Ok(results.into_iter().map(|(_, r)| r).collect())
    4171            0 :     }
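                      : 
                      :     // Standalone sketch of the ordering trick above: FuturesUnordered yields
                      :     // results in completion order, so each future is tagged with its input index
                      :     // and the results are sorted back at the end, giving output[i] for input[i].
                      :     use futures::stream::{FuturesUnordered, StreamExt};
                      : 
                      :     async fn map_preserving_order<T, F, Fut>(inputs: Vec<T>, f: F) -> Vec<Fut::Output>
                      :     where
                      :         F: Fn(T) -> Fut,
                      :         Fut: std::future::Future,
                      :     {
                      :         let futs = FuturesUnordered::new();
                      :         for (idx, input) in inputs.into_iter().enumerate() {
                      :             let fut = f(input);
                      :             futs.push(async move { (idx, fut.await) });
                      :         }
                      :         let mut results: Vec<_> = futs.collect().await;
                      :         results.sort_by_key(|(idx, _)| *idx);
                      :         results.into_iter().map(|(_, r)| r).collect()
                      :     }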
    4172              : 
    4173              :     /// Concurrently invoke a pageserver API call on many shards at once.
    4174              :     ///
    4175              :     /// The returned Vec has the same length as the `locations` Vec,
    4176              :     /// and the element at index `i` is the result of `op(locations[i])`.
    4177            0 :     pub(crate) async fn tenant_for_shards_api<T, O, F>(
    4178            0 :         &self,
    4179            0 :         locations: Vec<(TenantShardId, Node)>,
    4180            0 :         op: O,
    4181            0 :         warn_threshold: u32,
    4182            0 :         max_retries: u32,
    4183            0 :         timeout: Duration,
    4184            0 :         cancel: &CancellationToken,
    4185            0 :     ) -> Vec<mgmt_api::Result<T>>
    4186            0 :     where
    4187            0 :         O: Fn(TenantShardId, PageserverClient) -> F + Copy,
    4188            0 :         F: std::future::Future<Output = mgmt_api::Result<T>>,
    4189            0 :     {
    4190            0 :         let mut futs = FuturesUnordered::new();
    4191            0 :         let mut results = Vec::with_capacity(locations.len());
    4192              : 
    4193            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4194            0 :             futs.push(async move {
    4195            0 :                 let r = node
    4196            0 :                     .with_client_retries(
    4197            0 :                         |client| op(tenant_shard_id, client),
    4198            0 :                         &self.http_client,
    4199            0 :                         &self.config.pageserver_jwt_token,
    4200            0 :                         warn_threshold,
    4201            0 :                         max_retries,
    4202            0 :                         timeout,
    4203            0 :                         cancel,
    4204            0 :                     )
    4205            0 :                     .await;
    4206            0 :                 (idx, r)
    4207            0 :             });
    4208            0 :         }
    4209              : 
    4210            0 :         while let Some((idx, r)) = futs.next().await {
    4211            0 :             results.push((idx, r.unwrap_or(Err(mgmt_api::Error::Cancelled))));
    4212            0 :         }
    4213              : 
    4214            0 :         results.sort_by_key(|(idx, _)| *idx);
    4215            0 :         results.into_iter().map(|(_, r)| r).collect()
    4216            0 :     }
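                      : 
                      :     // Sketch of the cancellation handling above (error type illustrative): the
                      :     // retry helper yields Option<Result<T, E>>, where None means it gave up due
                      :     // to shutdown; folding None into an explicit Cancelled error keeps the
                      :     // output Vec aligned one-to-one with the input locations.
                      :     #[derive(Debug, PartialEq)]
                      :     enum MgmtErrorSketch { Cancelled }
                      : 
                      :     fn fold_cancellation<T>(r: Option<Result<T, MgmtErrorSketch>>) -> Result<T, MgmtErrorSketch> {
                      :         r.unwrap_or(Err(MgmtErrorSketch::Cancelled))
                      :     }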
    4217              : 
    4218              :     /// Helper for safely working with the shards in a tenant remotely on pageservers, for example
    4219              :     /// when creating and deleting timelines:
    4220              :     /// - Makes sure shards are attached somewhere if they weren't already
    4221              :     /// - Looks up the shards and the nodes where they were most recently attached
    4222              :     /// - Guarantees that after the inner function returns, the shards' generations haven't moved on: this
    4223              :     ///   ensures that the remote operation acted on the most recent generation, and is therefore durable.
    4224            0 :     async fn tenant_remote_mutation<R, O, F>(
    4225            0 :         &self,
    4226            0 :         tenant_id: TenantId,
    4227            0 :         op: O,
    4228            0 :     ) -> Result<R, ApiError>
    4229            0 :     where
    4230            0 :         O: FnOnce(TenantMutationLocations) -> F,
    4231            0 :         F: std::future::Future<Output = R>,
    4232            0 :     {
    4233            0 :         let mutation_locations = {
    4234            0 :             let mut locations = TenantMutationLocations::default();
    4235              : 
    4236              :             // Load the currently attached pageservers for the latest generation of each shard.  This can
    4237              :             // run concurrently with reconciliations, and it is not guaranteed that the node we find here
    4238              :             // will still be the latest when we're done: we will check generations again at the end of
    4239              :             // this function to handle that.
    4240            0 :             let generations = self.persistence.tenant_generations(tenant_id).await?;
    4241              : 
    4242            0 :             if generations
    4243            0 :                 .iter()
    4244            0 :                 .any(|i| i.generation.is_none() || i.generation_pageserver.is_none())
    4245              :             {
    4246            0 :                 let shard_generations = generations
    4247            0 :                     .into_iter()
    4248            0 :                     .map(|i| (i.tenant_shard_id, (i.generation, i.generation_pageserver)))
    4249            0 :                     .collect::<HashMap<_, _>>();
    4250            0 : 
    4251            0 :                 // One or more shards have not been attached to a pageserver.  Check whether this is because they are
    4252            0 :                 // configured to be detached (409: caller should give up), or meant to be attached but not yet (503: caller should retry).
    4253            0 :                 let locked = self.inner.read().unwrap();
    4254            0 :                 for (shard_id, shard) in
    4255            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    4256              :                 {
    4257            0 :                     match shard.policy {
    4258              :                         PlacementPolicy::Attached(_) => {
    4259              :                             // This shard is meant to be attached: the caller is not wrong to try and
    4260              :                             // use this function, but we can't service the request right now.
    4261            0 :                             let Some(generation) = shard_generations.get(shard_id) else {
    4262              :                                 // This can only happen if a split-brain controller is modifying the database.  This should
    4263              :                                 // never happen in testing, and if it happens in production we can only log the issue.
    4264            0 :                                 debug_assert!(false);
    4265            0 :                                 tracing::error!(
    4266            0 :                                     "Shard {shard_id} not found in generation state!  Is another rogue controller running?"
    4267              :                                 );
    4268            0 :                                 continue;
    4269              :                             };
    4270            0 :                             let (generation, generation_pageserver) = generation;
    4271            0 :                             if let Some(generation) = generation {
    4272            0 :                                 if generation_pageserver.is_none() {
    4273              :                                     // This is legitimate only in a very narrow window where the shard was only just configured into
    4274              :                                     // Attached mode after being created in Secondary or Detached mode, and it has had its generation
    4275              :                                     // set but not yet had a Reconciler run (reconciler is the only thing that sets generation_pageserver).
    4276            0 :                                     tracing::warn!(
    4277            0 :                                         "Shard {shard_id} generation is set ({generation:?}) but generation_pageserver is None, reconciler not run yet?"
    4278              :                                     );
    4279            0 :                                 }
    4280              :                             } else {
    4281              :                                 // This should never happen: a shard with no generation is only permitted when it was created in some state
    4282              :                                 // other than PlacementPolicy::Attached (and generation is always written to DB before setting Attached in memory)
    4283            0 :                                 debug_assert!(false);
    4284            0 :                                 tracing::error!(
    4285            0 :                                     "Shard {shard_id} generation is None, but it is in PlacementPolicy::Attached mode!"
    4286              :                                 );
    4287            0 :                                 continue;
    4288              :                             }
    4289              :                         }
    4290              :                         PlacementPolicy::Secondary | PlacementPolicy::Detached => {
    4291            0 :                             return Err(ApiError::Conflict(format!(
    4292            0 :                                 "Shard {shard_id} tenant has policy {:?}",
    4293            0 :                                 shard.policy
    4294            0 :                             )));
    4295              :                         }
    4296              :                     }
    4297              :                 }
    4298              : 
    4299            0 :                 return Err(ApiError::ResourceUnavailable(
    4300            0 :                     "One or more shards in the tenant are not yet attached".into(),
    4301            0 :                 ));
    4302            0 :             }
    4303            0 : 
    4304            0 :             let locked = self.inner.read().unwrap();
    4305              :             for ShardGenerationState {
    4306            0 :                 tenant_shard_id,
    4307            0 :                 generation,
    4308            0 :                 generation_pageserver,
    4309            0 :             } in generations
    4310              :             {
    4311            0 :                 let node_id = generation_pageserver.expect("We checked for None above");
    4312            0 :                 let node = locked
    4313            0 :                     .nodes
    4314            0 :                     .get(&node_id)
    4315            0 :                     .ok_or(ApiError::Conflict(format!(
    4316            0 :                         "Raced with removal of node {node_id}"
    4317            0 :                     )))?;
    4318            0 :                 let generation = generation.expect("Checked above");
    4319            0 : 
    4320            0 :                 let tenant = locked.tenants.get(&tenant_shard_id);
    4321              : 
    4322              :                 // TODO(vlad): Abstract the logic that finds stale attached locations
    4323              :                 // from observed state into a [`Service`] method.
    4324            0 :                 let other_locations = match tenant {
    4325            0 :                     Some(tenant) => {
    4326            0 :                         let mut other = tenant.attached_locations();
    4327            0 :                         let latest_location_index =
    4328            0 :                             other.iter().position(|&l| l == (node.get_id(), generation));
    4329            0 :                         if let Some(idx) = latest_location_index {
    4330            0 :                             other.remove(idx);
    4331            0 :                         }
    4332              : 
    4333            0 :                         other
    4334              :                     }
    4335            0 :                     None => Vec::default(),
    4336              :                 };
    4337              : 
    4338            0 :                 let location = ShardMutationLocations {
    4339            0 :                     latest: MutationLocation {
    4340            0 :                         node: node.clone(),
    4341            0 :                         generation,
    4342            0 :                     },
    4343            0 :                     other: other_locations
    4344            0 :                         .into_iter()
    4345            0 :                         .filter_map(|(node_id, generation)| {
    4346            0 :                             let node = locked.nodes.get(&node_id)?;
    4347              : 
    4348            0 :                             Some(MutationLocation {
    4349            0 :                                 node: node.clone(),
    4350            0 :                                 generation,
    4351            0 :                             })
    4352            0 :                         })
    4353            0 :                         .collect(),
    4354            0 :                 };
    4355            0 :                 locations.0.insert(tenant_shard_id, location);
    4356            0 :             }
    4357              : 
    4358            0 :             locations
    4359              :         };
    4360              : 
    4361            0 :         let result = op(mutation_locations.clone()).await;
    4362              : 
    4363              :         // Post-check: are all the generations of all the shards the same as they were initially?  This proves that
    4364              :         // our remote operation executed on the latest generation and is therefore persistent.
    4365              :         {
    4366            0 :             let latest_generations = self.persistence.tenant_generations(tenant_id).await?;
    4367            0 :             if latest_generations
    4368            0 :                 .into_iter()
    4369            0 :                 .map(
    4370            0 :                     |ShardGenerationState {
    4371              :                          tenant_shard_id,
    4372              :                          generation,
    4373              :                          generation_pageserver: _,
    4374            0 :                      }| (tenant_shard_id, generation),
    4375            0 :                 )
    4376            0 :                 .collect::<Vec<_>>()
    4377            0 :                 != mutation_locations
    4378            0 :                     .0
    4379            0 :                     .into_iter()
    4380            0 :                     .map(|i| (i.0, Some(i.1.latest.generation)))
    4381            0 :                     .collect::<Vec<_>>()
    4382              :             {
    4383              :                 // We raced with something that incremented the generation, and therefore cannot be
    4384              :                 // confident that our actions are persistent (they might have hit an old generation).
    4385              :                 //
    4386              :                 // This is safe but requires a retry: ask the client to do that by giving them a 503 response.
    4387            0 :                 return Err(ApiError::ResourceUnavailable(
    4388            0 :                     "Tenant attachment changed, please retry".into(),
    4389            0 :                 ));
    4390            0 :             }
    4391            0 :         }
    4392            0 : 
    4393            0 :         Ok(result)
    4394            0 :     }
    4395              : 
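
// A minimal, self-contained sketch of the generation post-check pattern used above:
// snapshot each shard's generation, run the remote operation, then re-read and compare.
// If any generation moved, the mutation may have landed on a stale attachment, so the
// caller is asked to retry. All names and types below are illustrative stand-ins, not
// this crate's API.
use std::collections::BTreeMap;

fn run_with_generation_post_check<T>(
    load_generations: impl Fn() -> BTreeMap<u32, u64>, // shard id -> generation
    op: impl FnOnce() -> T,
) -> Result<T, &'static str> {
    let before = load_generations();
    let result = op();
    if load_generations() == before {
        Ok(result) // no generation moved: the operation hit the latest attachment
    } else {
        Err("tenant attachment changed, please retry") // safe, but retryable
    }
}
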
    4396            0 :     pub(crate) async fn tenant_timeline_delete(
    4397            0 :         self: &Arc<Self>,
    4398            0 :         tenant_id: TenantId,
    4399            0 :         timeline_id: TimelineId,
    4400            0 :     ) -> Result<StatusCode, ApiError> {
    4401            0 :         tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
    4402            0 :         let _tenant_lock = trace_shared_lock(
    4403            0 :             &self.tenant_op_locks,
    4404            0 :             tenant_id,
    4405            0 :             TenantOperations::TimelineDelete,
    4406            0 :         )
    4407            0 :         .await;
    4408              : 
    4409            0 :         let status_code = self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    4410            0 :             if targets.0.is_empty() {
    4411            0 :                 return Err(ApiError::NotFound(
    4412            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4413            0 :                 ));
    4414            0 :             }
    4415            0 : 
    4416            0 :             let (shard_zero_tid, shard_zero_locations) = targets.0.pop_first().expect("Must have at least one shard");
    4417            0 :             assert!(shard_zero_tid.is_shard_zero());
    4418              : 
    4419            0 :             async fn delete_one(
    4420            0 :                 tenant_shard_id: TenantShardId,
    4421            0 :                 timeline_id: TimelineId,
    4422            0 :                 node: Node,
    4423            0 :                 http_client: reqwest::Client,
    4424            0 :                 jwt: Option<String>,
    4425            0 :             ) -> Result<StatusCode, ApiError> {
    4426            0 :                 tracing::info!(
    4427            0 :                     "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4428              :                 );
    4429              : 
    4430            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    4431            0 :                 let res = client
    4432            0 :                     .timeline_delete(tenant_shard_id, timeline_id)
    4433            0 :                     .await;
    4434              : 
    4435            0 :                 match res {
    4436            0 :                     Ok(ok) => Ok(ok),
    4437            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::CONFLICT, _)) => Ok(StatusCode::CONFLICT),
    4438            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg)) => Err(ApiError::ResourceUnavailable(msg.into())),
    4439            0 :                     Err(e) => {
    4440            0 :                         Err(
    4441            0 :                             ApiError::InternalServerError(anyhow::anyhow!(
    4442            0 :                                 "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
    4443            0 :                             ))
    4444            0 :                         )
    4445              :                     }
    4446              :                 }
    4447            0 :             }
    4448              : 
    4449            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4450            0 :             let statuses = self
    4451            0 :                 .tenant_for_shards(locations, |tenant_shard_id: TenantShardId, node: Node| {
    4452            0 :                     Box::pin(delete_one(
    4453            0 :                         tenant_shard_id,
    4454            0 :                         timeline_id,
    4455            0 :                         node,
    4456            0 :                         self.http_client.clone(),
    4457            0 :                         self.config.pageserver_jwt_token.clone(),
    4458            0 :                     ))
    4459            0 :                 })
    4460            0 :                 .await?;
    4461              : 
    4462              :             // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero.
    4463              :             // We return 409 (Conflict) if deletion was already in progress on any shard, and 202
    4464              :             // (Accepted) if deletion is newly underway; shard zero is deleted only after all other shards report 404.
    4465            0 :             if statuses.iter().any(|s| s == &StatusCode::CONFLICT) {
    4466            0 :                 return Ok(StatusCode::CONFLICT);
    4467            0 :             }
    4468            0 : 
    4469            0 :             if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
    4470            0 :                 return Ok(StatusCode::ACCEPTED);
    4471            0 :             }
    4472              : 
    4473              :             // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
    4474              :             // to shard zero, it gives a more obvious behavior that a GET returns 404 once the deletion is done.
    4475            0 :             let shard_zero_status = delete_one(
    4476            0 :                 shard_zero_tid,
    4477            0 :                 timeline_id,
    4478            0 :                 shard_zero_locations.latest.node,
    4479            0 :                 self.http_client.clone(),
    4480            0 :                 self.config.pageserver_jwt_token.clone(),
    4481            0 :             )
    4482            0 :             .await?;
    4483            0 :             Ok(shard_zero_status)
    4484            0 :         }).await?;
    4485              : 
    4486            0 :         self.tenant_timeline_delete_safekeepers(tenant_id, timeline_id)
    4487            0 :             .await?;
    4488              : 
    4489            0 :         status_code
    4490            0 :     }
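
// A small sketch of the status-aggregation rule implemented above, with stand-in
// types: 409 on any shard wins, then any non-404 shard yields 202, and only when
// every shard >0 reports 404 does the caller go on to delete shard zero.
#[derive(Clone, Copy, PartialEq, Eq)]
enum DeleteStatus {
    Accepted, // 202: deletion newly started on this shard
    Conflict, // 409: deletion was already in progress
    NotFound, // 404: this shard is already gone
}

fn aggregate(nonzero_shards: &[DeleteStatus]) -> Option<DeleteStatus> {
    if nonzero_shards.iter().any(|s| *s == DeleteStatus::Conflict) {
        return Some(DeleteStatus::Conflict);
    }
    if nonzero_shards.iter().any(|s| *s != DeleteStatus::NotFound) {
        return Some(DeleteStatus::Accepted);
    }
    None // every shard >0 reports 404: safe to delete shard zero last
}
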
    4491              :     /// Use this when you know the TenantId but not a specific shard, and you would like to get the node holding shard 0.
    4492            0 :     pub(crate) async fn tenant_shard0_node(
    4493            0 :         &self,
    4494            0 :         tenant_id: TenantId,
    4495            0 :     ) -> Result<(Node, TenantShardId), ApiError> {
    4496            0 :         let tenant_shard_id = {
    4497            0 :             let locked = self.inner.read().unwrap();
    4498            0 :             let Some((tenant_shard_id, _shard)) = locked
    4499            0 :                 .tenants
    4500            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4501            0 :                 .next()
    4502              :             else {
    4503            0 :                 return Err(ApiError::NotFound(
    4504            0 :                     anyhow::anyhow!("Tenant {tenant_id} not found").into(),
    4505            0 :                 ));
    4506              :             };
    4507              : 
    4508            0 :             *tenant_shard_id
    4509            0 :         };
    4510            0 : 
    4511            0 :         self.tenant_shard_node(tenant_shard_id)
    4512            0 :             .await
    4513            0 :             .map(|node| (node, tenant_shard_id))
    4514            0 :     }
    4515              : 
    4516              :     /// When you need to send an HTTP request to the pageserver that holds a shard of a tenant, this
    4517              :     /// function looks up and returns the node. If the shard isn't found, it returns Err(ApiError::NotFound).
    4518            0 :     pub(crate) async fn tenant_shard_node(
    4519            0 :         &self,
    4520            0 :         tenant_shard_id: TenantShardId,
    4521            0 :     ) -> Result<Node, ApiError> {
    4522            0 :         // Look up in-memory state and maybe use the node from there.
    4523            0 :         {
    4524            0 :             let locked = self.inner.read().unwrap();
    4525            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    4526            0 :                 return Err(ApiError::NotFound(
    4527            0 :                     anyhow::anyhow!("Tenant shard {tenant_shard_id} not found").into(),
    4528            0 :                 ));
    4529              :             };
    4530              : 
    4531            0 :             let Some(intent_node_id) = shard.intent.get_attached() else {
    4532            0 :                 tracing::warn!(
    4533            0 :                     tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
    4534            0 :                     "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
    4535              :                     shard.policy
    4536              :                 );
    4537            0 :                 return Err(ApiError::Conflict(
    4538            0 :                     "Cannot call timeline API on non-attached tenant".to_string(),
    4539            0 :                 ));
    4540              :             };
    4541              : 
    4542            0 :             if shard.reconciler.is_none() {
    4543              :                 // Optimization: while no reconcile is in flight, we may trust our in-memory state
    4544              :                 // to tell us which pageserver to use. Otherwise we will fall through and hit the database.
    4545            0 :                 let Some(node) = locked.nodes.get(intent_node_id) else {
    4546              :                     // This should never happen
    4547            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4548            0 :                         "Shard refers to nonexistent node"
    4549            0 :                     )));
    4550              :                 };
    4551            0 :                 return Ok(node.clone());
    4552            0 :             }
    4553              :         };
    4554              : 
    4555              :         // Look up the latest attached pageserver location from the database
    4556              :         // generation state: this will reflect the progress of any ongoing migration.
    4557              :         // Note that it is not guaranteed to _stay_ here, our caller must still handle
    4558              :         // the case where they call through to the pageserver and get a 404.
    4559            0 :         let db_result = self
    4560            0 :             .persistence
    4561            0 :             .tenant_generations(tenant_shard_id.tenant_id)
    4562            0 :             .await?;
    4563              :         let Some(ShardGenerationState {
    4564              :             tenant_shard_id: _,
    4565              :             generation: _,
    4566            0 :             generation_pageserver: Some(node_id),
    4567            0 :         }) = db_result
    4568            0 :             .into_iter()
    4569            0 :             .find(|s| s.tenant_shard_id == tenant_shard_id)
    4570              :         else {
    4571              :             // This can happen if we raced with a tenant deletion or a shard split.  On a retry
    4572              :             // the caller will either succeed (shard split case), get a proper 404 (deletion case),
    4573              :             // or a conflict response (case where tenant was detached in background)
    4574            0 :             return Err(ApiError::ResourceUnavailable(
    4575            0 :                 format!("Shard {tenant_shard_id} not found in database, or is not attached").into(),
    4576            0 :             ));
    4577              :         };
    4578            0 :         let locked = self.inner.read().unwrap();
    4579            0 :         let Some(node) = locked.nodes.get(&node_id) else {
    4580              :             // This should never happen
    4581            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4582            0 :                 "Shard refers to nonexistent node"
    4583            0 :             )));
    4584              :         };
    4585              : 
    4586            0 :         Ok(node.clone())
    4587            0 :     }
    4588              : 
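
// A sketch of the two-tier lookup in tenant_shard_node, with placeholder types:
// while no reconcile is in flight the in-memory intent is trusted; during a
// migration the database's generation_pageserver reflects actual progress.
fn resolve_attached_node(
    intent_node: Option<u64>,
    reconcile_in_flight: bool,
    db_lookup: impl FnOnce() -> Option<u64>,
) -> Option<u64> {
    match (intent_node, reconcile_in_flight) {
        // Stable: no reconcile running, the in-memory intent is trustworthy.
        (Some(node), false) => Some(node),
        // Migrating or unknown: the database reflects actual progress.
        _ => db_lookup(),
    }
}
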
    4589            0 :     pub(crate) fn tenant_locate(
    4590            0 :         &self,
    4591            0 :         tenant_id: TenantId,
    4592            0 :     ) -> Result<TenantLocateResponse, ApiError> {
    4593            0 :         let locked = self.inner.read().unwrap();
    4594            0 :         tracing::info!("Locating shards for tenant {tenant_id}");
    4595              : 
    4596            0 :         let mut result = Vec::new();
    4597            0 :         let mut shard_params: Option<ShardParameters> = None;
    4598              : 
    4599            0 :         for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    4600              :         {
    4601            0 :             let node_id =
    4602            0 :                 shard
    4603            0 :                     .intent
    4604            0 :                     .get_attached()
    4605            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    4606            0 :                         "Cannot locate a tenant that is not attached"
    4607            0 :                     )))?;
    4608              : 
    4609            0 :             let node = locked
    4610            0 :                 .nodes
    4611            0 :                 .get(&node_id)
    4612            0 :                 .expect("Pageservers may not be deleted while referenced");
    4613            0 : 
    4614            0 :             result.push(node.shard_location(*tenant_shard_id));
    4615            0 : 
    4616            0 :             match &shard_params {
    4617            0 :                 None => {
    4618            0 :                     shard_params = Some(ShardParameters {
    4619            0 :                         stripe_size: shard.shard.stripe_size,
    4620            0 :                         count: shard.shard.count,
    4621            0 :                     });
    4622            0 :                 }
    4623            0 :                 Some(params) => {
    4624            0 :                     if params.stripe_size != shard.shard.stripe_size {
    4625              :                         // This should never happen.  We enforce at runtime because it's simpler than
    4626              :                         // adding an extra per-tenant data structure to store the things that should be the same
    4627            0 :                         return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4628            0 :                             "Inconsistent shard stripe size parameters!"
    4629            0 :                         )));
    4630            0 :                     }
    4631              :                 }
    4632              :             }
    4633              :         }
    4634              : 
    4635            0 :         if result.is_empty() {
    4636            0 :             return Err(ApiError::NotFound(
    4637            0 :                 anyhow::anyhow!("No shards for this tenant ID found").into(),
    4638            0 :             ));
    4639            0 :         }
    4640            0 :         let shard_params = shard_params.expect("result is non-empty, therefore this is set");
    4641            0 :         tracing::info!(
    4642            0 :             "Located tenant {} with params {:?} on shards {}",
    4643            0 :             tenant_id,
    4644            0 :             shard_params,
    4645            0 :             result
    4646            0 :                 .iter()
    4647            0 :                 .map(|s| format!("{:?}", s))
    4648            0 :                 .collect::<Vec<_>>()
    4649            0 :                 .join(",")
    4650              :         );
    4651              : 
    4652            0 :         Ok(TenantLocateResponse {
    4653            0 :             shards: result,
    4654            0 :             shard_params,
    4655            0 :         })
    4656            0 :     }
    4657              : 
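
// The stripe-size check in tenant_locate above is an instance of a "first item
// sets the value, the rest must match" fold; a generic sketch (illustrative,
// not crate code):
fn all_same<T: PartialEq>(items: impl Iterator<Item = T>) -> Result<Option<T>, &'static str> {
    let mut first: Option<T> = None;
    for item in items {
        match &first {
            None => first = Some(item),
            Some(f) if *f == item => {} // matches the first value: fine
            Some(_) => return Err("inconsistent shard parameters"),
        }
    }
    Ok(first) // None only if the input was empty
}
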
    4658              :     /// Returns None if the input iterator of shards does not include a shard with number=0
    4659            0 :     fn tenant_describe_impl<'a>(
    4660            0 :         &self,
    4661            0 :         shards: impl Iterator<Item = &'a TenantShard>,
    4662            0 :     ) -> Option<TenantDescribeResponse> {
    4663            0 :         let mut shard_zero = None;
    4664            0 :         let mut describe_shards = Vec::new();
    4665              : 
    4666            0 :         for shard in shards {
    4667            0 :             if shard.tenant_shard_id.is_shard_zero() {
    4668            0 :                 shard_zero = Some(shard);
    4669            0 :             }
    4670              : 
    4671            0 :             describe_shards.push(TenantDescribeResponseShard {
    4672            0 :                 tenant_shard_id: shard.tenant_shard_id,
    4673            0 :                 node_attached: *shard.intent.get_attached(),
    4674            0 :                 node_secondary: shard.intent.get_secondary().to_vec(),
    4675            0 :                 last_error: shard
    4676            0 :                     .last_error
    4677            0 :                     .lock()
    4678            0 :                     .unwrap()
    4679            0 :                     .as_ref()
    4680            0 :                     .map(|e| format!("{e}"))
    4681            0 :                     .unwrap_or("".to_string())
    4682            0 :                     .clone(),
    4683            0 :                 is_reconciling: shard.reconciler.is_some(),
    4684            0 :                 is_pending_compute_notification: shard.pending_compute_notification,
    4685            0 :                 is_splitting: matches!(shard.splitting, SplitState::Splitting),
    4686            0 :                 scheduling_policy: shard.get_scheduling_policy(),
    4687            0 :                 preferred_az_id: shard.preferred_az().map(ToString::to_string),
    4688              :             })
    4689              :         }
    4690              : 
    4691            0 :         let shard_zero = shard_zero?;
    4692              : 
    4693            0 :         Some(TenantDescribeResponse {
    4694            0 :             tenant_id: shard_zero.tenant_shard_id.tenant_id,
    4695            0 :             shards: describe_shards,
    4696            0 :             stripe_size: shard_zero.shard.stripe_size,
    4697            0 :             policy: shard_zero.policy.clone(),
    4698            0 :             config: shard_zero.config.clone(),
    4699            0 :         })
    4700            0 :     }
    4701              : 
    4702            0 :     pub(crate) fn tenant_describe(
    4703            0 :         &self,
    4704            0 :         tenant_id: TenantId,
    4705            0 :     ) -> Result<TenantDescribeResponse, ApiError> {
    4706            0 :         let locked = self.inner.read().unwrap();
    4707            0 : 
    4708            0 :         self.tenant_describe_impl(
    4709            0 :             locked
    4710            0 :                 .tenants
    4711            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4712            0 :                 .map(|(_k, v)| v),
    4713            0 :         )
    4714            0 :         .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
    4715            0 :     }
    4716              : 
    4717              :     /// `limit` & `start_after` are pagination parameters. Since we are walking an in-memory map, pagination does
    4718              :     /// not avoid traversing data, it just avoids returning it. This is suitable for our purposes: our in-memory
    4719              :     /// maps are small enough to traverse quickly, and the pagination exists only to avoid serializing huge JSON
    4720              :     /// responses in our external API.
    4721            0 :     pub(crate) fn tenant_list(
    4722            0 :         &self,
    4723            0 :         limit: Option<usize>,
    4724            0 :         start_after: Option<TenantId>,
    4725            0 :     ) -> Vec<TenantDescribeResponse> {
    4726            0 :         let locked = self.inner.read().unwrap();
    4727              : 
    4728              :         // Apply the start_after parameter
    4729            0 :         let shard_range = match start_after {
    4730            0 :             None => locked.tenants.range(..),
    4731            0 :             Some(tenant_id) => locked.tenants.range(
    4732            0 :                 TenantShardId {
    4733            0 :                     tenant_id,
    4734            0 :                     shard_number: ShardNumber(u8::MAX),
    4735            0 :                     shard_count: ShardCount(u8::MAX),
    4736            0 :                 }..,
    4737            0 :             ),
    4738              :         };
    4739              : 
    4740            0 :         let mut result = Vec::new();
    4741            0 :         for (_tenant_id, tenant_shards) in &shard_range.group_by(|(id, _shard)| id.tenant_id) {
    4742            0 :             result.push(
    4743            0 :                 self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
    4744            0 :                     .expect("Groups are always non-empty"),
    4745            0 :             );
    4746              : 
    4747              :             // Enforce `limit` parameter
    4748            0 :             if let Some(limit) = limit {
    4749            0 :                 if result.len() >= limit {
    4750            0 :                     break;
    4751            0 :                 }
    4752            0 :             }
    4753              :         }
    4754              : 
    4755            0 :         result
    4756            0 :     }
    4757              : 
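
// tenant_list's pagination is keyset-style: a sketch over a plain BTreeMap,
// resuming strictly after the last key the caller saw. The u64 key and the
// per-item limit are stand-ins (the real code groups shards per tenant).
use std::collections::BTreeMap;
use std::ops::Bound;

fn page<'a, V>(
    tenants: &'a BTreeMap<u64, V>,
    start_after: Option<u64>,
    limit: usize,
) -> Vec<(&'a u64, &'a V)> {
    let iter: Box<dyn Iterator<Item = (&'a u64, &'a V)> + 'a> = match start_after {
        None => Box::new(tenants.iter()),
        // Excluded lower bound: resume strictly after the last key the caller saw.
        Some(k) => Box::new(tenants.range((Bound::Excluded(k), Bound::Unbounded))),
    };
    iter.take(limit).collect()
}
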
    4758              :     #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
    4759              :     async fn abort_tenant_shard_split(
    4760              :         &self,
    4761              :         op: &TenantShardSplitAbort,
    4762              :     ) -> Result<(), TenantShardSplitAbortError> {
    4763              :         // Cleaning up a split:
    4764              :         // - Parent shards are not destroyed during a split, just detached.
    4765              :         // - Failed pageserver split API calls can leave the remote node with just the parent attached,
    4766              :         //   just the children attached, or both.
    4767              :         //
    4768              :         // Therefore our work to do is to:
    4769              :         // 1. Clean up storage controller's internal state to just refer to parents, no children
    4770              :         // 2. Call out to pageservers to ensure that children are detached
    4771              :         // 3. Call out to pageservers to ensure that parents are attached.
    4772              :         //
    4773              :         // Crash safety:
    4774              :         // - If the storage controller stops running during this cleanup *after* clearing the splitting state
    4775              :         //   from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
    4776              :         //   and detach them.
    4777              :         // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
    4778              :         //   from our database, then we will re-enter this cleanup routine on startup.
    4779              : 
    4780              :         let TenantShardSplitAbort {
    4781              :             tenant_id,
    4782              :             new_shard_count,
    4783              :             new_stripe_size,
    4784              :             ..
    4785              :         } = op;
    4786              : 
    4787              :         // First abort persistent state, if any exists.
    4788              :         match self
    4789              :             .persistence
    4790              :             .abort_shard_split(*tenant_id, *new_shard_count)
    4791              :             .await?
    4792              :         {
    4793              :             AbortShardSplitStatus::Aborted => {
    4794              :                 // Proceed to roll back any child shards created on pageservers
    4795              :             }
    4796              :             AbortShardSplitStatus::Complete => {
    4797              :                 // The split completed (we might hit that path if e.g. our database transaction
    4798              :                 // to write the completion landed in the database, but we dropped connection
    4799              :                 // before seeing the result).
    4800              :                 //
    4801              :                 // We must update in-memory state to reflect the successful split.
    4802              :                 self.tenant_shard_split_commit_inmem(
    4803              :                     *tenant_id,
    4804              :                     *new_shard_count,
    4805              :                     *new_stripe_size,
    4806              :                 );
    4807              :                 return Ok(());
    4808              :             }
    4809              :         }
    4810              : 
    4811              :         // Clean up in-memory state, and accumulate the list of child locations that need detaching
    4812              :         let detach_locations: Vec<(Node, TenantShardId)> = {
    4813              :             let mut detach_locations = Vec::new();
    4814              :             let mut locked = self.inner.write().unwrap();
    4815              :             let (nodes, tenants, scheduler) = locked.parts_mut();
    4816              : 
    4817              :             for (tenant_shard_id, shard) in
    4818              :                 tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
    4819              :             {
    4820              :                 if shard.shard.count == op.new_shard_count {
    4821              :                     // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
    4822              :                     // is infallible, so if we got an error we shouldn't have got that far.
    4823              :                     tracing::warn!(
    4824              :                         "During split abort, child shard {tenant_shard_id} found in-memory"
    4825              :                     );
    4826              :                     continue;
    4827              :                 }
    4828              : 
    4829              :                 // Add the children of this shard to this list of things to detach
    4830              :                 if let Some(node_id) = shard.intent.get_attached() {
    4831              :                     for child_id in tenant_shard_id.split(*new_shard_count) {
    4832              :                         detach_locations.push((
    4833              :                             nodes
    4834              :                                 .get(node_id)
    4835              :                                 .expect("Intent references nonexistent node")
    4836              :                                 .clone(),
    4837              :                             child_id,
    4838              :                         ));
    4839              :                     }
    4840              :                 } else {
    4841              :                     tracing::warn!(
    4842              :                         "During split abort, shard {tenant_shard_id} has no attached location"
    4843              :                     );
    4844              :                 }
    4845              : 
    4846              :                 tracing::info!("Restoring parent shard {tenant_shard_id}");
    4847              : 
    4848              :                 // Drop any intents that refer to unavailable nodes, to enable this abort to proceed even
    4849              :                 // if the original attachment location is offline.
    4850              :                 if let Some(node_id) = shard.intent.get_attached() {
    4851              :                     if !nodes.get(node_id).unwrap().is_available() {
    4852              :                         tracing::info!(
    4853              :                             "Demoting attached intent for {tenant_shard_id} on unavailable node {node_id}"
    4854              :                         );
    4855              :                         shard.intent.demote_attached(scheduler, *node_id);
    4856              :                     }
    4857              :                 }
    4858              :                 for node_id in shard.intent.get_secondary().clone() {
    4859              :                     if !nodes.get(&node_id).unwrap().is_available() {
    4860              :                         tracing::info!(
    4861              :                             "Dropping secondary intent for {tenant_shard_id} on unavailable node {node_id}"
    4862              :                         );
    4863              :                         shard.intent.remove_secondary(scheduler, node_id);
    4864              :                     }
    4865              :                 }
    4866              : 
    4867              :                 shard.splitting = SplitState::Idle;
    4868              :                 if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
    4869              :                     // If this shard can't be scheduled now (perhaps due to offline nodes or
    4870              :                     // capacity issues), that must not prevent us rolling back a split.  In this
    4871              :                     // case it should be eventually scheduled in the background.
    4872              :                     tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
    4873              :                 }
    4874              : 
    4875              :                 self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    4876              :             }
    4877              : 
    4878              :             // We don't expect any new_shard_count shards to exist here, but drop them just in case
    4879            0 :             tenants.retain(|_id, s| s.shard.count != *new_shard_count);
    4880              : 
    4881              :             detach_locations
    4882              :         };
    4883              : 
    4884              :         for (node, child_id) in detach_locations {
    4885              :             if !node.is_available() {
    4886              :                 // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
    4887              :                 // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
    4888              :                 // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
    4889              :                 // them from the node.
    4890              :                 tracing::warn!(
    4891              :                     "Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated."
    4892              :                 );
    4893              :                 continue;
    4894              :             }
    4895              : 
    4896              :             // Detach the remote child.  If the pageserver split API call is still in progress, this call will get
    4897              :             // a 503 and retry, up to our limit.
    4898              :             tracing::info!("Detaching {child_id} on {node}...");
    4899              :             match node
    4900              :                 .with_client_retries(
    4901            0 :                     |client| async move {
    4902            0 :                         let config = LocationConfig {
    4903            0 :                             mode: LocationConfigMode::Detached,
    4904            0 :                             generation: None,
    4905            0 :                             secondary_conf: None,
    4906            0 :                             shard_number: child_id.shard_number.0,
    4907            0 :                             shard_count: child_id.shard_count.literal(),
    4908            0 :                             // Stripe size and tenant config don't matter when detaching
    4909            0 :                             shard_stripe_size: 0,
    4910            0 :                             tenant_conf: TenantConfig::default(),
    4911            0 :                         };
    4912            0 : 
    4913            0 :                         client.location_config(child_id, config, None, false).await
    4914            0 :                     },
    4915              :                     &self.http_client,
    4916              :                     &self.config.pageserver_jwt_token,
    4917              :                     1,
    4918              :                     10,
    4919              :                     Duration::from_secs(5),
    4920              :                     &self.reconcilers_cancel,
    4921              :                 )
    4922              :                 .await
    4923              :             {
    4924              :                 Some(Ok(_)) => {}
    4925              :                 Some(Err(e)) => {
    4926              :                     // We failed to communicate with the remote node.  This is problematic: we may be
    4927              :                     // leaving it with a rogue child shard.
    4928              :                     tracing::warn!(
    4929              :                         "Failed to detach child {child_id} from node {node} during abort"
    4930              :                     );
    4931              :                     return Err(e.into());
    4932              :                 }
    4933              :                 None => {
    4934              :                     // Cancellation: we were shutdown or the node went offline. Shutdown is fine, we'll
    4935              :                     // clean up on restart. The node going offline requires a retry.
    4936              :                     return Err(TenantShardSplitAbortError::Unavailable);
    4937              :                 }
    4938              :             };
    4939              :         }
    4940              : 
    4941              :         tracing::info!("Successfully aborted split");
    4942              :         Ok(())
    4943              :     }
    4944              : 
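
// The child-detach calls above retry on 503 while the pageserver's split call is
// still in flight; a generic sketch of that bounded retry, with a hypothetical
// error type in place of the mgmt_api errors:
enum DetachError {
    Retryable, // e.g. 503: the pageserver's split call is still in flight
    Fatal,
}

fn detach_with_retries(
    mut attempt: impl FnMut() -> Result<(), DetachError>,
    max_attempts: u32,
) -> Result<(), DetachError> {
    for _ in 0..max_attempts {
        match attempt() {
            Ok(()) => return Ok(()),
            Err(DetachError::Retryable) => continue,
            Err(DetachError::Fatal) => return Err(DetachError::Fatal),
        }
    }
    // Retries exhausted: surface as retryable so the abort can be re-attempted later.
    Err(DetachError::Retryable)
}
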
    4945              :     /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
    4946              :     /// of the tenant map to reflect the child shards that exist after the split.
    4947            0 :     fn tenant_shard_split_commit_inmem(
    4948            0 :         &self,
    4949            0 :         tenant_id: TenantId,
    4950            0 :         new_shard_count: ShardCount,
    4951            0 :         new_stripe_size: Option<ShardStripeSize>,
    4952            0 :     ) -> (
    4953            0 :         TenantShardSplitResponse,
    4954            0 :         Vec<(TenantShardId, NodeId, ShardStripeSize)>,
    4955            0 :         Vec<ReconcilerWaiter>,
    4956            0 :     ) {
    4957            0 :         let mut response = TenantShardSplitResponse {
    4958            0 :             new_shards: Vec::new(),
    4959            0 :         };
    4960            0 :         let mut child_locations = Vec::new();
    4961            0 :         let mut waiters = Vec::new();
    4962            0 : 
    4963            0 :         {
    4964            0 :             let mut locked = self.inner.write().unwrap();
    4965            0 : 
    4966            0 :             let parent_ids = locked
    4967            0 :                 .tenants
    4968            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4969            0 :                 .map(|(shard_id, _)| *shard_id)
    4970            0 :                 .collect::<Vec<_>>();
    4971            0 : 
    4972            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    4973            0 :             for parent_id in parent_ids {
    4974            0 :                 let child_ids = parent_id.split(new_shard_count);
    4975              : 
    4976            0 :                 let (pageserver, generation, policy, parent_ident, config, preferred_az) = {
    4977            0 :                     let mut old_state = tenants
    4978            0 :                         .remove(&parent_id)
    4979            0 :                         .expect("It was present, we just split it");
    4980            0 : 
    4981            0 :                     // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
    4982            0 :                     // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
    4983            0 :                     // nothing else can clear this.
    4984            0 :                     assert!(matches!(old_state.splitting, SplitState::Splitting));
    4985              : 
    4986            0 :                     let old_attached = old_state.intent.get_attached().unwrap();
    4987            0 :                     old_state.intent.clear(scheduler);
    4988            0 :                     let generation = old_state.generation.expect("Shard must have been attached");
    4989            0 :                     (
    4990            0 :                         old_attached,
    4991            0 :                         generation,
    4992            0 :                         old_state.policy.clone(),
    4993            0 :                         old_state.shard,
    4994            0 :                         old_state.config.clone(),
    4995            0 :                         old_state.preferred_az().cloned(),
    4996            0 :                     )
    4997            0 :                 };
    4998            0 : 
    4999            0 :                 let mut schedule_context = ScheduleContext::default();
    5000            0 :                 for child in child_ids {
    5001            0 :                     let mut child_shard = parent_ident;
    5002            0 :                     child_shard.number = child.shard_number;
    5003            0 :                     child_shard.count = child.shard_count;
    5004            0 :                     if let Some(stripe_size) = new_stripe_size {
    5005            0 :                         child_shard.stripe_size = stripe_size;
    5006            0 :                     }
    5007              : 
    5008            0 :                     let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
    5009            0 :                     child_observed.insert(
    5010            0 :                         pageserver,
    5011            0 :                         ObservedStateLocation {
    5012            0 :                             conf: Some(attached_location_conf(
    5013            0 :                                 generation,
    5014            0 :                                 &child_shard,
    5015            0 :                                 &config,
    5016            0 :                                 &policy,
    5017            0 :                             )),
    5018            0 :                         },
    5019            0 :                     );
    5020            0 : 
    5021            0 :                     let mut child_state =
    5022            0 :                         TenantShard::new(child, child_shard, policy.clone(), preferred_az.clone());
    5023            0 :                     child_state.intent =
    5024            0 :                         IntentState::single(scheduler, Some(pageserver), preferred_az.clone());
    5025            0 :                     child_state.observed = ObservedState {
    5026            0 :                         locations: child_observed,
    5027            0 :                     };
    5028            0 :                     child_state.generation = Some(generation);
    5029            0 :                     child_state.config = config.clone();
    5030            0 : 
    5031            0 :                     // The child's TenantShard::splitting is intentionally left at the default value of Idle,
    5032            0 :                     // as at this point in the split process we have succeeded and this part is infallible:
    5033            0 :                     // we will never need to do any special recovery from this state.
    5034            0 : 
    5035            0 :                     child_locations.push((child, pageserver, child_shard.stripe_size));
    5036              : 
    5037            0 :                     if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
    5038              :                         // This is not fatal, because we've implicitly already got an attached
    5039              :                         // location for the child shard.  Failure here just means we couldn't
    5040              :                         // find a secondary (e.g. because cluster is overloaded).
    5041            0 :                         tracing::warn!("Failed to schedule child shard {child}: {e}");
    5042            0 :                     }
    5043              :                     // In the background, attach secondary locations for the new shards
    5044            0 :                     if let Some(waiter) = self.maybe_reconcile_shard(
    5045            0 :                         &mut child_state,
    5046            0 :                         nodes,
    5047            0 :                         ReconcilerPriority::High,
    5048            0 :                     ) {
    5049            0 :                         waiters.push(waiter);
    5050            0 :                     }
    5051              : 
    5052            0 :                     tenants.insert(child, child_state);
    5053            0 :                     response.new_shards.push(child);
    5054              :                 }
    5055              :             }
    5056            0 :             (response, child_locations, waiters)
    5057            0 :         }
    5058            0 :     }
    5059              : 
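
// A sketch of the child-ID fan-out implied by `parent_id.split(new_shard_count)`,
// assuming a stride-based split in which children keep the parent's number modulo
// the old count: parent shard n of count c maps to every new shard congruent to
// n mod c. Illustrative only; the real mapping lives in TenantShardId::split.
fn split_children(parent_number: u8, old_count: u8, new_count: u8) -> Vec<u8> {
    assert!(old_count > 0 && new_count % old_count == 0 && parent_number < old_count);
    (0..new_count)
        .filter(|n| n % old_count == parent_number)
        .collect()
}
// e.g. split_children(1, 2, 4) == vec![1, 3]
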
    5060            0 :     async fn tenant_shard_split_start_secondaries(
    5061            0 :         &self,
    5062            0 :         tenant_id: TenantId,
    5063            0 :         waiters: Vec<ReconcilerWaiter>,
    5064            0 :     ) {
    5065              :         // Wait for initial reconcile of child shards, this creates the secondary locations
    5066            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    5067              :             // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
    5068              :             // their secondaries couldn't be attached.
    5069            0 :             tracing::warn!("Failed to reconcile after split: {e}");
    5070            0 :             return;
    5071            0 :         }
    5072              : 
    5073              :         // Take the state lock to discover the attached & secondary intents for all shards
    5074            0 :         let (attached, secondary) = {
    5075            0 :             let locked = self.inner.read().unwrap();
    5076            0 :             let mut attached = Vec::new();
    5077            0 :             let mut secondary = Vec::new();
    5078              : 
    5079            0 :             for (tenant_shard_id, shard) in
    5080            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5081              :             {
    5082            0 :                 let Some(node_id) = shard.intent.get_attached() else {
    5083              :                     // Unexpected.  Race with a PlacementPolicy change?
    5084            0 :                     tracing::warn!(
    5085            0 :                         "No attached node on {tenant_shard_id} immediately after shard split!"
    5086              :                     );
    5087            0 :                     continue;
    5088              :                 };
    5089              : 
    5090            0 :                 let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
    5091              :                     // No secondary location.  Nothing for us to do.
    5092            0 :                     continue;
    5093              :                 };
    5094              : 
    5095            0 :                 let attached_node = locked
    5096            0 :                     .nodes
    5097            0 :                     .get(node_id)
    5098            0 :                     .expect("Pageservers may not be deleted while referenced");
    5099            0 : 
    5100            0 :                 let secondary_node = locked
    5101            0 :                     .nodes
    5102            0 :                     .get(secondary_node_id)
    5103            0 :                     .expect("Pageservers may not be deleted while referenced");
    5104            0 : 
    5105            0 :                 attached.push((*tenant_shard_id, attached_node.clone()));
    5106            0 :                 secondary.push((*tenant_shard_id, secondary_node.clone()));
    5107              :             }
    5108            0 :             (attached, secondary)
    5109            0 :         };
    5110            0 : 
    5111            0 :         if secondary.is_empty() {
    5112              :             // No secondary locations; nothing for us to do
    5113            0 :             return;
    5114            0 :         }
    5115              : 
    5116            0 :         for result in self
    5117            0 :             .tenant_for_shards_api(
    5118            0 :                 attached,
    5119            0 :                 |tenant_shard_id, client| async move {
    5120            0 :                     client.tenant_heatmap_upload(tenant_shard_id).await
    5121            0 :                 },
    5122            0 :                 1,
    5123            0 :                 1,
    5124            0 :                 SHORT_RECONCILE_TIMEOUT,
    5125            0 :                 &self.cancel,
    5126            0 :             )
    5127            0 :             .await
    5128              :         {
    5129            0 :             if let Err(e) = result {
    5130            0 :                 tracing::warn!("Error calling heatmap upload after shard split: {e}");
    5131            0 :                 return;
    5132            0 :             }
    5133              :         }
    5134              : 
    5135            0 :         for result in self
    5136            0 :             .tenant_for_shards_api(
    5137            0 :                 secondary,
    5138            0 :                 |tenant_shard_id, client| async move {
    5139            0 :                     client
    5140            0 :                         .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
    5141            0 :                         .await
    5142            0 :                 },
    5143            0 :                 1,
    5144            0 :                 1,
    5145            0 :                 SHORT_RECONCILE_TIMEOUT,
    5146            0 :                 &self.cancel,
    5147            0 :             )
    5148            0 :             .await
    5149              :         {
    5150            0 :             if let Err(e) = result {
    5151            0 :                 tracing::warn!("Error calling secondary download after shard split: {e}");
    5152            0 :                 return;
    5153            0 :             }
    5154              :         }
    5155            0 :     }
    5156              : 
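
// The post-split warm-up above is ordered and best-effort: heatmaps are uploaded
// before secondaries download (the secondary download reads the heatmap), and any
// failure just logs and stops, since this is an optimization rather than a
// correctness requirement. A generic sketch with hypothetical phase closures:
fn run_best_effort_phases(phases: &[(&str, &dyn Fn() -> Result<(), String>)]) {
    for (name, phase) in phases {
        if let Err(e) = phase() {
            // Non-fatal: the split itself has already succeeded.
            eprintln!("post-split warm-up phase '{name}' failed: {e}");
            return; // later phases depend on earlier ones, so stop here
        }
    }
}
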
    5157            0 :     pub(crate) async fn tenant_shard_split(
    5158            0 :         &self,
    5159            0 :         tenant_id: TenantId,
    5160            0 :         split_req: TenantShardSplitRequest,
    5161            0 :     ) -> Result<TenantShardSplitResponse, ApiError> {
    5162              :         // TODO: return 503 if we get stuck waiting for this lock
    5163              :         // (issue https://github.com/neondatabase/neon/issues/7108)
    5164            0 :         let _tenant_lock = trace_exclusive_lock(
    5165            0 :             &self.tenant_op_locks,
    5166            0 :             tenant_id,
    5167            0 :             TenantOperations::ShardSplit,
    5168            0 :         )
    5169            0 :         .await;
    5170              : 
    5171            0 :         let _gate = self
    5172            0 :             .reconcilers_gate
    5173            0 :             .enter()
    5174            0 :             .map_err(|_| ApiError::ShuttingDown)?;
    5175              : 
    5176            0 :         let new_shard_count = ShardCount::new(split_req.new_shard_count);
    5177            0 :         let new_stripe_size = split_req.new_stripe_size;
    5178              : 
    5179              :         // Validate the request and construct parameters.  This phase is fallible, but does not require
    5180              :         // rollback on errors, as it does no I/O and mutates no state.
    5181            0 :         let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
    5182            0 :             ShardSplitAction::NoOp(resp) => return Ok(resp),
    5183            0 :             ShardSplitAction::Split(params) => params,
    5184              :         };
    5185              : 
    5186              :         // Execute this split: this phase mutates state and does remote I/O on pageservers.  If it fails,
    5187              :         // we must roll back.
    5188            0 :         let r = self
    5189            0 :             .do_tenant_shard_split(tenant_id, shard_split_params)
    5190            0 :             .await;
    5191              : 
    5192            0 :         let (response, waiters) = match r {
    5193            0 :             Ok(r) => r,
    5194            0 :             Err(e) => {
    5195            0 :                 // Split might be part-done; we must do work to abort it.
    5196            0 :                 tracing::warn!("Enqueuing background abort of split on {tenant_id}");
    5197            0 :                 self.abort_tx
    5198            0 :                     .send(TenantShardSplitAbort {
    5199            0 :                         tenant_id,
    5200            0 :                         new_shard_count,
    5201            0 :                         new_stripe_size,
    5202            0 :                         _tenant_lock,
    5203            0 :                         _gate,
    5204            0 :                     })
    5205            0 :                     // Ignore send errors: they just mean we're shutting down, and aborts are ephemeral, so it's fine to drop one.
    5206            0 :                     .ok();
    5207            0 :                 return Err(e);
    5208              :             }
    5209              :         };
    5210              : 
    5211              :         // The split is now complete.  As an optimization, we will trigger all the child shards to upload
    5212              :         // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
    5213              :         // for the background heatmap/download interval before secondaries get warm enough to migrate shards
    5214              :         // in [`Self::optimize_all`]
    5215            0 :         self.tenant_shard_split_start_secondaries(tenant_id, waiters)
    5216            0 :             .await;
    5217            0 :         Ok(response)
    5218            0 :     }
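
tenant_shard_split above follows a validate-then-execute shape: a fallible prepare phase with no side effects, then a mutating phase whose partial failures must be compensated. A minimal sketch of the pattern with hypothetical types; the real code compensates by enqueuing a TenantShardSplitAbort rather than rolling back inline:

    // Outcome of the prepare phase: either the request is already satisfied,
    // or we hold validated parameters for the mutating phase.
    enum Prepared<Resp, Params> {
        NoOp(Resp),
        Execute(Params),
    }

    fn two_phase<Resp, Params>(
        prepare: impl FnOnce() -> Result<Prepared<Resp, Params>, String>,
        execute: impl FnOnce(Params) -> Result<Resp, String>,
        compensate: impl FnOnce(),
    ) -> Result<Resp, String> {
        match prepare()? {
            // Nothing to do, e.g. a retry that finds the split already done.
            Prepared::NoOp(resp) => Ok(resp),
            // The mutating phase may fail part-way; schedule compensation.
            Prepared::Execute(params) => execute(params).inspect_err(|_| compensate()),
        }
    }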
    5219              : 
    5220            0 :     fn prepare_tenant_shard_split(
    5221            0 :         &self,
    5222            0 :         tenant_id: TenantId,
    5223            0 :         split_req: TenantShardSplitRequest,
    5224            0 :     ) -> Result<ShardSplitAction, ApiError> {
    5225            0 :         fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
    5226            0 :             anyhow::anyhow!("failpoint")
    5227            0 :         )));
    5228              : 
    5229            0 :         let mut policy = None;
    5230            0 :         let mut config = None;
    5231            0 :         let mut shard_ident = None;
    5232            0 :         let mut preferred_az_id = None;
    5233              :         // Validate input, and calculate which shards we will create
    5234            0 :         let (old_shard_count, targets) =
    5235              :             {
    5236            0 :                 let locked = self.inner.read().unwrap();
    5237            0 : 
    5238            0 :                 let pageservers = locked.nodes.clone();
    5239            0 : 
    5240            0 :                 let mut targets = Vec::new();
    5241            0 : 
    5242            0 :                 // In case this is a retry, count how many already-split shards we found
    5243            0 :                 let mut children_found = Vec::new();
    5244            0 :                 let mut old_shard_count = None;
    5245              : 
    5246            0 :                 for (tenant_shard_id, shard) in
    5247            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5248              :                 {
    5249            0 :                     match shard.shard.count.count().cmp(&split_req.new_shard_count) {
    5250              :                         Ordering::Equal => {
    5251              :                             // Already split this shard
    5252            0 :                             children_found.push(*tenant_shard_id);
    5253            0 :                             continue;
    5254              :                         }
    5255              :                         Ordering::Greater => {
    5256            0 :                             return Err(ApiError::BadRequest(anyhow::anyhow!(
    5257            0 :                                 "Requested count {} but already have shards at count {}",
    5258            0 :                                 split_req.new_shard_count,
    5259            0 :                                 shard.shard.count.count()
    5260            0 :                             )));
    5261              :                         }
    5262            0 :                         Ordering::Less => {
    5263            0 :                             // Fall through: this shard has a lower count than requested,
    5264            0 :                             // so it is a candidate for splitting.
    5265            0 :                         }
    5266            0 :                     }
    5267            0 : 
    5268            0 :                     match old_shard_count {
    5269            0 :                         None => old_shard_count = Some(shard.shard.count),
    5270            0 :                         Some(old_shard_count) => {
    5271            0 :                             if old_shard_count != shard.shard.count {
    5272              :                                 // We may hit this case if a caller asked for two splits to
    5273              :                                 // different sizes, before the first one is complete.
    5274              :                                 // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
    5275              :                                 // of shard_count=1 and shard_count=2 shards in the map.
    5276            0 :                                 return Err(ApiError::Conflict(
    5277            0 :                                     "Cannot split, currently mid-split".to_string(),
    5278            0 :                                 ));
    5279            0 :                             }
    5280              :                         }
    5281              :                     }
    5282            0 :                     if policy.is_none() {
    5283            0 :                         policy = Some(shard.policy.clone());
    5284            0 :                     }
    5285            0 :                     if shard_ident.is_none() {
    5286            0 :                         shard_ident = Some(shard.shard);
    5287            0 :                     }
    5288            0 :                     if config.is_none() {
    5289            0 :                         config = Some(shard.config.clone());
    5290            0 :                     }
    5291            0 :                     if preferred_az_id.is_none() {
    5292            0 :                         preferred_az_id = shard.preferred_az().cloned();
    5293            0 :                     }
    5294              : 
    5295            0 :                     if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
    5296            0 :                         tracing::info!(
    5297            0 :                             "Tenant shard {} already has shard count {}",
    5298              :                             tenant_shard_id,
    5299              :                             split_req.new_shard_count
    5300              :                         );
    5301            0 :                         continue;
    5302            0 :                     }
    5303              : 
    5304            0 :                     let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
    5305            0 :                         anyhow::anyhow!("Cannot split a tenant that is not attached"),
    5306            0 :                     ))?;
    5307              : 
    5308            0 :                     let node = pageservers
    5309            0 :                         .get(&node_id)
    5310            0 :                         .expect("Pageservers may not be deleted while referenced");
    5311            0 : 
    5312            0 :                     targets.push(ShardSplitTarget {
    5313            0 :                         parent_id: *tenant_shard_id,
    5314            0 :                         node: node.clone(),
    5315            0 :                         child_ids: tenant_shard_id
    5316            0 :                             .split(ShardCount::new(split_req.new_shard_count)),
    5317            0 :                     });
    5318              :                 }
    5319              : 
    5320            0 :                 if targets.is_empty() {
    5321            0 :                     if children_found.len() == split_req.new_shard_count as usize {
    5322            0 :                         return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
    5323            0 :                             new_shards: children_found,
    5324            0 :                         }));
    5325              :                     } else {
    5326              :                         // No shards found to split, and no existing children found: the
    5327              :                         // tenant doesn't exist at all.
    5328            0 :                         return Err(ApiError::NotFound(
    5329            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    5330            0 :                         ));
    5331              :                     }
    5332            0 :                 }
    5333            0 : 
    5334            0 :                 (old_shard_count, targets)
    5335            0 :             };
    5336            0 : 
    5337            0 :         // unwrap safety: we would have returned above if we didn't find at least one shard to split
    5338            0 :         let old_shard_count = old_shard_count.unwrap();
    5339            0 :         let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
    5340              :             // This ShardIdentity will be used as the template for all children, so this implicitly
    5341              :             // applies the new stripe size to the children.
    5342            0 :             let mut shard_ident = shard_ident.unwrap();
    5343            0 :             if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
    5344            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5345            0 :                     "Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards",
    5346            0 :                     shard_ident.stripe_size
    5347            0 :                 )));
    5348            0 :             }
    5349            0 : 
    5350            0 :             shard_ident.stripe_size = new_stripe_size;
    5351            0 :             tracing::info!("Applied stripe size {}", shard_ident.stripe_size.0);
    5352            0 :             shard_ident
    5353              :         } else {
    5354            0 :             shard_ident.unwrap()
    5355              :         };
    5356            0 :         let policy = policy.unwrap();
    5357            0 :         let config = config.unwrap();
    5358            0 : 
    5359            0 :         Ok(ShardSplitAction::Split(Box::new(ShardSplitParams {
    5360            0 :             old_shard_count,
    5361            0 :             new_shard_count: ShardCount::new(split_req.new_shard_count),
    5362            0 :             new_stripe_size: split_req.new_stripe_size,
    5363            0 :             targets,
    5364            0 :             policy,
    5365            0 :             config,
    5366            0 :             shard_ident,
    5367            0 :             preferred_az_id,
    5368            0 :         })))
    5369            0 :     }
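
For intuition about the child_ids computed above: keys map to shards by stripe index modulo the shard count, so when the new count is a multiple of the old one, a parent with shard number n owns exactly the child numbers congruent to n modulo the old count. An illustrative sketch of that mapping (not the real TenantShardId::split signature):

    // Which child shard numbers does a parent own after a split?
    fn child_shard_numbers(parent: u8, old: u8, new: u8) -> Vec<u8> {
        assert!(old > 0 && new % old == 0, "new count must be a multiple of old");
        (0..new).filter(|c| c % old == parent).collect()
    }

    fn main() {
        // Splitting shard 1 of 2 into 4 shards yields children 1 and 3.
        assert_eq!(child_shard_numbers(1, 2, 4), vec![1, 3]);
    }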
    5370              : 
    5371            0 :     async fn do_tenant_shard_split(
    5372            0 :         &self,
    5373            0 :         tenant_id: TenantId,
    5374            0 :         params: Box<ShardSplitParams>,
    5375            0 :     ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
    5376            0 :         // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
    5377            0 :         // request could occur here, deleting or mutating the tenant.  begin_shard_split checks that the
    5378            0 :         // parent shards exist as expected, but it would be neater to do the above pre-checks within the
    5379            0 :         // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
    5380            0 :         // (https://github.com/neondatabase/neon/issues/6676)
    5381            0 : 
    5382            0 :         let ShardSplitParams {
    5383            0 :             old_shard_count,
    5384            0 :             new_shard_count,
    5385            0 :             new_stripe_size,
    5386            0 :             mut targets,
    5387            0 :             policy,
    5388            0 :             config,
    5389            0 :             shard_ident,
    5390            0 :             preferred_az_id,
    5391            0 :         } = *params;
    5392              : 
    5393              :         // Drop any secondary locations: pageservers do not support splitting these, and in any case the
    5394              :         // end-state for a split tenant will usually be to have secondary locations on different nodes.
    5395              :         // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
    5396              :         // at the time of split.
    5397            0 :         let waiters = {
    5398            0 :             let mut locked = self.inner.write().unwrap();
    5399            0 :             let mut waiters = Vec::new();
    5400            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5401            0 :             for target in &mut targets {
    5402            0 :                 let Some(shard) = tenants.get_mut(&target.parent_id) else {
    5403              :                     // Paranoia check: this shouldn't happen, since we hold the oplock for this tenant ID.
    5404            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5405            0 :                         "Shard {} not found",
    5406            0 :                         target.parent_id
    5407            0 :                     )));
    5408              :                 };
    5409              : 
    5410            0 :                 if shard.intent.get_attached() != &Some(target.node.get_id()) {
    5411              :                     // Paranoia check: this shouldn't happen, since we hold the oplock for this tenant ID.
    5412            0 :                     return Err(ApiError::Conflict(format!(
    5413            0 :                         "Shard {} unexpectedly rescheduled during split",
    5414            0 :                         target.parent_id
    5415            0 :                     )));
    5416            0 :                 }
    5417            0 : 
    5418            0 :                 // Irrespective of PlacementPolicy, clear secondary locations from intent
    5419            0 :                 shard.intent.clear_secondary(scheduler);
    5420              : 
    5421              :                 // Run Reconciler to execute detach of secondary locations.
    5422            0 :                 if let Some(waiter) =
    5423            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    5424            0 :                 {
    5425            0 :                     waiters.push(waiter);
    5426            0 :                 }
    5427              :             }
    5428            0 :             waiters
    5429            0 :         };
    5430            0 :         self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
    5431              : 
    5432              :         // Before creating any new child shards in memory or on the pageservers, persist them: this
    5433              :         // ensures that we can always clean up if something goes wrong.  This also
    5434              :         // acts as the protection against two concurrent attempts to split: one of them will get a database
    5435              :         // error trying to insert the child shards.
    5436            0 :         let mut child_tsps = Vec::new();
    5437            0 :         for target in &targets {
    5438            0 :             let mut this_child_tsps = Vec::new();
    5439            0 :             for child in &target.child_ids {
    5440            0 :                 let mut child_shard = shard_ident;
    5441            0 :                 child_shard.number = child.shard_number;
    5442            0 :                 child_shard.count = child.shard_count;
    5443            0 : 
    5444            0 :                 tracing::info!(
    5445            0 :                     "Create child shard persistence with stripe size {}",
    5446              :                     shard_ident.stripe_size.0
    5447              :                 );
    5448              : 
    5449            0 :                 this_child_tsps.push(TenantShardPersistence {
    5450            0 :                     tenant_id: child.tenant_id.to_string(),
    5451            0 :                     shard_number: child.shard_number.0 as i32,
    5452            0 :                     shard_count: child.shard_count.literal() as i32,
    5453            0 :                     shard_stripe_size: shard_ident.stripe_size.0 as i32,
    5454            0 :                     // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
    5455            0 :                     // populate the correct generation as part of its transaction, to protect us
    5456            0 :                     // against racing with changes in the state of the parent.
    5457            0 :                     generation: None,
    5458            0 :                     generation_pageserver: Some(target.node.get_id().0 as i64),
    5459            0 :                     placement_policy: serde_json::to_string(&policy).unwrap(),
    5460            0 :                     config: serde_json::to_string(&config).unwrap(),
    5461            0 :                     splitting: SplitState::Splitting,
    5462            0 : 
    5463            0 :                     // Scheduling policies and preferred AZ do not carry through to children
    5464            0 :                     scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    5465            0 :                         .unwrap(),
    5466            0 :                     preferred_az_id: preferred_az_id.as_ref().map(|az| az.0.clone()),
    5467            0 :                 });
    5468            0 :             }
    5469              : 
    5470            0 :             child_tsps.push((target.parent_id, this_child_tsps));
    5471              :         }
    5472              : 
    5473            0 :         if let Err(e) = self
    5474            0 :             .persistence
    5475            0 :             .begin_shard_split(old_shard_count, tenant_id, child_tsps)
    5476            0 :             .await
    5477              :         {
    5478            0 :             match e {
    5479              :                 DatabaseError::Query(diesel::result::Error::DatabaseError(
    5480              :                     DatabaseErrorKind::UniqueViolation,
    5481              :                     _,
    5482              :                 )) => {
    5483              :                     // Inserting a child shard violated a unique constraint: we raced with another call to
    5484              :                     // this function
    5485            0 :                     tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
    5486            0 :                     return Err(ApiError::Conflict("Tenant is already splitting".into()));
    5487              :                 }
    5488            0 :                 _ => return Err(ApiError::InternalServerError(e.into())),
    5489              :             }
    5490            0 :         }
    5491            0 :         fail::fail_point!("shard-split-post-begin", |_| Err(
    5492            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    5493            0 :         ));
    5494              : 
    5495              :         // Now that I have persisted the splitting state, apply it in-memory.  This is infallible, so
    5496              :         // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
    5497              :         // is not set in memory, then it was not persisted.
    5498              :         {
    5499            0 :             let mut locked = self.inner.write().unwrap();
    5500            0 :             for target in &targets {
    5501            0 :                 if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
    5502            0 :                     parent_shard.splitting = SplitState::Splitting;
    5503            0 :                     // Put the observed state to None, to reflect that it is indeterminate once we start the
    5504            0 :                     // split operation.
    5505            0 :                     parent_shard
    5506            0 :                         .observed
    5507            0 :                         .locations
    5508            0 :                         .insert(target.node.get_id(), ObservedStateLocation { conf: None });
    5509            0 :                 }
    5510              :             }
    5511              :         }
    5512              : 
    5513              :         // TODO: issue split calls concurrently (this only matters once we're splitting
    5514              :         // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
    5515              : 
    5516            0 :         for target in &targets {
    5517              :             let ShardSplitTarget {
    5518            0 :                 parent_id,
    5519            0 :                 node,
    5520            0 :                 child_ids,
    5521            0 :             } = target;
    5522            0 :             let client = PageserverClient::new(
    5523            0 :                 node.get_id(),
    5524            0 :                 self.http_client.clone(),
    5525            0 :                 node.base_url(),
    5526            0 :                 self.config.pageserver_jwt_token.as_deref(),
    5527            0 :             );
    5528            0 :             let response = client
    5529            0 :                 .tenant_shard_split(
    5530            0 :                     *parent_id,
    5531            0 :                     TenantShardSplitRequest {
    5532            0 :                         new_shard_count: new_shard_count.literal(),
    5533            0 :                         new_stripe_size,
    5534            0 :                     },
    5535            0 :                 )
    5536            0 :                 .await
    5537            0 :                 .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
    5538              : 
    5539            0 :             fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
    5540            0 :                 "failpoint".to_string()
    5541            0 :             )));
    5542              : 
    5543            0 :             failpoint_support::sleep_millis_async!(
    5544              :                 "shard-split-post-remote-sleep",
    5545            0 :                 &self.reconcilers_cancel
    5546              :             );
    5547              : 
    5548            0 :             tracing::info!(
    5549            0 :                 "Split {} into {}",
    5550            0 :                 parent_id,
    5551            0 :                 response
    5552            0 :                     .new_shards
    5553            0 :                     .iter()
    5554            0 :                     .map(|s| format!("{:?}", s))
    5555            0 :                     .collect::<Vec<_>>()
    5556            0 :                     .join(",")
    5557              :             );
    5558              : 
    5559            0 :             if &response.new_shards != child_ids {
    5560              :                 // This should never happen: the pageserver should agree with us on how shard splits work.
    5561            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5562            0 :                     "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
    5563            0 :                     parent_id,
    5564            0 :                     response.new_shards,
    5565            0 :                     child_ids
    5566            0 :                 )));
    5567            0 :             }
    5568              :         }
    5569              : 
    5570            0 :         pausable_failpoint!("shard-split-pre-complete");
    5571              : 
    5572              :         // TODO: if the pageserver restarted concurrently with our split API call,
    5573              :         // the actual generation of the child shard might differ from the generation
    5574              :         // we expect it to have.  In order for our in-database generation to end up
    5575              :         // correct, we should carry the child generation back in the response and apply it here
    5576              :         // in complete_shard_split (and apply the correct generation in memory)
    5577              :         // (or, we can carry generation in the request and reject the request if
    5578              :         //  it doesn't match, but that requires more retry logic on this side)
    5579              : 
    5580            0 :         self.persistence
    5581            0 :             .complete_shard_split(tenant_id, old_shard_count, new_shard_count)
    5582            0 :             .await?;
    5583              : 
    5584            0 :         fail::fail_point!("shard-split-post-complete", |_| Err(
    5585            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    5586            0 :         ));
    5587              : 
    5588              :         // Replace all the shards we just split with their children: this phase is infallible.
    5589            0 :         let (response, child_locations, waiters) =
    5590            0 :             self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
    5591            0 : 
    5592            0 :         // Send compute notifications for all the new shards
    5593            0 :         let mut failed_notifications = Vec::new();
    5594            0 :         for (child_id, child_ps, stripe_size) in child_locations {
    5595            0 :             if let Err(e) = self
    5596            0 :                 .compute_hook
    5597            0 :                 .notify(
    5598            0 :                     compute_hook::ShardUpdate {
    5599            0 :                         tenant_shard_id: child_id,
    5600            0 :                         node_id: child_ps,
    5601            0 :                         stripe_size,
    5602            0 :                         preferred_az: preferred_az_id.as_ref().map(Cow::Borrowed),
    5603            0 :                     },
    5604            0 :                     &self.reconcilers_cancel,
    5605            0 :                 )
    5606            0 :                 .await
    5607              :             {
    5608            0 :                 tracing::warn!(
    5609            0 :                     "Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
    5610              :                     child_id,
    5611              :                     child_ps
    5612              :                 );
    5613            0 :                 failed_notifications.push(child_id);
    5614            0 :             }
    5615              :         }
    5616              : 
    5617              :         // If we failed any compute notifications, make a note to retry later.
    5618            0 :         if !failed_notifications.is_empty() {
    5619            0 :             let mut locked = self.inner.write().unwrap();
    5620            0 :             for failed in failed_notifications {
    5621            0 :                 if let Some(shard) = locked.tenants.get_mut(&failed) {
    5622            0 :                     shard.pending_compute_notification = true;
    5623            0 :                 }
    5624              :             }
    5625            0 :         }
    5626              : 
    5627            0 :         Ok((response, waiters))
    5628            0 :     }
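
The begin_shard_split error handling above uses a database unique constraint as the mutual-exclusion mechanism between concurrent split attempts: whichever caller inserts the child rows second gets a conflict rather than corrupting state. A sketch of that idea with a hypothetical error type (the real code matches diesel's DatabaseErrorKind::UniqueViolation):

    // Hypothetical stand-in for the persistence layer's error type.
    enum DbError {
        UniqueViolation,
        Other(String),
    }

    // Map "someone already inserted these child rows" to a retryable conflict,
    // and anything else to an internal error.
    fn begin_split(insert_child_rows: impl FnOnce() -> Result<(), DbError>) -> Result<(), String> {
        match insert_child_rows() {
            Ok(()) => Ok(()),
            Err(DbError::UniqueViolation) => Err("conflict: tenant is already splitting".into()),
            Err(DbError::Other(e)) => Err(format!("internal error: {e}")),
        }
    }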
    5629              : 
    5630              :     /// A graceful migration: update the preferred node and let optimisation handle the migration
    5631              :     /// in the background (may take a long time as it will fully warm up a location before cutting over)
    5632              :     ///
    5633              :     /// Our external API calls this a 'prewarm=true' migration, but internally it isn't a special prewarm step: it's
    5634              :     /// just a migration that uses the same graceful procedure as our background scheduling optimisations would use.
    5635            0 :     fn tenant_shard_migrate_with_prewarm(
    5636            0 :         &self,
    5637            0 :         migrate_req: &TenantShardMigrateRequest,
    5638            0 :         shard: &mut TenantShard,
    5639            0 :         scheduler: &mut Scheduler,
    5640            0 :         schedule_context: ScheduleContext,
    5641            0 :     ) -> Result<Option<ScheduleOptimization>, ApiError> {
    5642            0 :         shard.set_preferred_node(Some(migrate_req.node_id));
    5643            0 : 
    5644            0 :         // Generate whatever the initial change to the intent is: this could be creation of a secondary, or
    5645            0 :         // cutting over to an existing secondary.  Caller is responsible for validating this before applying it,
    5646            0 :         // e.g. by checking secondary is warm enough.
    5647            0 :         Ok(shard.optimize_attachment(scheduler, &schedule_context))
    5648            0 :     }
    5649              : 
    5650              :     /// Immediate migration: directly update the intent state and kick off a reconciler
    5651            0 :     fn tenant_shard_migrate_immediate(
    5652            0 :         &self,
    5653            0 :         migrate_req: &TenantShardMigrateRequest,
    5654            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    5655            0 :         shard: &mut TenantShard,
    5656            0 :         scheduler: &mut Scheduler,
    5657            0 :     ) -> Result<Option<ReconcilerWaiter>, ApiError> {
    5658            0 :         // Non-graceful migration: update the intent state immediately
    5659            0 :         let old_attached = *shard.intent.get_attached();
    5660            0 :         match shard.policy {
    5661            0 :             PlacementPolicy::Attached(n) => {
    5662            0 :                 // If our new attached node was a secondary, it no longer should be.
    5663            0 :                 shard
    5664            0 :                     .intent
    5665            0 :                     .remove_secondary(scheduler, migrate_req.node_id);
    5666            0 : 
    5667            0 :                 shard
    5668            0 :                     .intent
    5669            0 :                     .set_attached(scheduler, Some(migrate_req.node_id));
    5670              : 
    5671              :                 // If we were already attached to something, demote that to a secondary
    5672            0 :                 if let Some(old_attached) = old_attached {
    5673            0 :                     if n > 0 {
    5674              :                         // Remove other secondaries to make room for the location we'll demote
    5675            0 :                         while shard.intent.get_secondary().len() >= n {
    5676            0 :                             shard.intent.pop_secondary(scheduler);
    5677            0 :                         }
    5678              : 
    5679            0 :                         shard.intent.push_secondary(scheduler, old_attached);
    5680            0 :                     }
    5681            0 :                 }
    5682              :             }
    5683            0 :             PlacementPolicy::Secondary => {
    5684            0 :                 shard.intent.clear(scheduler);
    5685            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    5686            0 :             }
    5687              :             PlacementPolicy::Detached => {
    5688            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5689            0 :                     "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
    5690            0 :                 )));
    5691              :             }
    5692              :         }
    5693              : 
    5694            0 :         tracing::info!("Migrating: new intent {:?}", shard.intent);
    5695            0 :         shard.sequence = shard.sequence.next();
    5696            0 :         shard.set_preferred_node(None); // Abort any in-flight graceful migration
    5697            0 :         Ok(self.maybe_configured_reconcile_shard(
    5698            0 :             shard,
    5699            0 :             nodes,
    5700            0 :             (&migrate_req.migration_config).into(),
    5701            0 :         ))
    5702            0 :     }
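
The intent rewrite above can be summarised as: attach to the target, make sure the target is no longer listed as a secondary, and demote the previous attachment to a secondary if the policy keeps any. A sketch with a hypothetical intent type:

    struct Intent {
        attached: Option<u64>,
        secondary: Vec<u64>,
    }

    fn migrate_immediate(intent: &mut Intent, target: u64, max_secondaries: usize) {
        let old = intent.attached.replace(target);
        intent.secondary.retain(|&s| s != target); // target is no longer a secondary
        if let (Some(old), true) = (old, max_secondaries > 0) {
            // Make room, then demote the old attached location to a secondary.
            while intent.secondary.len() >= max_secondaries {
                intent.secondary.pop();
            }
            intent.secondary.push(old);
        }
    }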
    5703              : 
    5704            0 :     pub(crate) async fn tenant_shard_migrate(
    5705            0 :         &self,
    5706            0 :         tenant_shard_id: TenantShardId,
    5707            0 :         migrate_req: TenantShardMigrateRequest,
    5708            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    5709              :         // Depending on whether the migration actually changes the intent, and whether it's graceful or
    5710              :         // immediate, we might get a different outcome to handle
    5711              :         enum MigrationOutcome {
    5712              :             Optimization(Option<ScheduleOptimization>),
    5713              :             Reconcile(Option<ReconcilerWaiter>),
    5714              :         }
    5715              : 
    5716            0 :         let outcome = {
    5717            0 :             let mut locked = self.inner.write().unwrap();
    5718            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5719              : 
    5720            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    5721            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5722            0 :                     "Node {} not found",
    5723            0 :                     migrate_req.node_id
    5724            0 :                 )));
    5725              :             };
    5726              : 
    5727              :             // Migration to an unavailable node requires the force flag
    5728            0 :             if !node.is_available() {
    5729            0 :                 if migrate_req.migration_config.override_scheduler {
    5730              :                     // Warn but proceed: the caller may intend to manually adjust the placement of
    5731              :                     // a shard even if the node is down, e.g. if intervening during an incident.
    5732            0 :                     tracing::warn!("Forcibly migrating to unavailable node {node}");
    5733              :                 } else {
    5734            0 :                     tracing::warn!("Node {node} is unavailable, refusing migration");
    5735            0 :                     return Err(ApiError::PreconditionFailed(
    5736            0 :                         format!("Node {node} is unavailable").into_boxed_str(),
    5737            0 :                     ));
    5738              :                 }
    5739            0 :             }
    5740              : 
    5741              :             // Calculate the ScheduleContext for this tenant
    5742            0 :             let mut schedule_context = ScheduleContext::default();
    5743            0 :             for (_shard_id, shard) in
    5744            0 :                 tenants.range(TenantShardId::tenant_range(tenant_shard_id.tenant_id))
    5745            0 :             {
    5746            0 :                 schedule_context.avoid(&shard.intent.all_pageservers());
    5747            0 :             }
    5748              : 
    5749              :             // Look up the specific shard we will migrate
    5750            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    5751            0 :                 return Err(ApiError::NotFound(
    5752            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    5753            0 :                 ));
    5754              :             };
    5755              : 
    5756              :             // Migration to a node with unfavorable scheduling score requires a force flag, because it might just
    5757              :             // be migrated back by the optimiser.
    5758            0 :             if let Some(better_node) = shard.find_better_location::<AttachedShardTag>(
    5759            0 :                 scheduler,
    5760            0 :                 &schedule_context,
    5761            0 :                 migrate_req.node_id,
    5762            0 :                 &[],
    5763            0 :             ) {
    5764            0 :                 if !migrate_req.migration_config.override_scheduler {
    5765            0 :                     return Err(ApiError::PreconditionFailed(
    5766            0 :                         "Migration to a worse-scoring node".into(),
    5767            0 :                     ));
    5768              :                 } else {
    5769            0 :                     tracing::info!(
    5770            0 :                         "Migrating to a worse-scoring node {} (optimiser would prefer {better_node})",
    5771              :                         migrate_req.node_id
    5772              :                     );
    5773              :                 }
    5774            0 :             }
    5775              : 
    5776            0 :             if let Some(origin_node_id) = migrate_req.origin_node_id {
    5777            0 :                 if shard.intent.get_attached() != &Some(origin_node_id) {
    5778            0 :                     return Err(ApiError::PreconditionFailed(
    5779            0 :                         format!(
    5780            0 :                             "Migration expected to originate from {} but shard is on {:?}",
    5781            0 :                             origin_node_id,
    5782            0 :                             shard.intent.get_attached()
    5783            0 :                         )
    5784            0 :                         .into(),
    5785            0 :                     ));
    5786            0 :                 }
    5787            0 :             }
    5788              : 
    5789            0 :             if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    5790              :                 // No-op case: we will still proceed to wait for reconciliation in case it is
    5791              :                 // incomplete from an earlier update to the intent.
    5792            0 :                 tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
    5793              : 
    5794              :                 // An instruction to migrate to the currently attached node should
    5795              :                 // cancel any pending graceful migration
    5796            0 :                 shard.set_preferred_node(None);
    5797            0 : 
    5798            0 :                 MigrationOutcome::Reconcile(self.maybe_configured_reconcile_shard(
    5799            0 :                     shard,
    5800            0 :                     nodes,
    5801            0 :                     (&migrate_req.migration_config).into(),
    5802            0 :                 ))
    5803            0 :             } else if migrate_req.migration_config.prewarm {
    5804            0 :                 MigrationOutcome::Optimization(self.tenant_shard_migrate_with_prewarm(
    5805            0 :                     &migrate_req,
    5806            0 :                     shard,
    5807            0 :                     scheduler,
    5808            0 :                     schedule_context,
    5809            0 :                 )?)
    5810              :             } else {
    5811            0 :                 MigrationOutcome::Reconcile(self.tenant_shard_migrate_immediate(
    5812            0 :                     &migrate_req,
    5813            0 :                     nodes,
    5814            0 :                     shard,
    5815            0 :                     scheduler,
    5816            0 :                 )?)
    5817              :             }
    5818              :         };
    5819              : 
    5820              :         // We may need to validate + apply an optimisation, or we may need to just retrieve a reconcile waiter
    5821            0 :         let waiter = match outcome {
    5822            0 :             MigrationOutcome::Optimization(Some(optimization)) => {
    5823              :                 // Validate and apply the optimization -- this would happen anyway in the background reconcile loop, but
    5824              :                 // we might as well do it more promptly as this is a direct external request.
    5825            0 :                 let mut validated = self
    5826            0 :                     .optimize_all_validate(vec![(tenant_shard_id, optimization)])
    5827            0 :                     .await;
    5828            0 :                 if let Some((_shard_id, optimization)) = validated.pop() {
    5829            0 :                     let mut locked = self.inner.write().unwrap();
    5830            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    5831            0 :                     let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    5832              :                         // Rare but possible: tenant is removed between generating optimisation and validating it.
    5833            0 :                         return Err(ApiError::NotFound(
    5834            0 :                             anyhow::anyhow!("Tenant shard not found").into(),
    5835            0 :                         ));
    5836              :                     };
    5837              : 
    5838            0 :                     if !shard.apply_optimization(scheduler, optimization) {
    5839              :                         // This can happen but is unusual enough to warn on: something else changed in the shard that made the optimisation stale
    5840              :                         // and therefore not applied.
    5841            0 :                         tracing::warn!(
    5842            0 :                             "Schedule optimisation generated during graceful migration was not applied, shard changed?"
    5843              :                         );
    5844            0 :                     }
    5845            0 :                     self.maybe_configured_reconcile_shard(
    5846            0 :                         shard,
    5847            0 :                         nodes,
    5848            0 :                         (&migrate_req.migration_config).into(),
    5849            0 :                     )
    5850              :                 } else {
    5851            0 :                     None
    5852              :                 }
    5853              :             }
    5854            0 :             MigrationOutcome::Optimization(None) => None,
    5855            0 :             MigrationOutcome::Reconcile(waiter) => waiter,
    5856              :         };
    5857              : 
    5858              :         // Finally, wait for any reconcile we started to complete.  In the case of immediate-mode migrations to cold
    5859              :         // locations, this has a good chance of timing out.
    5860            0 :         if let Some(waiter) = waiter {
    5861            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    5862              :         } else {
    5863            0 :             tracing::info!("Migration is a no-op");
    5864              :         }
    5865              : 
    5866            0 :         Ok(TenantShardMigrateResponse {})
    5867            0 :     }
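
Condensing the branching above: a migrate request resolves to one of three behaviours, depending on whether the shard is already attached to the target and on the prewarm flag. A toy summary for orientation (hypothetical names):

    enum MigrateMode {
        ReconcileOnly, // already attached to the target: just flush stale state
        Graceful,      // prewarm: set the preferred node, let the optimiser cut over
        Immediate,     // rewrite the intent now and kick off a reconciler
    }

    fn choose_mode(already_attached_to_target: bool, prewarm: bool) -> MigrateMode {
        if already_attached_to_target {
            MigrateMode::ReconcileOnly
        } else if prewarm {
            MigrateMode::Graceful
        } else {
            MigrateMode::Immediate
        }
    }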
    5868              : 
    5869            0 :     pub(crate) async fn tenant_shard_migrate_secondary(
    5870            0 :         &self,
    5871            0 :         tenant_shard_id: TenantShardId,
    5872            0 :         migrate_req: TenantShardMigrateRequest,
    5873            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    5874            0 :         let waiter = {
    5875            0 :             let mut locked = self.inner.write().unwrap();
    5876            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5877              : 
    5878            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    5879            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5880            0 :                     "Node {} not found",
    5881            0 :                     migrate_req.node_id
    5882            0 :                 )));
    5883              :             };
    5884              : 
    5885            0 :             if !node.is_available() {
    5886              :                 // Warn but proceed: the caller may intend to manually adjust the placement of
    5887              :                 // a shard even if the node is down, e.g. if intervening during an incident.
    5888            0 :                 tracing::warn!("Migrating to unavailable node {node}");
    5889            0 :             }
    5890              : 
    5891            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    5892            0 :                 return Err(ApiError::NotFound(
    5893            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    5894            0 :                 ));
    5895              :             };
    5896              : 
    5897            0 :             if shard.intent.get_secondary().len() == 1
    5898            0 :                 && shard.intent.get_secondary()[0] == migrate_req.node_id
    5899              :             {
    5900            0 :                 tracing::info!(
    5901            0 :                     "Migrating secondary to {node}: intent is unchanged {:?}",
    5902              :                     shard.intent
    5903              :                 );
    5904            0 :             } else if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    5905            0 :                 tracing::info!(
    5906            0 :                     "Migrating secondary to {node}: already attached where we were asked to create a secondary"
    5907              :                 );
    5908              :             } else {
    5909            0 :                 let old_secondaries = shard.intent.get_secondary().clone();
    5910            0 :                 for secondary in old_secondaries {
    5911            0 :                     shard.intent.remove_secondary(scheduler, secondary);
    5912            0 :                 }
    5913              : 
    5914            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    5915            0 :                 shard.sequence = shard.sequence.next();
    5916            0 :                 tracing::info!(
    5917            0 :                     "Migrating secondary to {node}: new intent {:?}",
    5918              :                     shard.intent
    5919              :                 );
    5920              :             }
    5921              : 
    5922            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    5923              :         };
    5924              : 
    5925            0 :         if let Some(waiter) = waiter {
    5926            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    5927              :         } else {
    5928            0 :             tracing::info!("Migration is a no-op");
    5929              :         }
    5930              : 
    5931            0 :         Ok(TenantShardMigrateResponse {})
    5932            0 :     }
    5933              : 
    5934              :     /// 'cancel' in this context means cancel any ongoing reconcile
    5935            0 :     pub(crate) async fn tenant_shard_cancel_reconcile(
    5936            0 :         &self,
    5937            0 :         tenant_shard_id: TenantShardId,
    5938            0 :     ) -> Result<(), ApiError> {
    5939              :         // Take the state lock and fire the cancellation token, after which we drop the lock and wait for any ongoing reconcile to complete
    5940            0 :         let waiter = {
    5941            0 :             let locked = self.inner.write().unwrap();
    5942            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    5943            0 :                 return Err(ApiError::NotFound(
    5944            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    5945            0 :                 ));
    5946              :             };
    5947              : 
    5948            0 :             let waiter = shard.get_waiter();
    5949            0 :             match waiter {
    5950              :                 None => {
    5951            0 :                     tracing::info!("Shard does not have an ongoing Reconciler");
    5952            0 :                     return Ok(());
    5953              :                 }
    5954            0 :                 Some(waiter) => {
    5955            0 :                     tracing::info!("Cancelling Reconciler");
    5956            0 :                     shard.cancel_reconciler();
    5957            0 :                     waiter
    5958            0 :                 }
    5959            0 :             }
    5960            0 :         };
    5961            0 : 
    5962            0 :         // Cancellation should be prompt.  If this fails we have still done our job of firing the
    5963            0 :         // cancellation token, but by returning an ApiError we will indicate to the caller that
    5964            0 :         // the Reconciler is misbehaving and not respecting the cancellation token
    5965            0 :         self.await_waiters(vec![waiter], SHORT_RECONCILE_TIMEOUT)
    5966            0 :             .await?;
    5967              : 
    5968            0 :         Ok(())
    5969            0 :     }
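
The cancel path above fires the cancellation token while holding the state lock, then waits outside the lock with a short timeout so that a reconciler ignoring its token surfaces as an error. A sketch with tokio primitives, assuming the reconciler signals a oneshot channel when it exits:

    use std::time::Duration;
    use tokio::sync::oneshot;
    use tokio_util::sync::CancellationToken;

    async fn cancel_and_wait(
        token: CancellationToken,
        finished: oneshot::Receiver<()>,
    ) -> Result<(), &'static str> {
        // Fire the token first; the reconciler should observe it promptly.
        token.cancel();
        // Then bound the wait: a timeout means the token is being ignored.
        match tokio::time::timeout(Duration::from_secs(5), finished).await {
            Ok(Ok(())) => Ok(()),
            Ok(Err(_)) => Err("reconciler dropped without reporting completion"),
            Err(_) => Err("reconciler did not respect cancellation in time"),
        }
    }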
    5970              : 
    5971              :     /// This is for debug/support only: we simply drop all state for a tenant, without
    5972              :     /// detaching or deleting it on pageservers.
    5973            0 :     pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
    5974            0 :         self.persistence.delete_tenant(tenant_id).await?;
    5975              : 
    5976            0 :         let mut locked = self.inner.write().unwrap();
    5977            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    5978            0 :         let mut shards = Vec::new();
    5979            0 :         for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
    5980            0 :             shards.push(*tenant_shard_id);
    5981            0 :         }
    5982              : 
    5983            0 :         for shard_id in shards {
    5984            0 :             if let Some(mut shard) = tenants.remove(&shard_id) {
    5985            0 :                 shard.intent.clear(scheduler);
    5986            0 :             }
    5987              :         }
    5988              : 
    5989            0 :         Ok(())
    5990            0 :     }
    5991              : 
    5992              :     /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
    5993              :     /// tenant with a very high generation number so that it will see the existing data.
    5994            0 :     pub(crate) async fn tenant_import(
    5995            0 :         &self,
    5996            0 :         tenant_id: TenantId,
    5997            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    5998            0 :         // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
    5999            0 :         let maybe_node = {
    6000            0 :             self.inner
    6001            0 :                 .read()
    6002            0 :                 .unwrap()
    6003            0 :                 .nodes
    6004            0 :                 .values()
    6005            0 :                 .find(|n| n.is_available())
    6006            0 :                 .cloned()
    6007              :         };
    6008            0 :         let Some(node) = maybe_node else {
    6009            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
    6010              :         };
    6011              : 
    6012            0 :         let client = PageserverClient::new(
    6013            0 :             node.get_id(),
    6014            0 :             self.http_client.clone(),
    6015            0 :             node.base_url(),
    6016            0 :             self.config.pageserver_jwt_token.as_deref(),
    6017            0 :         );
    6018              : 
    6019            0 :         let scan_result = client
    6020            0 :             .tenant_scan_remote_storage(tenant_id)
    6021            0 :             .await
    6022            0 :             .map_err(|e| passthrough_api_error(&node, e))?;
    6023              : 
    6024              :         // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
    6025            0 :         let Some(shard_count) = scan_result
    6026            0 :             .shards
    6027            0 :             .iter()
    6028            0 :             .map(|s| s.tenant_shard_id.shard_count)
    6029            0 :             .max()
    6030              :         else {
    6031            0 :             return Err(ApiError::NotFound(
    6032            0 :                 anyhow::anyhow!("No shards found").into(),
    6033            0 :             ));
    6034              :         };
    6035              : 
    6036              :         // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
    6037              :         // to use the highest generation observed across all of the scanned shards.
    6038            0 :         let generation = scan_result
    6039            0 :             .shards
    6040            0 :             .iter()
    6041            0 :             .map(|s| s.generation)
    6042            0 :             .max()
    6043            0 :             .expect("We already validated >0 shards");
    6044              : 
    6045              :         // Find the tenant's stripe size. This wasn't always persisted in the tenant manifest, so
    6046              :         // fall back to the original default stripe size of 32768 (256 MB) if it's not specified.
    6047              :         const ORIGINAL_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(32768);
    6048            0 :         let stripe_size = scan_result
    6049            0 :             .shards
    6050            0 :             .iter()
    6051            0 :             .find(|s| s.tenant_shard_id.shard_count == shard_count && s.generation == generation)
    6052            0 :             .expect("we validated >0 shards above")
    6053            0 :             .stripe_size
    6054            0 :             .unwrap_or_else(|| {
    6055            0 :                 if shard_count.count() > 1 {
    6056            0 :                     warn!("unknown stripe size, assuming {ORIGINAL_STRIPE_SIZE}");
    6057            0 :                 }
    6058            0 :                 ORIGINAL_STRIPE_SIZE
    6059            0 :             });
    6060              : 
    6061            0 :         let (response, waiters) = self
    6062            0 :             .do_tenant_create(TenantCreateRequest {
    6063            0 :                 new_tenant_id: TenantShardId::unsharded(tenant_id),
    6064            0 :                 generation,
    6065            0 : 
    6066            0 :                 shard_parameters: ShardParameters {
    6067            0 :                     count: shard_count,
    6068            0 :                     stripe_size,
    6069            0 :                 },
    6070            0 :                 placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
    6071            0 :                 config: TenantConfig::default(),
    6072            0 :             })
    6073            0 :             .await?;
    6074              : 
    6075            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    6076              :             // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
    6077              :             // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
    6078              :             // reconcile, as reconciliation includes notifying compute.
    6079            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
    6080            0 :         }
    6081              : 
    6082            0 :         Ok(response)
    6083            0 :     }
    6084              : 
    6085              :     /// For debug/support: a full JSON dump of TenantShards.  Returns a response so that
    6086              :     /// we don't have to make TenantShard clonable in the return path.
    6087            0 :     pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    6088            0 :         let serialized = {
    6089            0 :             let locked = self.inner.read().unwrap();
    6090            0 :             let result = locked.tenants.values().collect::<Vec<_>>();
    6091            0 :             serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
    6092              :         };
    6093              : 
    6094            0 :         hyper::Response::builder()
    6095            0 :             .status(hyper::StatusCode::OK)
    6096            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    6097            0 :             .body(hyper::Body::from(serialized))
    6098            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    6099            0 :     }
    6100              : 
    6101              :     /// Check the consistency of in-memory state vs. persistent state, and check that the
    6102              :     /// scheduler's statistics are up to date.
    6103              :     ///
    6104              :     /// These consistency checks expect an **idle** system.  If changes are going on while
    6105              :     /// we run, then we can falsely indicate a consistency issue.  This is sufficient for end-of-test
    6106              :     /// checks, but not suitable for running continuously in the background in the field.
    6107            0 :     pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
    6108            0 :         let (mut expect_nodes, mut expect_shards) = {
    6109            0 :             let locked = self.inner.read().unwrap();
    6110            0 : 
    6111            0 :             locked
    6112            0 :                 .scheduler
    6113            0 :                 .consistency_check(locked.nodes.values(), locked.tenants.values())
    6114            0 :                 .context("Scheduler checks")
    6115            0 :                 .map_err(ApiError::InternalServerError)?;
    6116              : 
    6117            0 :             let expect_nodes = locked
    6118            0 :                 .nodes
    6119            0 :                 .values()
    6120            0 :                 .map(|n| n.to_persistent())
    6121            0 :                 .collect::<Vec<_>>();
    6122            0 : 
    6123            0 :             let expect_shards = locked
    6124            0 :                 .tenants
    6125            0 :                 .values()
    6126            0 :                 .map(|t| t.to_persistent())
    6127            0 :                 .collect::<Vec<_>>();
    6128              : 
    6129              :             // This method can only validate the state of an idle system: if a reconcile is in
    6130              :             // progress, fail out early to avoid giving false errors on state that won't match
    6131              :             // between database and memory until a ReconcileResult is processed.
    6132            0 :             for t in locked.tenants.values() {
    6133            0 :                 if t.reconciler.is_some() {
    6134            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6135            0 :                         "Shard {} reconciliation in progress",
    6136            0 :                         t.tenant_shard_id
    6137            0 :                     )));
    6138            0 :                 }
    6139              :             }
    6140              : 
    6141            0 :             (expect_nodes, expect_shards)
    6142              :         };
    6143              : 
    6144            0 :         let mut nodes = self.persistence.list_nodes().await?;
    6145            0 :         expect_nodes.sort_by_key(|n| n.node_id);
    6146            0 :         nodes.sort_by_key(|n| n.node_id);
    6147              : 
    6148              :         // Errors relating to nodes are deferred so that we don't skip the shard checks below if we have a node error
    6149            0 :         let node_result = if nodes != expect_nodes {
    6150            0 :             tracing::error!("Consistency check failed on nodes.");
    6151            0 :             tracing::error!(
    6152            0 :                 "Nodes in memory: {}",
    6153            0 :                 serde_json::to_string(&expect_nodes)
    6154            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6155              :             );
    6156            0 :             tracing::error!(
    6157            0 :                 "Nodes in database: {}",
    6158            0 :                 serde_json::to_string(&nodes)
    6159            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6160              :             );
    6161            0 :             Err(ApiError::InternalServerError(anyhow::anyhow!(
    6162            0 :                 "Node consistency failure"
    6163            0 :             )))
    6164              :         } else {
    6165            0 :             Ok(())
    6166              :         };
    6167              : 
    6168            0 :         let mut persistent_shards = self.persistence.load_active_tenant_shards().await?;
    6169            0 :         persistent_shards
    6170            0 :             .sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    6171            0 : 
    6172            0 :         expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    6173              : 
    6174              :         // Because JSON contents of persistent tenants might disagree with the fields in current `TenantConfig`
    6175              :         // definition, we will do an encode/decode cycle to ensure any legacy fields are dropped and any new
    6176              :         // fields are added, before doing a comparison.
    6177            0 :         for tsp in &mut persistent_shards {
    6178            0 :             let config: TenantConfig = serde_json::from_str(&tsp.config)
    6179            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?;
    6180            0 :             tsp.config = serde_json::to_string(&config).expect("Encoding config is infallible");
    6181              :         }
    6182              : 
    6183            0 :         if persistent_shards != expect_shards {
    6184            0 :             tracing::error!("Consistency check failed on shards.");
    6185              : 
    6186            0 :             tracing::error!(
    6187            0 :                 "Shards in memory: {}",
    6188            0 :                 serde_json::to_string(&expect_shards)
    6189            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6190              :             );
    6191            0 :             tracing::error!(
    6192            0 :                 "Shards in database: {}",
    6193            0 :                 serde_json::to_string(&persistent_shards)
    6194            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6195              :             );
    6196              : 
    6197              :             // The full dump log lines above are useful in testing, but in the field Grafana will
    6198              :             // usually just drop them because they're so large. So we also do some explicit logging
    6199              :             // of just the diffs.
    6200            0 :             let persistent_shards = persistent_shards
    6201            0 :                 .into_iter()
    6202            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    6203            0 :                 .collect::<HashMap<_, _>>();
    6204            0 :             let expect_shards = expect_shards
    6205            0 :                 .into_iter()
    6206            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    6207            0 :                 .collect::<HashMap<_, _>>();
    6208            0 :             for (tenant_shard_id, persistent_tsp) in &persistent_shards {
    6209            0 :                 match expect_shards.get(tenant_shard_id) {
    6210              :                     None => {
    6211            0 :                         tracing::error!(
    6212            0 :                             "Shard {} found in database but not in memory",
    6213              :                             tenant_shard_id
    6214              :                         );
    6215              :                     }
    6216            0 :                     Some(expect_tsp) => {
    6217            0 :                         if expect_tsp != persistent_tsp {
    6218            0 :                             tracing::error!(
    6219            0 :                                 "Shard {} is inconsistent.  In memory: {}, database has: {}",
    6220            0 :                                 tenant_shard_id,
    6221            0 :                                 serde_json::to_string(expect_tsp).unwrap(),
    6222            0 :                                 serde_json::to_string(&persistent_tsp).unwrap()
    6223              :                             );
    6224            0 :                         }
    6225              :                     }
    6226              :                 }
    6227              :             }
    6228              : 
    6229              :             // Having already logged any differences, log any shards that simply aren't present in the database
    6230            0 :             for (tenant_shard_id, memory_tsp) in &expect_shards {
    6231            0 :                 if !persistent_shards.contains_key(tenant_shard_id) {
    6232            0 :                     tracing::error!(
    6233            0 :                         "Shard {} found in memory but not in database: {}",
    6234            0 :                         tenant_shard_id,
    6235            0 :                         serde_json::to_string(memory_tsp)
    6236            0 :                             .map_err(|e| ApiError::InternalServerError(e.into()))?
    6237              :                     );
    6238            0 :                 }
    6239              :             }
    6240              : 
    6241            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6242            0 :                 "Shard consistency failure"
    6243            0 :             )));
    6244            0 :         }
    6245            0 : 
    6246            0 :         node_result
    6247            0 :     }
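                      : 
                      :     // The encode/decode cycle above is a general serde normalization trick: decoding
                      :     // with the *current* schema drops legacy fields and fills newly added fields with
                      :     // defaults, so the re-encoded JSON compares cleanly against in-memory state.  A
                      :     // minimal sketch, assuming serde's derive feature (the struct is illustrative):
                      :     //
                      :     //     use serde::{Deserialize, Serialize};
                      :     //
                      :     //     #[derive(Serialize, Deserialize, Default)]
                      :     //     #[serde(default)] // missing fields take defaults; unknown fields are ignored
                      :     //     struct ExampleConfig {
                      :     //         compaction_threshold: u32,
                      :     //     }
                      :     //
                      :     //     fn normalize(raw: &str) -> serde_json::Result<String> {
                      :     //         let parsed: ExampleConfig = serde_json::from_str(raw)?;
                      :     //         serde_json::to_string(&parsed)
                      :     //     }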
    6248              : 
    6249              :     /// For debug/support: a JSON dump of the [`Scheduler`].  Returns a response so that
    6250              :     /// we don't have to make the Scheduler clonable in the return path.
    6251            0 :     pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    6252            0 :         let serialized = {
    6253            0 :             let locked = self.inner.read().unwrap();
    6254            0 :             serde_json::to_string(&locked.scheduler)
    6255            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?
    6256              :         };
    6257              : 
    6258            0 :         hyper::Response::builder()
    6259            0 :             .status(hyper::StatusCode::OK)
    6260            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    6261            0 :             .body(hyper::Body::from(serialized))
    6262            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    6263            0 :     }
    6264              : 
    6265              :     /// This is for debug/support only: we simply drop all state for a node, without
    6266              :     /// detaching or deleting anything on pageservers.  We do not try to re-schedule any
    6267              :     /// tenants that were on this node.
    6268            0 :     pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
    6269            0 :         self.persistence.delete_node(node_id).await?;
    6270              : 
    6271            0 :         let mut locked = self.inner.write().unwrap();
    6272              : 
    6273            0 :         for shard in locked.tenants.values_mut() {
    6274            0 :             shard.deref_node(node_id);
    6275            0 :             shard.observed.locations.remove(&node_id);
    6276            0 :         }
    6277              : 
    6278            0 :         let mut nodes = (*locked.nodes).clone();
    6279            0 :         nodes.remove(&node_id);
    6280            0 :         locked.nodes = Arc::new(nodes);
    6281            0 :         metrics::METRICS_REGISTRY
    6282            0 :             .metrics_group
    6283            0 :             .storage_controller_pageserver_nodes
    6284            0 :             .set(locked.nodes.len() as i64);
    6285            0 :         metrics::METRICS_REGISTRY
    6286            0 :             .metrics_group
    6287            0 :             .storage_controller_https_pageserver_nodes
    6288            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    6289            0 : 
    6290            0 :         locked.scheduler.node_remove(node_id);
    6291            0 : 
    6292            0 :         Ok(())
    6293            0 :     }
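                      : 
                      :     // Both node_drop above and node_delete below update the shared node map with a
                      :     // clone-and-swap: the map behind the Arc is cloned, mutated, and republished, so
                      :     // readers holding the old Arc keep a consistent snapshot.  A sketch of the
                      :     // pattern with simplified types:
                      :     //
                      :     //     use std::collections::HashMap;
                      :     //     use std::sync::Arc;
                      :     //
                      :     //     fn remove_from_shared(nodes: &mut Arc<HashMap<u64, String>>, node_id: u64) {
                      :     //         let mut new_nodes = (**nodes).clone(); // clone the map, not the Arc
                      :     //         new_nodes.remove(&node_id);
                      :     //         *nodes = Arc::new(new_nodes); // publish; old readers keep their snapshot
                      :     //     }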
    6294              : 
    6295              :     /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense
    6296              :     /// that we don't leave any bad state behind in the storage controller, but unclean
    6297              :     /// in the sense that we are not carefully draining the node.
    6298            0 :     pub(crate) async fn node_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
    6299            0 :         let _node_lock =
    6300            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
    6301              : 
    6302              :         // 1. Atomically update in-memory state:
    6303              :         //    - set the scheduling state to Pause to make subsequent scheduling ops skip it
    6304              :         //    - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
    6305              :         //    - drop the node from the main nodes map, so that when running reconciles complete they do not
    6306              :         //      re-insert references to this node into the ObservedState of shards
    6307              :         //    - drop the node from the scheduler
    6308              :         {
    6309            0 :             let mut locked = self.inner.write().unwrap();
    6310            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6311            0 : 
    6312            0 :             {
    6313            0 :                 let mut nodes_mut = (*nodes).deref().clone();
    6314            0 :                 match nodes_mut.get_mut(&node_id) {
    6315            0 :                     Some(node) => {
    6316            0 :                         // We do not bother setting this in the database, because we're about to delete the row anyway, and
    6317            0 :                         // if we crash it would not be desirable to leave the node paused after a restart.
    6318            0 :                         node.set_scheduling(NodeSchedulingPolicy::Pause);
    6319            0 :                     }
    6320              :                     None => {
    6321            0 :                         tracing::info!(
    6322            0 :                             "Node not found: presuming this is a retry and returning success"
    6323              :                         );
    6324            0 :                         return Ok(());
    6325              :                     }
    6326              :                 }
    6327              : 
    6328            0 :                 *nodes = Arc::new(nodes_mut);
    6329              :             }
    6330              : 
    6331            0 :             for (_tenant_id, mut schedule_context, shards) in
    6332            0 :                 TenantShardContextIterator::new(tenants, ScheduleMode::Normal)
    6333              :             {
    6334            0 :                 for shard in shards {
    6335            0 :                     if shard.deref_node(node_id) {
    6336            0 :                         if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
    6337              :                             // TODO: implement force flag to remove a node even if we can't reschedule
    6338              :                             // a tenant
    6339            0 :                             tracing::error!(
    6340            0 :                                 "Refusing to delete node, shard {} can't be rescheduled: {e}",
    6341              :                                 shard.tenant_shard_id
    6342              :                             );
    6343            0 :                             return Err(e.into());
    6344              :                         } else {
    6345            0 :                             tracing::info!(
    6346            0 :                                 "Rescheduled shard {} away from node during deletion",
    6347              :                                 shard.tenant_shard_id
    6348              :                             )
    6349              :                         }
    6350              : 
    6351            0 :                         self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    6352            0 :                     }
    6353              : 
    6354              :                     // Here we remove an existing observed location for the node we're removing, and it will
    6355              :                     // not be re-added by a reconciler's completion because we filter out removed nodes in
    6356              :                     // process_result.
    6357              :                     //
    6358              :                     // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
    6359              :                     // means any reconciles we spawned will know about the node we're deleting, enabling them
    6360              :                     // to do live migrations if it's still online.
    6361            0 :                     shard.observed.locations.remove(&node_id);
    6362              :                 }
    6363              :             }
    6364              : 
    6365            0 :             scheduler.node_remove(node_id);
    6366            0 : 
    6367            0 :             {
    6368            0 :                 let mut nodes_mut = (**nodes).clone();
    6369            0 :                 if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    6370            0 :                     // Ensure that any reconciler holding an Arc<> to this node will
    6371            0 :                     // drop out when trying to RPC to it (setting Offline state sets the
    6372            0 :                     // cancellation token on the Node object).
    6373            0 :                     removed_node.set_availability(NodeAvailability::Offline);
    6374            0 :                 }
    6375            0 :                 *nodes = Arc::new(nodes_mut);
    6376            0 :                 metrics::METRICS_REGISTRY
    6377            0 :                     .metrics_group
    6378            0 :                     .storage_controller_pageserver_nodes
    6379            0 :                     .set(nodes.len() as i64);
    6380            0 :                 metrics::METRICS_REGISTRY
    6381            0 :                     .metrics_group
    6382            0 :                     .storage_controller_https_pageserver_nodes
    6383            0 :                     .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    6384            0 :             }
    6385            0 :         }
    6386            0 : 
    6387            0 :         // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
    6388            0 :         // the removed node, as this column means "The pageserver to which this generation was issued", and
    6389            0 :         // their generations won't get updated until the reconcilers moving them away from this node complete.
    6390            0 :         // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
    6391            0 :         // that exists.
    6392            0 : 
    6393            0 :         // 2. Actually delete the node from the database and from in-memory state
    6394            0 :         tracing::info!("Deleting node from database");
    6395            0 :         self.persistence.delete_node(node_id).await?;
    6396              : 
    6397            0 :         Ok(())
    6398            0 :     }
    6399              : 
    6400            0 :     pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
    6401            0 :         let nodes = {
    6402            0 :             self.inner
    6403            0 :                 .read()
    6404            0 :                 .unwrap()
    6405            0 :                 .nodes
    6406            0 :                 .values()
    6407            0 :                 .cloned()
    6408            0 :                 .collect::<Vec<_>>()
    6409            0 :         };
    6410            0 : 
    6411            0 :         Ok(nodes)
    6412            0 :     }
    6413              : 
    6414            0 :     pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
    6415            0 :         self.inner
    6416            0 :             .read()
    6417            0 :             .unwrap()
    6418            0 :             .nodes
    6419            0 :             .get(&node_id)
    6420            0 :             .cloned()
    6421            0 :             .ok_or(ApiError::NotFound(
    6422            0 :                 format!("Node {node_id} not registered").into(),
    6423            0 :             ))
    6424            0 :     }
    6425              : 
    6426            0 :     pub(crate) async fn get_node_shards(
    6427            0 :         &self,
    6428            0 :         node_id: NodeId,
    6429            0 :     ) -> Result<NodeShardResponse, ApiError> {
    6430            0 :         let locked = self.inner.read().unwrap();
    6431            0 :         let mut shards = Vec::new();
    6432            0 :         for (tid, tenant) in locked.tenants.iter() {
    6433            0 :             let is_intended_secondary = match (
    6434            0 :                 tenant.intent.get_attached() == &Some(node_id),
    6435            0 :                 tenant.intent.get_secondary().contains(&node_id),
    6436            0 :             ) {
    6437              :                 (true, true) => {
    6438            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6439            0 :                         "{} attached as primary+secondary on the same node",
    6440            0 :                         tid
    6441            0 :                     )));
    6442              :                 }
    6443            0 :                 (true, false) => Some(false),
    6444            0 :                 (false, true) => Some(true),
    6445            0 :                 (false, false) => None,
    6446              :             };
    6447            0 :             let is_observed_secondary = if let Some(ObservedStateLocation { conf: Some(conf) }) =
    6448            0 :                 tenant.observed.locations.get(&node_id)
    6449              :             {
    6450            0 :                 Some(conf.secondary_conf.is_some())
    6451              :             } else {
    6452            0 :                 None
    6453              :             };
    6454            0 :             if is_intended_secondary.is_some() || is_observed_secondary.is_some() {
    6455            0 :                 shards.push(NodeShard {
    6456            0 :                     tenant_shard_id: *tid,
    6457            0 :                     is_intended_secondary,
    6458            0 :                     is_observed_secondary,
    6459            0 :                 });
    6460            0 :             }
    6461              :         }
    6462            0 :         Ok(NodeShardResponse { node_id, shards })
    6463            0 :     }
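                      : 
                      :     // The (attached, secondary) tuple match above encodes a small truth table.  The
                      :     // same classification in isolation (function name is illustrative):
                      :     //
                      :     //     fn intended_secondary(
                      :     //         attached_here: bool,
                      :     //         secondary_here: bool,
                      :     //     ) -> Result<Option<bool>, &'static str> {
                      :     //         match (attached_here, secondary_here) {
                      :     //             (true, true) => Err("attached as primary+secondary on the same node"),
                      :     //             (true, false) => Ok(Some(false)), // intended as the primary
                      :     //             (false, true) => Ok(Some(true)),  // intended as a secondary
                      :     //             (false, false) => Ok(None),       // no intent on this node
                      :     //         }
                      :     //     }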
    6464              : 
    6465            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    6466            0 :         self.persistence.get_leader().await
    6467            0 :     }
    6468              : 
    6469            0 :     pub(crate) async fn node_register(
    6470            0 :         &self,
    6471            0 :         register_req: NodeRegisterRequest,
    6472            0 :     ) -> Result<(), ApiError> {
    6473            0 :         let _node_lock = trace_exclusive_lock(
    6474            0 :             &self.node_op_locks,
    6475            0 :             register_req.node_id,
    6476            0 :             NodeOperations::Register,
    6477            0 :         )
    6478            0 :         .await;
    6479              : 
    6480              :         #[derive(PartialEq)]
    6481              :         enum RegistrationStatus {
    6482              :             UpToDate,
    6483              :             NeedUpdate,
    6484              :             Mismatched,
    6485              :             New,
    6486              :         }
    6487              : 
    6488            0 :         let registration_status = {
    6489            0 :             let locked = self.inner.read().unwrap();
    6490            0 :             if let Some(node) = locked.nodes.get(&register_req.node_id) {
    6491            0 :                 if node.registration_match(&register_req) {
    6492            0 :                     if node.need_update(&register_req) {
    6493            0 :                         RegistrationStatus::NeedUpdate
    6494              :                     } else {
    6495            0 :                         RegistrationStatus::UpToDate
    6496              :                     }
    6497              :                 } else {
    6498            0 :                     RegistrationStatus::Mismatched
    6499              :                 }
    6500              :             } else {
    6501            0 :                 RegistrationStatus::New
    6502              :             }
    6503              :         };
    6504              : 
    6505            0 :         match registration_status {
    6506              :             RegistrationStatus::UpToDate => {
    6507            0 :                 tracing::info!(
    6508            0 :                     "Node {} re-registered with matching address and is up to date",
    6509              :                     register_req.node_id
    6510              :                 );
    6511              : 
    6512            0 :                 return Ok(());
    6513              :             }
    6514              :             RegistrationStatus::Mismatched => {
    6515              :                 // TODO: decide if we want to allow modifying node addresses without removing and re-adding
    6516              :                 // the node.  Safest/simplest thing is to refuse it, and usually we deploy with
    6517              :                 // a fixed address through the lifetime of a node.
    6518            0 :                 tracing::warn!(
    6519            0 :                     "Node {} tried to register with different address",
    6520              :                     register_req.node_id
    6521              :                 );
    6522            0 :                 return Err(ApiError::Conflict(
    6523            0 :                     "Node is already registered with different address".to_string(),
    6524            0 :                 ));
    6525              :             }
    6526            0 :             RegistrationStatus::New | RegistrationStatus::NeedUpdate => {
    6527            0 :                 // fallthrough
    6528            0 :             }
    6529            0 :         }
    6530            0 : 
    6531            0 :         // We do not require that a node is actually online when registered (it will start life
    6532            0 :         // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
    6533            0 :         // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
    6534            0 :         // that register themselves with a broken DNS config.  We check only the HTTP hostname, because
    6535            0 :         // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
    6536            0 :         if tokio::net::lookup_host(format!(
    6537            0 :             "{}:{}",
    6538            0 :             register_req.listen_http_addr, register_req.listen_http_port
    6539            0 :         ))
    6540            0 :         .await
    6541            0 :         .is_err()
    6542              :         {
    6543              :             // If we have a transient DNS issue, it's up to the caller to retry their registration.  Because
    6544              :             // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
    6545              :             // we return a soft 503 error, to encourage callers to retry past transient issues.
    6546            0 :             return Err(ApiError::ResourceUnavailable(
    6547            0 :                 format!(
    6548            0 :                     "Node {} tried to register with unknown DNS name '{}'",
    6549            0 :                     register_req.node_id, register_req.listen_http_addr
    6550            0 :                 )
    6551            0 :                 .into(),
    6552            0 :             ));
    6553            0 :         }
    6554            0 : 
    6555            0 :         if self.config.use_https_pageserver_api && register_req.listen_https_port.is_none() {
    6556            0 :             return Err(ApiError::PreconditionFailed(
    6557            0 :                 format!(
    6558            0 :                     "Node {} has no https port, but use_https is enabled",
    6559            0 :                     register_req.node_id
    6560            0 :                 )
    6561            0 :                 .into(),
    6562            0 :             ));
    6563            0 :         }
    6564            0 : 
    6565            0 :         // Ordering: we must persist the new node _before_ adding it to in-memory state.
    6566            0 :         // This ensures that before we use it for anything or expose it via any external
    6567            0 :         // API, it is guaranteed to be available after a restart.
    6568            0 :         let new_node = Node::new(
    6569            0 :             register_req.node_id,
    6570            0 :             register_req.listen_http_addr,
    6571            0 :             register_req.listen_http_port,
    6572            0 :             register_req.listen_https_port,
    6573            0 :             register_req.listen_pg_addr,
    6574            0 :             register_req.listen_pg_port,
    6575            0 :             register_req.availability_zone_id.clone(),
    6576            0 :             self.config.use_https_pageserver_api,
    6577            0 :         );
    6578            0 :         let new_node = match new_node {
    6579            0 :             Ok(new_node) => new_node,
    6580            0 :             Err(error) => return Err(ApiError::InternalServerError(error)),
    6581              :         };
    6582              : 
    6583            0 :         match registration_status {
    6584            0 :             RegistrationStatus::New => self.persistence.insert_node(&new_node).await?,
    6585              :             RegistrationStatus::NeedUpdate => {
    6586            0 :                 self.persistence
    6587            0 :                     .update_node_on_registration(
    6588            0 :                         register_req.node_id,
    6589            0 :                         register_req.listen_https_port,
    6590            0 :                     )
    6591            0 :                     .await?
    6592              :             }
    6593            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    6594              :         }
    6595              : 
    6596            0 :         let mut locked = self.inner.write().unwrap();
    6597            0 :         let mut new_nodes = (*locked.nodes).clone();
    6598            0 : 
    6599            0 :         locked.scheduler.node_upsert(&new_node);
    6600            0 :         new_nodes.insert(register_req.node_id, new_node);
    6601            0 : 
    6602            0 :         locked.nodes = Arc::new(new_nodes);
    6603            0 : 
    6604            0 :         metrics::METRICS_REGISTRY
    6605            0 :             .metrics_group
    6606            0 :             .storage_controller_pageserver_nodes
    6607            0 :             .set(locked.nodes.len() as i64);
    6608            0 :         metrics::METRICS_REGISTRY
    6609            0 :             .metrics_group
    6610            0 :             .storage_controller_https_pageserver_nodes
    6611            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    6612            0 : 
    6613            0 :         match registration_status {
    6614              :             RegistrationStatus::New => {
    6615            0 :                 tracing::info!(
    6616            0 :                     "Registered pageserver {} ({}), now have {} pageservers",
    6617            0 :                     register_req.node_id,
    6618            0 :                     register_req.availability_zone_id,
    6619            0 :                     locked.nodes.len()
    6620              :                 );
    6621              :             }
    6622              :             RegistrationStatus::NeedUpdate => {
    6623            0 :                 tracing::info!(
    6624            0 :                     "Re-registered and updated node {} ({})",
    6625              :                     register_req.node_id,
    6626              :                     register_req.availability_zone_id,
    6627              :                 );
    6628              :             }
    6629            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    6630              :         }
    6631            0 :         Ok(())
    6632            0 :     }
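                      : 
                      :     // The DNS pre-check in node_register tests resolvability only, not reachability:
                      :     // lookup_host performs name resolution without opening a connection.  A minimal
                      :     // sketch of the probe (requires a tokio runtime; names are illustrative):
                      :     //
                      :     //     async fn dns_resolves(host: &str, port: u16) -> bool {
                      :     //         tokio::net::lookup_host(format!("{host}:{port}")).await.is_ok()
                      :     //     }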
    6633              : 
    6634              :     /// Configure in-memory and persistent state of a node as requested
    6635              :     ///
    6636              :     /// Note that this function does not trigger any immediate side effects in response
    6637              :     /// to the changes. That part is handled by [`Self::handle_node_availability_transition`].
    6638            0 :     async fn node_state_configure(
    6639            0 :         &self,
    6640            0 :         node_id: NodeId,
    6641            0 :         availability: Option<NodeAvailability>,
    6642            0 :         scheduling: Option<NodeSchedulingPolicy>,
    6643            0 :         node_lock: &TracingExclusiveGuard<NodeOperations>,
    6644            0 :     ) -> Result<AvailabilityTransition, ApiError> {
    6645            0 :         if let Some(scheduling) = scheduling {
    6646              :             // Scheduling is a persistent part of Node: we must write updates to the database before
    6647              :             // applying them in memory
    6648            0 :             self.persistence
    6649            0 :                 .update_node_scheduling_policy(node_id, scheduling)
    6650            0 :                 .await?;
    6651            0 :         }
    6652              : 
    6653              :         // If we're activating a node, then before setting it active we must reconcile any shard locations
    6654              :         // on that node by calling [`Self::node_activate_reconcile`], in case it is out of sync
    6655              :         // (e.g. due to being unavailable during controller startup).
    6656              :         //
    6657              :         // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
    6658              :         // nothing else can mutate its availability while we run.
    6659            0 :         let availability_transition = if let Some(input_availability) = availability.as_ref() {
    6660            0 :             let (activate_node, availability_transition) = {
    6661            0 :                 let locked = self.inner.read().unwrap();
    6662            0 :                 let Some(node) = locked.nodes.get(&node_id) else {
    6663            0 :                     return Err(ApiError::NotFound(
    6664            0 :                         anyhow::anyhow!("Node {} not registered", node_id).into(),
    6665            0 :                     ));
    6666              :                 };
    6667              : 
    6668            0 :                 (
    6669            0 :                     node.clone(),
    6670            0 :                     node.get_availability_transition(input_availability),
    6671            0 :                 )
    6672              :             };
    6673              : 
    6674            0 :             if matches!(availability_transition, AvailabilityTransition::ToActive) {
    6675            0 :                 self.node_activate_reconcile(activate_node, node_lock)
    6676            0 :                     .await?;
    6677            0 :             }
    6678            0 :             availability_transition
    6679              :         } else {
    6680            0 :             AvailabilityTransition::Unchanged
    6681              :         };
    6682              : 
    6683              :         // Apply changes from the request to our in-memory state for the Node
    6684            0 :         let mut locked = self.inner.write().unwrap();
    6685            0 :         let (nodes, _tenants, scheduler) = locked.parts_mut();
    6686            0 : 
    6687            0 :         let mut new_nodes = (**nodes).clone();
    6688              : 
    6689            0 :         let Some(node) = new_nodes.get_mut(&node_id) else {
    6690            0 :             return Err(ApiError::NotFound(
    6691            0 :                 anyhow::anyhow!("Node not registered").into(),
    6692            0 :             ));
    6693              :         };
    6694              : 
    6695            0 :         if let Some(availability) = availability {
    6696            0 :             node.set_availability(availability);
    6697            0 :         }
    6698              : 
    6699            0 :         if let Some(scheduling) = scheduling {
    6700            0 :             node.set_scheduling(scheduling);
    6701            0 :         }
    6702              : 
    6703              :         // Update the scheduler, in case the eligibility of the node for new shards has changed
    6704            0 :         scheduler.node_upsert(node);
    6705            0 : 
    6706            0 :         let new_nodes = Arc::new(new_nodes);
    6707            0 :         locked.nodes = new_nodes;
    6708            0 : 
    6709            0 :         Ok(availability_transition)
    6710            0 :     }
    6711              : 
    6712              :     /// Handle availability transition of one node
    6713              :     ///
    6714              :     /// Note that you should first call [`Self::node_state_configure`] to update
    6715              :     /// the in-memory state referencing that node. If you need to handle more than one transition
    6716              :     /// consider using [`Self::handle_node_availability_transitions`].
    6717            0 :     async fn handle_node_availability_transition(
    6718            0 :         &self,
    6719            0 :         node_id: NodeId,
    6720            0 :         transition: AvailabilityTransition,
    6721            0 :         _node_lock: &TracingExclusiveGuard<NodeOperations>,
    6722            0 :     ) -> Result<(), ApiError> {
    6723            0 :         // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
    6724            0 :         match transition {
    6725              :             AvailabilityTransition::ToOffline => {
    6726            0 :                 tracing::info!("Node {} transition to offline", node_id);
    6727              : 
    6728            0 :                 let mut locked = self.inner.write().unwrap();
    6729            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    6730            0 : 
    6731            0 :                 let mut tenants_affected: usize = 0;
    6732              : 
    6733            0 :                 for (_tenant_id, mut schedule_context, shards) in
    6734            0 :                     TenantShardContextIterator::new(tenants, ScheduleMode::Normal)
    6735              :                 {
    6736            0 :                     for tenant_shard in shards {
    6737            0 :                         let tenant_shard_id = tenant_shard.tenant_shard_id;
    6738            0 :                         if let Some(observed_loc) =
    6739            0 :                             tenant_shard.observed.locations.get_mut(&node_id)
    6740            0 :                         {
    6741            0 :                             // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
    6742            0 :                             // not assume our knowledge of the node's configuration is accurate until it comes back online
    6743            0 :                             observed_loc.conf = None;
    6744            0 :                         }
    6745              : 
    6746            0 :                         if nodes.len() == 1 {
    6747              :                             // Special case for a single-node cluster: there is no point trying to reschedule
    6748              :                             // any tenant shards, so skip it rather than spewing warnings about
    6749              :                             // failures to schedule them.
    6750            0 :                             continue;
    6751            0 :                         }
    6752            0 : 
    6753            0 :                         if !nodes
    6754            0 :                             .values()
    6755            0 :                             .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    6756              :                         {
    6757              :                             // Special case for when all nodes are unavailable and/or unschedulable: there is no point
    6758              :                             // trying to reschedule since there's nowhere else to go. Without this
    6759              :                             // branch we incorrectly detach tenants in response to node unavailability.
    6760            0 :                             continue;
    6761            0 :                         }
    6762            0 : 
    6763            0 :                         if tenant_shard.intent.demote_attached(scheduler, node_id) {
    6764            0 :                             tenant_shard.sequence = tenant_shard.sequence.next();
    6765            0 : 
    6766            0 :                             match tenant_shard.schedule(scheduler, &mut schedule_context) {
    6767            0 :                                 Err(e) => {
    6768            0 :                                     // It is possible that some tenants will become unschedulable when too many pageservers
    6769            0 :                                     // go offline: in this case there isn't much we can do other than make the issue observable.
    6770            0 :                                     // TODO: give TenantShard a scheduling error attribute to be queried later.
    6771            0 :                                     tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
    6772              :                                 }
    6773              :                                 Ok(()) => {
    6774            0 :                                     if self
    6775            0 :                                         .maybe_reconcile_shard(
    6776            0 :                                             tenant_shard,
    6777            0 :                                             nodes,
    6778            0 :                                             ReconcilerPriority::Normal,
    6779            0 :                                         )
    6780            0 :                                         .is_some()
    6781            0 :                                     {
    6782            0 :                                         tenants_affected += 1;
    6783            0 :                                     };
    6784              :                                 }
    6785              :                             }
    6786            0 :                         }
    6787              :                     }
    6788              :                 }
    6789            0 :                 tracing::info!(
    6790            0 :                     "Launched {} reconciler tasks for tenants affected by node {} going offline",
    6791              :                     tenants_affected,
    6792              :                     node_id
    6793              :                 )
    6794              :             }
    6795              :             AvailabilityTransition::ToActive => {
    6796            0 :                 tracing::info!("Node {} transition to active", node_id);
    6797              : 
    6798            0 :                 let mut locked = self.inner.write().unwrap();
    6799            0 :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    6800              : 
    6801              :                 // When a node comes back online, we must reconcile any tenant that has a None observed
    6802              :                 // location on the node.
    6803            0 :                 for tenant_shard in tenants.values_mut() {
    6804              :                     // If a reconciliation is already in progress, rely on the previous scheduling
    6805              :                     // decision and skip triggering a new reconciliation.
    6806            0 :                     if tenant_shard.reconciler.is_some() {
    6807            0 :                         continue;
    6808            0 :                     }
    6809              : 
    6810            0 :                     if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
    6811            0 :                         if observed_loc.conf.is_none() {
    6812            0 :                             self.maybe_reconcile_shard(
    6813            0 :                                 tenant_shard,
    6814            0 :                                 nodes,
    6815            0 :                                 ReconcilerPriority::Normal,
    6816            0 :                             );
    6817            0 :                         }
    6818            0 :                     }
    6819              :                 }
    6820              : 
    6821              :                 // TODO: in the background, we should balance work back onto this pageserver
    6822              :             }
    6823              :             // No action required for the intermediate unavailable state.
    6824              :             // When we transition into active or offline from the unavailable state,
    6825              :             // the correct handling above will kick in.
    6826              :             AvailabilityTransition::ToWarmingUpFromActive => {
    6827            0 :                 tracing::info!("Node {} transition to unavailable from active", node_id);
    6828              :             }
    6829              :             AvailabilityTransition::ToWarmingUpFromOffline => {
    6830            0 :                 tracing::info!("Node {} transition to unavailable from offline", node_id);
    6831              :             }
    6832              :             AvailabilityTransition::Unchanged => {
    6833            0 :                 tracing::debug!("Node {} no availability change during config", node_id);
    6834              :             }
    6835              :         }
    6836              : 
    6837            0 :         Ok(())
    6838            0 :     }
    6839              : 
    6840              :     /// Handle availability transition for multiple nodes
    6841              :     ///
    6842              :     /// Note that you should first call [`Self::node_state_configure`] for
    6843              :     /// all nodes being handled here, so that the handling uses fresh in-memory state.
    6844            0 :     async fn handle_node_availability_transitions(
    6845            0 :         &self,
    6846            0 :         transitions: Vec<(
    6847            0 :             NodeId,
    6848            0 :             TracingExclusiveGuard<NodeOperations>,
    6849            0 :             AvailabilityTransition,
    6850            0 :         )>,
    6851            0 :     ) -> Result<(), Vec<(NodeId, ApiError)>> {
    6852            0 :         let mut errors = Vec::default();
    6853            0 :         for (node_id, node_lock, transition) in transitions {
    6854            0 :             let res = self
    6855            0 :                 .handle_node_availability_transition(node_id, transition, &node_lock)
    6856            0 :                 .await;
    6857            0 :             if let Err(err) = res {
    6858            0 :                 errors.push((node_id, err));
    6859            0 :             }
    6860              :         }
    6861              : 
    6862            0 :         if errors.is_empty() {
    6863            0 :             Ok(())
    6864              :         } else {
    6865            0 :             Err(errors)
    6866              :         }
    6867            0 :     }
    6868              : 
    6869            0 :     pub(crate) async fn node_configure(
    6870            0 :         &self,
    6871            0 :         node_id: NodeId,
    6872            0 :         availability: Option<NodeAvailability>,
    6873            0 :         scheduling: Option<NodeSchedulingPolicy>,
    6874            0 :     ) -> Result<(), ApiError> {
    6875            0 :         let node_lock =
    6876            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
    6877              : 
    6878            0 :         let transition = self
    6879            0 :             .node_state_configure(node_id, availability, scheduling, &node_lock)
    6880            0 :             .await?;
    6881            0 :         self.handle_node_availability_transition(node_id, transition, &node_lock)
    6882            0 :             .await
    6883            0 :     }
    6884              : 
    6885              :     /// Wrapper around [`Self::node_configure`] which only allows changes while there is no ongoing
    6886              :     /// background operation; intended for the HTTP API.
    6887            0 :     pub(crate) async fn external_node_configure(
    6888            0 :         &self,
    6889            0 :         node_id: NodeId,
    6890            0 :         availability: Option<NodeAvailability>,
    6891            0 :         scheduling: Option<NodeSchedulingPolicy>,
    6892            0 :     ) -> Result<(), ApiError> {
    6893            0 :         {
    6894            0 :             let locked = self.inner.read().unwrap();
    6895            0 :             if let Some(op) = locked.ongoing_operation.as_ref().map(|op| op.operation) {
    6896            0 :                 return Err(ApiError::PreconditionFailed(
    6897            0 :                     format!("Ongoing background operation forbids configuring: {op}").into(),
    6898            0 :                 ));
    6899            0 :             }
    6900            0 :         }
    6901            0 : 
    6902            0 :         self.node_configure(node_id, availability, scheduling).await
    6903            0 :     }
    6904              : 
    6905            0 :     pub(crate) async fn start_node_drain(
    6906            0 :         self: &Arc<Self>,
    6907            0 :         node_id: NodeId,
    6908            0 :     ) -> Result<(), ApiError> {
    6909            0 :         let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
    6910            0 :             let locked = self.inner.read().unwrap();
    6911            0 :             let nodes = &locked.nodes;
    6912            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    6913            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    6914            0 :             ))?;
    6915            0 :             let schedulable_nodes_count = nodes
    6916            0 :                 .iter()
    6917            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    6918            0 :                 .count();
    6919            0 : 
    6920            0 :             (
    6921            0 :                 locked
    6922            0 :                     .ongoing_operation
    6923            0 :                     .as_ref()
    6924            0 :                     .map(|ongoing| ongoing.operation),
    6925            0 :                 node.is_available(),
    6926            0 :                 node.get_scheduling(),
    6927            0 :                 schedulable_nodes_count,
    6928            0 :             )
    6929            0 :         };
    6930              : 
    6931            0 :         if let Some(ongoing) = ongoing_op {
    6932            0 :             return Err(ApiError::PreconditionFailed(
    6933            0 :                 format!("Background operation already ongoing for node: {}", ongoing).into(),
    6934            0 :             ));
    6935            0 :         }
    6936            0 : 
    6937            0 :         if !node_available {
    6938            0 :             return Err(ApiError::ResourceUnavailable(
    6939            0 :                 format!("Node {node_id} is currently unavailable").into(),
    6940            0 :             ));
    6941            0 :         }
    6942            0 : 
    6943            0 :         if schedulable_nodes_count == 0 {
    6944            0 :             return Err(ApiError::PreconditionFailed(
    6945            0 :                 "No other schedulable nodes to drain to".into(),
    6946            0 :             ));
    6947            0 :         }
    6948            0 : 
    6949            0 :         match node_policy {
    6950              :             NodeSchedulingPolicy::Active => {
    6951            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
    6952            0 :                     .await?;
    6953              : 
    6954            0 :                 let cancel = self.cancel.child_token();
    6955            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    6956              : 
    6957            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    6958            0 :                     operation: Operation::Drain(Drain { node_id }),
    6959            0 :                     cancel: cancel.clone(),
    6960            0 :                 });
    6961              : 
    6962            0 :                 let span = tracing::info_span!(parent: None, "drain_node", %node_id);
    6963              : 
    6964            0 :                 tokio::task::spawn({
    6965            0 :                     let service = self.clone();
    6966            0 :                     let cancel = cancel.clone();
    6967            0 :                     async move {
    6968            0 :                         let _gate_guard = gate_guard;
    6969            0 : 
    6970            0 :                         scopeguard::defer! {
    6971            0 :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    6972            0 : 
    6973            0 :                             if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
    6974            0 :                                 assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
    6975            0 :                             } else {
    6976            0 :                                 panic!("We always remove the same operation")
    6977            0 :                             }
    6978            0 :                         }
    6979            0 : 
    6980            0 :                         tracing::info!("Drain background operation starting");
    6981            0 :                         let res = service.drain_node(node_id, cancel).await;
    6982            0 :                         match res {
    6983              :                             Ok(()) => {
    6984            0 :                                 tracing::info!("Drain background operation completed successfully");
    6985              :                             }
    6986              :                             Err(OperationError::Cancelled) => {
    6987            0 :                                 tracing::info!("Drain background operation was cancelled");
    6988              :                             }
    6989            0 :                             Err(err) => {
    6990            0 :                                 tracing::error!("Drain background operation encountered: {err}")
    6991              :                             }
    6992              :                         }
    6993            0 :                     }
    6994            0 :                 }.instrument(span));
    6995            0 :             }
    6996              :             NodeSchedulingPolicy::Draining => {
    6997            0 :                 return Err(ApiError::Conflict(format!(
    6998            0 :                     "Node {node_id} has drain in progress"
    6999            0 :                 )));
    7000              :             }
    7001            0 :             policy => {
    7002            0 :                 return Err(ApiError::PreconditionFailed(
    7003            0 :                     format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
    7004            0 :                 ));
    7005              :             }
    7006              :         }
    7007              : 
    7008            0 :         Ok(())
    7009            0 :     }
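
start_node_drain (and start_node_fill below, which mirrors it) uses a pattern worth noting: flip the node's scheduling policy, publish a cancellable OperationHandler, then spawn an instrumented background task whose scopeguard::defer! clears the ongoing operation on every exit path, including panics. A minimal sketch of that pattern, assuming hypothetical names (ServiceState, start_drain, drain_work) and the tokio, tokio-util and scopeguard crates:

    use std::sync::{Arc, Mutex};
    use tokio_util::sync::CancellationToken;

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    enum Operation { Drain(u64), Fill(u64) }

    #[derive(Default)]
    struct ServiceState { ongoing: Mutex<Option<(Operation, CancellationToken)>> }

    fn start_drain(state: Arc<ServiceState>, node_id: u64) {
        let cancel = CancellationToken::new();
        // Publish the cancellable handle before spawning, as start_node_drain does.
        *state.ongoing.lock().unwrap() = Some((Operation::Drain(node_id), cancel.clone()));
        tokio::spawn(async move {
            // Clears the handler on every exit path, including panics, matching
            // the defer! blocks in start_node_drain/start_node_fill.
            scopeguard::defer! {
                let prev = state.ongoing.lock().unwrap().take();
                assert_eq!(prev.map(|(op, _)| op), Some(Operation::Drain(node_id)));
            }
            tokio::select! {
                _ = cancel.cancelled() => { /* cancelled via cancel_node_drain */ }
                _ = drain_work(node_id) => { /* drained to completion */ }
            }
        });
    }

    async fn drain_work(_node_id: u64) { /* move attachments to other nodes */ }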
    7010              : 
    7011            0 :     pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
    7012            0 :         let node_available = {
    7013            0 :             let locked = self.inner.read().unwrap();
    7014            0 :             let nodes = &locked.nodes;
    7015            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    7016            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    7017            0 :             ))?;
    7018              : 
    7019            0 :             node.is_available()
    7020            0 :         };
    7021            0 : 
    7022            0 :         if !node_available {
    7023            0 :             return Err(ApiError::ResourceUnavailable(
    7024            0 :                 format!("Node {node_id} is currently unavailable").into(),
    7025            0 :             ));
    7026            0 :         }
    7027              : 
    7028            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    7029            0 :             if let Operation::Drain(drain) = op_handler.operation {
    7030            0 :                 if drain.node_id == node_id {
    7031            0 :                     tracing::info!("Cancelling background drain operation for node {node_id}");
    7032            0 :                     op_handler.cancel.cancel();
    7033            0 :                     return Ok(());
    7034            0 :                 }
    7035            0 :             }
    7036            0 :         }
    7037              : 
    7038            0 :         Err(ApiError::PreconditionFailed(
    7039            0 :             format!("Node {node_id} has no drain in progress").into(),
    7040            0 :         ))
    7041            0 :     }
    7042              : 
    7043            0 :     pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
    7044            0 :         let (ongoing_op, node_available, node_policy, total_nodes_count) = {
    7045            0 :             let locked = self.inner.read().unwrap();
    7046            0 :             let nodes = &locked.nodes;
    7047            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    7048            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    7049            0 :             ))?;
    7050              : 
    7051            0 :             (
    7052            0 :                 locked
    7053            0 :                     .ongoing_operation
    7054            0 :                     .as_ref()
    7055            0 :                     .map(|ongoing| ongoing.operation),
    7056            0 :                 node.is_available(),
    7057            0 :                 node.get_scheduling(),
    7058            0 :                 nodes.len(),
    7059            0 :             )
    7060            0 :         };
    7061              : 
    7062            0 :         if let Some(ongoing) = ongoing_op {
    7063            0 :             return Err(ApiError::PreconditionFailed(
    7064            0 :                 format!("Background operation already ongoing for node: {}", ongoing).into(),
    7065            0 :             ));
    7066            0 :         }
    7067            0 : 
    7068            0 :         if !node_available {
    7069            0 :             return Err(ApiError::ResourceUnavailable(
    7070            0 :                 format!("Node {node_id} is currently unavailable").into(),
    7071            0 :             ));
    7072            0 :         }
    7073            0 : 
    7074            0 :         if total_nodes_count <= 1 {
    7075            0 :             return Err(ApiError::PreconditionFailed(
    7076            0 :                 "No other nodes to fill from".into(),
    7077            0 :             ));
    7078            0 :         }
    7079            0 : 
    7080            0 :         match node_policy {
    7081              :             NodeSchedulingPolicy::Active => {
    7082            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
    7083            0 :                     .await?;
    7084              : 
    7085            0 :                 let cancel = self.cancel.child_token();
    7086            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    7087              : 
    7088            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    7089            0 :                     operation: Operation::Fill(Fill { node_id }),
    7090            0 :                     cancel: cancel.clone(),
    7091            0 :                 });
    7092              : 
    7093            0 :                 let span = tracing::info_span!(parent: None, "fill_node", %node_id);
    7094              : 
    7095            0 :                 tokio::task::spawn({
    7096            0 :                     let service = self.clone();
    7097            0 :                     let cancel = cancel.clone();
    7098            0 :                     async move {
    7099            0 :                         let _gate_guard = gate_guard;
    7100            0 : 
    7101            0 :                         scopeguard::defer! {
    7102            0 :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    7103            0 : 
    7104            0 :                             if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
    7105            0 :                                 assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
    7106            0 :                             } else {
    7107            0 :                                 panic!("We always remove the same operation")
    7108            0 :                             }
    7109            0 :                         }
    7110            0 : 
    7111            0 :                         tracing::info!("Fill background operation starting");
    7112            0 :                         let res = service.fill_node(node_id, cancel).await;
    7113            0 :                         match res {
    7114              :                             Ok(()) => {
    7115            0 :                                 tracing::info!("Fill background operation completed successfully");
    7116              :                             }
    7117              :                             Err(OperationError::Cancelled) => {
    7118            0 :                                 tracing::info!("Fill background operation was cancelled");
    7119              :                             }
    7120            0 :                             Err(err) => {
    7121            0 :                                 tracing::error!("Fill background operation encountered: {err}")
    7122              :                             }
    7123              :                         }
    7124            0 :                     }
    7125            0 :                 }.instrument(span));
    7126            0 :             }
    7127              :             NodeSchedulingPolicy::Filling => {
    7128            0 :                 return Err(ApiError::Conflict(format!(
    7129            0 :                     "Node {node_id} has fill in progress"
    7130            0 :                 )));
    7131              :             }
    7132            0 :             policy => {
    7133            0 :                 return Err(ApiError::PreconditionFailed(
    7134            0 :                     format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
    7135            0 :                 ));
    7136              :             }
    7137              :         }
    7138              : 
    7139            0 :         Ok(())
    7140            0 :     }
    7141              : 
    7142            0 :     pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
    7143            0 :         let node_available = {
    7144            0 :             let locked = self.inner.read().unwrap();
    7145            0 :             let nodes = &locked.nodes;
    7146            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    7147            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    7148            0 :             ))?;
    7149              : 
    7150            0 :             node.is_available()
    7151            0 :         };
    7152            0 : 
    7153            0 :         if !node_available {
    7154            0 :             return Err(ApiError::ResourceUnavailable(
    7155            0 :                 format!("Node {node_id} is currently unavailable").into(),
    7156            0 :             ));
    7157            0 :         }
    7158              : 
    7159            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    7160            0 :             if let Operation::Fill(fill) = op_handler.operation {
    7161            0 :                 if fill.node_id == node_id {
    7162            0 :                     tracing::info!("Cancelling background fill operation for node {node_id}");
    7163            0 :                     op_handler.cancel.cancel();
    7164            0 :                     return Ok(());
    7165            0 :                 }
    7166            0 :             }
    7167            0 :         }
    7168              : 
    7169            0 :         Err(ApiError::PreconditionFailed(
    7170            0 :             format!("Node {node_id} has no fill in progress").into(),
    7171            0 :         ))
    7172            0 :     }
    7173              : 
    7174              :     /// Like [`Self::maybe_configured_reconcile_shard`], but uses the default reconciler
    7175              :     /// configuration
    7176            0 :     fn maybe_reconcile_shard(
    7177            0 :         &self,
    7178            0 :         shard: &mut TenantShard,
    7179            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    7180            0 :         priority: ReconcilerPriority,
    7181            0 :     ) -> Option<ReconcilerWaiter> {
    7182            0 :         self.maybe_configured_reconcile_shard(shard, nodes, ReconcilerConfig::new(priority))
    7183            0 :     }
    7184              : 
    7185              :     /// Before constructing a Reconciler, acquire semaphore units from the appropriate concurrency limit (depends on priority)
    7186            0 :     fn get_reconciler_units(
    7187            0 :         &self,
    7188            0 :         priority: ReconcilerPriority,
    7189            0 :     ) -> Result<ReconcileUnits, TryAcquireError> {
    7190            0 :         let units = match priority {
    7191            0 :             ReconcilerPriority::Normal => self.reconciler_concurrency.clone().try_acquire_owned(),
    7192              :             ReconcilerPriority::High => {
    7193            0 :                 match self
    7194            0 :                     .priority_reconciler_concurrency
    7195            0 :                     .clone()
    7196            0 :                     .try_acquire_owned()
    7197              :                 {
    7198            0 :                     Ok(u) => Ok(u),
    7199              :                     Err(TryAcquireError::NoPermits) => {
    7200              :                         // If the high priority semaphore is exhausted, then high priority tasks may steal units from
    7201              :                         // the normal priority semaphore.
    7202            0 :                         self.reconciler_concurrency.clone().try_acquire_owned()
    7203              :                     }
    7204            0 :                     Err(e) => Err(e),
    7205              :                 }
    7206              :             }
    7207              :         };
    7208              : 
    7209            0 :         units.map(ReconcileUnits::new)
    7210            0 :     }
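
The spill-over above reduces to a small reusable shape: high-priority work tries its own semaphore first and, only when that pool has no permits, falls back to stealing from the normal pool. A sketch with assumed field names (ReconcilerLimits is illustrative, not the controller's real type):

    use std::sync::Arc;
    use tokio::sync::{OwnedSemaphorePermit, Semaphore, TryAcquireError};

    struct ReconcilerLimits {
        normal: Arc<Semaphore>,
        high: Arc<Semaphore>,
    }

    impl ReconcilerLimits {
        // High priority tries its own pool first, then steals from the normal
        // pool, mirroring get_reconciler_units above.
        fn acquire(&self, high_priority: bool) -> Result<OwnedSemaphorePermit, TryAcquireError> {
            if high_priority {
                match self.high.clone().try_acquire_owned() {
                    Ok(permit) => return Ok(permit),
                    Err(TryAcquireError::NoPermits) => { /* fall through and steal */ }
                    Err(e) => return Err(e),
                }
            }
            self.normal.clone().try_acquire_owned()
        }
    }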
    7211              : 
    7212              :     /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`].
    7213            0 :     fn maybe_configured_reconcile_shard(
    7214            0 :         &self,
    7215            0 :         shard: &mut TenantShard,
    7216            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    7217            0 :         reconciler_config: ReconcilerConfig,
    7218            0 :     ) -> Option<ReconcilerWaiter> {
    7219            0 :         let reconcile_needed = shard.get_reconcile_needed(nodes);
    7220              : 
    7221            0 :         let reconcile_reason = match reconcile_needed {
    7222            0 :             ReconcileNeeded::No => return None,
    7223            0 :             ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
    7224            0 :             ReconcileNeeded::Yes(reason) => {
    7225            0 :                 // Fall through to try and acquire units for spawning reconciler
    7226            0 :                 reason
    7227              :             }
    7228              :         };
    7229              : 
    7230            0 :         let units = match self.get_reconciler_units(reconciler_config.priority) {
    7231            0 :             Ok(u) => u,
    7232              :             Err(_) => {
    7233            0 :                 tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
    7234            0 :                     "Concurrency limited: enqueued for reconcile later");
    7235            0 :                 if !shard.delayed_reconcile {
    7236            0 :                     match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
    7237            0 :                         Err(TrySendError::Closed(_)) => {
    7238            0 :                             // Weird mid-shutdown case?
    7239            0 :                         }
    7240              :                         Err(TrySendError::Full(_)) => {
    7241              :                             // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
    7242            0 :                             tracing::warn!(
    7243            0 :                                 "Many shards are waiting to reconcile: delayed_reconcile queue is full"
    7244              :                             );
    7245              :                         }
    7246            0 :                         Ok(()) => {
    7247            0 :                             shard.delayed_reconcile = true;
    7248            0 :                         }
    7249              :                     }
    7250            0 :                 }
    7251              : 
    7252              :                 // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
    7253              :                 // number to advance.  When this function is eventually called again and succeeds in getting units,
    7254              :                 // it will spawn a reconciler that makes this waiter complete.
    7255            0 :                 return Some(shard.future_reconcile_waiter());
    7256              :             }
    7257              :         };
    7258              : 
    7259            0 :         let Ok(gate_guard) = self.reconcilers_gate.enter() else {
    7260              :             // Gate closed: we're shutting down, drop out.
    7261            0 :             return None;
    7262              :         };
    7263              : 
    7264            0 :         shard.spawn_reconciler(
    7265            0 :             reconcile_reason,
    7266            0 :             &self.result_tx,
    7267            0 :             nodes,
    7268            0 :             &self.compute_hook,
    7269            0 :             reconciler_config,
    7270            0 :             &self.config,
    7271            0 :             &self.persistence,
    7272            0 :             units,
    7273            0 :             gate_guard,
    7274            0 :             &self.reconcilers_cancel,
    7275            0 :             self.http_client.clone(),
    7276            0 :         )
    7277            0 :     }
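
When no units are available, the function falls back to a bounded channel whose overflow is deliberately tolerated: the periodic background reconcile task retries every shard regardless. The error handling reduces to the following shape (the u64 payload stands in for TenantShardId):

    use tokio::sync::mpsc::{Sender, error::TrySendError};

    // Returns whether the caller should mark the shard as delayed_reconcile.
    fn enqueue_delayed(tx: &Sender<u64>, shard_id: u64) -> bool {
        match tx.try_send(shard_id) {
            Ok(()) => true,
            // Queue full: safe to drop, the background reconcile task retries all shards.
            Err(TrySendError::Full(_)) => false,
            // Channel closed: mid-shutdown, nothing to do.
            Err(TrySendError::Closed(_)) => false,
        }
    }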
    7278              : 
    7279              :     /// Check all tenants for pending reconciliation work, and reconcile those in need.
    7280              :     /// Additionally, reschedule tenants that require it.
    7281              :     ///
    7282              :     /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
    7283              :     /// spawned but some _would_ have been spawned if `reconciler_concurrency` units were
    7284              :     /// available.  A return value of 0 indicates that everything is fully reconciled already.
    7285            0 :     fn reconcile_all(&self) -> usize {
    7286            0 :         let mut locked = self.inner.write().unwrap();
    7287            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    7288            0 :         let pageservers = nodes.clone();
    7289            0 : 
    7290            0 :         // This function is an efficient place to update lazy statistics, since we are walking
    7291            0 :         // all tenants.
    7292            0 :         let mut pending_reconciles = 0;
    7293            0 :         let mut az_violations = 0;
    7294            0 : 
    7295            0 :         // If we find any tenants to drop from memory, stash them to offload after
    7296            0 :         // we're done traversing the map of tenants.
    7297            0 :         let mut drop_detached_tenants = Vec::new();
    7298            0 : 
    7299            0 :         let mut reconciles_spawned = 0;
    7300            0 :         for shard in tenants.values_mut() {
    7301              :             // Accumulate scheduling statistics
    7302            0 :             if let (Some(attached), Some(preferred)) =
    7303            0 :                 (shard.intent.get_attached(), shard.preferred_az())
    7304              :             {
    7305            0 :                 let node_az = nodes
    7306            0 :                     .get(attached)
    7307            0 :                     .expect("Nodes exist if referenced")
    7308            0 :                     .get_availability_zone_id();
    7309            0 :                 if node_az != preferred {
    7310            0 :                     az_violations += 1;
    7311            0 :                 }
    7312            0 :             }
    7313              : 
    7314              :             // Skip this shard if it is already enqueued for reconciliation and no units are free
    7315            0 :             if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
    7316              :                 // If there is something delayed, then return a nonzero count so that
    7317              :                 // callers like reconcile_all_now do not incorrectly get the impression
    7318              :                 // that the system is in a quiescent state.
    7319            0 :                 reconciles_spawned = std::cmp::max(1, reconciles_spawned);
    7320            0 :                 pending_reconciles += 1;
    7321            0 :                 continue;
    7322            0 :             }
    7323            0 : 
    7324            0 :             // Eventual consistency: if an earlier reconcile job failed, and the shard is still
    7325            0 :             // dirty, spawn another one
    7326            0 :             if self
    7327            0 :                 .maybe_reconcile_shard(shard, &pageservers, ReconcilerPriority::Normal)
    7328            0 :                 .is_some()
    7329            0 :             {
    7330            0 :                 reconciles_spawned += 1;
    7331            0 :             } else if shard.delayed_reconcile {
    7332            0 :                 // Shard wanted to reconcile but for some reason couldn't.
    7333            0 :                 pending_reconciles += 1;
    7334            0 :             }
    7335              : 
    7336              :             // If this tenant is detached, try dropping it from memory. This is usually done
    7337              :             // proactively in [`Self::process_results`], but we do it here to handle the edge
    7338              :             // case where a reconcile completes while someone else is holding an op lock for the tenant.
    7339            0 :             if shard.tenant_shard_id.shard_number == ShardNumber(0)
    7340            0 :                 && shard.policy == PlacementPolicy::Detached
    7341              :             {
    7342            0 :                 if let Some(guard) = self.tenant_op_locks.try_exclusive(
    7343            0 :                     shard.tenant_shard_id.tenant_id,
    7344            0 :                     TenantOperations::DropDetached,
    7345            0 :                 ) {
    7346            0 :                     drop_detached_tenants.push((shard.tenant_shard_id.tenant_id, guard));
    7347            0 :                 }
    7348            0 :             }
    7349              :         }
    7350              : 
    7351              :         // Some metrics are calculated from SchedulerNode state, update these periodically
    7352            0 :         scheduler.update_metrics();
    7353              : 
    7354              :         // Process any deferred tenant drops
    7355            0 :         for (tenant_id, guard) in drop_detached_tenants {
    7356            0 :             self.maybe_drop_tenant(tenant_id, &mut locked, &guard);
    7357            0 :         }
    7358              : 
    7359            0 :         metrics::METRICS_REGISTRY
    7360            0 :             .metrics_group
    7361            0 :             .storage_controller_schedule_az_violation
    7362            0 :             .set(az_violations as i64);
    7363            0 : 
    7364            0 :         metrics::METRICS_REGISTRY
    7365            0 :             .metrics_group
    7366            0 :             .storage_controller_pending_reconciles
    7367            0 :             .set(pending_reconciles as i64);
    7368            0 : 
    7369            0 :         reconciles_spawned
    7370            0 :     }
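
The return-value contract documented above (0 means fully reconciled, nonzero means work was started or is still pending) supports polling loops such as this hypothetical helper; reconcile_all is the real method, the loop itself is illustrative:

    // Hypothetical helper: poll until the system reports itself fully reconciled.
    async fn reconcile_until_idle(service: &Service) {
        while service.reconcile_all() != 0 {
            tokio::time::sleep(std::time::Duration::from_millis(100)).await;
        }
    }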
    7371              : 
    7372              :     /// `optimize` in this context means identifying shards which have valid scheduled locations, but
    7373              :     /// could be scheduled somewhere better:
    7374              :     /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
    7375              :     ///    * e.g. after a node fails then recovers, to move some work back to it
    7376              :     /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
    7377              :     ///    * e.g. after a shard split, the initial attached locations will all be on the node where
    7378              :     ///      we did the split, but are probably better placed elsewhere.
    7379              :     /// - Creating new secondary locations if it improves the spreading of a sharded tenant
    7380              :     ///    * e.g. after a shard split, some locations will be on the same node (where the split
    7381              :     ///      happened), and will probably be better placed elsewhere.
    7382              :     ///
    7383              :     /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
    7384              :     /// the time of scheduling, this function looks for cases where a better-scoring location is available
    7385              :     /// according to those same soft constraints.
    7386            0 :     async fn optimize_all(&self) -> usize {
    7387              :         // Limit on how many shards' optimizations each call to this function will execute.  Combined
    7388              :         // with the frequency of background calls, this acts as an implicit rate limit that runs a small
    7389              :         // trickle of optimizations in the background, rather than executing a large number in parallel
    7390              :         // when a change occurs.
    7391              :         const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 16;
    7392              : 
    7393              :         // Synchronous prepare: scan shards for possible scheduling optimizations
    7394            0 :         let candidate_work = self.optimize_all_plan();
    7395            0 :         let candidate_work_len = candidate_work.len();
    7396              : 
    7397              :         // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply validation
    7398            0 :         let validated_work = self.optimize_all_validate(candidate_work).await;
    7399              : 
    7400            0 :         let was_work_filtered = validated_work.len() != candidate_work_len;
    7401            0 : 
    7402            0 :         // Synchronous apply: update the shards' intent states according to validated optimisations
    7403            0 :         let mut reconciles_spawned = 0;
    7404            0 :         let mut optimizations_applied = 0;
    7405            0 :         let mut locked = self.inner.write().unwrap();
    7406            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    7407            0 :         for (tenant_shard_id, optimization) in validated_work {
    7408            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    7409              :                 // Shard was dropped between planning and execution;
    7410            0 :                 continue;
    7411              :             };
    7412            0 :             tracing::info!(tenant_shard_id=%tenant_shard_id, "Applying optimization: {optimization:?}");
    7413            0 :             if shard.apply_optimization(scheduler, optimization) {
    7414            0 :                 optimizations_applied += 1;
    7415            0 :                 if self
    7416            0 :                     .maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal)
    7417            0 :                     .is_some()
    7418            0 :                 {
    7419            0 :                     reconciles_spawned += 1;
    7420            0 :                 }
    7421            0 :             }
    7422              : 
    7423            0 :             if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
    7424            0 :                 break;
    7425            0 :             }
    7426              :         }
    7427              : 
    7428            0 :         if was_work_filtered {
    7429            0 :             // If we filtered any work out during validation, ensure we return a nonzero value to indicate
    7430            0 :             // to callers that the system is not in a truly quiet state, it's going to do some work as soon
    7431            0 :             // as these validations start passing.
    7432            0 :             reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
    7433            0 :         }
    7434              : 
    7435            0 :         reconciles_spawned
    7436            0 :     }
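
Structurally, optimize_all is a three-phase pipeline: a synchronous planning scan under the lock, an asynchronous validation phase that performs pageserver I/O with the lock released, and a synchronous apply phase that re-takes the lock. Reduced to a runnable skeleton with stand-in functions (none of these names are in the source):

    // A skeletal restatement of the plan/validate/apply phasing of optimize_all.
    type Plan = Vec<(u64, &'static str)>; // (shard id, optimization)

    fn plan_under_lock() -> Plan { vec![(1, "migrate-attachment")] } // sync: scan shards, no I/O
    async fn validate(plan: Plan) -> Plan { plan }                   // async: pageserver I/O, lock released
    fn apply_under_lock(validated: Plan) -> usize { validated.len() } // sync: mutate intents, spawn reconcilers

    async fn optimize_pass() -> usize {
        let plan = plan_under_lock();
        let validated = validate(plan).await;
        apply_under_lock(validated)
    }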
    7437              : 
    7438            0 :     fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
    7439              :         // How many candidate optimizations we will generate before evaluating them for readiness: setting
    7440              :         // this higher than the execution limit gives us a chance to execute some work even if the first
    7441              :         // few optimizations we find are not ready.
    7442              :         const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 64;
    7443              : 
    7444            0 :         let mut work = Vec::new();
    7445            0 :         let mut locked = self.inner.write().unwrap();
    7446            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    7447              : 
    7448              :         // We are going to plan a bunch of optimisations before applying any of them, so the
    7449              :         // utilisation stats on nodes will be effectively stale for all but the first optimisation we
    7450              :         // generate.  To avoid this causing unstable migrations/flapping, it's important that the
    7451              :         // code in TenantShard for finding optimisations uses [`NodeAttachmentSchedulingScore::disregard_utilization`]
    7452              :         // to ignore the utilisation component of the score.
    7453              : 
    7454            0 :         for (_tenant_id, schedule_context, shards) in
    7455            0 :             TenantShardContextIterator::new(tenants, ScheduleMode::Speculative)
    7456              :         {
    7457            0 :             for shard in shards {
    7458            0 :                 if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
    7459            0 :                     break;
    7460            0 :                 }
    7461            0 :                 match shard.get_scheduling_policy() {
    7462            0 :                     ShardSchedulingPolicy::Active => {
    7463            0 :                         // Ok to do optimization
    7464            0 :                     }
    7465            0 :                     ShardSchedulingPolicy::Essential if shard.get_preferred_node().is_some() => {
    7466            0 :                         // Ok to do optimization: we are executing a graceful migration that
    7467            0 :                         // has set preferred_node
    7468            0 :                     }
    7469              :                     ShardSchedulingPolicy::Essential
    7470              :                     | ShardSchedulingPolicy::Pause
    7471              :                     | ShardSchedulingPolicy::Stop => {
    7472              :                         // Policy prevents optimizing this shard.
    7473            0 :                         continue;
    7474              :                     }
    7475              :                 }
    7476              : 
    7477            0 :                 if !matches!(shard.splitting, SplitState::Idle)
    7478            0 :                     || matches!(shard.policy, PlacementPolicy::Detached)
    7479            0 :                     || shard.reconciler.is_some()
    7480              :                 {
    7481              :                     // Do not start any optimizations while another change to the tenant is ongoing: this
    7482              :                     // is not necessary for correctness, but simplifies operations and implicitly throttles
    7483              :                     // optimization changes to happen in a "trickle" over time.
    7484            0 :                     continue;
    7485            0 :                 }
    7486            0 : 
    7487            0 :                 // Fast path: we may quickly identify shards that don't have any possible optimisations
    7488            0 :                 if !shard.maybe_optimizable(scheduler, &schedule_context) {
    7489            0 :                     if cfg!(feature = "testing") {
    7490              :                         // Check that maybe_optimizable doesn't disagree with the actual optimization functions.
    7491              :                         // Only do this in testing builds because it is not a correctness-critical check, so we shouldn't
    7492              :                         // panic in prod if we hit this, or spend cycles on it in prod.
    7493            0 :                         assert!(
    7494            0 :                             shard
    7495            0 :                                 .optimize_attachment(scheduler, &schedule_context)
    7496            0 :                                 .is_none()
    7497            0 :                         );
    7498            0 :                         assert!(
    7499            0 :                             shard
    7500            0 :                                 .optimize_secondary(scheduler, &schedule_context)
    7501            0 :                                 .is_none()
    7502            0 :                         );
    7503            0 :                     }
    7504            0 :                     continue;
    7505            0 :                 }
    7506              : 
    7507            0 :                 if let Some(optimization) =
    7508              :                     // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
    7509              :                     // its primary location based on soft constraints, cut it over.
    7510            0 :                     shard.optimize_attachment(scheduler, &schedule_context)
    7511              :                 {
    7512            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for attachment: {optimization:?}");
    7513            0 :                     work.push((shard.tenant_shard_id, optimization));
    7514            0 :                     break;
    7515            0 :                 } else if let Some(optimization) =
    7516              :                     // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
    7517              :                     // better placed on another node, based on ScheduleContext, then adjust it.  This
    7518              :                     // covers cases like after a shard split, where we might have too many shards
    7519              :                     // in the same tenant with secondary locations on the node where they originally split.
    7520            0 :                     shard.optimize_secondary(scheduler, &schedule_context)
    7521              :                 {
    7522            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for secondary: {optimization:?}");
    7523            0 :                     work.push((shard.tenant_shard_id, optimization));
    7524            0 :                     break;
    7525            0 :                 }
    7526              :             }
    7527              :         }
    7528              : 
    7529            0 :         work
    7530            0 :     }
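
Note the feature gating used here versus in optimize_all_validate below: `cfg!(feature = "testing")` compiles the guarded code unconditionally and evaluates to a boolean constant, while the `#[cfg(feature = "testing")]` attribute removes the item from non-testing builds entirely. A minimal illustration of the difference:

    // `cfg!` yields a compile-time boolean; both branches must still type-check.
    fn maybe_cross_check() {
        if cfg!(feature = "testing") {
            expensive_cross_check(); // always compiled, only executed in testing builds
        }
    }

    fn expensive_cross_check() { /* e.g. re-run the optimization planners */ }

    // `#[cfg]` removes the item entirely from builds without the feature.
    #[cfg(feature = "testing")]
    async fn testing_only_kick() { /* e.g. kick_secondary_download */ }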
    7531              : 
    7532            0 :     async fn optimize_all_validate(
    7533            0 :         &self,
    7534            0 :         candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
    7535            0 :     ) -> Vec<(TenantShardId, ScheduleOptimization)> {
    7536            0 :         // Take a clone of the node map to use outside the lock in async validation phase
    7537            0 :         let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
    7538            0 : 
    7539            0 :         let mut want_secondary_status = Vec::new();
    7540            0 : 
    7541            0 :         // Validate our plans: this is an async phase where we may do I/O to pageservers to
    7542            0 :         // check that the state of locations is acceptable to run the optimization, such as
    7543            0 :         // checking that a secondary location is sufficiently warmed-up to cleanly cut over
    7544            0 :         // in a live migration.
    7545            0 :         let mut validated_work = Vec::new();
    7546            0 :         for (tenant_shard_id, optimization) in candidate_work {
    7547            0 :             match optimization.action {
    7548              :                 ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    7549              :                     old_attached_node_id: _,
    7550            0 :                     new_attached_node_id,
    7551            0 :                 }) => {
    7552            0 :                     match validation_nodes.get(&new_attached_node_id) {
    7553            0 :                         None => {
    7554            0 :                             // Node was dropped between planning and validation
    7555            0 :                         }
    7556            0 :                         Some(node) => {
    7557            0 :                             if !node.is_available() {
    7558            0 :                                 tracing::info!(
    7559            0 :                                     "Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable"
    7560              :                                 );
    7561            0 :                             } else {
    7562            0 :                                 // Accumulate optimizations that require fetching secondary status, so that we can execute these
    7563            0 :                                 // remote API requests concurrently.
    7564            0 :                                 want_secondary_status.push((
    7565            0 :                                     tenant_shard_id,
    7566            0 :                                     node.clone(),
    7567            0 :                                     optimization,
    7568            0 :                                 ));
    7569            0 :                             }
    7570              :                         }
    7571              :                     }
    7572              :                 }
    7573              :                 ScheduleOptimizationAction::ReplaceSecondary(_)
    7574              :                 | ScheduleOptimizationAction::CreateSecondary(_)
    7575              :                 | ScheduleOptimizationAction::RemoveSecondary(_) => {
    7576              :                     // No extra checks needed to manage secondaries: this does not interrupt client access
    7577            0 :                     validated_work.push((tenant_shard_id, optimization))
    7578              :                 }
    7579              :             };
    7580              :         }
    7581              : 
    7582              :         // Call into pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth migration: we
    7583              :         // do this to avoid spawning a Reconciler that would have to wait minutes/hours for a destination to
    7584              :         // warm up: that reconciler would hold a precious reconcile semaphore unit the whole time it waited.
    7585            0 :         let results = self
    7586            0 :             .tenant_for_shards_api(
    7587            0 :                 want_secondary_status
    7588            0 :                     .iter()
    7589            0 :                     .map(|i| (i.0, i.1.clone()))
    7590            0 :                     .collect(),
    7591            0 :                 |tenant_shard_id, client| async move {
    7592            0 :                     client.tenant_secondary_status(tenant_shard_id).await
    7593            0 :                 },
    7594            0 :                 1,
    7595            0 :                 1,
    7596            0 :                 SHORT_RECONCILE_TIMEOUT,
    7597            0 :                 &self.cancel,
    7598            0 :             )
    7599            0 :             .await;
    7600              : 
    7601            0 :         for ((tenant_shard_id, node, optimization), secondary_status) in
    7602            0 :             want_secondary_status.into_iter().zip(results.into_iter())
    7603              :         {
    7604            0 :             match secondary_status {
    7605            0 :                 Err(e) => {
    7606            0 :                     tracing::info!(
    7607            0 :                         "Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}"
    7608              :                     );
    7609              :                 }
    7610            0 :                 Ok(progress) => {
    7611              :                     // We require secondary locations to have less than 10GiB of downloads pending before we will use
    7612              :                     // them in an optimization
    7613              :                     const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
    7614              : 
    7615            0 :                     if progress.heatmap_mtime.is_none()
    7616            0 :                         || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
    7617            0 :                             && progress.bytes_downloaded != progress.bytes_total
    7618            0 :                         || progress.bytes_total - progress.bytes_downloaded
    7619            0 :                             > DOWNLOAD_FRESHNESS_THRESHOLD
    7620              :                     {
    7621            0 :                         tracing::info!(
    7622            0 :                             "Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}"
    7623              :                         );
    7624              : 
    7625              :                         #[cfg(feature = "testing")]
    7626            0 :                         if progress.heatmap_mtime.is_none() {
    7627              :                             // No heatmap might mean the attached location has never uploaded one, or that
    7628              :                             // the secondary download hasn't happened yet.  This is relatively unusual in the field,
    7629              :                             // but fairly common in tests.
    7630            0 :                             self.kick_secondary_download(tenant_shard_id).await;
    7631            0 :                         }
    7632              :                     } else {
    7633              :                         // Location looks ready: proceed
    7634            0 :                         tracing::info!(
    7635            0 :                             "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
    7636              :                         );
    7637            0 :                         validated_work.push((tenant_shard_id, optimization))
    7638              :                     }
    7639              :                 }
    7640              :             }
    7641              :         }
    7642              : 
    7643            0 :         validated_work
    7644            0 :     }
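
The warmth check above can be read as a single predicate over SecondaryProgress: a secondary is migration-ready only if it has a heatmap and either is fully downloaded (small tenants) or has at most 10 GiB outstanding (large ones). Factored out for illustration; the free function and the example values are not part of the source:

    const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024; // 10 GiB

    // Mirrors the skip-condition in optimize_all_validate, inverted to "ready".
    fn secondary_ready(has_heatmap: bool, bytes_downloaded: u64, bytes_total: u64) -> bool {
        !(!has_heatmap
            || bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD && bytes_downloaded != bytes_total
            || bytes_total - bytes_downloaded > DOWNLOAD_FRESHNESS_THRESHOLD)
    }

    fn main() {
        const GIB: u64 = 1024 * 1024 * 1024;
        assert!(secondary_ready(true, 45 * GIB, 50 * GIB));   // 5 GiB pending: ready
        assert!(!secondary_ready(true, 4 * GIB, 5 * GIB));    // small tenant, not fully downloaded
        assert!(!secondary_ready(false, 50 * GIB, 50 * GIB)); // no heatmap uploaded yet
    }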
    7645              : 
    7646              :     /// Some aspects of scheduling optimisation wait for secondary locations to be warm.  This
    7647              :     /// happens on multi-minute timescales in the field, which is fine because optimisation is meant
    7648              :     /// to be a lazy background thing. However, when testing, it is not practical to wait around, so
    7649              :     /// we have this helper to move things along faster.
    7650              :     #[cfg(feature = "testing")]
    7651            0 :     async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) {
    7652            0 :         let (attached_node, secondaries) = {
    7653            0 :             let locked = self.inner.read().unwrap();
    7654            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    7655            0 :                 tracing::warn!(
    7656            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: not found"
    7657              :                 );
    7658            0 :                 return;
    7659              :             };
    7660              : 
    7661            0 :             let Some(attached) = shard.intent.get_attached() else {
    7662            0 :                 tracing::warn!(
    7663            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: no attached"
    7664              :                 );
    7665            0 :                 return;
    7666              :             };
    7667              : 
    7668            0 :             let secondaries = shard
    7669            0 :                 .intent
    7670            0 :                 .get_secondary()
    7671            0 :                 .iter()
    7672            0 :                 .map(|n| locked.nodes.get(n).unwrap().clone())
    7673            0 :                 .collect::<Vec<_>>();
    7674            0 : 
    7675            0 :             (locked.nodes.get(attached).unwrap().clone(), secondaries)
    7676            0 :         };
    7677            0 : 
    7678            0 :         // Make remote API calls to upload + download heatmaps: we ignore errors because this is just
    7679            0 :         // a 'kick' to let scheduling optimisation run more promptly.
    7680            0 :         match attached_node
    7681            0 :             .with_client_retries(
    7682            0 :                 |client| async move { client.tenant_heatmap_upload(tenant_shard_id).await },
    7683            0 :                 &self.http_client,
    7684            0 :                 &self.config.pageserver_jwt_token,
    7685            0 :                 3,
    7686            0 :                 10,
    7687            0 :                 SHORT_RECONCILE_TIMEOUT,
    7688            0 :                 &self.cancel,
    7689            0 :             )
    7690            0 :             .await
    7691              :         {
    7692            0 :             Some(Err(e)) => {
    7693            0 :                 tracing::info!(
    7694            0 :                     "Failed to upload heatmap from {attached_node} for {tenant_shard_id}: {e}"
    7695              :                 );
    7696              :             }
    7697              :             None => {
    7698            0 :                 tracing::info!(
    7699            0 :                     "Cancelled while uploading heatmap from {attached_node} for {tenant_shard_id}"
    7700              :                 );
    7701              :             }
    7702              :             Some(Ok(_)) => {
    7703            0 :                 tracing::info!(
    7704            0 :                     "Successfully uploaded heatmap from {attached_node} for {tenant_shard_id}"
    7705              :                 );
    7706              :             }
    7707              :         }
    7708              : 
    7709            0 :         for secondary_node in secondaries {
    7710            0 :             match secondary_node
    7711            0 :                 .with_client_retries(
    7712            0 :                     |client| async move {
    7713            0 :                         client
    7714            0 :                             .tenant_secondary_download(
    7715            0 :                                 tenant_shard_id,
    7716            0 :                                 Some(Duration::from_secs(1)),
    7717            0 :                             )
    7718            0 :                             .await
    7719            0 :                     },
    7720            0 :                     &self.http_client,
    7721            0 :                     &self.config.pageserver_jwt_token,
    7722            0 :                     3,
    7723            0 :                     10,
    7724            0 :                     SHORT_RECONCILE_TIMEOUT,
    7725            0 :                     &self.cancel,
    7726            0 :                 )
    7727            0 :                 .await
    7728              :             {
    7729            0 :                 Some(Err(e)) => {
    7730            0 :                     tracing::info!(
    7731            0 :                         "Failed to download heatmap from {secondary_node} for {tenant_shard_id}: {e}"
    7732              :                     );
    7733              :                 }
    7734              :                 None => {
    7735            0 :                     tracing::info!(
    7736            0 :                         "Cancelled while downloading heatmap from {secondary_node} for {tenant_shard_id}"
    7737              :                     );
    7738              :                 }
    7739            0 :                 Some(Ok(progress)) => {
    7740            0 :                     tracing::info!(
    7741            0 :                         "Successfully downloaded heatmap from {secondary_node} for {tenant_shard_id}: {progress:?}"
    7742              :                     );
    7743              :                 }
    7744              :             }
    7745              :         }
    7746            0 :     }
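                      : 
                      :     // Hypothetical usage sketch (testing builds only; not part of the measured
                      :     // source): a test-oriented caller could nudge warm-up for a single shard
                      :     // before re-polling optimisation, e.g.
                      :     //
                      :     //     #[cfg(feature = "testing")]
                      :     //     service.kick_secondary_download(tenant_shard_id).await;
                      :     //
                      :     // No error handling is needed at the call site: the helper deliberately
                      :     // swallows upload/download failures, since it is only a hint.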
    7747              : 
    7748              :     /// Asynchronously split a tenant that's eligible for automatic splits. At most one tenant will
    7749              :     /// be split per call.
    7750              :     ///
    7751              :     /// Two sets of criteria are used: initial splits and size-based splits (in that order).
    7752              :     /// Initial splits are used to eagerly split unsharded tenants that may be performing initial
    7753              :     /// ingestion, since sharded tenants have significantly better ingestion throughput. Size-based
    7754              :     /// splits are used to bound the maximum shard size and balance out load.
    7755              :     ///
    7756              :     /// Splits are based on max_logical_size, i.e. the logical size of the largest timeline in a
    7757              :     /// tenant. We use this instead of the total logical size because branches will duplicate
    7758              :     /// logical size without actually using more storage. We could also use visible physical size,
    7759              :     /// but this might overestimate tenants that frequently churn branches.
    7760              :     ///
    7761              :     /// Initial splits (initial_split_threshold):
    7762              :     /// * Applies to tenants with 1 shard.
    7763              :     /// * The largest timeline (max_logical_size) exceeds initial_split_threshold.
    7764              :     /// * Splits into initial_split_shards.
    7765              :     ///
    7766              :     /// Size-based splits (split_threshold):
    7767              :     /// * Applies to all tenants.
    7768              :     /// * The largest timeline (max_logical_size) divided by shard count exceeds split_threshold.
    7769              :     /// * Splits such that max_logical_size / shard_count <= split_threshold, in powers of 2.
    7770              :     ///
    7771              :     /// Tenant shards are ordered by descending max_logical_size, first initial split candidates
    7772              :     /// then size-based split candidates. The first matching candidate is split.
    7773              :     ///
    7774              :     /// The shard count is clamped to max_split_shards. If a candidate is eligible for both initial
    7775              :     /// and size-based splits, the largest shard count will be used.
    7776              :     ///
    7777              :     /// An unsharded tenant will get DEFAULT_STRIPE_SIZE, regardless of what its ShardIdentity says.
    7778              :     /// A sharded tenant will retain its stripe size, as splits do not allow changing it.
    7779              :     ///
    7780              :     /// TODO: consider spawning multiple splits in parallel: this is only called once every 20
    7781              :     /// seconds, so a large backlog can take a long time, and if a tenant fails to split it will
    7782              :     /// block all other splits.
    7783            0 :     async fn autosplit_tenants(self: &Arc<Self>) {
    7784            0 :         // If max_split_shards is set to 0 or 1, we can't split.
    7785            0 :         let max_split_shards = self.config.max_split_shards;
    7786            0 :         if max_split_shards <= 1 {
    7787            0 :             return;
    7788            0 :         }
    7789            0 : 
    7790            0 :         // If initial_split_shards is set to 0 or 1, disable initial splits.
    7791            0 :         let mut initial_split_threshold = self.config.initial_split_threshold.unwrap_or(0);
    7792            0 :         let initial_split_shards = self.config.initial_split_shards;
    7793            0 :         if initial_split_shards <= 1 {
    7794            0 :             initial_split_threshold = 0;
    7795            0 :         }
    7796              : 
    7797              :         // If neither split_threshold nor initial_split_threshold is set, disable autosplits.
    7798            0 :         let split_threshold = self.config.split_threshold.unwrap_or(0);
    7799            0 :         if split_threshold == 0 && initial_split_threshold == 0 {
    7800            0 :             return;
    7801            0 :         }
    7802            0 : 
    7803            0 :         // Fetch split candidates in prioritized order.
    7804            0 :         //
    7805            0 :         // If initial splits are enabled, fetch eligible tenants first. We prioritize initial splits
    7806            0 :         // over size-based splits, since these are often performing initial ingestion and rely on
    7807            0 :         // splits to improve ingest throughput.
    7808            0 :         let mut candidates = Vec::new();
    7809            0 : 
    7810            0 :         if initial_split_threshold > 0 {
    7811              :             // Initial splits: fetch tenants with 1 shard where the logical size of the largest
    7812              :             // timeline exceeds the initial split threshold.
    7813            0 :             let initial_candidates = self
    7814            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    7815            0 :                     order_by: TenantSorting::MaxLogicalSize,
    7816            0 :                     limit: 10,
    7817            0 :                     where_shards_lt: Some(ShardCount(2)),
    7818            0 :                     where_gt: Some(initial_split_threshold),
    7819            0 :                 })
    7820            0 :                 .await;
    7821            0 :             candidates.extend(initial_candidates);
    7822            0 :         }
    7823              : 
    7824            0 :         if split_threshold > 0 {
    7825              :             // Size-based splits: fetch tenants where the logical size of the largest timeline
    7826              :             // divided by shard count exceeds the split threshold.
    7827              :             //
    7828              :             // max_logical_size is only tracked on shard 0, and contains the total logical size
    7829              :             // across all shards. We have to order and filter by MaxLogicalSizePerShard, i.e.
    7830              :             // max_logical_size / shard_count, such that we only receive tenants that are actually
    7831              :             // eligible for splits. But we still use max_logical_size for later split calculations.
    7832            0 :             let size_candidates = self
    7833            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    7834            0 :                     order_by: TenantSorting::MaxLogicalSizePerShard,
    7835            0 :                     limit: 10,
    7836            0 :                     where_shards_lt: Some(ShardCount(max_split_shards)),
    7837            0 :                     where_gt: Some(split_threshold),
    7838            0 :                 })
    7839            0 :                 .await;
    7840              :             #[cfg(feature = "testing")]
    7841            0 :             assert!(
    7842            0 :                 size_candidates.iter().all(|c| c.id.is_shard_zero()),
    7843            0 :                 "MaxLogicalSizePerShard returned non-zero shard: {size_candidates:?}",
    7844              :             );
    7845            0 :             candidates.extend(size_candidates);
    7846            0 :         }
    7847              : 
    7848              :         // Filter out tenants in a prohibiting scheduling mode.
    7849            0 :         {
    7850            0 :             let state = self.inner.read().unwrap();
    7851            0 :             candidates.retain(|i| {
    7852            0 :                 let policy = state.tenants.get(&i.id).map(|s| s.get_scheduling_policy());
    7853            0 :                 policy == Some(ShardSchedulingPolicy::Active)
    7854            0 :             });
    7855            0 :         }
    7856              : 
    7857              :         // Pick the first candidate to split. This will generally always be the first one in
    7858              :         // candidates, but we defensively skip candidates that end up not actually splitting.
    7859            0 :         let Some((candidate, new_shard_count)) = candidates
    7860            0 :             .into_iter()
    7861            0 :             .filter_map(|candidate| {
    7862            0 :                 let new_shard_count = Self::compute_split_shards(ShardSplitInputs {
    7863            0 :                     shard_count: candidate.id.shard_count,
    7864            0 :                     max_logical_size: candidate.max_logical_size,
    7865            0 :                     split_threshold,
    7866            0 :                     max_split_shards,
    7867            0 :                     initial_split_threshold,
    7868            0 :                     initial_split_shards,
    7869            0 :                 });
    7870            0 :                 new_shard_count.map(|shards| (candidate, shards.count()))
    7871            0 :             })
    7872            0 :             .next()
    7873              :         else {
    7874            0 :             debug!("no split-eligible tenants found");
    7875            0 :             return;
    7876              :         };
    7877              : 
    7878              :         // Retain the stripe size of sharded tenants, as splits don't allow changing it. Otherwise,
    7879              :         // use DEFAULT_STRIPE_SIZE for unsharded tenants -- their stripe size doesn't really matter,
    7880              :         // and if we change the default stripe size we want to use the new default rather than an
    7881              :         // old, persisted stripe size.
    7882            0 :         let new_stripe_size = match candidate.id.shard_count.count() {
    7883            0 :             0 => panic!("invalid shard count 0"),
    7884            0 :             1 => Some(DEFAULT_STRIPE_SIZE),
    7885            0 :             2.. => None,
    7886              :         };
    7887              : 
    7888              :         // We spawn a task to run this, so it's exactly like some external API client requesting
    7889              :         // it.  We don't want to block the background reconcile loop on this.
    7890            0 :         let old_shard_count = candidate.id.shard_count.count();
    7891            0 :         info!(
    7892            0 :             "auto-splitting tenant {old_shard_count} → {new_shard_count} shards, \
    7893            0 :                 current size {candidate:?} (split_threshold={split_threshold} \
    7894            0 :                 initial_split_threshold={initial_split_threshold})"
    7895              :         );
    7896              : 
    7897            0 :         let this = self.clone();
    7898            0 :         tokio::spawn(
    7899            0 :             async move {
    7900            0 :                 match this
    7901            0 :                     .tenant_shard_split(
    7902            0 :                         candidate.id.tenant_id,
    7903            0 :                         TenantShardSplitRequest {
    7904            0 :                             new_shard_count,
    7905            0 :                             new_stripe_size,
    7906            0 :                         },
    7907            0 :                     )
    7908            0 :                     .await
    7909              :                 {
    7910              :                     Ok(_) => {
    7911            0 :                         info!("successful auto-split {old_shard_count} → {new_shard_count} shards")
    7912              :                     }
    7913            0 :                     Err(err) => error!("auto-split failed: {err}"),
    7914              :                 }
    7915            0 :             }
    7916            0 :             .instrument(info_span!("auto_split", tenant_id=%candidate.id.tenant_id)),
    7917              :         );
    7918            0 :     }
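                      : 
                      :     // A worked example of the two criteria above (illustrative only; the config
                      :     // values are hypothetical). With split_threshold = 64 GiB,
                      :     // initial_split_threshold = 8 GiB, initial_split_shards = 4 and
                      :     // max_split_shards = 8:
                      :     //
                      :     // * An unsharded tenant whose largest timeline is 10 GiB qualifies only for
                      :     //   an initial split (10 GiB > 8 GiB) and is split 1 → 4 shards.
                      :     // * A 4-shard tenant whose largest timeline is 500 GiB qualifies for a
                      :     //   size-based split (500 GiB / 4 > 64 GiB) and is split 4 → 8 shards.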
    7919              : 
    7920              :     /// Returns the number of shards to split a tenant into, or None if the tenant shouldn't split,
    7921              :     /// based on the total logical size of the largest timeline summed across all shards. Uses the
    7922              :     /// larger of size-based and initial splits, clamped to max_split_shards.
    7923              :     ///
    7924              :     /// NB: the thresholds are exclusive, since TopTenantShardsRequest uses where_gt.
    7925           25 :     fn compute_split_shards(inputs: ShardSplitInputs) -> Option<ShardCount> {
    7926           25 :         let ShardSplitInputs {
    7927           25 :             shard_count,
    7928           25 :             max_logical_size,
    7929           25 :             split_threshold,
    7930           25 :             max_split_shards,
    7931           25 :             initial_split_threshold,
    7932           25 :             initial_split_shards,
    7933           25 :         } = inputs;
    7934           25 : 
    7935           25 :         let mut new_shard_count: u8 = shard_count.count();
    7936           25 : 
    7937           25 :         // Size-based splits. Ensures max_logical_size / new_shard_count <= split_threshold, using
    7938           25 :         // power-of-two shard counts.
    7939           25 :         //
    7940           25 :         // If the current shard count is not a power of two, and does not exceed split_threshold,
    7941           25 :         // then we leave it alone rather than forcing a power-of-two split.
    7942           25 :         if split_threshold > 0
    7943           18 :             && max_logical_size.div_ceil(split_threshold) > shard_count.count() as u64
    7944           12 :         {
    7945           12 :             new_shard_count = max_logical_size
    7946           12 :                 .div_ceil(split_threshold)
    7947           12 :                 .checked_next_power_of_two()
    7948           12 :                 .unwrap_or(u8::MAX as u64)
    7949           12 :                 .try_into()
    7950           12 :                 .unwrap_or(u8::MAX);
    7951           13 :         }
    7952              : 
    7953              :         // Initial splits. Use the larger of size-based and initial split shard counts. This only
    7954              :         // applies to unsharded tenants, i.e. changes to initial_split_threshold or
    7955              :         // initial_split_shards are not retroactive for sharded tenants.
    7956           25 :         if initial_split_threshold > 0
    7957           14 :             && shard_count.count() <= 1
    7958           11 :             && max_logical_size > initial_split_threshold
    7959            8 :         {
    7960            8 :             new_shard_count = new_shard_count.max(initial_split_shards);
    7961           17 :         }
    7962              : 
    7963              :         // Clamp to max shards.
    7964           25 :         new_shard_count = new_shard_count.min(max_split_shards);
    7965           25 : 
    7966           25 :         // Don't split if we're not increasing the shard count.
    7967           25 :         if new_shard_count <= shard_count.count() {
    7968           10 :             return None;
    7969           15 :         }
    7970           15 : 
    7971           15 :         Some(ShardCount(new_shard_count))
    7972           25 :     }
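                      : 
                      :     // Tracing the arithmetic above on hypothetical inputs: with shard_count = 1,
                      :     // max_logical_size = 300 (GiB) and split_threshold = 64 (GiB),
                      :     // 300.div_ceil(64) = 5 exceeds the current count, so the target becomes
                      :     // checked_next_power_of_two() = 8; after clamping to max_split_shards the
                      :     // function returns Some(ShardCount(8)). With max_logical_size = 60 it returns
                      :     // None, since 60.div_ceil(64) = 1 does not exceed the current shard count.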
    7973              : 
    7974              :     /// Fetches the top tenant shards from every node, in descending order of
    7975              :     /// max logical size. Any node errors will be logged and ignored.
    7976            0 :     async fn get_top_tenant_shards(
    7977            0 :         &self,
    7978            0 :         request: &TopTenantShardsRequest,
    7979            0 :     ) -> Vec<TopTenantShardItem> {
    7980            0 :         let nodes = self
    7981            0 :             .inner
    7982            0 :             .read()
    7983            0 :             .unwrap()
    7984            0 :             .nodes
    7985            0 :             .values()
    7986            0 :             .cloned()
    7987            0 :             .collect_vec();
    7988            0 : 
    7989            0 :         let mut futures = FuturesUnordered::new();
    7990            0 :         for node in nodes {
    7991            0 :             futures.push(async move {
    7992            0 :                 node.with_client_retries(
    7993            0 :                     |client| async move { client.top_tenant_shards(request.clone()).await },
    7994            0 :                     &self.http_client,
    7995            0 :                     &self.config.pageserver_jwt_token,
    7996            0 :                     3,
    7997            0 :                     3,
    7998            0 :                     Duration::from_secs(5),
    7999            0 :                     &self.cancel,
    8000            0 :                 )
    8001            0 :                 .await
    8002            0 :             });
    8003            0 :         }
    8004              : 
    8005            0 :         let mut top = Vec::new();
    8006            0 :         while let Some(output) = futures.next().await {
    8007            0 :             match output {
    8008            0 :                 Some(Ok(response)) => top.extend(response.shards),
    8009            0 :                 Some(Err(mgmt_api::Error::Cancelled)) => {}
    8010            0 :                 Some(Err(err)) => warn!("failed to fetch top tenants: {err}"),
    8011            0 :                 None => {} // node is shutting down
    8012              :             }
    8013              :         }
    8014              : 
    8015            0 :         top.sort_by_key(|i| i.max_logical_size);
    8016            0 :         top.reverse();
    8017            0 :         top
    8018            0 :     }
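                      : 
                      :     // For example (hypothetical cluster): with three pageservers each returning
                      :     // their local top 10, the merged list holds up to 30 entries; the final sort
                      :     // by max_logical_size (descending) puts the globally largest tenants first,
                      :     // regardless of which node reported them.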
    8019              : 
    8020              :     /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
    8021              :     /// also wait for any generated Reconcilers to complete.  Calling this until it returns zero should
    8022              :     /// put the system into a quiescent state where future background reconciliations won't do anything.
    8023            0 :     pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
    8024            0 :         let reconciles_spawned = self.reconcile_all();
    8025            0 :         let reconciles_spawned = if reconciles_spawned == 0 {
    8026              :             // Only optimize when we are otherwise idle
    8027            0 :             self.optimize_all().await
    8028              :         } else {
    8029            0 :             reconciles_spawned
    8030              :         };
    8031              : 
    8032            0 :         let waiters = {
    8033            0 :             let mut waiters = Vec::new();
    8034            0 :             let locked = self.inner.read().unwrap();
    8035            0 :             for (_tenant_shard_id, shard) in locked.tenants.iter() {
    8036            0 :                 if let Some(waiter) = shard.get_waiter() {
    8037            0 :                     waiters.push(waiter);
    8038            0 :                 }
    8039              :             }
    8040            0 :             waiters
    8041            0 :         };
    8042            0 : 
    8043            0 :         let waiter_count = waiters.len();
    8044            0 :         match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    8045            0 :             Ok(()) => {}
    8046            0 :             Err(ReconcileWaitError::Failed(_, reconcile_error))
    8047            0 :                 if matches!(*reconcile_error, ReconcileError::Cancel) =>
    8048            0 :             {
    8049            0 :                 // Ignore reconciler cancel errors: this reconciler might have shut down
    8050            0 :                 // because some other change superseded it.  We will return a nonzero number,
    8051            0 :                 // so the caller knows they might have to call again to quiesce the system.
    8052            0 :             }
    8053            0 :             Err(e) => {
    8054            0 :                 return Err(e);
    8055              :             }
    8056              :         };
    8057              : 
    8058            0 :         tracing::info!(
    8059            0 :             "{} reconciles in reconcile_all, {} waiters",
    8060              :             reconciles_spawned,
    8061              :             waiter_count
    8062              :         );
    8063              : 
    8064            0 :         Ok(std::cmp::max(waiter_count, reconciles_spawned))
    8065            0 :     }
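                      : 
                      :     // Sketch of the intended test-side call pattern (hypothetical caller): loop
                      :     // until no reconciles are spawned, at which point the system is quiescent.
                      :     //
                      :     //     while service.reconcile_all_now().await? > 0 {}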
    8066              : 
    8067            0 :     async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
    8068            0 :         // Cancel all on-going reconciles and wait for them to exit the gate.
    8069            0 :         tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
    8070            0 :         self.reconcilers_cancel.cancel();
    8071            0 :         self.reconcilers_gate.close().await;
    8072              : 
    8073              :         // Signal the background loop in [`Service::process_results`] to exit once
    8074              :         // it has processed the results from all the reconciles we cancelled earlier.
    8075            0 :         tracing::info!("{reason}: processing results from previously in-flight reconciles");
    8076            0 :         self.result_tx.send(ReconcileResultRequest::Stop).ok();
    8077            0 :         self.result_tx.closed().await;
    8078            0 :     }
    8079              : 
    8080            0 :     pub async fn shutdown(&self) {
    8081            0 :         self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
    8082            0 :             .await;
    8083              : 
    8084              :         // Background tasks hold gate guards: this notifies them of the cancellation and
    8085              :         // waits for them all to complete.
    8086            0 :         tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
    8087            0 :         self.cancel.cancel();
    8088            0 :         self.gate.close().await;
    8089            0 :     }
    8090              : 
    8091              :     /// Spot check the download lag for a secondary location of a shard.
    8092              :     /// Should be used as a heuristic, since it's not always precise: the
    8093              :     /// secondary might not have downloaded the new heatmap yet and, hence,
    8094              :     /// is not aware of the lag.
    8095              :     ///
    8096              :     /// Returns:
    8097              :     /// * Ok(None) if the lag could not be determined from the status,
    8098              :     /// * Ok(Some(_)) if the lag could be determined,
    8099              :     /// * Err on failures to query the pageserver.
    8100            0 :     async fn secondary_lag(
    8101            0 :         &self,
    8102            0 :         secondary: &NodeId,
    8103            0 :         tenant_shard_id: TenantShardId,
    8104            0 :     ) -> Result<Option<u64>, mgmt_api::Error> {
    8105            0 :         let nodes = self.inner.read().unwrap().nodes.clone();
    8106            0 :         let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
    8107            0 :             StatusCode::NOT_FOUND,
    8108            0 :             format!("Node with id {} not found", secondary),
    8109            0 :         ))?;
    8110              : 
    8111            0 :         match node
    8112            0 :             .with_client_retries(
    8113            0 :                 |client| async move { client.tenant_secondary_status(tenant_shard_id).await },
    8114            0 :                 &self.http_client,
    8115            0 :                 &self.config.pageserver_jwt_token,
    8116            0 :                 1,
    8117            0 :                 3,
    8118            0 :                 Duration::from_millis(250),
    8119            0 :                 &self.cancel,
    8120            0 :             )
    8121            0 :             .await
    8122              :         {
    8123            0 :             Some(Ok(status)) => match status.heatmap_mtime {
    8124            0 :                 Some(_) => Ok(Some(status.bytes_total - status.bytes_downloaded)),
    8125            0 :                 None => Ok(None),
    8126              :             },
    8127            0 :             Some(Err(e)) => Err(e),
    8128            0 :             None => Err(mgmt_api::Error::Cancelled),
    8129              :         }
    8130            0 :     }
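                      : 
                      :     // Worked example (hypothetical status values): a secondary status with
                      :     // heatmap_mtime = Some(_), bytes_total = 1_000_000 and
                      :     // bytes_downloaded = 900_000 yields Ok(Some(100_000)); the same status with
                      :     // heatmap_mtime = None yields Ok(None), since without a heatmap the
                      :     // secondary cannot know what it is missing.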
    8131              : 
    8132              :     /// Drain a node by moving the shards attached to it as primaries.
    8133              :     /// This is a long running operation and it should run as a separate Tokio task.
    8134            0 :     pub(crate) async fn drain_node(
    8135            0 :         self: &Arc<Self>,
    8136            0 :         node_id: NodeId,
    8137            0 :         cancel: CancellationToken,
    8138            0 :     ) -> Result<(), OperationError> {
    8139              :         const MAX_SECONDARY_LAG_BYTES_DEFAULT: u64 = 256 * 1024 * 1024;
    8140            0 :         let max_secondary_lag_bytes = self
    8141            0 :             .config
    8142            0 :             .max_secondary_lag_bytes
    8143            0 :             .unwrap_or(MAX_SECONDARY_LAG_BYTES_DEFAULT);
    8144              : 
    8145              :         // By default, live migrations are generous about the wait time for getting
    8146              :         // the secondary location up to speed. When draining, give up earlier in order
    8147              :         // to not stall the operation when a cold secondary is encountered.
    8148              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(20);
    8149              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    8150            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    8151            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    8152            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    8153            0 :             .build();
    8154            0 : 
    8155            0 :         let mut waiters = Vec::new();
    8156            0 : 
    8157            0 :         let mut tid_iter = TenantShardIterator::new({
    8158            0 :             let service = self.clone();
    8159            0 :             move |last_inspected_shard: Option<TenantShardId>| {
    8160            0 :                 let locked = &service.inner.read().unwrap();
    8161            0 :                 let tenants = &locked.tenants;
    8162            0 :                 let entry = match last_inspected_shard {
    8163            0 :                     Some(skip_past) => {
    8164            0 :                         // Skip to the last seen tenant shard id
    8165            0 :                         let mut cursor = tenants.iter().skip_while(|(tid, _)| **tid != skip_past);
    8166            0 : 
    8167            0 :                         // Skip past the last seen
    8168            0 :                         cursor.nth(1)
    8169              :                     }
    8170            0 :                     None => tenants.first_key_value(),
    8171              :                 };
    8172              : 
    8173            0 :                 entry.map(|(tid, _)| tid).copied()
    8174            0 :             }
    8175            0 :         });
    8176              : 
    8177            0 :         while !tid_iter.finished() {
    8178            0 :             if cancel.is_cancelled() {
    8179            0 :                 match self
    8180            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8181            0 :                     .await
    8182              :                 {
    8183            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8184            0 :                     Err(err) => {
    8185            0 :                         return Err(OperationError::FinalizeError(
    8186            0 :                             format!(
    8187            0 :                                 "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
    8188            0 :                                 node_id, err
    8189            0 :                             )
    8190            0 :                             .into(),
    8191            0 :                         ));
    8192              :                     }
    8193              :                 }
    8194            0 :             }
    8195            0 : 
    8196            0 :             drain_utils::validate_node_state(&node_id, self.inner.read().unwrap().nodes.clone())?;
    8197              : 
    8198            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    8199            0 :                 let tid = match tid_iter.next() {
    8200            0 :                     Some(tid) => tid,
    8201              :                     None => {
    8202            0 :                         break;
    8203              :                     }
    8204              :                 };
    8205              : 
    8206            0 :                 let tid_drain = TenantShardDrain {
    8207            0 :                     drained_node: node_id,
    8208            0 :                     tenant_shard_id: tid,
    8209            0 :                 };
    8210              : 
    8211            0 :                 let dest_node_id = {
    8212            0 :                     let locked = self.inner.read().unwrap();
    8213            0 : 
    8214            0 :                     match tid_drain
    8215            0 :                         .tenant_shard_eligible_for_drain(&locked.tenants, &locked.scheduler)
    8216              :                     {
    8217            0 :                         Some(node_id) => node_id,
    8218              :                         None => {
    8219            0 :                             continue;
    8220              :                         }
    8221              :                     }
    8222              :                 };
    8223              : 
    8224            0 :                 match self.secondary_lag(&dest_node_id, tid).await {
    8225            0 :                     Ok(Some(lag)) if lag <= max_secondary_lag_bytes => {
    8226            0 :                         // The secondary is reasonably up to date.
    8227            0 :                         // Migrate to it
    8228            0 :                     }
    8229            0 :                     Ok(Some(lag)) => {
    8230            0 :                         tracing::info!(
    8231            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8232            0 :                             "Secondary on node {dest_node_id} is lagging by {lag}. Skipping reconcile."
    8233              :                         );
    8234            0 :                         continue;
    8235              :                     }
    8236              :                     Ok(None) => {
    8237            0 :                         tracing::info!(
    8238            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8239            0 :                             "Could not determine lag for secondary on node {dest_node_id}. Skipping reconcile."
    8240              :                         );
    8241            0 :                         continue;
    8242              :                     }
    8243            0 :                     Err(err) => {
    8244            0 :                         tracing::warn!(
    8245            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8246            0 :                             "Failed to get secondary lag from node {dest_node_id}. Skipping reconcile: {err}"
    8247              :                         );
    8248            0 :                         continue;
    8249              :                     }
    8250              :                 }
    8251              : 
    8252              :                 {
    8253            0 :                     let mut locked = self.inner.write().unwrap();
    8254            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    8255            0 :                     let rescheduled = tid_drain.reschedule_to_secondary(
    8256            0 :                         dest_node_id,
    8257            0 :                         tenants,
    8258            0 :                         scheduler,
    8259            0 :                         nodes,
    8260            0 :                     )?;
    8261              : 
    8262            0 :                     if let Some(tenant_shard) = rescheduled {
    8263            0 :                         let waiter = self.maybe_configured_reconcile_shard(
    8264            0 :                             tenant_shard,
    8265            0 :                             nodes,
    8266            0 :                             reconciler_config,
    8267            0 :                         );
    8268            0 :                         if let Some(some) = waiter {
    8269            0 :                             waiters.push(some);
    8270            0 :                         }
    8271            0 :                     }
    8272              :                 }
    8273              :             }
    8274              : 
    8275            0 :             waiters = self
    8276            0 :                 .await_waiters_remainder(waiters, WAITER_FILL_DRAIN_POLL_TIMEOUT)
    8277            0 :                 .await;
    8278              : 
    8279            0 :             failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel);
    8280              :         }
    8281              : 
    8282            0 :         while !waiters.is_empty() {
    8283            0 :             if cancel.is_cancelled() {
    8284            0 :                 match self
    8285            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8286            0 :                     .await
    8287              :                 {
    8288            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8289            0 :                     Err(err) => {
    8290            0 :                         return Err(OperationError::FinalizeError(
    8291            0 :                             format!(
    8292            0 :                                 "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
    8293            0 :                                 node_id, err
    8294            0 :                             )
    8295            0 :                             .into(),
    8296            0 :                         ));
    8297              :                     }
    8298              :                 }
    8299            0 :             }
    8300            0 : 
    8301            0 :             tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
    8302              : 
    8303            0 :             waiters = self
    8304            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    8305            0 :                 .await;
    8306              :         }
    8307              : 
    8308              :         // At this point we have done the best we could to drain shards from this node.
    8309              :         // Set the node scheduling policy to [`NodeSchedulingPolicy::PauseForRestart`]
    8310              :         // to complete the drain.
    8311            0 :         if let Err(err) = self
    8312            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
    8313            0 :             .await
    8314              :         {
    8315              :             // This is not fatal. Anything that is polling the node scheduling policy to detect
    8316              :             // the end of the drain operations will hang, but all such places should enforce an
    8317              :             // the end of the drain operation will hang, but all such places should enforce an
    8318              :             // by the counterpart fill operation.
    8319            0 :             return Err(OperationError::FinalizeError(
    8320            0 :                 format!(
    8321            0 :                     "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
    8322            0 :                 )
    8323            0 :                 .into(),
    8324            0 :             ));
    8325            0 :         }
    8326            0 : 
    8327            0 :         Ok(())
    8328            0 :     }
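                      : 
                      :     // Putting numbers on the lag gate above (illustrative only): with the
                      :     // default max_secondary_lag_bytes of 256 MiB, a secondary reporting
                      :     // bytes_total = 10 GiB and bytes_downloaded = 9.9 GiB lags by ~102 MiB and
                      :     // is migrated to; one that has downloaded only 9.5 GiB lags by 512 MiB and
                      :     // is skipped until a later pass.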
    8329              : 
    8330              :     /// Create a node fill plan (pick secondaries to promote), based on:
    8331              :     /// 1. Shards which have a secondary on this node, and this node is in their home AZ, and are currently attached to a node
    8332              :     ///    outside their home AZ, should be migrated back here.
    8333              :     /// 2. If after step 1 we have not migrated enough shards for this node to have its fair share of
    8334              :     ///    attached shards, we will promote more shards from the nodes with the most attached shards, unless
    8335              :     ///    those shards have a home AZ that doesn't match the node we're filling.
    8336            0 :     fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
    8337            0 :         let mut locked = self.inner.write().unwrap();
    8338            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    8339            0 : 
    8340            0 :         let node_az = nodes
    8341            0 :             .get(&node_id)
    8342            0 :             .expect("Node must exist")
    8343            0 :             .get_availability_zone_id()
    8344            0 :             .clone();
    8345            0 : 
    8346            0 :         // The tenant shard IDs that we plan to promote from secondary to attached on this node
    8347            0 :         let mut plan = Vec::new();
    8348            0 : 
    8349            0 :         // Collect shards which do not have a preferred AZ & are eligible for moving in step 2
    8350            0 :         let mut free_tids_by_node: HashMap<NodeId, Vec<TenantShardId>> = HashMap::new();
    8351            0 : 
    8352            0 :         // Don't respect AZ preferences if there is only one AZ.  This comes up in tests, but it could
    8353            0 :         // conceivably come up in real life if deploying a single-AZ region intentionally.
    8354            0 :         let respect_azs = nodes
    8355            0 :             .values()
    8356            0 :             .map(|n| n.get_availability_zone_id())
    8357            0 :             .unique()
    8358            0 :             .count()
    8359            0 :             > 1;
    8360              : 
    8361              :         // Step 1: collect all shards that we are required to migrate back to this node because their AZ preference
    8362              :         // requires it.
    8363            0 :         for (tsid, tenant_shard) in tenants {
    8364            0 :             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    8365              :                 // Shard doesn't have a secondary on this node, ignore it.
    8366            0 :                 continue;
    8367            0 :             }
    8368            0 : 
    8369            0 :             // AZ check: when filling nodes after a restart, our intent is to move _back_ the
    8370            0 :             // shards which belong on this node, not to promote shards whose scheduling preference
    8371            0 :             // would be on their currently attached node.  So we will avoid promoting shards whose
    8372            0 :             // home AZ doesn't match the AZ of the node we're filling.
    8373            0 :             match tenant_shard.preferred_az() {
    8374            0 :                 _ if !respect_azs => {
    8375            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    8376            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    8377            0 :                     }
    8378              :                 }
    8379              :                 None => {
    8380              :                     // Shard doesn't have an AZ preference: it is eligible to be moved, but we
    8381              :                     // will only do so if our target shard count requires it.
    8382            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    8383            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    8384            0 :                     }
    8385              :                 }
    8386            0 :                 Some(az) if az == &node_az => {
    8387              :                     // This shard's home AZ is equal to the node we're filling: it should
    8388              :                     // be moved back to this node as part of filling, unless its currently
    8389              :                     // attached location is also in its home AZ.
    8390            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    8391            0 :                         if nodes
    8392            0 :                             .get(primary)
    8393            0 :                             .expect("referenced node must exist")
    8394            0 :                             .get_availability_zone_id()
    8395            0 :                             != tenant_shard
    8396            0 :                                 .preferred_az()
    8397            0 :                                 .expect("tenant must have an AZ preference")
    8398              :                         {
    8399            0 :                             plan.push(*tsid)
    8400            0 :                         }
    8401              :                     } else {
    8402            0 :                         plan.push(*tsid)
    8403              :                     }
    8404              :                 }
    8405            0 :                 Some(_) => {
    8406            0 :                     // This shard's home AZ is somewhere other than the node we're filling,
    8407            0 :                     // it may not be moved back to this node as part of filling.  Ignore it
    8408            0 :                 }
    8409              :             }
    8410              :         }
    8411              : 
    8412              :         // Step 2: also promote any AZ-agnostic shards as required to achieve the target number of attachments
    8413            0 :         let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
    8414            0 : 
    8415            0 :         let expected_attached = locked.scheduler.expected_attached_shard_count();
    8416            0 :         let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
    8417            0 : 
    8418            0 :         let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
    8419              : 
    8420            0 :         for (node_id, attached) in nodes_by_load {
    8421            0 :             let available = locked.nodes.get(&node_id).is_some_and(|n| n.is_available());
    8422            0 :             if !available {
    8423            0 :                 continue;
    8424            0 :             }
    8425            0 : 
    8426            0 :             if plan.len() >= fill_requirement
    8427            0 :                 || free_tids_by_node.is_empty()
    8428            0 :                 || attached <= expected_attached
    8429              :             {
    8430            0 :                 break;
    8431            0 :             }
    8432            0 : 
    8433            0 :             let can_take = attached - expected_attached;
    8434            0 :             let needed = fill_requirement - plan.len();
    8435            0 :             let mut take = std::cmp::min(can_take, needed);
    8436            0 : 
    8437            0 :             let mut remove_node = false;
    8438            0 :             while take > 0 {
    8439            0 :                 match free_tids_by_node.get_mut(&node_id) {
    8440            0 :                     Some(tids) => match tids.pop() {
    8441            0 :                         Some(tid) => {
    8442            0 :                             let max_promote_for_tenant = std::cmp::max(
    8443            0 :                                 tid.shard_count.count() as usize / locked.nodes.len(),
    8444            0 :                                 1,
    8445            0 :                             );
    8446            0 :                             let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
    8447            0 :                             if *promoted < max_promote_for_tenant {
    8448            0 :                                 plan.push(tid);
    8449            0 :                                 *promoted += 1;
    8450            0 :                                 take -= 1;
    8451            0 :                             }
    8452              :                         }
    8453              :                         None => {
    8454            0 :                             remove_node = true;
    8455            0 :                             break;
    8456              :                         }
    8457              :                     },
    8458              :                     None => {
    8459            0 :                         break;
    8460              :                     }
    8461              :                 }
    8462              :             }
    8463              : 
    8464            0 :             if remove_node {
    8465            0 :                 free_tids_by_node.remove(&node_id);
    8466            0 :             }
    8467              :         }
    8468              : 
    8469            0 :         plan
    8470            0 :     }
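                      : 
                      :     // A worked example of the plan above (hypothetical topology): node N lives
                      :     // in AZ az-1 and holds secondaries for shards A, B and C. A's home AZ is
                      :     // az-1 but it is currently attached in az-2, so step 1 selects it. B has no
                      :     // AZ preference, so step 2 promotes it only if N is still short of its fair
                      :     // share of attached shards. C's home AZ is az-2, so a fill never promotes
                      :     // it onto N.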
    8471              : 
    8472              :     /// Fill a node by promoting its secondaries until the cluster is balanced
    8473              :     /// with regards to attached shard counts. Note that this operation only
    8474              :     /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
    8475              :     /// This is a long running operation and it should run as a separate Tokio task.
    8476            0 :     pub(crate) async fn fill_node(
    8477            0 :         &self,
    8478            0 :         node_id: NodeId,
    8479            0 :         cancel: CancellationToken,
    8480            0 :     ) -> Result<(), OperationError> {
    8481              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(20);
    8482              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    8483            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    8484            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    8485            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    8486            0 :             .build();
    8487            0 : 
    8488            0 :         let mut tids_to_promote = self.fill_node_plan(node_id);
    8489            0 :         let mut waiters = Vec::new();
    8490              : 
    8491              :         // Execute the plan we've composed above. Before applying each move from the plan,
    8492              :         // we validate to ensure that it has not gone stale in the meantime.
    8493            0 :         while !tids_to_promote.is_empty() {
    8494            0 :             if cancel.is_cancelled() {
    8495            0 :                 match self
    8496            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8497            0 :                     .await
    8498              :                 {
    8499            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8500            0 :                     Err(err) => {
    8501            0 :                         return Err(OperationError::FinalizeError(
    8502            0 :                             format!(
    8503            0 :                                 "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
    8504            0 :                                 node_id, err
    8505            0 :                             )
    8506            0 :                             .into(),
    8507            0 :                         ));
    8508              :                     }
    8509              :                 }
    8510            0 :             }
    8511            0 : 
    8512            0 :             {
    8513            0 :                 let mut locked = self.inner.write().unwrap();
    8514            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    8515              : 
    8516            0 :                 let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
    8517            0 :                     format!("node {node_id} was removed").into(),
    8518            0 :                 ))?;
    8519              : 
    8520            0 :                 let current_policy = node.get_scheduling();
    8521            0 :                 if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
    8522              :                     // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
    8523              :                     // about it
    8524            0 :                     return Err(OperationError::NodeStateChanged(
    8525            0 :                         format!("node {node_id} changed state to {current_policy:?}").into(),
    8526            0 :                     ));
    8527            0 :                 }
    8528              : 
    8529            0 :                 while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    8530            0 :                     if let Some(tid) = tids_to_promote.pop() {
    8531            0 :                         if let Some(tenant_shard) = tenants.get_mut(&tid) {
    8532              :                             // If the node being filled is not a secondary anymore,
    8533              :                             // skip the promotion.
    8534            0 :                             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    8535            0 :                                 continue;
    8536            0 :                             }
    8537            0 : 
    8538            0 :                             let previously_attached_to = *tenant_shard.intent.get_attached();
    8539            0 :                             match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
    8540            0 :                                 Err(e) => {
    8541            0 :                                     tracing::warn!(
    8542            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8543            0 :                                         "Scheduling error when filling pageserver {}: {e}", node_id
    8544              :                                     );
    8545              :                                 }
    8546              :                                 Ok(()) => {
    8547            0 :                                     tracing::info!(
    8548            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8549            0 :                                         "Rescheduled shard while filling node {}: {:?} -> {}",
    8550              :                                         node_id,
    8551              :                                         previously_attached_to,
    8552              :                                         node_id
    8553              :                                     );
    8554              : 
    8555            0 :                                     if let Some(waiter) = self.maybe_configured_reconcile_shard(
    8556            0 :                                         tenant_shard,
    8557            0 :                                         nodes,
    8558            0 :                                         reconciler_config,
    8559            0 :                                     ) {
    8560            0 :                                         waiters.push(waiter);
    8561            0 :                                     }
    8562              :                                 }
    8563              :                             }
    8564            0 :                         }
    8565              :                     } else {
    8566            0 :                         break;
    8567              :                     }
    8568              :                 }
    8569              :             }
    8570              : 
    8571            0 :             waiters = self
    8572            0 :                 .await_waiters_remainder(waiters, WAITER_FILL_DRAIN_POLL_TIMEOUT)
    8573            0 :                 .await;
    8574              :         }
    8575              : 
    8576            0 :         while !waiters.is_empty() {
    8577            0 :             if cancel.is_cancelled() {
    8578            0 :                 match self
    8579            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8580            0 :                     .await
    8581              :                 {
    8582            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8583            0 :                     Err(err) => {
    8584            0 :                         return Err(OperationError::FinalizeError(
    8585            0 :                             format!(
    8586            0 :                                 "Failed to finalise fill cancel of {} by setting scheduling policy to Active: {}",
    8587            0 :                                 node_id, err
    8588            0 :                             )
    8589            0 :                             .into(),
    8590            0 :                         ));
    8591              :                     }
    8592              :                 }
    8593            0 :             }
    8594            0 : 
    8595            0 :             tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
    8596              : 
    8597            0 :             waiters = self
    8598            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    8599            0 :                 .await;
    8600              :         }
    8601              : 
    8602            0 :         if let Err(err) = self
    8603            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8604            0 :             .await
    8605              :         {
    8606              :             // This isn't a huge issue since the filling process starts upon request. However, it
    8607              :             // will prevent the next drain or fill from starting. The only case in which this can fail
    8608              :             // is database unavailability. Such a case will require manual intervention.
    8609            0 :             return Err(OperationError::FinalizeError(
    8610            0 :                 format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
    8611            0 :                     .into(),
    8612            0 :             ));
    8613            0 :         }
    8614            0 : 
    8615            0 :         Ok(())
    8616            0 :     }
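
The loop above applies the fill plan in bounded batches: it tops the in-flight set up to MAX_RECONCILES_PER_OPERATION, re-validates each move against the current state, and polls the outstanding waiters before starting more. Below is a minimal sketch of that batching pattern, assuming tokio; MAX_IN_FLIGHT, start_move, and poll_remainder are illustrative stand-ins, not the storage controller's API.

    use std::time::Duration;

    const MAX_IN_FLIGHT: usize = 32;

    async fn run_plan(mut plan: Vec<u64>) {
        let mut waiters: Vec<tokio::task::JoinHandle<()>> = Vec::new();

        while !plan.is_empty() {
            // Top up to the concurrency limit before waiting.
            while waiters.len() < MAX_IN_FLIGHT {
                match plan.pop() {
                    Some(item) => waiters.push(tokio::spawn(start_move(item))),
                    None => break,
                }
            }
            // Poll briefly, keeping whatever has not finished for the next pass.
            waiters = poll_remainder(waiters, Duration::from_secs(1)).await;
        }
        // Drain the stragglers once the plan is exhausted.
        for w in waiters {
            let _ = w.await;
        }
    }

    async fn start_move(_item: u64) { /* reconcile one shard */ }

    async fn poll_remainder(
        waiters: Vec<tokio::task::JoinHandle<()>>,
        timeout: Duration,
    ) -> Vec<tokio::task::JoinHandle<()>> {
        let deadline = tokio::time::Instant::now() + timeout;
        let mut remaining = Vec::new();
        for mut w in waiters {
            // Give each waiter until the shared deadline; keep unfinished ones.
            if tokio::time::timeout_at(deadline, &mut w).await.is_err() {
                remaining.push(w);
            }
        }
        remaining
    }
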
    8617              : 
    8618              :     /// Updates scrubber metadata health check results.
    8619            0 :     pub(crate) async fn metadata_health_update(
    8620            0 :         &self,
    8621            0 :         update_req: MetadataHealthUpdateRequest,
    8622            0 :     ) -> Result<(), ApiError> {
    8623            0 :         let now = chrono::offset::Utc::now();
    8624            0 :         let (healthy_records, unhealthy_records) = {
    8625            0 :             let locked = self.inner.read().unwrap();
    8626            0 :             let healthy_records = update_req
    8627            0 :                 .healthy_tenant_shards
    8628            0 :                 .into_iter()
    8629            0 :                 // Retain only health records associated with tenant shards managed by storage controller.
    8630            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    8631            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
    8632            0 :                 .collect();
    8633            0 :             let unhealthy_records = update_req
    8634            0 :                 .unhealthy_tenant_shards
    8635            0 :                 .into_iter()
    8636            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    8637            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
    8638            0 :                 .collect();
    8639            0 : 
    8640            0 :             (healthy_records, unhealthy_records)
    8641            0 :         };
    8642            0 : 
    8643            0 :         self.persistence
    8644            0 :             .update_metadata_health_records(healthy_records, unhealthy_records, now)
    8645            0 :             .await?;
    8646            0 :         Ok(())
    8647            0 :     }
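
metadata_health_update stamps every record with a single timestamp and drops shard IDs this controller does not manage before persisting. Below is a minimal sketch of that filter-and-stamp step, using illustrative types (u64 shard IDs and a plain HashSet as the managed set), not the controller's actual persistence types.

    use std::collections::HashSet;

    struct HealthRecord {
        shard: u64,
        healthy: bool,
        last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    }

    fn build_records(
        managed: &HashSet<u64>,
        healthy: Vec<u64>,
        unhealthy: Vec<u64>,
    ) -> Vec<HealthRecord> {
        // One timestamp for the whole batch keeps it internally consistent.
        let now = chrono::Utc::now();
        healthy
            .into_iter()
            .map(|s| (s, true))
            .chain(unhealthy.into_iter().map(|s| (s, false)))
            // Retain only shards managed by this controller.
            .filter(|(s, _)| managed.contains(s))
            .map(|(shard, healthy)| HealthRecord { shard, healthy, last_scrubbed_at: now })
            .collect()
    }
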
    8648              : 
    8649              :     /// Lists the tenant shards that have unhealthy metadata status.
    8650            0 :     pub(crate) async fn metadata_health_list_unhealthy(
    8651            0 :         &self,
    8652            0 :     ) -> Result<Vec<TenantShardId>, ApiError> {
    8653            0 :         let result = self
    8654            0 :             .persistence
    8655            0 :             .list_unhealthy_metadata_health_records()
    8656            0 :             .await?
    8657            0 :             .iter()
    8658            0 :             .map(|p| p.get_tenant_shard_id().unwrap())
    8659            0 :             .collect();
    8660            0 : 
    8661            0 :         Ok(result)
    8662            0 :     }
    8663              : 
    8664              :     /// Lists the tenant shards that have not been scrubbed for some duration.
    8665            0 :     pub(crate) async fn metadata_health_list_outdated(
    8666            0 :         &self,
    8667            0 :         not_scrubbed_for: Duration,
    8668            0 :     ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
    8669            0 :         let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
    8670            0 :         let result = self
    8671            0 :             .persistence
    8672            0 :             .list_outdated_metadata_health_records(earlier)
    8673            0 :             .await?
    8674            0 :             .into_iter()
    8675            0 :             .map(|record| record.into())
    8676            0 :             .collect();
    8677            0 :         Ok(result)
    8678            0 :     }
    8679              : 
    8680            0 :     pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
    8681            0 :         self.inner.read().unwrap().get_leadership_status()
    8682            0 :     }
    8683              : 
    8684            0 :     pub(crate) async fn step_down(&self) -> GlobalObservedState {
    8685            0 :         tracing::info!("Received step down request from peer");
    8686            0 :         failpoint_support::sleep_millis_async!("sleep-on-step-down-handling");
    8687              : 
    8688            0 :         self.inner.write().unwrap().step_down();
    8689            0 : 
    8690            0 :         // Wait for reconciliations to stop, or terminate this process if they
    8691            0 :         // fail to stop in time (this indicates a bug in shutdown)
    8692            0 :         tokio::select! {
    8693            0 :             _ = self.stop_reconciliations(StopReconciliationsReason::SteppingDown) => {
    8694            0 :                 tracing::info!("Reconciliations stopped, proceeding with step down");
    8695              :             }
    8696            0 :             _ = async {
    8697            0 :                 failpoint_support::sleep_millis_async!("step-down-delay-timeout");
    8698            0 :                 tokio::time::sleep(Duration::from_secs(10)).await
    8699            0 :             } => {
    8700            0 :                 tracing::warn!("Step down timed out while waiting for reconciliation gate, terminating process");
    8701              : 
    8702              :                 // The caller may proceed to act as leader when it sees this request fail: reduce the chance
    8703              :                 // of a split-brain situation by terminating this controller instead of leaving it up in a partially-shut-down state.
    8704            0 :                 std::process::exit(1);
    8705              :             }
    8706              :         }
    8707              : 
    8708            0 :         let mut global_observed = GlobalObservedState::default();
    8709            0 :         let locked = self.inner.read().unwrap();
    8710            0 :         for (tid, tenant_shard) in locked.tenants.iter() {
    8711            0 :             global_observed
    8712            0 :                 .0
    8713            0 :                 .insert(*tid, tenant_shard.observed.clone());
    8714            0 :         }
    8715              : 
    8716            0 :         global_observed
    8717            0 :     }
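
step_down races graceful shutdown against a hard deadline and prefers terminating the process over lingering in a half-stepped-down state, since a peer that saw the request fail may already be acting as leader. Below is a minimal sketch of that pattern, assuming tokio; stop_background_work is an illustrative stand-in for the reconciliation shutdown.

    use std::time::Duration;

    async fn step_down_or_exit() {
        tokio::select! {
            _ = stop_background_work() => {
                println!("background work stopped, proceeding with step down");
            }
            _ = tokio::time::sleep(Duration::from_secs(10)) => {
                // A hung shutdown risks split brain if the peer assumes
                // leadership, so terminate the whole process instead.
                eprintln!("step down timed out, terminating");
                std::process::exit(1);
            }
        }
    }

    async fn stop_background_work() { /* wait for in-flight reconciles to stop */ }
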
    8718              : 
    8719            0 :     pub(crate) async fn update_shards_preferred_azs(
    8720            0 :         &self,
    8721            0 :         req: ShardsPreferredAzsRequest,
    8722            0 :     ) -> Result<ShardsPreferredAzsResponse, ApiError> {
    8723            0 :         let preferred_azs = req.preferred_az_ids.into_iter().collect::<Vec<_>>();
    8724            0 :         let updated = self
    8725            0 :             .persistence
    8726            0 :             .set_tenant_shard_preferred_azs(preferred_azs)
    8727            0 :             .await
    8728            0 :             .map_err(|err| {
    8729            0 :                 ApiError::InternalServerError(anyhow::anyhow!(
    8730            0 :                     "Failed to persist preferred AZs: {err}"
    8731            0 :                 ))
    8732            0 :             })?;
    8733              : 
    8734            0 :         let mut updated_in_mem_and_db = Vec::default();
    8735            0 : 
    8736            0 :         let mut locked = self.inner.write().unwrap();
    8737            0 :         let state = locked.deref_mut();
    8738            0 :         for (tid, az_id) in updated {
    8739            0 :             let shard = state.tenants.get_mut(&tid);
    8740            0 :             if let Some(shard) = shard {
    8741            0 :                 shard.set_preferred_az(&mut state.scheduler, az_id);
    8742            0 :                 updated_in_mem_and_db.push(tid);
    8743            0 :             }
    8744              :         }
    8745              : 
    8746            0 :         Ok(ShardsPreferredAzsResponse {
    8747            0 :             updated: updated_in_mem_and_db,
    8748            0 :         })
    8749            0 :     }
    8750              : }
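
Note the ordering in update_shards_preferred_azs: preferred AZs are persisted first and only then mirrored into memory, so a crash between the two steps leaves the database (from which in-memory state is rebuilt on restart) ahead of memory rather than behind it. Below is a minimal sketch of the apply step, with illustrative types rather than the controller's own.

    use std::collections::HashMap;

    // `persisted` stands in for the rows returned by the successful database
    // write; the in-memory map stands in for the scheduler state.
    fn apply_updates(
        in_memory: &mut HashMap<u64, String>,
        persisted: Vec<(u64, String)>,
    ) -> Vec<u64> {
        let mut applied = Vec::new();
        for (tenant, az) in persisted {
            // Report only tenants updated both durably and in memory.
            if let Some(slot) = in_memory.get_mut(&tenant) {
                *slot = az;
                applied.push(tenant);
            }
        }
        applied
    }
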
    8751              : 
    8752              : #[cfg(test)]
    8753              : mod tests {
    8754              :     use super::*;
    8755              : 
    8756              :     /// Tests Service::compute_split_shards. For readability, this specifies sizes in GBs rather
    8757              :     /// than bytes. Note that max_logical_size is the total logical size of the largest timeline
    8758              :     /// summed across all shards.
    8759              :     #[test]
    8760            1 :     fn compute_split_shards() {
    8761            1 :         // Size-based split: two shards have a 500 GB timeline, which needs to split into 8 shards
    8762            1 :         // that are each <= 64 GB.
    8763            1 :         assert_eq!(
    8764            1 :             Service::compute_split_shards(ShardSplitInputs {
    8765            1 :                 shard_count: ShardCount(2),
    8766            1 :                 max_logical_size: 500,
    8767            1 :                 split_threshold: 64,
    8768            1 :                 max_split_shards: 16,
    8769            1 :                 initial_split_threshold: 0,
    8770            1 :                 initial_split_shards: 0,
    8771            1 :             }),
    8772            1 :             Some(ShardCount(8))
    8773            1 :         );
    8774              : 
    8775              :         // Size-based split: noop at or below threshold, fires above.
    8776            1 :         assert_eq!(
    8777            1 :             Service::compute_split_shards(ShardSplitInputs {
    8778            1 :                 shard_count: ShardCount(2),
    8779            1 :                 max_logical_size: 127,
    8780            1 :                 split_threshold: 64,
    8781            1 :                 max_split_shards: 16,
    8782            1 :                 initial_split_threshold: 0,
    8783            1 :                 initial_split_shards: 0,
    8784            1 :             }),
    8785            1 :             None,
    8786            1 :         );
    8787            1 :         assert_eq!(
    8788            1 :             Service::compute_split_shards(ShardSplitInputs {
    8789            1 :                 shard_count: ShardCount(2),
    8790            1 :                 max_logical_size: 128,
    8791            1 :                 split_threshold: 64,
    8792            1 :                 max_split_shards: 16,
    8793            1 :                 initial_split_threshold: 0,
    8794            1 :                 initial_split_shards: 0,
    8795            1 :             }),
    8796            1 :             None,
    8797            1 :         );
    8798            1 :         assert_eq!(
    8799            1 :             Service::compute_split_shards(ShardSplitInputs {
    8800            1 :                 shard_count: ShardCount(2),
    8801            1 :                 max_logical_size: 129,
    8802            1 :                 split_threshold: 64,
    8803            1 :                 max_split_shards: 16,
    8804            1 :                 initial_split_threshold: 0,
    8805            1 :                 initial_split_shards: 0,
    8806            1 :             }),
    8807            1 :             Some(ShardCount(4)),
    8808            1 :         );
    8809              : 
    8810              :         // Size-based split: clamped to max_split_shards.
    8811            1 :         assert_eq!(
    8812            1 :             Service::compute_split_shards(ShardSplitInputs {
    8813            1 :                 shard_count: ShardCount(2),
    8814            1 :                 max_logical_size: 10000,
    8815            1 :                 split_threshold: 64,
    8816            1 :                 max_split_shards: 16,
    8817            1 :                 initial_split_threshold: 0,
    8818            1 :                 initial_split_shards: 0,
    8819            1 :             }),
    8820            1 :             Some(ShardCount(16))
    8821            1 :         );
    8822              : 
    8823              :         // Size-based split: tenant already at or beyond max_split_shards is not split.
    8824            1 :         assert_eq!(
    8825            1 :             Service::compute_split_shards(ShardSplitInputs {
    8826            1 :                 shard_count: ShardCount(16),
    8827            1 :                 max_logical_size: 10000,
    8828            1 :                 split_threshold: 64,
    8829            1 :                 max_split_shards: 16,
    8830            1 :                 initial_split_threshold: 0,
    8831            1 :                 initial_split_shards: 0,
    8832            1 :             }),
    8833            1 :             None
    8834            1 :         );
    8835              : 
    8836            1 :         assert_eq!(
    8837            1 :             Service::compute_split_shards(ShardSplitInputs {
    8838            1 :                 shard_count: ShardCount(32),
    8839            1 :                 max_logical_size: 10000,
    8840            1 :                 split_threshold: 64,
    8841            1 :                 max_split_shards: 16,
    8842            1 :                 initial_split_threshold: 0,
    8843            1 :                 initial_split_shards: 0,
    8844            1 :             }),
    8845            1 :             None
    8846            1 :         );
    8847              : 
    8848              :         // Size-based split: a non-power-of-2 shard count is normalized to a power of 2 if the
    8849              :         // per-shard size exceeds split_threshold (i.e. a 3-shard tenant splits into 8, not 6).
    8850            1 :         assert_eq!(
    8851            1 :             Service::compute_split_shards(ShardSplitInputs {
    8852            1 :                 shard_count: ShardCount(3),
    8853            1 :                 max_logical_size: 320,
    8854            1 :                 split_threshold: 64,
    8855            1 :                 max_split_shards: 16,
    8856            1 :                 initial_split_threshold: 0,
    8857            1 :                 initial_split_shards: 0,
    8858            1 :             }),
    8859            1 :             Some(ShardCount(8))
    8860            1 :         );
    8861              : 
    8862              :         // Size-based split: a non-power-of-2 shard count is not normalized to a power of 2 while the
    8863              :         // existing shards are at or below split_threshold, but splits into 4 once the per-shard size exceeds it.
    8864            1 :         assert_eq!(
    8865            1 :             Service::compute_split_shards(ShardSplitInputs {
    8866            1 :                 shard_count: ShardCount(3),
    8867            1 :                 max_logical_size: 191,
    8868            1 :                 split_threshold: 64,
    8869            1 :                 max_split_shards: 16,
    8870            1 :                 initial_split_threshold: 0,
    8871            1 :                 initial_split_shards: 0,
    8872            1 :             }),
    8873            1 :             None
    8874            1 :         );
    8875            1 :         assert_eq!(
    8876            1 :             Service::compute_split_shards(ShardSplitInputs {
    8877            1 :                 shard_count: ShardCount(3),
    8878            1 :                 max_logical_size: 192,
    8879            1 :                 split_threshold: 64,
    8880            1 :                 max_split_shards: 16,
    8881            1 :                 initial_split_threshold: 0,
    8882            1 :                 initial_split_shards: 0,
    8883            1 :             }),
    8884            1 :             None
    8885            1 :         );
    8886            1 :         assert_eq!(
    8887            1 :             Service::compute_split_shards(ShardSplitInputs {
    8888            1 :                 shard_count: ShardCount(3),
    8889            1 :                 max_logical_size: 193,
    8890            1 :                 split_threshold: 64,
    8891            1 :                 max_split_shards: 16,
    8892            1 :                 initial_split_threshold: 0,
    8893            1 :                 initial_split_shards: 0,
    8894            1 :             }),
    8895            1 :             Some(ShardCount(4))
    8896            1 :         );
    8897              : 
    8898              :         // Initial split: tenant has a 10 GB timeline, split into 4 shards.
    8899            1 :         assert_eq!(
    8900            1 :             Service::compute_split_shards(ShardSplitInputs {
    8901            1 :                 shard_count: ShardCount(1),
    8902            1 :                 max_logical_size: 10,
    8903            1 :                 split_threshold: 0,
    8904            1 :                 max_split_shards: 16,
    8905            1 :                 initial_split_threshold: 8,
    8906            1 :                 initial_split_shards: 4,
    8907            1 :             }),
    8908            1 :             Some(ShardCount(4))
    8909            1 :         );
    8910              : 
    8911              :         // Initial split: 0 ShardCount is equivalent to 1.
    8912            1 :         assert_eq!(
    8913            1 :             Service::compute_split_shards(ShardSplitInputs {
    8914            1 :                 shard_count: ShardCount(0),
    8915            1 :                 max_logical_size: 10,
    8916            1 :                 split_threshold: 0,
    8917            1 :                 max_split_shards: 16,
    8918            1 :                 initial_split_threshold: 8,
    8919            1 :                 initial_split_shards: 4,
    8920            1 :             }),
    8921            1 :             Some(ShardCount(4))
    8922            1 :         );
    8923              : 
    8924              :         // Initial split: at or below threshold is noop.
    8925            1 :         assert_eq!(
    8926            1 :             Service::compute_split_shards(ShardSplitInputs {
    8927            1 :                 shard_count: ShardCount(1),
    8928            1 :                 max_logical_size: 7,
    8929            1 :                 split_threshold: 0,
    8930            1 :                 max_split_shards: 16,
    8931            1 :                 initial_split_threshold: 8,
    8932            1 :                 initial_split_shards: 4,
    8933            1 :             }),
    8934            1 :             None,
    8935            1 :         );
    8936            1 :         assert_eq!(
    8937            1 :             Service::compute_split_shards(ShardSplitInputs {
    8938            1 :                 shard_count: ShardCount(1),
    8939            1 :                 max_logical_size: 8,
    8940            1 :                 split_threshold: 0,
    8941            1 :                 max_split_shards: 16,
    8942            1 :                 initial_split_threshold: 8,
    8943            1 :                 initial_split_shards: 4,
    8944            1 :             }),
    8945            1 :             None,
    8946            1 :         );
    8947            1 :         assert_eq!(
    8948            1 :             Service::compute_split_shards(ShardSplitInputs {
    8949            1 :                 shard_count: ShardCount(1),
    8950            1 :                 max_logical_size: 9,
    8951            1 :                 split_threshold: 0,
    8952            1 :                 max_split_shards: 16,
    8953            1 :                 initial_split_threshold: 8,
    8954            1 :                 initial_split_shards: 4,
    8955            1 :             }),
    8956            1 :             Some(ShardCount(4))
    8957            1 :         );
    8958              : 
    8959              :         // Initial split: already sharded tenant is not affected, even if above threshold and below
    8960              :         // shard count.
    8961            1 :         assert_eq!(
    8962            1 :             Service::compute_split_shards(ShardSplitInputs {
    8963            1 :                 shard_count: ShardCount(2),
    8964            1 :                 max_logical_size: 20,
    8965            1 :                 split_threshold: 0,
    8966            1 :                 max_split_shards: 16,
    8967            1 :                 initial_split_threshold: 8,
    8968            1 :                 initial_split_shards: 4,
    8969            1 :             }),
    8970            1 :             None,
    8971            1 :         );
    8972              : 
    8973              :         // Initial split: clamped to max_split_shards.
    8974            1 :         assert_eq!(
    8975            1 :             Service::compute_split_shards(ShardSplitInputs {
    8976            1 :                 shard_count: ShardCount(1),
    8977            1 :                 max_logical_size: 10,
    8978            1 :                 split_threshold: 0,
    8979            1 :                 max_split_shards: 3,
    8980            1 :                 initial_split_threshold: 8,
    8981            1 :                 initial_split_shards: 4,
    8982            1 :             }),
    8983            1 :             Some(ShardCount(3)),
    8984            1 :         );
    8985              : 
    8986              :         // Initial+size split: tenant eligible for both will use the larger shard count.
    8987            1 :         assert_eq!(
    8988            1 :             Service::compute_split_shards(ShardSplitInputs {
    8989            1 :                 shard_count: ShardCount(1),
    8990            1 :                 max_logical_size: 10,
    8991            1 :                 split_threshold: 64,
    8992            1 :                 max_split_shards: 16,
    8993            1 :                 initial_split_threshold: 8,
    8994            1 :                 initial_split_shards: 4,
    8995            1 :             }),
    8996            1 :             Some(ShardCount(4)),
    8997            1 :         );
    8998            1 :         assert_eq!(
    8999            1 :             Service::compute_split_shards(ShardSplitInputs {
    9000            1 :                 shard_count: ShardCount(1),
    9001            1 :                 max_logical_size: 500,
    9002            1 :                 split_threshold: 64,
    9003            1 :                 max_split_shards: 16,
    9004            1 :                 initial_split_threshold: 8,
    9005            1 :                 initial_split_shards: 4,
    9006            1 :             }),
    9007            1 :             Some(ShardCount(8)),
    9008            1 :         );
    9009              : 
    9010              :         // Initial+size split: sharded tenant is only eligible for size-based split.
    9011            1 :         assert_eq!(
    9012            1 :             Service::compute_split_shards(ShardSplitInputs {
    9013            1 :                 shard_count: ShardCount(2),
    9014            1 :                 max_logical_size: 200,
    9015            1 :                 split_threshold: 64,
    9016            1 :                 max_split_shards: 16,
    9017            1 :                 initial_split_threshold: 8,
    9018            1 :                 initial_split_shards: 8,
    9019            1 :             }),
    9020            1 :             Some(ShardCount(4)),
    9021            1 :         );
    9022              : 
    9023              :         // Initial+size split: uses the larger shard count even with initial_split_threshold above
    9024              :         // split_threshold.
    9025            1 :         assert_eq!(
    9026            1 :             Service::compute_split_shards(ShardSplitInputs {
    9027            1 :                 shard_count: ShardCount(1),
    9028            1 :                 max_logical_size: 10,
    9029            1 :                 split_threshold: 4,
    9030            1 :                 max_split_shards: 16,
    9031            1 :                 initial_split_threshold: 8,
    9032            1 :                 initial_split_shards: 8,
    9033            1 :             }),
    9034            1 :             Some(ShardCount(8)),
    9035            1 :         );
    9036              : 
    9037              :         // Test backwards compatibility with production settings when initial/size-based splits were
    9038              :         // rolled out: a single split into 8 shards at 64 GB. Any already sharded tenants with <8
    9039              :         // shards will split according to split_threshold.
    9040            1 :         assert_eq!(
    9041            1 :             Service::compute_split_shards(ShardSplitInputs {
    9042            1 :                 shard_count: ShardCount(1),
    9043            1 :                 max_logical_size: 65,
    9044            1 :                 split_threshold: 64,
    9045            1 :                 max_split_shards: 8,
    9046            1 :                 initial_split_threshold: 64,
    9047            1 :                 initial_split_shards: 8,
    9048            1 :             }),
    9049            1 :             Some(ShardCount(8)),
    9050            1 :         );
    9051              : 
    9052            1 :         assert_eq!(
    9053            1 :             Service::compute_split_shards(ShardSplitInputs {
    9054            1 :                 shard_count: ShardCount(1),
    9055            1 :                 max_logical_size: 64,
    9056            1 :                 split_threshold: 64,
    9057            1 :                 max_split_shards: 8,
    9058            1 :                 initial_split_threshold: 64,
    9059            1 :                 initial_split_shards: 8,
    9060            1 :             }),
    9061            1 :             None,
    9062            1 :         );
    9063              : 
    9064            1 :         assert_eq!(
    9065            1 :             Service::compute_split_shards(ShardSplitInputs {
    9066            1 :                 shard_count: ShardCount(2),
    9067            1 :                 max_logical_size: 129,
    9068            1 :                 split_threshold: 64,
    9069            1 :                 max_split_shards: 8,
    9070            1 :                 initial_split_threshold: 64,
    9071            1 :                 initial_split_shards: 8,
    9072            1 :             }),
    9073            1 :             Some(ShardCount(4)),
    9074            1 :         );
    9075            1 :     }
    9076              : }
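
The assertions above pin the splitting policy down completely. As a reading aid, here is a minimal sketch that satisfies every case in this test; it is inferred from the assertions, not taken from the controller's implementation (sizes in GB, as in the tests).

    fn compute_split_shards_sketch(
        shard_count: u8,
        max_logical_size: u64,
        split_threshold: u64,
        max_split_shards: u8,
        initial_split_threshold: u64,
        initial_split_shards: u8,
    ) -> Option<u8> {
        let shard_count = shard_count.max(1); // ShardCount(0) behaves like 1
        let mut target: u64 = 0;

        // Size-based split: fires only once the tenant exceeds split_threshold
        // per shard, then grows to the next power of two that brings every
        // shard back to <= split_threshold.
        if split_threshold > 0 && max_logical_size > split_threshold * shard_count as u64 {
            target = max_logical_size.div_ceil(split_threshold).next_power_of_two();
        }

        // Initial split: applies only to unsharded tenants strictly above the
        // threshold; when both rules fire, the larger shard count wins.
        if initial_split_threshold > 0
            && shard_count <= 1
            && max_logical_size > initial_split_threshold
        {
            target = target.max(initial_split_shards as u64);
        }

        // Clamp to the maximum, and only split if this actually adds shards.
        let target = target.min(max_split_shards as u64) as u8;
        (target > shard_count).then_some(target)
    }

In short: size-based splits grow to the next power of two that restores per-shard size to at most split_threshold, initial splits jump unsharded tenants straight to initial_split_shards, and the larger of the two candidates wins, clamped to max_split_shards.
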
        

Generated by: LCOV version 2.1-beta