LCOV - code coverage report
Current view: top level - storage_controller/src - service.rs (source / functions)
Test:      6a14b070dc6eeeeb359cfa8817925ac37a02fab4.info
Test Date: 2025-03-31 22:46:13
Coverage:  Lines: 5.8 % (321 of 5566 hit)    Functions: 0.4 % (2 of 482 hit)

            Line data    Source code
       1              : pub mod chaos_injector;
       2              : mod context_iterator;
       3              : pub(crate) mod safekeeper_reconciler;
       4              : mod safekeeper_service;
       5              : 
       6              : use std::borrow::Cow;
       7              : use std::cmp::Ordering;
       8              : use std::collections::{BTreeMap, HashMap, HashSet};
       9              : use std::error::Error;
      10              : use std::num::NonZeroU32;
      11              : use std::ops::{Deref, DerefMut};
      12              : use std::path::PathBuf;
      13              : use std::str::FromStr;
      14              : use std::sync::Arc;
      15              : use std::time::{Duration, Instant, SystemTime};
      16              : 
      17              : use anyhow::Context;
      18              : use context_iterator::TenantShardContextIterator;
      19              : use control_plane::storage_controller::{
      20              :     AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
      21              : };
      22              : use diesel::result::DatabaseErrorKind;
      23              : use futures::StreamExt;
      24              : use futures::stream::FuturesUnordered;
      25              : use http_utils::error::ApiError;
      26              : use hyper::Uri;
      27              : use itertools::Itertools;
      28              : use pageserver_api::controller_api::{
      29              :     AvailabilityZone, MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability,
      30              :     NodeRegisterRequest, NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy,
      31              :     ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse,
      32              :     TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse,
      33              :     TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
      34              :     TenantShardMigrateRequest, TenantShardMigrateResponse,
      35              : };
      36              : use pageserver_api::models::{
      37              :     self, DetachBehavior, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
      38              :     PageserverUtilization, SecondaryProgress, ShardParameters, TenantConfig,
      39              :     TenantConfigPatchRequest, TenantConfigRequest, TenantLocationConfigRequest,
      40              :     TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
      41              :     TenantShardSplitResponse, TenantSorting, TenantTimeTravelRequest,
      42              :     TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateResponseStorcon,
      43              :     TimelineInfo, TopTenantShardItem, TopTenantShardsRequest,
      44              : };
      45              : use pageserver_api::shard::{
      46              :     ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      47              : };
      48              : use pageserver_api::upcall_api::{
      49              :     ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest, ValidateResponse,
      50              :     ValidateResponseTenant,
      51              : };
      52              : use pageserver_client::{BlockUnblock, mgmt_api};
      53              : use reqwest::{Certificate, StatusCode};
      54              : use safekeeper_api::models::SafekeeperUtilization;
      55              : use safekeeper_reconciler::SafekeeperReconcilers;
      56              : use tokio::sync::TryAcquireError;
      57              : use tokio::sync::mpsc::error::TrySendError;
      58              : use tokio_util::sync::CancellationToken;
      59              : use tracing::{Instrument, debug, error, info, info_span, instrument, warn};
      60              : use utils::completion::Barrier;
      61              : use utils::generation::Generation;
      62              : use utils::id::{NodeId, TenantId, TimelineId};
      63              : use utils::lsn::Lsn;
      64              : use utils::sync::gate::Gate;
      65              : use utils::{failpoint_support, pausable_failpoint};
      66              : 
      67              : use crate::background_node_operations::{
      68              :     Drain, Fill, MAX_RECONCILES_PER_OPERATION, Operation, OperationError, OperationHandler,
      69              : };
      70              : use crate::compute_hook::{self, ComputeHook, NotifyError};
      71              : use crate::drain_utils::{self, TenantShardDrain, TenantShardIterator};
      72              : use crate::heartbeater::{Heartbeater, PageserverState, SafekeeperState};
      73              : use crate::id_lock_map::{
      74              :     IdLockMap, TracingExclusiveGuard, trace_exclusive_lock, trace_shared_lock,
      75              : };
      76              : use crate::leadership::Leadership;
      77              : use crate::metrics;
      78              : use crate::node::{AvailabilityTransition, Node};
      79              : use crate::pageserver_client::PageserverClient;
      80              : use crate::peer_client::GlobalObservedState;
      81              : use crate::persistence::split_state::SplitState;
      82              : use crate::persistence::{
      83              :     AbortShardSplitStatus, ControllerPersistence, DatabaseError, DatabaseResult,
      84              :     MetadataHealthPersistence, Persistence, ShardGenerationState, TenantFilter,
      85              :     TenantShardPersistence,
      86              : };
      87              : use crate::reconciler::{
      88              :     ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder, ReconcilerPriority,
      89              :     attached_location_conf,
      90              : };
      91              : use crate::safekeeper::Safekeeper;
      92              : use crate::scheduler::{
      93              :     AttachedShardTag, MaySchedule, ScheduleContext, ScheduleError, ScheduleMode, Scheduler,
      94              : };
      95              : use crate::tenant_shard::{
      96              :     IntentState, MigrateAttachment, ObservedState, ObservedStateDelta, ObservedStateLocation,
      97              :     ReconcileNeeded, ReconcileResult, ReconcileWaitError, ReconcilerStatus, ReconcilerWaiter,
      98              :     ScheduleOptimization, ScheduleOptimizationAction, TenantShard,
      99              : };
     100              : 
     101              : const WAITER_FILL_DRAIN_POLL_TIMEOUT: Duration = Duration::from_millis(500);
     102              : 
     103              : // For operations that should be quick, like attaching a new tenant
     104              : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
     105              : 
     106              : // For operations that might be slow, like migrating a tenant with
     107              : // some data in it.
     108              : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     109              : 
     110              : // If we receive a call using Secondary mode initially, it will omit generation.  We will initialize
     111              : // tenant shards into this generation, and as long as it remains in this generation, we will accept
     112              : // input generation from future requests as authoritative.
     113              : const INITIAL_GENERATION: Generation = Generation::new(0);
     114              : 
     115              : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
     116              : /// up on unresponsive pageservers and proceed.
     117              : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     118              : 
     119              : /// How long a node may be unresponsive to heartbeats before we declare it offline.
     120              : /// This must be long enough to cover node restarts as well as normal operations: in future
     121              : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
     122              : 
     123              : /// How long a node may be unresponsive to heartbeats during start up before we declare it
     124              : /// offline.
     125              : ///
     126              : /// This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
      127              : /// handling of the re-attach response may take a long time and block heartbeats from
     128              : /// being handled on the pageserver side.
     129              : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
     130              : 
     131              : /// How often to send heartbeats to registered nodes?
     132              : pub const HEARTBEAT_INTERVAL_DEFAULT: Duration = Duration::from_secs(5);
     133              : 
     134              : /// How long is too long for a reconciliation?
     135              : pub const LONG_RECONCILE_THRESHOLD_DEFAULT: Duration = Duration::from_secs(120);
     136              : 
     137              : #[derive(Clone, strum_macros::Display)]
     138              : enum TenantOperations {
     139              :     Create,
     140              :     LocationConfig,
     141              :     ConfigSet,
     142              :     ConfigPatch,
     143              :     TimeTravelRemoteStorage,
     144              :     Delete,
     145              :     UpdatePolicy,
     146              :     ShardSplit,
     147              :     SecondaryDownload,
     148              :     TimelineCreate,
     149              :     TimelineDelete,
     150              :     AttachHook,
     151              :     TimelineArchivalConfig,
     152              :     TimelineDetachAncestor,
     153              :     TimelineGcBlockUnblock,
     154              :     DropDetached,
     155              :     DownloadHeatmapLayers,
     156              :     TimelineLsnLease,
     157              : }
     158              : 
     159              : #[derive(Clone, strum_macros::Display)]
     160              : enum NodeOperations {
     161              :     Register,
     162              :     Configure,
     163              :     Delete,
     164              : }
     165              : 
     166              : /// The leadership status for the storage controller process.
     167              : /// Allowed transitions are:
     168              : /// 1. Leader -> SteppedDown
     169              : /// 2. Candidate -> Leader
     170              : #[derive(
     171              :     Eq,
     172              :     PartialEq,
     173              :     Copy,
     174              :     Clone,
     175              :     strum_macros::Display,
     176            0 :     strum_macros::EnumIter,
     177              :     measured::FixedCardinalityLabel,
     178              : )]
     179              : #[strum(serialize_all = "snake_case")]
     180              : pub(crate) enum LeadershipStatus {
     181              :     /// This is the steady state where the storage controller can produce
     182              :     /// side effects in the cluster.
     183              :     Leader,
     184              :     /// We've been notified to step down by another candidate. No reconciliations
     185              :     /// take place in this state.
     186              :     SteppedDown,
     187              :     /// Initial state for a new storage controller instance. Will attempt to assume leadership.
     188              :     #[allow(unused)]
     189              :     Candidate,
     190              : }
     191              : 
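A minimal sketch, not part of service.rs itself, of how the two transitions documented
above (Candidate -> Leader, Leader -> SteppedDown) could be validated; the helper name is
an assumption for illustration only:

    fn transition_allowed(from: LeadershipStatus, to: LeadershipStatus) -> bool {
        use LeadershipStatus::*;
        // Only the two documented transitions are legal; anything else is rejected.
        matches!((from, to), (Candidate, Leader) | (Leader, SteppedDown))
    }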
     192              : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
     193              : pub const PRIORITY_RECONCILER_CONCURRENCY_DEFAULT: usize = 256;
     194              : 
     195              : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
     196              : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
     197              : // than they're being pushed onto the queue.
     198              : const MAX_DELAYED_RECONCILES: usize = 10000;
     199              : 
     200              : // Top level state available to all HTTP handlers
     201              : struct ServiceState {
     202              :     leadership_status: LeadershipStatus,
     203              : 
     204              :     tenants: BTreeMap<TenantShardId, TenantShard>,
     205              : 
     206              :     nodes: Arc<HashMap<NodeId, Node>>,
     207              : 
     208              :     safekeepers: Arc<HashMap<NodeId, Safekeeper>>,
     209              : 
     210              :     safekeeper_reconcilers: SafekeeperReconcilers,
     211              : 
     212              :     scheduler: Scheduler,
     213              : 
     214              :     /// Ongoing background operation on the cluster if any is running.
     215              :     /// Note that only one such operation may run at any given time,
     216              :     /// hence the type choice.
     217              :     ongoing_operation: Option<OperationHandler>,
     218              : 
     219              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     220              :     delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     221              : }
     222              : 
     223              : /// Transform an error from a pageserver into an error to return to callers of a storage
     224              : /// controller API.
     225            0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
     226            0 :     match e {
     227            0 :         mgmt_api::Error::SendRequest(e) => {
     228            0 :             // Presume errors sending requests are connectivity/availability issues
     229            0 :             ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
     230              :         }
     231            0 :         mgmt_api::Error::ReceiveErrorBody(str) => {
     232            0 :             // Presume errors receiving body are connectivity/availability issues
     233            0 :             ApiError::ResourceUnavailable(
     234            0 :                 format!("{node} error receiving error body: {str}").into(),
     235            0 :             )
     236              :         }
     237            0 :         mgmt_api::Error::ReceiveBody(err) if err.is_decode() => {
     238            0 :             // Return 500 for decoding errors.
     239            0 :             ApiError::InternalServerError(anyhow::Error::from(err).context("error decoding body"))
     240              :         }
     241            0 :         mgmt_api::Error::ReceiveBody(err) => {
     242            0 :             // Presume errors receiving body are connectivity/availability issues except for decoding errors
     243            0 :             let src_str = err.source().map(|e| e.to_string()).unwrap_or_default();
     244            0 :             ApiError::ResourceUnavailable(
     245            0 :                 format!("{node} error receiving error body: {err} {}", src_str).into(),
     246            0 :             )
     247              :         }
     248            0 :         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
     249            0 :             ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
     250              :         }
     251            0 :         mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
     252            0 :             ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
     253              :         }
     254            0 :         mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
     255            0 :         | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
     256              :             // Auth errors talking to a pageserver are not auth errors for the caller: they are
     257              :             // internal server errors, showing that something is wrong with the pageserver or
     258              :             // storage controller's auth configuration.
     259            0 :             ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
     260              :         }
     261            0 :         mgmt_api::Error::ApiError(status @ StatusCode::TOO_MANY_REQUESTS, msg) => {
     262            0 :             // Pass through 429 errors: if pageserver is asking us to wait + retry, we in
     263            0 :             // turn ask our clients to wait + retry
     264            0 :             ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
     265              :         }
     266            0 :         mgmt_api::Error::ApiError(status, msg) => {
     267            0 :             // Presume general case of pageserver API errors is that we tried to do something
     268            0 :             // that can't be done right now.
     269            0 :             ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
     270              :         }
     271            0 :         mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
     272            0 :         mgmt_api::Error::Timeout(e) => ApiError::Timeout(e.into()),
     273              :     }
     274            0 : }
     275              : 
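A minimal illustration, not part of service.rs itself, of how two arms of the mapping
above behave; `node` is assumed to be any `Node` value, and the payload type of
`mgmt_api::Error::ApiError` is assumed to be a `String`:

    fn passthrough_examples(node: &Node) {
        // A pageserver 404 surfaces to the caller as a controller-level 404.
        let not_found =
            mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, "tenant not found".to_string());
        assert!(matches!(passthrough_api_error(node, not_found), ApiError::NotFound(_)));

        // Cancellation maps to the controller's shutting-down error.
        let cancelled = mgmt_api::Error::Cancelled;
        assert!(matches!(passthrough_api_error(node, cancelled), ApiError::ShuttingDown));
    }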
     276              : impl ServiceState {
     277            0 :     fn new(
     278            0 :         nodes: HashMap<NodeId, Node>,
     279            0 :         safekeepers: HashMap<NodeId, Safekeeper>,
     280            0 :         tenants: BTreeMap<TenantShardId, TenantShard>,
     281            0 :         scheduler: Scheduler,
     282            0 :         delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     283            0 :         initial_leadership_status: LeadershipStatus,
     284            0 :         reconcilers_cancel: CancellationToken,
     285            0 :     ) -> Self {
     286            0 :         metrics::update_leadership_status(initial_leadership_status);
     287            0 : 
     288            0 :         Self {
     289            0 :             leadership_status: initial_leadership_status,
     290            0 :             tenants,
     291            0 :             nodes: Arc::new(nodes),
     292            0 :             safekeepers: Arc::new(safekeepers),
     293            0 :             safekeeper_reconcilers: SafekeeperReconcilers::new(reconcilers_cancel),
     294            0 :             scheduler,
     295            0 :             ongoing_operation: None,
     296            0 :             delayed_reconcile_rx,
     297            0 :         }
     298            0 :     }
     299              : 
     300            0 :     fn parts_mut(
     301            0 :         &mut self,
     302            0 :     ) -> (
     303            0 :         &mut Arc<HashMap<NodeId, Node>>,
     304            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     305            0 :         &mut Scheduler,
     306            0 :     ) {
     307            0 :         (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
     308            0 :     }
     309              : 
     310              :     #[allow(clippy::type_complexity)]
     311            0 :     fn parts_mut_sk(
     312            0 :         &mut self,
     313            0 :     ) -> (
     314            0 :         &mut Arc<HashMap<NodeId, Node>>,
     315            0 :         &mut Arc<HashMap<NodeId, Safekeeper>>,
     316            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     317            0 :         &mut Scheduler,
     318            0 :     ) {
     319            0 :         (
     320            0 :             &mut self.nodes,
     321            0 :             &mut self.safekeepers,
     322            0 :             &mut self.tenants,
     323            0 :             &mut self.scheduler,
     324            0 :         )
     325            0 :     }
     326              : 
     327            0 :     fn get_leadership_status(&self) -> LeadershipStatus {
     328            0 :         self.leadership_status
     329            0 :     }
     330              : 
     331            0 :     fn step_down(&mut self) {
     332            0 :         self.leadership_status = LeadershipStatus::SteppedDown;
     333            0 :         metrics::update_leadership_status(self.leadership_status);
     334            0 :     }
     335              : 
     336            0 :     fn become_leader(&mut self) {
     337            0 :         self.leadership_status = LeadershipStatus::Leader;
     338            0 :         metrics::update_leadership_status(self.leadership_status);
     339            0 :     }
     340              : }
     341              : 
     342              : #[derive(Clone)]
     343              : pub struct Config {
     344              :     // All pageservers managed by one instance of this service must have
     345              :     // the same public key.  This JWT token will be used to authenticate
     346              :     // this service to the pageservers it manages.
     347              :     pub pageserver_jwt_token: Option<String>,
     348              : 
     349              :     // All safekeepers managed by one instance of this service must have
     350              :     // the same public key. This JWT token will be used to authenticate
     351              :     // this service to the safekeepers it manages.
     352              :     pub safekeeper_jwt_token: Option<String>,
     353              : 
     354              :     // This JWT token will be used to authenticate this service to the control plane.
     355              :     pub control_plane_jwt_token: Option<String>,
     356              : 
     357              :     // This JWT token will be used to authenticate with other storage controller instances
     358              :     pub peer_jwt_token: Option<String>,
     359              : 
     360              :     /// Where the compute hook should send notifications of pageserver attachment locations
     361              :     /// (this URL points to the control plane in prod). If this is None, the compute hook will
     362              :     /// assume it is running in a test environment and try to update neon_local.
     363              :     pub compute_hook_url: Option<String>,
     364              : 
     365              :     /// Prefix for storage API endpoints of the control plane. We use this prefix to compute
     366              :     /// URLs that we use to send pageserver and safekeeper attachment locations.
     367              :     /// If this is None, the compute hook will assume it is running in a test environment
     368              :     /// and try to invoke neon_local instead.
     369              :     ///
     370              :     /// For now, there is also `compute_hook_url` which allows configuration of the pageserver
     371              :     /// specific endpoint, but it is in the process of being phased out.
     372              :     pub control_plane_url: Option<String>,
     373              : 
     374              :     /// Grace period within which a pageserver does not respond to heartbeats, but is still
     375              :     /// considered active. Once the grace period elapses, the next heartbeat failure will
      376              :     /// mark the pageserver offline.
     377              :     pub max_offline_interval: Duration,
     378              : 
     379              :     /// Extended grace period within which pageserver may not respond to heartbeats.
     380              :     /// This extended grace period kicks in after the node has been drained for restart
     381              :     /// and/or upon handling the re-attach request from a node.
     382              :     pub max_warming_up_interval: Duration,
     383              : 
     384              :     /// How many normal-priority Reconcilers may be spawned concurrently
     385              :     pub reconciler_concurrency: usize,
     386              : 
     387              :     /// How many high-priority Reconcilers may be spawned concurrently
     388              :     pub priority_reconciler_concurrency: usize,
     389              : 
     390              :     /// How many API requests per second to allow per tenant, across all
     391              :     /// tenant-scoped API endpoints. Further API requests queue until ready.
     392              :     pub tenant_rate_limit: NonZeroU32,
     393              : 
     394              :     /// If a tenant shard's largest timeline (max_logical_size) exceeds this value, all tenant
     395              :     /// shards will be split in 2 until they fall below split_threshold (up to max_split_shards).
     396              :     ///
     397              :     /// This will greedily split into as many shards as necessary to fall below split_threshold, as
     398              :     /// powers of 2: if a tenant shard is 7 times larger than split_threshold, it will split into 8
     399              :     /// immediately, rather than first 2 then 4 then 8.
     400              :     ///
     401              :     /// None or 0 disables auto-splitting.
     402              :     ///
     403              :     /// TODO: consider using total logical size of all timelines instead.
     404              :     pub split_threshold: Option<u64>,
     405              : 
     406              :     /// The maximum number of shards a tenant can be split into during autosplits. Does not affect
     407              :     /// manual split requests. 0 or 1 disables autosplits, as we already have 1 shard.
     408              :     pub max_split_shards: u8,
     409              : 
     410              :     /// The size at which an unsharded tenant should initially split. Ingestion is significantly
     411              :     /// faster with multiple shards, so eagerly splitting below split_threshold will typically speed
     412              :     /// up initial ingestion of large tenants.
     413              :     ///
     414              :     /// This should be below split_threshold, but it is not required. If both split_threshold and
     415              :     /// initial_split_threshold qualify, the largest number of target shards will be used.
     416              :     ///
     417              :     /// Does not apply to already sharded tenants: changing initial_split_threshold or
     418              :     /// initial_split_shards is not retroactive for already-sharded tenants.
     419              :     ///
     420              :     /// None or 0 disables initial splits.
     421              :     pub initial_split_threshold: Option<u64>,
     422              : 
     423              :     /// The number of shards to split into when reaching initial_split_threshold. Will
     424              :     /// be clamped to max_split_shards.
     425              :     ///
     426              :     /// 0 or 1 disables initial splits. Has no effect if initial_split_threshold is disabled.
     427              :     pub initial_split_shards: u8,
     428              : 
      429              :     // TODO: make this cfg(feature = "testing")
     430              :     pub neon_local_repo_dir: Option<PathBuf>,
     431              : 
     432              :     // Maximum acceptable download lag for the secondary location
     433              :     // while draining a node. If the secondary location is lagging
     434              :     // by more than the configured amount, then the secondary is not
     435              :     // upgraded to primary.
     436              :     pub max_secondary_lag_bytes: Option<u64>,
     437              : 
     438              :     pub heartbeat_interval: Duration,
     439              : 
     440              :     pub address_for_peers: Option<Uri>,
     441              : 
     442              :     pub start_as_candidate: bool,
     443              : 
     444              :     pub long_reconcile_threshold: Duration,
     445              : 
     446              :     pub use_https_pageserver_api: bool,
     447              : 
     448              :     pub use_https_safekeeper_api: bool,
     449              : 
     450              :     pub ssl_ca_certs: Vec<Certificate>,
     451              : 
     452              :     pub timelines_onto_safekeepers: bool,
     453              : 
     454              :     pub use_local_compute_notifications: bool,
     455              : }
     456              : 
     457              : impl From<DatabaseError> for ApiError {
     458            0 :     fn from(err: DatabaseError) -> ApiError {
     459            0 :         match err {
     460            0 :             DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
     461              :             // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
     462              :             DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
     463            0 :                 ApiError::ShuttingDown
     464              :             }
     465            0 :             DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => {
     466            0 :                 ApiError::InternalServerError(anyhow::anyhow!(reason))
     467              :             }
     468              :         }
     469            0 :     }
     470              : }
     471              : 
     472              : enum InitialShardScheduleOutcome {
     473              :     Scheduled(TenantCreateResponseShard),
     474              :     NotScheduled,
     475              :     ShardScheduleError(ScheduleError),
     476              : }
     477              : 
     478              : pub struct Service {
     479              :     inner: Arc<std::sync::RwLock<ServiceState>>,
     480              :     config: Config,
     481              :     persistence: Arc<Persistence>,
     482              :     compute_hook: Arc<ComputeHook>,
     483              :     result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
     484              : 
     485              :     heartbeater_ps: Heartbeater<Node, PageserverState>,
     486              :     heartbeater_sk: Heartbeater<Safekeeper, SafekeeperState>,
     487              : 
     488              :     // Channel for background cleanup from failed operations that require cleanup, such as shard split
     489              :     abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
     490              : 
     491              :     // Locking on a tenant granularity (covers all shards in the tenant):
     492              :     // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
     493              :     // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
     494              :     tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
     495              : 
     496              :     // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
     497              :     // that transition it to/from Active.
     498              :     node_op_locks: IdLockMap<NodeId, NodeOperations>,
     499              : 
     500              :     // Limit how many Reconcilers we will spawn concurrently for normal-priority tasks such as background reconciliations
     501              :     // and reconciliation on startup.
     502              :     reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     503              : 
     504              :     // Limit how many Reconcilers we will spawn concurrently for high-priority tasks such as tenant/timeline CRUD, which
     505              :     // a human user might be waiting for.
     506              :     priority_reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     507              : 
     508              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     509              :     /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
     510              :     ///
     511              :     /// Note that this state logically lives inside ServiceState, but carrying Sender here makes the code simpler
     512              :     /// by avoiding needing a &mut ref to something inside the ServiceState.  This could be optimized to
     513              :     /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
     514              :     delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
     515              : 
     516              :     // Process shutdown will fire this token
     517              :     cancel: CancellationToken,
     518              : 
     519              :     // Child token of [`Service::cancel`] used by reconcilers
     520              :     reconcilers_cancel: CancellationToken,
     521              : 
     522              :     // Background tasks will hold this gate
     523              :     gate: Gate,
     524              : 
     525              :     // Reconcilers background tasks will hold this gate
     526              :     reconcilers_gate: Gate,
     527              : 
     528              :     /// This waits for initial reconciliation with pageservers to complete.  Until this barrier
     529              :     /// passes, it isn't safe to do any actions that mutate tenants.
     530              :     pub(crate) startup_complete: Barrier,
     531              : 
     532              :     /// HTTP client with proper CA certs.
     533              :     http_client: reqwest::Client,
     534              : }
     535              : 
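A minimal sketch, not part of service.rs itself, of the intended use of the bounded
`delayed_reconcile_tx` queue described above: if the channel already holds
MAX_DELAYED_RECONCILES entries the request is dropped, on the assumption that a later
background reconcile pass will revisit the shard. The free-standing helper name is an
assumption for illustration only:

    fn enqueue_delayed_reconcile_sketch(
        tx: &tokio::sync::mpsc::Sender<TenantShardId>,
        tenant_shard_id: TenantShardId,
    ) {
        match tx.try_send(tenant_shard_id) {
            Ok(()) => {
                // Queued: the shard will be reconciled once concurrency units free up.
            }
            Err(TrySendError::Full(_)) => {
                // Queue is at capacity: drop rather than grow memory without bound.
            }
            Err(TrySendError::Closed(_)) => {
                // Receiver is gone, i.e. the service is shutting down; nothing to do.
            }
        }
    }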
     536              : impl From<ReconcileWaitError> for ApiError {
     537            0 :     fn from(value: ReconcileWaitError) -> Self {
     538            0 :         match value {
     539            0 :             ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
     540            0 :             e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
     541            0 :             e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
     542              :         }
     543            0 :     }
     544              : }
     545              : 
     546              : impl From<OperationError> for ApiError {
     547            0 :     fn from(value: OperationError) -> Self {
     548            0 :         match value {
     549            0 :             OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => {
     550            0 :                 ApiError::InternalServerError(anyhow::anyhow!(err))
     551              :             }
     552            0 :             OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
     553              :         }
     554            0 :     }
     555              : }
     556              : 
     557              : #[allow(clippy::large_enum_variant)]
     558              : enum TenantCreateOrUpdate {
     559              :     Create(TenantCreateRequest),
     560              :     Update(Vec<ShardUpdate>),
     561              : }
     562              : 
     563              : struct ShardSplitParams {
     564              :     old_shard_count: ShardCount,
     565              :     new_shard_count: ShardCount,
     566              :     new_stripe_size: Option<ShardStripeSize>,
     567              :     targets: Vec<ShardSplitTarget>,
     568              :     policy: PlacementPolicy,
     569              :     config: TenantConfig,
     570              :     shard_ident: ShardIdentity,
     571              :     preferred_az_id: Option<AvailabilityZone>,
     572              : }
     573              : 
     574              : // When preparing for a shard split, we may either choose to proceed with the split,
     575              : // or find that the work is already done and return NoOp.
     576              : enum ShardSplitAction {
     577              :     Split(Box<ShardSplitParams>),
     578              :     NoOp(TenantShardSplitResponse),
     579              : }
     580              : 
     581              : // A parent shard which will be split
     582              : struct ShardSplitTarget {
     583              :     parent_id: TenantShardId,
     584              :     node: Node,
     585              :     child_ids: Vec<TenantShardId>,
     586              : }
     587              : 
      588              : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
     589              : /// might not be available.  We therefore use a queue of abort operations processed in the background.
     590              : struct TenantShardSplitAbort {
     591              :     tenant_id: TenantId,
     592              :     /// The target values from the request that failed
     593              :     new_shard_count: ShardCount,
     594              :     new_stripe_size: Option<ShardStripeSize>,
     595              :     /// Until this abort op is complete, no other operations may be done on the tenant
     596              :     _tenant_lock: TracingExclusiveGuard<TenantOperations>,
     597              : }
     598              : 
     599              : #[derive(thiserror::Error, Debug)]
     600              : enum TenantShardSplitAbortError {
     601              :     #[error(transparent)]
     602              :     Database(#[from] DatabaseError),
     603              :     #[error(transparent)]
     604              :     Remote(#[from] mgmt_api::Error),
     605              :     #[error("Unavailable")]
     606              :     Unavailable,
     607              : }
     608              : 
     609              : /// Inputs for computing a target shard count for a tenant.
     610              : struct ShardSplitInputs {
     611              :     /// Current shard count.
     612              :     shard_count: ShardCount,
     613              :     /// Total size of largest timeline summed across all shards.
     614              :     max_logical_size: u64,
     615              :     /// Size-based split threshold. Zero if size-based splits are disabled.
     616              :     split_threshold: u64,
     617              :     /// Upper bound on target shards. 0 or 1 disables splits.
     618              :     max_split_shards: u8,
     619              :     /// Initial split threshold. Zero if initial splits are disabled.
     620              :     initial_split_threshold: u64,
     621              :     /// Number of shards for initial splits. 0 or 1 disables initial splits.
     622              :     initial_split_shards: u8,
     623              : }
     624              : 
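A minimal sketch, not part of service.rs itself, of how these inputs could combine into a
target shard count under the rules documented on `Config`: greedy power-of-two size-based
splits plus one-shot initial splits for unsharded tenants, both capped at
`max_split_shards`, with the larger target winning. The function name and exact rounding
are assumptions for illustration only:

    fn target_shard_count_sketch(inputs: &ShardSplitInputs) -> Option<u8> {
        let current = inputs.shard_count.count() as u64;
        let mut target = 0u64;

        // Size-based splits: the smallest power of two that brings each shard below
        // split_threshold, e.g. 7x the threshold splits straight to 8 shards.
        if inputs.split_threshold > 0 && inputs.max_split_shards > 1 {
            let wanted = inputs
                .max_logical_size
                .div_ceil(inputs.split_threshold)
                .next_power_of_two();
            target = target.max(wanted.min(inputs.max_split_shards as u64));
        }

        // Initial splits: only considered while the tenant is still unsharded.
        if current <= 1
            && inputs.initial_split_threshold > 0
            && inputs.initial_split_shards > 1
            && inputs.max_logical_size > inputs.initial_split_threshold
        {
            target = target
                .max((inputs.initial_split_shards as u64).min(inputs.max_split_shards as u64));
        }

        // Only report a target if it would actually grow the shard count.
        (target > current).then_some(target as u8)
    }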
     625              : struct ShardUpdate {
     626              :     tenant_shard_id: TenantShardId,
     627              :     placement_policy: PlacementPolicy,
     628              :     tenant_config: TenantConfig,
     629              : 
     630              :     /// If this is None, generation is not updated.
     631              :     generation: Option<Generation>,
     632              : 
     633              :     /// If this is None, scheduling policy is not updated.
     634              :     scheduling_policy: Option<ShardSchedulingPolicy>,
     635              : }
     636              : 
     637              : enum StopReconciliationsReason {
     638              :     ShuttingDown,
     639              :     SteppingDown,
     640              : }
     641              : 
     642              : impl std::fmt::Display for StopReconciliationsReason {
     643            0 :     fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
     644            0 :         let s = match self {
     645            0 :             Self::ShuttingDown => "Shutting down",
     646            0 :             Self::SteppingDown => "Stepping down",
     647              :         };
     648            0 :         write!(writer, "{}", s)
     649            0 :     }
     650              : }
     651              : 
     652              : pub(crate) enum ReconcileResultRequest {
     653              :     ReconcileResult(ReconcileResult),
     654              :     Stop,
     655              : }
     656              : 
     657              : #[derive(Clone)]
     658              : struct MutationLocation {
     659              :     node: Node,
     660              :     generation: Generation,
     661              : }
     662              : 
     663              : #[derive(Clone)]
     664              : struct ShardMutationLocations {
     665              :     latest: MutationLocation,
     666              :     other: Vec<MutationLocation>,
     667              : }
     668              : 
     669              : #[derive(Default, Clone)]
     670              : struct TenantMutationLocations(BTreeMap<TenantShardId, ShardMutationLocations>);
     671              : 
     672              : impl Service {
     673            0 :     pub fn get_config(&self) -> &Config {
     674            0 :         &self.config
     675            0 :     }
     676              : 
     677            0 :     pub fn get_http_client(&self) -> &reqwest::Client {
     678            0 :         &self.http_client
     679            0 :     }
     680              : 
     681              :     /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
     682              :     /// view of the world, and determine which pageservers are responsive.
     683              :     #[instrument(skip_all)]
     684              :     async fn startup_reconcile(
     685              :         self: &Arc<Service>,
     686              :         current_leader: Option<ControllerPersistence>,
     687              :         leader_step_down_state: Option<GlobalObservedState>,
     688              :         bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
     689              :             Result<(), (TenantShardId, NotifyError)>,
     690              :         >,
     691              :     ) {
     692              :         // Startup reconciliation does I/O to other services: whether they
     693              :         // are responsive or not, we should aim to finish within our deadline, because:
     694              :         // - If we don't, a k8s readiness hook watching /ready will kill us.
     695              :         // - While we're waiting for startup reconciliation, we are not fully
     696              :         //   available for end user operations like creating/deleting tenants and timelines.
     697              :         //
     698              :         // We set multiple deadlines to break up the time available between the phases of work: this is
     699              :         // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
     700              :         let start_at = Instant::now();
     701              :         let node_scan_deadline = start_at
     702              :             .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
     703              :             .expect("Reconcile timeout is a modest constant");
     704              : 
     705              :         let observed = if let Some(state) = leader_step_down_state {
     706              :             tracing::info!(
     707              :                 "Using observed state received from leader at {}",
     708              :                 current_leader.as_ref().unwrap().address
     709              :             );
     710              : 
     711              :             state
     712              :         } else {
     713              :             self.build_global_observed_state(node_scan_deadline).await
     714              :         };
     715              : 
     716              :         // Accumulate a list of any tenant locations that ought to be detached
     717              :         let mut cleanup = Vec::new();
     718              : 
     719              :         // Send initial heartbeat requests to all nodes loaded from the database
     720              :         let all_nodes = {
     721              :             let locked = self.inner.read().unwrap();
     722              :             locked.nodes.clone()
     723              :         };
     724              :         let (mut nodes_online, mut sks_online) =
     725              :             self.initial_heartbeat_round(all_nodes.keys()).await;
     726              : 
     727              :         // List of tenants for which we will attempt to notify compute of their location at startup
     728              :         let mut compute_notifications = Vec::new();
     729              : 
     730              :         // Populate intent and observed states for all tenants, based on reported state on pageservers
     731              :         tracing::info!("Populating tenant shards' states from initial pageserver scan...");
     732              :         let shard_count = {
     733              :             let mut locked = self.inner.write().unwrap();
     734              :             let (nodes, safekeepers, tenants, scheduler) = locked.parts_mut_sk();
     735              : 
     736              :             // Mark nodes online if they responded to us: nodes are offline by default after a restart.
     737              :             let mut new_nodes = (**nodes).clone();
     738              :             for (node_id, node) in new_nodes.iter_mut() {
     739              :                 if let Some(utilization) = nodes_online.remove(node_id) {
     740              :                     node.set_availability(NodeAvailability::Active(utilization));
     741              :                     scheduler.node_upsert(node);
     742              :                 }
     743              :             }
     744              :             *nodes = Arc::new(new_nodes);
     745              : 
     746              :             let mut new_sks = (**safekeepers).clone();
     747              :             for (node_id, node) in new_sks.iter_mut() {
     748              :                 if let Some((utilization, last_seen_at)) = sks_online.remove(node_id) {
     749              :                     node.set_availability(SafekeeperState::Available {
     750              :                         utilization,
     751              :                         last_seen_at,
     752              :                     });
     753              :                 }
     754              :             }
     755              :             *safekeepers = Arc::new(new_sks);
     756              : 
     757              :             for (tenant_shard_id, observed_state) in observed.0 {
     758              :                 let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
     759              :                     for node_id in observed_state.locations.keys() {
     760              :                         cleanup.push((tenant_shard_id, *node_id));
     761              :                     }
     762              : 
     763              :                     continue;
     764              :                 };
     765              : 
     766              :                 tenant_shard.observed = observed_state;
     767              :             }
     768              : 
     769              :             // Populate each tenant's intent state
     770              :             let mut schedule_context = ScheduleContext::default();
     771              :             for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
     772              :                 if tenant_shard_id.shard_number == ShardNumber(0) {
     773              :                     // Reset scheduling context each time we advance to the next Tenant
     774              :                     schedule_context = ScheduleContext::default();
     775              :                 }
     776              : 
     777              :                 tenant_shard.intent_from_observed(scheduler);
     778              :                 if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
     779              :                     // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
     780              :                     // not enough pageservers are available.  The tenant may well still be available
     781              :                     // to clients.
     782              :                     tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
     783              :                 } else {
     784              :                     // If we're both intending and observed to be attached at a particular node, we will
     785              :                     // emit a compute notification for this. In the case where our observed state does not
     786              :                     // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
     787              :                     if let Some(attached_at) = tenant_shard.stably_attached() {
     788              :                         compute_notifications.push(compute_hook::ShardUpdate {
     789              :                             tenant_shard_id: *tenant_shard_id,
     790              :                             node_id: attached_at,
     791              :                             stripe_size: tenant_shard.shard.stripe_size,
     792              :                             preferred_az: tenant_shard
     793              :                                 .preferred_az()
     794            0 :                                 .map(|az| Cow::Owned(az.clone())),
     795              :                         });
     796              :                     }
     797              :                 }
     798              :             }
     799              : 
     800              :             tenants.len()
     801              :         };
     802              : 
      803              :         // Before making any observable changes to the cluster, persist self
     804              :         // as leader in database and memory.
     805              :         let leadership = Leadership::new(
     806              :             self.persistence.clone(),
     807              :             self.config.clone(),
     808              :             self.cancel.child_token(),
     809              :         );
     810              : 
     811              :         if let Err(e) = leadership.become_leader(current_leader).await {
     812              :             tracing::error!("Failed to persist self as leader: {e}. Aborting start-up ...");
     813              :             std::process::exit(1);
     814              :         }
     815              : 
     816              :         let safekeepers = self.inner.read().unwrap().safekeepers.clone();
     817              :         let sk_schedule_requests =
     818              :             match safekeeper_reconciler::load_schedule_requests(self, &safekeepers).await {
     819              :                 Ok(v) => v,
     820              :                 Err(e) => {
     821              :                     tracing::warn!(
     822              :                         "Failed to load safekeeper pending ops at startup: {e}." // Don't abort for now: " Aborting start-up..."
     823              :                     );
     824              :                     // std::process::exit(1);
     825              :                     Vec::new()
     826              :                 }
     827              :             };
     828              : 
     829              :         {
     830              :             let mut locked = self.inner.write().unwrap();
     831              :             locked.become_leader();
     832              : 
     833              :             locked
     834              :                 .safekeeper_reconcilers
     835              :                 .schedule_request_vec(self, sk_schedule_requests);
     836              :         }
     837              : 
     838              :         // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
     839              :         // generation_pageserver in the database.
     840              : 
     841              :         // Emit compute hook notifications for all tenants which are already stably attached.  Other tenants
     842              :         // will emit compute hook notifications when they reconcile.
     843              :         //
     844              :         // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later
     845              :         // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
     846              :         // calls will be correctly ordered wrt these.
     847              :         //
     848              :         // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them
     849              :         // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore
     850              :         // unit and start doing I/O.
     851              :         tracing::info!(
     852              :             "Sending {} compute notifications",
     853              :             compute_notifications.len()
     854              :         );
     855              :         self.compute_hook.notify_background(
     856              :             compute_notifications,
     857              :             bg_compute_notify_result_tx.clone(),
     858              :             &self.cancel,
     859              :         );
     860              : 
     861              :         // Finally, now that the service is up and running, launch reconcile operations for any tenants
     862              :         // which require it: under normal circumstances this should only include tenants that were in some
     863              :         // transient state before we restarted, or any tenants whose compute hooks failed above.
     864              :         tracing::info!("Checking for shards in need of reconciliation...");
     865              :         let reconcile_tasks = self.reconcile_all();
     866              :         // We will not wait for these reconciliation tasks to run here: we're now done with startup and
     867              :         // normal operations may proceed.
     868              : 
     869              :         // Clean up any tenants that were found on pageservers but are not known to us.  Do this in the
     870              :         // background because it does not need to complete in order to proceed with other work.
     871              :         if !cleanup.is_empty() {
     872              :             tracing::info!("Cleaning up {} locations in the background", cleanup.len());
     873              :             tokio::task::spawn({
     874              :                 let cleanup_self = self.clone();
     875            0 :                 async move { cleanup_self.cleanup_locations(cleanup).await }
     876              :             });
     877              :         }
     878              : 
     879              :         tracing::info!(
     880              :             "Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)"
     881              :         );
     882              :     }
     883              : 
     884            0 :     async fn initial_heartbeat_round<'a>(
     885            0 :         &self,
     886            0 :         node_ids: impl Iterator<Item = &'a NodeId>,
     887            0 :     ) -> (
     888            0 :         HashMap<NodeId, PageserverUtilization>,
     889            0 :         HashMap<NodeId, (SafekeeperUtilization, Instant)>,
     890            0 :     ) {
     891            0 :         assert!(!self.startup_complete.is_ready());
     892              : 
     893            0 :         let all_nodes = {
     894            0 :             let locked = self.inner.read().unwrap();
     895            0 :             locked.nodes.clone()
     896            0 :         };
     897            0 : 
     898            0 :         let mut nodes_to_heartbeat = HashMap::new();
     899            0 :         for node_id in node_ids {
     900            0 :             match all_nodes.get(node_id) {
     901            0 :                 Some(node) => {
     902            0 :                     nodes_to_heartbeat.insert(*node_id, node.clone());
     903            0 :                 }
     904              :                 None => {
     905            0 :                     tracing::warn!("Node {node_id} was removed during start-up");
     906              :                 }
     907              :             }
     908              :         }
     909              : 
     910            0 :         let all_sks = {
     911            0 :             let locked = self.inner.read().unwrap();
     912            0 :             locked.safekeepers.clone()
     913            0 :         };
     914            0 : 
     915            0 :         tracing::info!("Sending initial heartbeats...");
     916            0 :         let (res_ps, res_sk) = tokio::join!(
     917            0 :             self.heartbeater_ps.heartbeat(Arc::new(nodes_to_heartbeat)),
     918            0 :             self.heartbeater_sk.heartbeat(all_sks)
     919            0 :         );
     920              : 
     921            0 :         let mut online_nodes = HashMap::new();
     922            0 :         if let Ok(deltas) = res_ps {
     923            0 :             for (node_id, status) in deltas.0 {
     924            0 :                 match status {
     925            0 :                     PageserverState::Available { utilization, .. } => {
     926            0 :                         online_nodes.insert(node_id, utilization);
     927            0 :                     }
     928            0 :                     PageserverState::Offline => {}
     929              :                     PageserverState::WarmingUp { .. } => {
     930            0 :                         unreachable!("Nodes are never marked warming-up during startup reconcile")
     931              :                     }
     932              :                 }
     933              :             }
     934            0 :         }
     935              : 
     936            0 :         let mut online_sks = HashMap::new();
     937            0 :         if let Ok(deltas) = res_sk {
     938            0 :             for (node_id, status) in deltas.0 {
     939            0 :                 match status {
     940              :                     SafekeeperState::Available {
     941            0 :                         utilization,
     942            0 :                         last_seen_at,
     943            0 :                     } => {
     944            0 :                         online_sks.insert(node_id, (utilization, last_seen_at));
     945            0 :                     }
     946            0 :                     SafekeeperState::Offline => {}
     947              :                 }
     948              :             }
     949            0 :         }
     950              : 
     951            0 :         (online_nodes, online_sks)
     952            0 :     }
     953              : 
     954              :     /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
     955              :     ///
      956              :     /// The result includes only nodes which responded within the deadline.
     957            0 :     async fn scan_node_locations(
     958            0 :         &self,
     959            0 :         deadline: Instant,
     960            0 :     ) -> HashMap<NodeId, LocationConfigListResponse> {
     961            0 :         let nodes = {
     962            0 :             let locked = self.inner.read().unwrap();
     963            0 :             locked.nodes.clone()
     964            0 :         };
     965            0 : 
     966            0 :         let mut node_results = HashMap::new();
     967            0 : 
     968            0 :         let mut node_list_futs = FuturesUnordered::new();
     969            0 : 
     970            0 :         tracing::info!("Scanning shards on {} nodes...", nodes.len());
     971            0 :         for node in nodes.values() {
     972            0 :             node_list_futs.push({
     973            0 :                 async move {
     974            0 :                     tracing::info!("Scanning shards on node {node}...");
     975            0 :                     let timeout = Duration::from_secs(5);
     976            0 :                     let response = node
     977            0 :                         .with_client_retries(
     978            0 :                             |client| async move { client.list_location_config().await },
     979            0 :                             &self.http_client,
     980            0 :                             &self.config.pageserver_jwt_token,
     981            0 :                             1,
     982            0 :                             5,
     983            0 :                             timeout,
     984            0 :                             &self.cancel,
     985            0 :                         )
     986            0 :                         .await;
     987            0 :                     (node.get_id(), response)
     988            0 :                 }
     989            0 :             });
     990            0 :         }
     991              : 
     992              :         loop {
     993            0 :             let (node_id, result) = tokio::select! {
     994            0 :                 next = node_list_futs.next() => {
     995            0 :                     match next {
     996            0 :                         Some(result) => result,
     997              :                         None =>{
     998              :                             // We got results for all our nodes
     999            0 :                             break;
    1000              :                         }
    1001              : 
    1002              :                     }
    1003              :                 },
    1004            0 :                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
    1005              :                     // Give up waiting for anyone who hasn't responded: we will yield the results that we have
    1006            0 :                     tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
    1007            0 :                     break;
    1008              :                 }
    1009              :             };
    1010              : 
    1011            0 :             let Some(list_response) = result else {
    1012            0 :                 tracing::info!("Shutdown during startup_reconcile");
    1013            0 :                 break;
    1014              :             };
    1015              : 
    1016            0 :             match list_response {
    1017            0 :                 Err(e) => {
    1018            0 :                     tracing::warn!("Could not scan node {} ({e})", node_id);
    1019              :                 }
    1020            0 :                 Ok(listing) => {
    1021            0 :                     node_results.insert(node_id, listing);
    1022            0 :                 }
    1023              :             }
    1024              :         }
    1025              : 
    1026            0 :         node_results
    1027            0 :     }
    1028              : 
    1029            0 :     async fn build_global_observed_state(&self, deadline: Instant) -> GlobalObservedState {
    1030            0 :         let node_listings = self.scan_node_locations(deadline).await;
    1031            0 :         let mut observed = GlobalObservedState::default();
    1032              : 
    1033            0 :         for (node_id, location_confs) in node_listings {
    1034            0 :             tracing::info!(
    1035            0 :                 "Received {} shard statuses from pageserver {}",
    1036            0 :                 location_confs.tenant_shards.len(),
    1037              :                 node_id
    1038              :             );
    1039              : 
    1040            0 :             for (tid, location_conf) in location_confs.tenant_shards {
    1041            0 :                 let entry = observed.0.entry(tid).or_default();
    1042            0 :                 entry.locations.insert(
    1043            0 :                     node_id,
    1044            0 :                     ObservedStateLocation {
    1045            0 :                         conf: location_conf,
    1046            0 :                     },
    1047            0 :                 );
    1048            0 :             }
    1049              :         }
    1050              : 
    1051            0 :         observed
    1052            0 :     }
    1053              : 
    1054              :     /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
    1055              :     ///
     1056              :     /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
     1057              :     /// tenants, then it is probably something that was incompletely deleted earlier: we will not race with
     1058              :     /// any other task trying to attach it.
    1059              :     #[instrument(skip_all)]
    1060              :     async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
    1061              :         let nodes = self.inner.read().unwrap().nodes.clone();
    1062              : 
    1063              :         for (tenant_shard_id, node_id) in cleanup {
    1064              :             // A node reported a tenant_shard_id which is unknown to us: detach it.
    1065              :             let Some(node) = nodes.get(&node_id) else {
    1066              :                 // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
    1067              :                 // a location to clean up on a node that has since been removed.
    1068              :                 tracing::info!(
    1069              :                     "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
    1070              :                 );
    1071              :                 continue;
    1072              :             };
    1073              : 
    1074              :             if self.cancel.is_cancelled() {
    1075              :                 break;
    1076              :             }
    1077              : 
    1078              :             let client = PageserverClient::new(
    1079              :                 node.get_id(),
    1080              :                 self.http_client.clone(),
    1081              :                 node.base_url(),
    1082              :                 self.config.pageserver_jwt_token.as_deref(),
    1083              :             );
    1084              :             match client
    1085              :                 .location_config(
    1086              :                     tenant_shard_id,
    1087              :                     LocationConfig {
    1088              :                         mode: LocationConfigMode::Detached,
    1089              :                         generation: None,
    1090              :                         secondary_conf: None,
    1091              :                         shard_number: tenant_shard_id.shard_number.0,
    1092              :                         shard_count: tenant_shard_id.shard_count.literal(),
    1093              :                         shard_stripe_size: 0,
    1094              :                         tenant_conf: models::TenantConfig::default(),
    1095              :                     },
    1096              :                     None,
    1097              :                     false,
    1098              :                 )
    1099              :                 .await
    1100              :             {
    1101              :                 Ok(()) => {
    1102              :                     tracing::info!(
    1103              :                         "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
    1104              :                     );
    1105              :                 }
    1106              :                 Err(e) => {
    1107              :                     // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
    1108              :                     // break anything.
    1109              :                     tracing::error!(
    1110              :                         "Failed to detach unknown shard {tenant_shard_id} on pageserver {node_id}: {e}"
    1111              :                     );
    1112              :                 }
    1113              :             }
    1114              :         }
    1115              :     }
    1116              : 
    1117              :     /// Long running background task that periodically wakes up and looks for shards that need
    1118              :     /// reconciliation.  Reconciliation is fallible, so any reconciliation tasks that fail during
    1119              :     /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
    1120              :     /// for those retries.
    1121              :     #[instrument(skip_all)]
    1122              :     async fn background_reconcile(self: &Arc<Self>) {
    1123              :         self.startup_complete.clone().wait().await;
    1124              : 
    1125              :         const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
    1126              :         let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
    1127              :         while !self.reconcilers_cancel.is_cancelled() {
    1128              :             tokio::select! {
    1129              :               _ = interval.tick() => {
    1130              :                 let reconciles_spawned = self.reconcile_all();
    1131              :                 if reconciles_spawned == 0 {
    1132              :                     // Run optimizer only when we didn't find any other work to do
    1133              :                     self.optimize_all().await;
    1134              :                 }
    1135              :                 // Always attempt autosplits. Sharding is crucial for bulk ingest performance, so we
    1136              :                 // must be responsive when new projects begin ingesting and reach the threshold.
    1137              :                 self.autosplit_tenants().await;
    1138              :             }
    1139              :               _ = self.reconcilers_cancel.cancelled() => return
    1140              :             }
    1141              :         }
    1142              :     }
    1143              :     /// Heartbeat all storage nodes once in a while.
    1144              :     #[instrument(skip_all)]
    1145              :     async fn spawn_heartbeat_driver(&self) {
    1146              :         self.startup_complete.clone().wait().await;
    1147              : 
    1148              :         let mut interval = tokio::time::interval(self.config.heartbeat_interval);
    1149              :         while !self.cancel.is_cancelled() {
    1150              :             tokio::select! {
    1151              :               _ = interval.tick() => { }
    1152              :               _ = self.cancel.cancelled() => return
    1153              :             };
    1154              : 
    1155              :             let nodes = {
    1156              :                 let locked = self.inner.read().unwrap();
    1157              :                 locked.nodes.clone()
    1158              :             };
    1159              : 
    1160              :             let safekeepers = {
    1161              :                 let locked = self.inner.read().unwrap();
    1162              :                 locked.safekeepers.clone()
    1163              :             };
    1164              : 
    1165              :             let (res_ps, res_sk) = tokio::join!(
    1166              :                 self.heartbeater_ps.heartbeat(nodes),
    1167              :                 self.heartbeater_sk.heartbeat(safekeepers)
    1168              :             );
    1169              : 
    1170              :             if let Ok(deltas) = res_ps {
    1171              :                 let mut to_handle = Vec::default();
    1172              : 
    1173              :                 for (node_id, state) in deltas.0 {
    1174              :                     let new_availability = match state {
    1175              :                         PageserverState::Available { utilization, .. } => {
    1176              :                             NodeAvailability::Active(utilization)
    1177              :                         }
    1178              :                         PageserverState::WarmingUp { started_at } => {
    1179              :                             NodeAvailability::WarmingUp(started_at)
    1180              :                         }
    1181              :                         PageserverState::Offline => {
    1182              :                             // The node might have been placed in the WarmingUp state
    1183              :                             // while the heartbeat round was on-going. Hence, filter out
    1184              :                             // offline transitions for WarmingUp nodes that are still within
    1185              :                             // their grace period.
    1186              :                             if let Ok(NodeAvailability::WarmingUp(started_at)) = self
    1187              :                                 .get_node(node_id)
    1188              :                                 .await
    1189              :                                 .as_ref()
    1190            0 :                                 .map(|n| n.get_availability())
    1191              :                             {
    1192              :                                 let now = Instant::now();
    1193              :                                 if now - *started_at >= self.config.max_warming_up_interval {
    1194              :                                     NodeAvailability::Offline
    1195              :                                 } else {
    1196              :                                     NodeAvailability::WarmingUp(*started_at)
    1197              :                                 }
    1198              :                             } else {
    1199              :                                 NodeAvailability::Offline
    1200              :                             }
    1201              :                         }
    1202              :                     };
    1203              : 
    1204              :                     let node_lock = trace_exclusive_lock(
    1205              :                         &self.node_op_locks,
    1206              :                         node_id,
    1207              :                         NodeOperations::Configure,
    1208              :                     )
    1209              :                     .await;
    1210              : 
    1211              :                     pausable_failpoint!("heartbeat-pre-node-state-configure");
    1212              : 
     1213              :                     // This is the code path for genuine availability transitions (i.e. node
    1214              :                     // goes unavailable and/or comes back online).
    1215              :                     let res = self
    1216              :                         .node_state_configure(node_id, Some(new_availability), None, &node_lock)
    1217              :                         .await;
    1218              : 
    1219              :                     match res {
    1220              :                         Ok(transition) => {
    1221              :                             // Keep hold of the lock until the availability transitions
    1222              :                             // have been handled in
     1223              :                             // [`Service::handle_node_availability_transitions`] in order to avoid
    1224              :                             // racing with [`Service::external_node_configure`].
    1225              :                             to_handle.push((node_id, node_lock, transition));
    1226              :                         }
    1227              :                         Err(ApiError::NotFound(_)) => {
    1228              :                             // This should be rare, but legitimate since the heartbeats are done
    1229              :                             // on a snapshot of the nodes.
    1230              :                             tracing::info!("Node {} was not found after heartbeat round", node_id);
    1231              :                         }
    1232              :                         Err(ApiError::ShuttingDown) => {
    1233              :                             // No-op: we're shutting down, no need to try and update any nodes' statuses
    1234              :                         }
    1235              :                         Err(err) => {
    1236              :                             // Transition to active involves reconciling: if a node responds to a heartbeat then
    1237              :                             // becomes unavailable again, we may get an error here.
    1238              :                             tracing::error!(
    1239              :                                 "Failed to update node state {} after heartbeat round: {}",
    1240              :                                 node_id,
    1241              :                                 err
    1242              :                             );
    1243              :                         }
    1244              :                     }
    1245              :                 }
    1246              : 
    1247              :                 // We collected all the transitions above and now we handle them.
    1248              :                 let res = self.handle_node_availability_transitions(to_handle).await;
    1249              :                 if let Err(errs) = res {
    1250              :                     for (node_id, err) in errs {
    1251              :                         match err {
    1252              :                             ApiError::NotFound(_) => {
    1253              :                                 // This should be rare, but legitimate since the heartbeats are done
    1254              :                                 // on a snapshot of the nodes.
    1255              :                                 tracing::info!(
    1256              :                                     "Node {} was not found after heartbeat round",
    1257              :                                     node_id
    1258              :                                 );
    1259              :                             }
    1260              :                             err => {
    1261              :                                 tracing::error!(
    1262              :                                     "Failed to handle availability transition for {} after heartbeat round: {}",
    1263              :                                     node_id,
    1264              :                                     err
    1265              :                                 );
    1266              :                             }
    1267              :                         }
    1268              :                     }
    1269              :                 }
    1270              :             }
    1271              :             if let Ok(deltas) = res_sk {
    1272              :                 let mut locked = self.inner.write().unwrap();
    1273              :                 let mut safekeepers = (*locked.safekeepers).clone();
    1274              :                 for (id, state) in deltas.0 {
    1275              :                     let Some(sk) = safekeepers.get_mut(&id) else {
    1276              :                         tracing::info!(
     1277              :                             "Couldn't update safekeeper state for id {id} from heartbeat={state:?}"
    1278              :                         );
    1279              :                         continue;
    1280              :                     };
    1281              :                     sk.set_availability(state);
    1282              :                 }
    1283              :                 locked.safekeepers = Arc::new(safekeepers);
    1284              :             }
    1285              :         }
    1286              :     }
    1287              : 
    1288              :     /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
    1289              :     /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
    1290              :     /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
    1291              :     /// will indicate that reconciliation is not needed.
    1292              :     #[instrument(skip_all, fields(
    1293              :         seq=%result.sequence,
    1294              :         tenant_id=%result.tenant_shard_id.tenant_id,
    1295              :         shard_id=%result.tenant_shard_id.shard_slug(),
    1296              :     ))]
    1297              :     fn process_result(&self, result: ReconcileResult) {
    1298              :         let mut locked = self.inner.write().unwrap();
    1299              :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    1300              :         let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
    1301              :             // A reconciliation result might race with removing a tenant: drop results for
    1302              :             // tenants that aren't in our map.
    1303              :             return;
    1304              :         };
    1305              : 
    1306              :         // Usually generation should only be updated via this path, so the max() isn't
     1307              :         // needed, but it is used to handle out-of-band updates, e.g. from a test hook.
    1308              :         tenant.generation = std::cmp::max(tenant.generation, result.generation);
    1309              : 
    1310              :         // If the reconciler signals that it failed to notify compute, set this state on
    1311              :         // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
    1312              :         tenant.pending_compute_notification = result.pending_compute_notification;
    1313              : 
    1314              :         // Let the TenantShard know it is idle.
    1315              :         tenant.reconcile_complete(result.sequence);
    1316              : 
    1317              :         // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
    1318              :         // make to the tenant
    1319            0 :         let deltas = result.observed_deltas.into_iter().flat_map(|delta| {
    1322            0 :             let node = nodes.get(delta.node_id())?;
    1323              : 
    1324            0 :             if node.is_available() {
    1325            0 :                 return Some(delta);
    1326            0 :             }
    1327            0 : 
    1328            0 :             // In case a node became unavailable concurrently with the reconcile, observed
    1329            0 :             // locations on it are now uncertain. By convention, set them to None in order
    1330            0 :             // for them to get refreshed when the node comes back online.
    1331            0 :             Some(ObservedStateDelta::Upsert(Box::new((
    1332            0 :                 node.get_id(),
    1333            0 :                 ObservedStateLocation { conf: None },
    1334            0 :             ))))
    1335            0 :         });
    1336              : 
    1337              :         match result.result {
    1338              :             Ok(()) => {
    1339              :                 tenant.apply_observed_deltas(deltas);
    1340              :                 tenant.waiter.advance(result.sequence);
    1341              :             }
    1342              :             Err(e) => {
    1343              :                 match e {
    1344              :                     ReconcileError::Cancel => {
    1345              :                         tracing::info!("Reconciler was cancelled");
    1346              :                     }
    1347              :                     ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    1348              :                         // This might be due to the reconciler getting cancelled, or it might
    1349              :                         // be due to the `Node` being marked offline.
    1350              :                         tracing::info!("Reconciler cancelled during pageserver API call");
    1351              :                     }
    1352              :                     _ => {
    1353              :                         tracing::warn!("Reconcile error: {}", e);
    1354              :                     }
    1355              :                 }
    1356              : 
    1357              :                 // Ordering: populate last_error before advancing error_seq,
    1358              :                 // so that waiters will see the correct error after waiting.
    1359              :                 tenant.set_last_error(result.sequence, e);
    1360              : 
    1361              :                 // Skip deletions on reconcile failures
    1362              :                 let upsert_deltas =
    1363            0 :                     deltas.filter(|delta| matches!(delta, ObservedStateDelta::Upsert(_)));
    1364              :                 tenant.apply_observed_deltas(upsert_deltas);
    1365              :             }
    1366              :         }
    1367              : 
    1368              :         // If we just finished detaching all shards for a tenant, it might be time to drop it from memory.
    1369              :         if tenant.policy == PlacementPolicy::Detached {
    1370              :             // We may only drop a tenant from memory while holding the exclusive lock on the tenant ID: this protects us
    1371              :             // from concurrent execution wrt a request handler that might expect the tenant to remain in memory for the
    1372              :             // duration of the request.
    1373              :             let guard = self.tenant_op_locks.try_exclusive(
    1374              :                 tenant.tenant_shard_id.tenant_id,
    1375              :                 TenantOperations::DropDetached,
    1376              :             );
    1377              :             if let Some(guard) = guard {
    1378              :                 self.maybe_drop_tenant(tenant.tenant_shard_id.tenant_id, &mut locked, &guard);
    1379              :             }
    1380              :         }
    1381              : 
    1382              :         // Maybe some other work can proceed now that this job finished.
    1383              :         //
    1384              :         // Only bother with this if we have some semaphore units available in the normal-priority semaphore (these
     1385              :         // reconciles are scheduled at [`ReconcilerPriority::Normal`]).
    1386              :         if self.reconciler_concurrency.available_permits() > 0 {
    1387              :             while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
    1388              :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    1389              :                 if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
    1390              :                     shard.delayed_reconcile = false;
    1391              :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    1392              :                 }
    1393              : 
    1394              :                 if self.reconciler_concurrency.available_permits() == 0 {
    1395              :                     break;
    1396              :                 }
    1397              :             }
    1398              :         }
    1399              :     }
    1400              : 
    1401            0 :     async fn process_results(
    1402            0 :         &self,
    1403            0 :         mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
    1404            0 :         mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
    1405            0 :             Result<(), (TenantShardId, NotifyError)>,
    1406            0 :         >,
    1407            0 :     ) {
    1408              :         loop {
    1409              :             // Wait for the next result, or for cancellation
    1410            0 :             tokio::select! {
    1411            0 :                 r = result_rx.recv() => {
    1412            0 :                     match r {
    1413            0 :                         Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
    1414            0 :                         None | Some(ReconcileResultRequest::Stop) => {break;}
    1415              :                     }
    1416              :                 }
    1417            0 :                 _ = async{
    1418            0 :                     match bg_compute_hook_result_rx.recv().await {
    1419            0 :                         Some(result) => {
    1420            0 :                             if let Err((tenant_shard_id, notify_error)) = result {
    1421            0 :                                 tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
    1422            0 :                                 let mut locked = self.inner.write().unwrap();
    1423            0 :                                 if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
    1424            0 :                                     shard.pending_compute_notification = true;
    1425            0 :                                 }
    1426              : 
    1427            0 :                             }
    1428              :                         },
    1429              :                         None => {
     1430              :                             // This channel is dead, but we don't want to terminate the outer loop: just wait for shutdown
    1431            0 :                             self.cancel.cancelled().await;
    1432              :                         }
    1433              :                     }
    1434            0 :                 } => {},
    1435            0 :                 _ = self.cancel.cancelled() => {
    1436            0 :                     break;
    1437              :                 }
    1438              :             };
    1439              :         }
    1440            0 :     }
    1441              : 
    1442            0 :     async fn process_aborts(
    1443            0 :         &self,
    1444            0 :         mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
    1445            0 :     ) {
    1446              :         loop {
    1447              :             // Wait for the next result, or for cancellation
    1448            0 :             let op = tokio::select! {
    1449            0 :                 r = abort_rx.recv() => {
    1450            0 :                     match r {
    1451            0 :                         Some(op) => {op},
    1452            0 :                         None => {break;}
    1453              :                     }
    1454              :                 }
    1455            0 :                 _ = self.cancel.cancelled() => {
    1456            0 :                     break;
    1457              :                 }
    1458              :             };
    1459              : 
    1460              :             // Retry until shutdown: we must keep this request object alive until it is properly
     1461              :             // processed, as it holds a lock guard that prevents other operations from acting on the
     1462              :             // tenant while it is in a partially-split state.
    1463            0 :             while !self.cancel.is_cancelled() {
    1464            0 :                 match self.abort_tenant_shard_split(&op).await {
    1465            0 :                     Ok(_) => break,
    1466            0 :                     Err(e) => {
    1467            0 :                         tracing::warn!(
    1468            0 :                             "Failed to abort shard split on {}, will retry: {e}",
    1469              :                             op.tenant_id
    1470              :                         );
    1471              : 
    1472              :                         // If a node is unavailable, we hope that it has been properly marked Offline
    1473              :                         // when we retry, so that the abort op will succeed.  If the abort op is failing
    1474              :                         // for some other reason, we will keep retrying forever, or until a human notices
    1475              :                         // and does something about it (either fixing a pageserver or restarting the controller).
    1476            0 :                         tokio::time::timeout(Duration::from_secs(5), self.cancel.cancelled())
    1477            0 :                             .await
    1478            0 :                             .ok();
    1479              :                     }
    1480              :                 }
    1481              :             }
    1482              :         }
    1483            0 :     }
    1484              : 
    1485            0 :     pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
    1486            0 :         let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
    1487            0 :         let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
    1488            0 : 
    1489            0 :         let leadership_cancel = CancellationToken::new();
    1490            0 :         let leadership = Leadership::new(persistence.clone(), config.clone(), leadership_cancel);
    1491            0 :         let (leader, leader_step_down_state) = leadership.step_down_current_leader().await?;
    1492              : 
    1493              :         // Apply the migrations **after** the current leader has stepped down
    1494              :         // (or we've given up waiting for it), but **before** reading from the
    1495              :         // database. The only exception is reading the current leader before
    1496              :         // migrating.
    1497            0 :         persistence.migration_run().await?;
    1498              : 
    1499            0 :         tracing::info!("Loading nodes from database...");
    1500            0 :         let nodes = persistence
    1501            0 :             .list_nodes()
    1502            0 :             .await?
    1503            0 :             .into_iter()
    1504            0 :             .map(|x| Node::from_persistent(x, config.use_https_pageserver_api))
    1505            0 :             .collect::<anyhow::Result<Vec<Node>>>()?;
    1506            0 :         let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
    1507            0 :         tracing::info!("Loaded {} nodes from database.", nodes.len());
    1508            0 :         metrics::METRICS_REGISTRY
    1509            0 :             .metrics_group
    1510            0 :             .storage_controller_pageserver_nodes
    1511            0 :             .set(nodes.len() as i64);
    1512            0 : 
    1513            0 :         tracing::info!("Loading safekeepers from database...");
    1514            0 :         let safekeepers = persistence
    1515            0 :             .list_safekeepers()
    1516            0 :             .await?
    1517            0 :             .into_iter()
    1518            0 :             .map(|skp| {
    1519            0 :                 Safekeeper::from_persistence(
    1520            0 :                     skp,
    1521            0 :                     CancellationToken::new(),
    1522            0 :                     config.use_https_safekeeper_api,
    1523            0 :                 )
    1524            0 :             })
    1525            0 :             .collect::<anyhow::Result<Vec<_>>>()?;
    1526            0 :         let safekeepers: HashMap<NodeId, Safekeeper> =
    1527            0 :             safekeepers.into_iter().map(|n| (n.get_id(), n)).collect();
    1528            0 :         tracing::info!("Loaded {} safekeepers from database.", safekeepers.len());
    1529              : 
    1530            0 :         tracing::info!("Loading shards from database...");
    1531            0 :         let mut tenant_shard_persistence = persistence.load_active_tenant_shards().await?;
    1532            0 :         tracing::info!(
    1533            0 :             "Loaded {} shards from database.",
    1534            0 :             tenant_shard_persistence.len()
    1535              :         );
    1536              : 
    1537              :         // If any shard splits were in progress, reset the database state to abort them
    1538            0 :         let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
    1539            0 :             HashMap::new();
    1540            0 :         for tsp in &mut tenant_shard_persistence {
    1541            0 :             let shard = tsp.get_shard_identity()?;
    1542            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1543            0 :             let entry = tenant_shard_count_min_max
    1544            0 :                 .entry(tenant_shard_id.tenant_id)
    1545            0 :                 .or_insert_with(|| (shard.count, shard.count));
    1546            0 :             entry.0 = std::cmp::min(entry.0, shard.count);
    1547            0 :             entry.1 = std::cmp::max(entry.1, shard.count);
    1548            0 :         }
    1549              : 
    1550            0 :         for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
    1551            0 :             if count_min != count_max {
    1552              :                 // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
    1553              :                 // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
    1554              :                 // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
    1555            0 :                 tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
    1556            0 :                 let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
    1557              : 
    1558              :                 // We may never see the Complete status here: if the split was complete, we wouldn't have
     1559              :                 // identified this tenant as having mismatching min/max counts.
    1560            0 :                 assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
    1561              : 
    1562              :                 // Clear the splitting status in-memory, to reflect that we just aborted in the database
    1563            0 :                 tenant_shard_persistence.iter_mut().for_each(|tsp| {
    1564            0 :                     // Set idle split state on those shards that we will retain.
    1565            0 :                     let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
    1566            0 :                     if tsp_tenant_id == tenant_id
    1567            0 :                         && tsp.get_shard_identity().unwrap().count == count_min
    1568            0 :                     {
    1569            0 :                         tsp.splitting = SplitState::Idle;
    1570            0 :                     } else if tsp_tenant_id == tenant_id {
    1571              :                         // Leave the splitting state on the child shards: this will be used next to
    1572              :                         // drop them.
    1573            0 :                         tracing::info!(
    1574            0 :                             "Shard {tsp_tenant_id} will be dropped after shard split abort",
    1575              :                         );
    1576            0 :                     }
    1577            0 :                 });
    1578            0 : 
    1579            0 :                 // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
    1580            0 :                 tenant_shard_persistence.retain(|tsp| {
    1581            0 :                     TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
    1582            0 :                         || tsp.splitting == SplitState::Idle
    1583            0 :                 });
    1584            0 :             }
    1585              :         }
    1586              : 
    1587            0 :         let mut tenants = BTreeMap::new();
    1588            0 : 
    1589            0 :         let mut scheduler = Scheduler::new(nodes.values());
    1590              : 
    1591              :         #[cfg(feature = "testing")]
    1592              :         {
    1593              :             use pageserver_api::controller_api::AvailabilityZone;
    1594              : 
    1595              :             // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
    1596              :             // tests only store the shards, not the nodes.  The nodes will be loaded shortly
     1597              :             // afterwards, when pageservers start up and register.
    1598            0 :             let mut node_ids = HashSet::new();
    1599            0 :             for tsp in &tenant_shard_persistence {
    1600            0 :                 if let Some(node_id) = tsp.generation_pageserver {
    1601            0 :                     node_ids.insert(node_id);
    1602            0 :                 }
    1603              :             }
    1604            0 :             for node_id in node_ids {
    1605            0 :                 tracing::info!("Creating node {} in scheduler for tests", node_id);
    1606            0 :                 let node = Node::new(
    1607            0 :                     NodeId(node_id as u64),
    1608            0 :                     "".to_string(),
    1609            0 :                     123,
    1610            0 :                     None,
    1611            0 :                     "".to_string(),
    1612            0 :                     123,
    1613            0 :                     AvailabilityZone("test_az".to_string()),
    1614            0 :                     false,
    1615            0 :                 )
    1616            0 :                 .unwrap();
    1617            0 : 
    1618            0 :                 scheduler.node_upsert(&node);
    1619              :             }
    1620              :         }
    1621            0 :         for tsp in tenant_shard_persistence {
    1622            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1623              : 
    1624              :             // We will populate intent properly later in [`Self::startup_reconcile`], initially populate
    1625              :             // it with what we can infer: the node for which a generation was most recently issued.
    1626            0 :             let mut intent = IntentState::new(
    1627            0 :                 tsp.preferred_az_id
    1628            0 :                     .as_ref()
    1629            0 :                     .map(|az| AvailabilityZone(az.clone())),
    1630            0 :             );
    1631            0 :             if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
    1632              :             {
    1633            0 :                 if nodes.contains_key(&generation_pageserver) {
    1634            0 :                     intent.set_attached(&mut scheduler, Some(generation_pageserver));
    1635            0 :                 } else {
    1636              :                     // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
    1637              :                     // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
    1638              :                     // on different pageservers.
    1639            0 :                     tracing::warn!(
    1640            0 :                         "Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled"
    1641              :                     );
    1642              :                 }
    1643            0 :             }
    1644            0 :             let new_tenant = TenantShard::from_persistent(tsp, intent)?;
    1645              : 
    1646            0 :             tenants.insert(tenant_shard_id, new_tenant);
    1647              :         }
    1648              : 
    1649            0 :         let (startup_completion, startup_complete) = utils::completion::channel();
    1650            0 : 
    1651            0 :         // This channel is continuously consumed by process_results, so doesn't need to be very large.
    1652            0 :         let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
    1653            0 :             tokio::sync::mpsc::channel(512);
    1654            0 : 
    1655            0 :         let (delayed_reconcile_tx, delayed_reconcile_rx) =
    1656            0 :             tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
    1657            0 : 
    1658            0 :         let cancel = CancellationToken::new();
    1659            0 :         let reconcilers_cancel = cancel.child_token();
    1660            0 : 
    1661            0 :         let mut http_client = reqwest::Client::builder();
    1662            0 :         // We intentionally disable the connection pool, so every request will create its own TCP connection.
    1663            0 :         // It's especially important for heartbeaters to notice more network problems.
    1664            0 :         //
    1665            0 :         // TODO: It makes sense to use this client only in heartbeaters and create a second one with
    1666            0 :         // connection pooling for everything else. But reqwest::Client may create a connection without
    1667            0 :         // ever using it (it uses hyper's Client under the hood):
    1668            0 :         // https://github.com/hyperium/hyper-util/blob/d51318df3461d40e5f5e5ca163cb3905ac960209/src/client/legacy/client.rs#L415
    1669            0 :         //
    1670            0 :         // Because of a bug in hyper0::Connection::graceful_shutdown such connections hang during
    1671            0 :         // graceful server shutdown: https://github.com/hyperium/hyper/issues/2730
    1672            0 :         //
    1673            0 :         // The bug has been fixed in hyper v1, so keep alive may be enabled only after we migrate to hyper1.
    1674            0 :         http_client = http_client.pool_max_idle_per_host(0);
    1675            0 :         for ssl_ca_cert in &config.ssl_ca_certs {
    1676            0 :             http_client = http_client.add_root_certificate(ssl_ca_cert.clone());
    1677            0 :         }
    1678            0 :         let http_client = http_client.build()?;
    1679              : 
    1680            0 :         let heartbeater_ps = Heartbeater::new(
    1681            0 :             http_client.clone(),
    1682            0 :             config.pageserver_jwt_token.clone(),
    1683            0 :             config.max_offline_interval,
    1684            0 :             config.max_warming_up_interval,
    1685            0 :             cancel.clone(),
    1686            0 :         );
    1687            0 : 
    1688            0 :         let heartbeater_sk = Heartbeater::new(
    1689            0 :             http_client.clone(),
    1690            0 :             config.safekeeper_jwt_token.clone(),
    1691            0 :             config.max_offline_interval,
    1692            0 :             config.max_warming_up_interval,
    1693            0 :             cancel.clone(),
    1694            0 :         );
    1695              : 
    1696            0 :         let initial_leadership_status = if config.start_as_candidate {
    1697            0 :             LeadershipStatus::Candidate
    1698              :         } else {
    1699            0 :             LeadershipStatus::Leader
    1700              :         };
    1701              : 
    1702            0 :         let this = Arc::new(Self {
    1703            0 :             inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
    1704            0 :                 nodes,
    1705            0 :                 safekeepers,
    1706            0 :                 tenants,
    1707            0 :                 scheduler,
    1708            0 :                 delayed_reconcile_rx,
    1709            0 :                 initial_leadership_status,
    1710            0 :                 reconcilers_cancel.clone(),
    1711            0 :             ))),
    1712            0 :             config: config.clone(),
    1713            0 :             persistence,
    1714            0 :             compute_hook: Arc::new(ComputeHook::new(config.clone())),
    1715            0 :             result_tx,
    1716            0 :             heartbeater_ps,
    1717            0 :             heartbeater_sk,
    1718            0 :             reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1719            0 :                 config.reconciler_concurrency,
    1720            0 :             )),
    1721            0 :             priority_reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1722            0 :                 config.priority_reconciler_concurrency,
    1723            0 :             )),
    1724            0 :             delayed_reconcile_tx,
    1725            0 :             abort_tx,
    1726            0 :             startup_complete: startup_complete.clone(),
    1727            0 :             cancel,
    1728            0 :             reconcilers_cancel,
    1729            0 :             gate: Gate::default(),
    1730            0 :             reconcilers_gate: Gate::default(),
    1731            0 :             tenant_op_locks: Default::default(),
    1732            0 :             node_op_locks: Default::default(),
    1733            0 :             http_client,
    1734            0 :         });
    1735            0 : 
    1736            0 :         let result_task_this = this.clone();
    1737            0 :         tokio::task::spawn(async move {
    1738              :             // Block shutdown until we're done (we must respect self.cancel)
    1739            0 :             if let Ok(_gate) = result_task_this.gate.enter() {
    1740            0 :                 result_task_this
    1741            0 :                     .process_results(result_rx, bg_compute_notify_result_rx)
    1742            0 :                     .await
    1743            0 :             }
    1744            0 :         });
    1745            0 : 
    1746            0 :         tokio::task::spawn({
    1747            0 :             let this = this.clone();
    1748            0 :             async move {
    1749              :                 // Block shutdown until we're done (we must respect self.cancel)
    1750            0 :                 if let Ok(_gate) = this.gate.enter() {
    1751            0 :                     this.process_aborts(abort_rx).await
    1752            0 :                 }
    1753            0 :             }
    1754            0 :         });
    1755            0 : 
    1756            0 :         tokio::task::spawn({
    1757            0 :             let this = this.clone();
    1758            0 :             async move {
    1759            0 :                 if let Ok(_gate) = this.gate.enter() {
    1760              :                     loop {
    1761            0 :                         tokio::select! {
    1762            0 :                             _ = this.cancel.cancelled() => {
    1763            0 :                                 break;
    1764              :                             },
    1765            0 :                             _ = tokio::time::sleep(Duration::from_secs(60)) => {}
    1766            0 :                         };
    1767            0 :                         this.tenant_op_locks.housekeeping();
    1768              :                     }
    1769            0 :                 }
    1770            0 :             }
    1771            0 :         });
    1772            0 : 
    1773            0 :         tokio::task::spawn({
    1774            0 :             let this = this.clone();
    1775            0 :             // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
    1776            0 :             // is done.
    1777            0 :             let startup_completion = startup_completion.clone();
    1778            0 :             async move {
    1779              :                 // Block shutdown until we're done (we must respect self.cancel)
    1780            0 :                 let Ok(_gate) = this.gate.enter() else {
    1781            0 :                     return;
    1782              :                 };
    1783              : 
    1784            0 :                 this.startup_reconcile(leader, leader_step_down_state, bg_compute_notify_result_tx)
    1785            0 :                     .await;
    1786              : 
    1787            0 :                 drop(startup_completion);
    1788            0 :             }
    1789            0 :         });
    1790            0 : 
    1791            0 :         tokio::task::spawn({
    1792            0 :             let this = this.clone();
    1793            0 :             let startup_complete = startup_complete.clone();
    1794            0 :             async move {
    1795            0 :                 startup_complete.wait().await;
    1796            0 :                 this.background_reconcile().await;
    1797            0 :             }
    1798            0 :         });
    1799            0 : 
    1800            0 :         tokio::task::spawn({
    1801            0 :             let this = this.clone();
    1802            0 :             let startup_complete = startup_complete.clone();
    1803            0 :             async move {
    1804            0 :                 startup_complete.wait().await;
    1805            0 :                 this.spawn_heartbeat_driver().await;
    1806            0 :             }
    1807            0 :         });
    1808            0 : 
    1809            0 :         Ok(this)
    1810            0 :     }
    1811              : 
    1812            0 :     pub(crate) async fn attach_hook(
    1813            0 :         &self,
    1814            0 :         attach_req: AttachHookRequest,
    1815            0 :     ) -> anyhow::Result<AttachHookResponse> {
    1816            0 :         let _tenant_lock = trace_exclusive_lock(
    1817            0 :             &self.tenant_op_locks,
    1818            0 :             attach_req.tenant_shard_id.tenant_id,
    1819            0 :             TenantOperations::AttachHook,
    1820            0 :         )
    1821            0 :         .await;
    1822              : 
    1823              :         // This is a test hook.  To enable using it on tenants that were created directly with
    1824              :         // the pageserver API (not via this service), we will auto-create any missing tenant
    1825              :         // shards with default state.
    1826            0 :         let insert = {
    1827            0 :             match self
    1828            0 :                 .maybe_load_tenant(attach_req.tenant_shard_id.tenant_id, &_tenant_lock)
    1829            0 :                 .await
    1830              :             {
    1831            0 :                 Ok(_) => false,
    1832            0 :                 Err(ApiError::NotFound(_)) => true,
    1833            0 :                 Err(e) => return Err(e.into()),
    1834              :             }
    1835              :         };
    1836              : 
    1837            0 :         if insert {
    1838            0 :             let tsp = TenantShardPersistence {
    1839            0 :                 tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
    1840            0 :                 shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
    1841            0 :                 shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
    1842            0 :                 shard_stripe_size: 0,
    1843            0 :                 generation: attach_req.generation_override.or(Some(0)),
    1844            0 :                 generation_pageserver: None,
    1845            0 :                 placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
    1846            0 :                 config: serde_json::to_string(&TenantConfig::default()).unwrap(),
    1847            0 :                 splitting: SplitState::default(),
    1848            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    1849            0 :                     .unwrap(),
    1850            0 :                 preferred_az_id: None,
    1851            0 :             };
    1852            0 : 
    1853            0 :             match self.persistence.insert_tenant_shards(vec![tsp]).await {
    1854            0 :                 Err(e) => match e {
    1855              :                     DatabaseError::Query(diesel::result::Error::DatabaseError(
    1856              :                         DatabaseErrorKind::UniqueViolation,
    1857              :                         _,
    1858              :                     )) => {
    1859            0 :                         tracing::info!(
    1860            0 :                             "Raced with another request to insert tenant {}",
    1861              :                             attach_req.tenant_shard_id
    1862              :                         )
    1863              :                     }
    1864            0 :                     _ => return Err(e.into()),
    1865              :                 },
    1866              :                 Ok(()) => {
    1867            0 :                     tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
    1868              : 
    1869            0 :                     let mut locked = self.inner.write().unwrap();
    1870            0 :                     locked.tenants.insert(
    1871            0 :                         attach_req.tenant_shard_id,
    1872            0 :                         TenantShard::new(
    1873            0 :                             attach_req.tenant_shard_id,
    1874            0 :                             ShardIdentity::unsharded(),
    1875            0 :                             PlacementPolicy::Attached(0),
    1876            0 :                             None,
    1877            0 :                         ),
    1878            0 :                     );
    1879            0 :                     tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
    1880              :                 }
    1881              :             }
    1882            0 :         }
    1883              : 
    1884            0 :         let new_generation = if let Some(req_node_id) = attach_req.node_id {
    1885            0 :             let maybe_tenant_conf = {
    1886            0 :                 let locked = self.inner.write().unwrap();
    1887            0 :                 locked
    1888            0 :                     .tenants
    1889            0 :                     .get(&attach_req.tenant_shard_id)
    1890            0 :                     .map(|t| t.config.clone())
    1891            0 :             };
    1892            0 : 
    1893            0 :             match maybe_tenant_conf {
    1894            0 :                 Some(conf) => {
    1895            0 :                     let new_generation = self
    1896            0 :                         .persistence
    1897            0 :                         .increment_generation(attach_req.tenant_shard_id, req_node_id)
    1898            0 :                         .await?;
    1899              : 
    1900              :                     // Persist the placement policy update. This is required
    1901              :             // when re-attaching a detached tenant.
    1902            0 :                     self.persistence
    1903            0 :                         .update_tenant_shard(
    1904            0 :                             TenantFilter::Shard(attach_req.tenant_shard_id),
    1905            0 :                             Some(PlacementPolicy::Attached(0)),
    1906            0 :                             Some(conf),
    1907            0 :                             None,
    1908            0 :                             None,
    1909            0 :                         )
    1910            0 :                         .await?;
    1911            0 :                     Some(new_generation)
    1912              :                 }
    1913              :                 None => {
    1914            0 :                     anyhow::bail!("Attach hook handling raced with tenant removal")
    1915              :                 }
    1916              :             }
    1917              :         } else {
    1918            0 :             self.persistence.detach(attach_req.tenant_shard_id).await?;
    1919            0 :             None
    1920              :         };
    1921              : 
    1922            0 :         let mut locked = self.inner.write().unwrap();
    1923            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    1924            0 : 
    1925            0 :         let tenant_shard = tenants
    1926            0 :             .get_mut(&attach_req.tenant_shard_id)
    1927            0 :             .expect("Checked for existence above");
    1928              : 
    1929            0 :         if let Some(new_generation) = new_generation {
    1930            0 :             tenant_shard.generation = Some(new_generation);
    1931            0 :             tenant_shard.policy = PlacementPolicy::Attached(0);
    1932            0 :         } else {
    1933              :             // This is a detach notification.  We must update placement policy to avoid re-attaching
    1934              :             // during background scheduling/reconciliation, or during storage controller restart.
    1935            0 :             assert!(attach_req.node_id.is_none());
    1936            0 :             tenant_shard.policy = PlacementPolicy::Detached;
    1937              :         }
    1938              : 
    1939            0 :         if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
    1940            0 :             tracing::info!(
    1941              :                 tenant_id = %attach_req.tenant_shard_id,
    1942              :                 ps_id = %attaching_pageserver,
    1943              :                 generation = ?tenant_shard.generation,
    1944            0 :                 "issuing",
    1945              :             );
    1946            0 :         } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
    1947            0 :             tracing::info!(
    1948              :                 tenant_id = %attach_req.tenant_shard_id,
    1949              :                 %ps_id,
    1950              :                 generation = ?tenant_shard.generation,
    1951            0 :                 "dropping",
    1952              :             );
    1953              :         } else {
    1954            0 :             tracing::info!(
    1955              :             tenant_id = %attach_req.tenant_shard_id,
    1956            0 :             "no-op: tenant already has no pageserver");
    1957              :         }
    1958            0 :         tenant_shard
    1959            0 :             .intent
    1960            0 :             .set_attached(scheduler, attach_req.node_id);
    1961            0 : 
    1962            0 :         tracing::info!(
    1963            0 :             "attach_hook: tenant {} set generation {:?}, pageserver {}",
    1964            0 :             attach_req.tenant_shard_id,
    1965            0 :             tenant_shard.generation,
    1966            0 :             // TODO: this is an odd number of 0xf's
    1967            0 :             attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff))
    1968              :         );
    1969              : 
    1970              :         // Trick the reconciler into not doing anything for this tenant: this helps
    1971              :         // tests that manually configure a tenant on the pageserver, and then call this
    1972              :         // attach hook: they don't want background reconciliation to modify what they
    1973              :         // did to the pageserver.
    1974              :         #[cfg(feature = "testing")]
    1975              :         {
    1976            0 :             if let Some(node_id) = attach_req.node_id {
    1977            0 :                 tenant_shard.observed.locations = HashMap::from([(
    1978            0 :                     node_id,
    1979            0 :                     ObservedStateLocation {
    1980            0 :                         conf: Some(attached_location_conf(
    1981            0 :                             tenant_shard.generation.unwrap(),
    1982            0 :                             &tenant_shard.shard,
    1983            0 :                             &tenant_shard.config,
    1984            0 :                             &PlacementPolicy::Attached(0),
    1985            0 :                         )),
    1986            0 :                     },
    1987            0 :                 )]);
    1988            0 :             } else {
    1989            0 :                 tenant_shard.observed.locations.clear();
    1990            0 :             }
    1991              :         }
    1992              : 
    1993            0 :         Ok(AttachHookResponse {
    1994            0 :             generation: attach_req
    1995            0 :                 .node_id
    1996            0 :                 .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
    1997            0 :         })
    1998            0 :     }
    1999              : 
    2000            0 :     pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
    2001            0 :         let locked = self.inner.read().unwrap();
    2002            0 : 
    2003            0 :         let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
    2004            0 : 
    2005            0 :         InspectResponse {
    2006            0 :             attachment: tenant_shard.and_then(|s| {
    2007            0 :                 s.intent
    2008            0 :                     .get_attached()
    2009            0 :                     .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
    2010            0 :             }),
    2011            0 :         }
    2012            0 :     }
    2013              : 
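// Illustrative sketch (not part of service.rs): attach_hook and inspect above are test
// hooks, and a test harness might drive them roughly as below. `service`,
// `tenant_shard_id` and `pageserver_id` are assumed to exist in the harness; only the
// request/response fields shown are taken from the code above, and any other fields are
// omitted.
//
//     let resp = service
//         .attach_hook(AttachHookRequest {
//             tenant_shard_id,
//             node_id: Some(pageserver_id),    // Some(_) attaches, None detaches
//             generation_override: None,
//         })
//         .await?;
//     // The issued generation lets the test configure the pageserver directly with a
//     // matching LocationConfig.
//     let issued_generation = resp.generation;
//
//     // `inspect` then reports the controller's intent for the same shard.
//     let InspectResponse { attachment } = service.inspect(InspectRequest { tenant_shard_id });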
    2014              :     // When the availability state of a node transitions to active, we must do a full reconciliation
    2015              :     // of LocationConfigs on that node.  This is because while a node was offline:
    2016              :     // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
    2017              :     // - aborting a tenant shard split might have left rogue child shards behind on this node.
    2018              :     //
    2019              :     // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
    2020              :     // Reconcilers might communicate with the node, and these must not overlap with the work we do in
    2021              :     // this function.
    2022              :     //
    2023              :     // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
    2024              :     // is written for a single node rather than as a batch job for all nodes.
    2025              :     #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
    2026              :     async fn node_activate_reconcile(
    2027              :         &self,
    2028              :         mut node: Node,
    2029              :         _lock: &TracingExclusiveGuard<NodeOperations>,
    2030              :     ) -> Result<(), ApiError> {
    2031              :         // This Node is a mutable local copy: we will set it active so that we can use its
    2032              :         // API client to reconcile with the node.  The Node in [`Self::nodes`] will get updated
    2033              :         // later.
    2034              :         node.set_availability(NodeAvailability::Active(PageserverUtilization::full()));
    2035              : 
    2036              :         let configs = match node
    2037              :             .with_client_retries(
    2038            0 :                 |client| async move { client.list_location_config().await },
    2039              :                 &self.http_client,
    2040              :                 &self.config.pageserver_jwt_token,
    2041              :                 1,
    2042              :                 5,
    2043              :                 SHORT_RECONCILE_TIMEOUT,
    2044              :                 &self.cancel,
    2045              :             )
    2046              :             .await
    2047              :         {
    2048              :             None => {
    2049              :                 // We're shutting down (the Node's cancellation token can't have fired, because
    2050              :                 // we're the only scope that has a reference to it, and we didn't fire it).
    2051              :                 return Err(ApiError::ShuttingDown);
    2052              :             }
    2053              :             Some(Err(e)) => {
    2054              :                 // This node didn't succeed listing its locations: it may not proceed to active state
    2055              :                 // as it is apparently unavailable.
    2056              :                 return Err(ApiError::PreconditionFailed(
    2057              :                     format!("Failed to query node location configs, cannot activate ({e})").into(),
    2058              :                 ));
    2059              :             }
    2060              :             Some(Ok(configs)) => configs,
    2061              :         };
    2062              :         tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
    2063              : 
    2064              :         let mut cleanup = Vec::new();
    2065              :         let mut mismatched_locations = 0;
    2066              :         {
    2067              :             let mut locked = self.inner.write().unwrap();
    2068              : 
    2069              :             for (tenant_shard_id, reported) in configs.tenant_shards {
    2070              :                 let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
    2071              :                     cleanup.push(tenant_shard_id);
    2072              :                     continue;
    2073              :                 };
    2074              : 
    2075              :                 let on_record = &mut tenant_shard
    2076              :                     .observed
    2077              :                     .locations
    2078              :                     .entry(node.get_id())
    2079            0 :                     .or_insert_with(|| ObservedStateLocation { conf: None })
    2080              :                     .conf;
    2081              : 
    2082              :                 // If the location reported by the node does not match our observed state,
    2083              :                 // then we mark it as uncertain and let the background reconciliation loop
    2084              :                 // deal with it.
    2085              :                 //
    2086              :                 // Note that this also covers net new locations reported by the node.
    2087              :                 if *on_record != reported {
    2088              :                     mismatched_locations += 1;
    2089              :                     *on_record = None;
    2090              :                 }
    2091              :             }
    2092              :         }
    2093              : 
    2094              :         if mismatched_locations > 0 {
    2095              :             tracing::info!(
    2096              :                 "Set observed state to None for {mismatched_locations} mismatched locations"
    2097              :             );
    2098              :         }
    2099              : 
    2100              :         for tenant_shard_id in cleanup {
    2101              :             tracing::info!("Detaching {tenant_shard_id}");
    2102              :             match node
    2103              :                 .with_client_retries(
    2104            0 :                     |client| async move {
    2105            0 :                         let config = LocationConfig {
    2106            0 :                             mode: LocationConfigMode::Detached,
    2107            0 :                             generation: None,
    2108            0 :                             secondary_conf: None,
    2109            0 :                             shard_number: tenant_shard_id.shard_number.0,
    2110            0 :                             shard_count: tenant_shard_id.shard_count.literal(),
    2111            0 :                             shard_stripe_size: 0,
    2112            0 :                             tenant_conf: models::TenantConfig::default(),
    2113            0 :                         };
    2114            0 :                         client
    2115            0 :                             .location_config(tenant_shard_id, config, None, false)
    2116            0 :                             .await
    2117            0 :                     },
    2118              :                     &self.http_client,
    2119              :                     &self.config.pageserver_jwt_token,
    2120              :                     1,
    2121              :                     5,
    2122              :                     SHORT_RECONCILE_TIMEOUT,
    2123              :                     &self.cancel,
    2124              :                 )
    2125              :                 .await
    2126              :             {
    2127              :                 None => {
    2128              :                     // We're shutting down (the Node's cancellation token can't have fired, because
    2129              :                     // we're the only scope that has a reference to it, and we didn't fire it).
    2130              :                     return Err(ApiError::ShuttingDown);
    2131              :                 }
    2132              :                 Some(Err(e)) => {
    2133              :                     // Do not let the node proceed to Active state if it is not responsive to requests
    2134              :                     // to detach.  This could happen if e.g. a shutdown bug in the pageserver is preventing the
    2135              :                     // detach from completing: we should not let this node back into the set of nodes considered
    2136              :                     // okay for scheduling.
    2137              :                     return Err(ApiError::Conflict(format!(
    2138              :                         "Node {node} failed to detach {tenant_shard_id}: {e}"
    2139              :                     )));
    2140              :                 }
    2141              :                 Some(Ok(_)) => {}
    2142              :             };
    2143              :         }
    2144              : 
    2145              :         Ok(())
    2146              :     }
    2147              : 
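// Illustrative sketch, separated from the service types, of the per-location decision that
// node_activate_reconcile makes above: locations the controller does not know about are
// detached, locations whose reported config differs from our observed state are marked
// uncertain (observed conf = None) for the background reconciler, and matching locations
// are left alone. The names below are illustrative only.
#[derive(Debug, PartialEq)]
enum ActivateAction {
    Detach,
    MarkUncertain,
    Keep,
}

fn classify_location(known_to_controller: bool, reported_matches_observed: bool) -> ActivateAction {
    if !known_to_controller {
        ActivateAction::Detach
    } else if !reported_matches_observed {
        ActivateAction::MarkUncertain
    } else {
        ActivateAction::Keep
    }
}

// e.g. classify_location(false, false) == ActivateAction::Detach
//      classify_location(true, false)  == ActivateAction::MarkUncertain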
    2148            0 :     pub(crate) async fn re_attach(
    2149            0 :         &self,
    2150            0 :         reattach_req: ReAttachRequest,
    2151            0 :     ) -> Result<ReAttachResponse, ApiError> {
    2152            0 :         if let Some(register_req) = reattach_req.register {
    2153            0 :             self.node_register(register_req).await?;
    2154            0 :         }
    2155              : 
    2156              :         // Ordering: we must persist generation number updates before making them visible in the in-memory state
    2157            0 :         let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
    2158              : 
    2159            0 :         tracing::info!(
    2160              :             node_id=%reattach_req.node_id,
    2161            0 :             "Incremented {} tenant shards' generations",
    2162            0 :             incremented_generations.len()
    2163              :         );
    2164              : 
    2165              :         // Apply the updated generation to our in-memory state, and
    2166              :         // discover secondary locations.
    2167            0 :         let mut locked = self.inner.write().unwrap();
    2168            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    2169            0 : 
    2170            0 :         let mut response = ReAttachResponse {
    2171            0 :             tenants: Vec::new(),
    2172            0 :         };
    2173              : 
    2174              :         // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
    2175              :         // to call location_conf API with an old generation.  Wait for cancellation to complete
    2176              :         // before responding to this request.  Requires well implemented CancellationToken logic
    2177              :         // all the way to where we call location_conf.  Even then, there can still be a location_conf
    2178              :         // request in flight over the network: TODO handle that by making location_conf API refuse
    2179              :         // to go backward in generations.
    2180              : 
    2181              :         // Scan through all shards, applying updates for ones where we updated generation
    2182              :         // and identifying shards that intend to have a secondary location on this node.
    2183            0 :         for (tenant_shard_id, shard) in tenants {
    2184            0 :             if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
    2185            0 :                 let new_gen = *new_gen;
    2186            0 :                 response.tenants.push(ReAttachResponseTenant {
    2187            0 :                     id: *tenant_shard_id,
    2188            0 :                     r#gen: Some(new_gen.into().unwrap()),
    2189            0 :                     // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
    2190            0 :                     // execution.  If a pageserver is restarted during that process, then the reconcile pass will
    2191            0 :                     // fail, and start from scratch, so it doesn't make sense for us to try and preserve
    2192            0 :                     // the stale/multi states at this point.
    2193            0 :                     mode: LocationConfigMode::AttachedSingle,
    2194            0 :                 });
    2195            0 : 
    2196            0 :                 shard.generation = std::cmp::max(shard.generation, Some(new_gen));
    2197            0 :                 if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
    2198              :                     // Why can we update `observed` even though we're not sure our response will be received
    2199              :                     // by the pageserver?  Because the pageserver will not proceed with startup until
    2200              :                     // it has processed the response: if it loses it, we'll see another request and increment
    2201              :                     // the generation again, avoiding any uncertainty about the dirtiness of the tenant's state.
    2202            0 :                     if let Some(conf) = observed.conf.as_mut() {
    2203            0 :                         conf.generation = new_gen.into();
    2204            0 :                     }
    2205            0 :                 } else {
    2206            0 :                     // This node has no observed state for the shard: perhaps it was offline
    2207            0 :                     // when the pageserver restarted.  Insert a None, so that the Reconciler
    2208            0 :                     // will be prompted to learn the location's state before it makes changes.
    2209            0 :                     shard
    2210            0 :                         .observed
    2211            0 :                         .locations
    2212            0 :                         .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
    2213            0 :                 }
    2214            0 :             } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
    2215            0 :                 // Ordering: pageserver will not accept /location_config requests until it has
    2216            0 :                 // finished processing the response from re-attach.  So we can update our in-memory state
    2217            0 :                 // now, and be confident that we are not stamping on the result of some later location config.
    2218            0 :                 // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
    2219            0 :                 // so we might update observed state here, and then get overwritten by some racing
    2220            0 :                 // ReconcileResult.  The impact is low, however, since the state we have set on the pageserver
    2221            0 :                 // matches the intent, so in the worst case a race just results in a spurious reconcile.
    2222            0 : 
    2223            0 :                 response.tenants.push(ReAttachResponseTenant {
    2224            0 :                     id: *tenant_shard_id,
    2225            0 :                     r#gen: None,
    2226            0 :                     mode: LocationConfigMode::Secondary,
    2227            0 :                 });
    2228            0 : 
    2229            0 :                 // We must not update observed, because we have no guarantee that our
    2230            0 :                 // response will be received by the pageserver. This could leave it
    2231            0 :                 // falsely dirty, but the resulting reconcile should be idempotent.
    2232            0 :             }
    2233              :         }
    2234              : 
    2235              :         // We consider a node Active once we have composed a re-attach response, but we
    2236              :         // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
    2237              :         // implicitly synchronizes the LocationConfigs on the node.
    2238              :         //
    2239              :         // Setting a node active unblocks any Reconcilers that might write to the location config API,
    2240              :         // but those requests will not be accepted by the node until it has finished processing
    2241              :         // the re-attach response.
    2242              :         //
    2243              :         // Additionally, reset the node's scheduling policy to match the conditional update done
    2244              :         // in [`Persistence::re_attach`].
    2245            0 :         if let Some(node) = nodes.get(&reattach_req.node_id) {
    2246            0 :             let reset_scheduling = matches!(
    2247            0 :                 node.get_scheduling(),
    2248              :                 NodeSchedulingPolicy::PauseForRestart
    2249              :                     | NodeSchedulingPolicy::Draining
    2250              :                     | NodeSchedulingPolicy::Filling
    2251              :             );
    2252              : 
    2253            0 :             let mut new_nodes = (**nodes).clone();
    2254            0 :             if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
    2255            0 :                 if reset_scheduling {
    2256            0 :                     node.set_scheduling(NodeSchedulingPolicy::Active);
    2257            0 :                 }
    2258              : 
    2259            0 :                 tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
    2260            0 :                 node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
    2261            0 : 
    2262            0 :                 scheduler.node_upsert(node);
    2263            0 :                 let new_nodes = Arc::new(new_nodes);
    2264            0 :                 *nodes = new_nodes;
    2265              :             } else {
    2266            0 :                 tracing::error!(
    2267            0 :                     "Reattaching node {} was removed while processing the request",
    2268              :                     reattach_req.node_id
    2269              :                 );
    2270              :             }
    2271            0 :         }
    2272              : 
    2273            0 :         Ok(response)
    2274            0 :     }
    2275              : 
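// Sketch of the generation-monotonicity rule applied in re_attach above (Generation newtype
// elided): the in-memory generation only ever moves forward, which is presumably why the code
// takes the max of the current value and the freshly incremented one.
fn bump_generation(current: Option<u32>, incremented: u32) -> Option<u32> {
    // Option<u32> ordering treats None as smaller than any Some, matching the service's
    // use of std::cmp::max on Option<Generation>.
    std::cmp::max(current, Some(incremented))
}

// e.g. bump_generation(None, 5)    == Some(5)
//      bump_generation(Some(7), 5) == Some(7)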
    2276            0 :     pub(crate) async fn validate(
    2277            0 :         &self,
    2278            0 :         validate_req: ValidateRequest,
    2279            0 :     ) -> Result<ValidateResponse, DatabaseError> {
    2280              :         // Fast in-memory check: we may reject validation on anything that doesn't match our
    2281              :         // in-memory generation for a shard
    2282            0 :         let in_memory_result = {
    2283            0 :             let mut in_memory_result = Vec::new();
    2284            0 :             let locked = self.inner.read().unwrap();
    2285            0 :             for req_tenant in validate_req.tenants {
    2286            0 :                 if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
    2287            0 :                     let valid = tenant_shard.generation == Some(Generation::new(req_tenant.r#gen));
    2288            0 :                     tracing::info!(
    2289            0 :                         "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
    2290              :                         req_tenant.id,
    2291              :                         req_tenant.r#gen,
    2292              :                         tenant_shard.generation
    2293              :                     );
    2294              : 
    2295            0 :                     in_memory_result.push((
    2296            0 :                         req_tenant.id,
    2297            0 :                         Generation::new(req_tenant.r#gen),
    2298            0 :                         valid,
    2299            0 :                     ));
    2300              :                 } else {
    2301              :                     // This is legal: for example during a shard split the pageserver may still
    2302              :                     // have deletions in its queue from the old pre-split shard, or the tenant may have
    2303              :                     // been deleted while it was busy with compaction/gc.
    2304            0 :                     tracing::info!(
    2305            0 :                         "Refusing deletion validation for missing shard {}",
    2306              :                         req_tenant.id
    2307              :                     );
    2308              :                 }
    2309              :             }
    2310              : 
    2311            0 :             in_memory_result
    2312              :         };
    2313              : 
    2314              :         // Database calls to confirm validity for anything that passed the in-memory check.  We must do this
    2315              :         // in case of controller split-brain, where some other controller process might have incremented the generation.
    2316            0 :         let db_generations = self
    2317            0 :             .persistence
    2318            0 :             .shard_generations(
    2319            0 :                 in_memory_result
    2320            0 :                     .iter()
    2321            0 :                     .filter_map(|i| if i.2 { Some(&i.0) } else { None }),
    2322            0 :             )
    2323            0 :             .await?;
    2324            0 :         let db_generations = db_generations.into_iter().collect::<HashMap<_, _>>();
    2325            0 : 
    2326            0 :         let mut response = ValidateResponse {
    2327            0 :             tenants: Vec::new(),
    2328            0 :         };
    2329            0 :         for (tenant_shard_id, validate_generation, valid) in in_memory_result.into_iter() {
    2330            0 :             let valid = if valid {
    2331            0 :                 let db_generation = db_generations.get(&tenant_shard_id);
    2332            0 :                 db_generation == Some(&Some(validate_generation))
    2333              :             } else {
    2334              :                 // If in-memory state says it's invalid, trust that.  It's always safe to fail a validation; at worst
    2335              :                 // this prevents a pageserver from cleaning up an object in S3.
    2336            0 :                 false
    2337              :             };
    2338              : 
    2339            0 :             response.tenants.push(ValidateResponseTenant {
    2340            0 :                 id: tenant_shard_id,
    2341            0 :                 valid,
    2342            0 :             })
    2343              :         }
    2344              : 
    2345            0 :         Ok(response)
    2346            0 :     }
    2347              : 
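// Minimal sketch of the validation rule implemented above, with the service and database
// types elided: a generation presented by a pageserver is only treated as valid if it matches
// both the in-memory generation and the generation stored in the database (the latter guards
// against controller split-brain). Failing a validation is always safe; at worst the
// pageserver keeps an object in S3 a little longer.
fn deletion_is_valid(in_memory: Option<u32>, in_db: Option<u32>, presented: u32) -> bool {
    in_memory == Some(presented) && in_db == Some(presented)
}

// e.g. deletion_is_valid(Some(3), Some(3), 3) == true
//      deletion_is_valid(Some(4), Some(3), 3) == false  // generation already incremented in memory
//      deletion_is_valid(Some(3), None, 3)    == false  // shard no longer present in the database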
    2348            0 :     pub(crate) async fn tenant_create(
    2349            0 :         &self,
    2350            0 :         create_req: TenantCreateRequest,
    2351            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    2352            0 :         let tenant_id = create_req.new_tenant_id.tenant_id;
    2353              : 
    2354              :         // Exclude any concurrent attempts to create/access the same tenant ID
    2355            0 :         let _tenant_lock = trace_exclusive_lock(
    2356            0 :             &self.tenant_op_locks,
    2357            0 :             create_req.new_tenant_id.tenant_id,
    2358            0 :             TenantOperations::Create,
    2359            0 :         )
    2360            0 :         .await;
    2361            0 :         let (response, waiters) = self.do_tenant_create(create_req).await?;
    2362              : 
    2363            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    2364              :             // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
    2365              :             // accept compute notifications while it is in the process of creating.  Reconciliation will
    2366              :             // be retried in the background.
    2367            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
    2368            0 :         }
    2369            0 :         Ok(response)
    2370            0 :     }
    2371              : 
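// Sketch of the "best effort" wait pattern used by tenant_create above: the creation response
// is returned even if reconciliation does not finish within the timeout, since reconciliation
// is retried in the background anyway. The tokio timeout wrapper below is an illustrative
// stand-in for await_waiters, not the actual implementation.
async fn best_effort_wait<F: std::future::Future>(fut: F, limit: std::time::Duration) {
    // Log and continue instead of failing the request when the wait times out.
    if tokio::time::timeout(limit, fut).await.is_err() {
        eprintln!("reconcile not done yet; it will be retried in the background");
    }
}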
    2372            0 :     pub(crate) async fn do_tenant_create(
    2373            0 :         &self,
    2374            0 :         create_req: TenantCreateRequest,
    2375            0 :     ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
    2376            0 :         let placement_policy = create_req
    2377            0 :             .placement_policy
    2378            0 :             .clone()
    2379            0 :             // As a default, zero secondaries is convenient for tests that don't choose a policy.
    2380            0 :             .unwrap_or(PlacementPolicy::Attached(0));
    2381              : 
    2382              :         // This service expects to handle sharding itself: it is an error to try and directly create
    2383              :         // a particular shard here.
    2384            0 :         let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
    2385            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2386            0 :                 "Attempted to create a specific shard, this API is for creating the whole tenant"
    2387            0 :             )));
    2388              :         } else {
    2389            0 :             create_req.new_tenant_id.tenant_id
    2390            0 :         };
    2391            0 : 
    2392            0 :         tracing::info!(
    2393            0 :             "Creating tenant {}, shard_count={:?}",
    2394              :             create_req.new_tenant_id,
    2395              :             create_req.shard_parameters.count,
    2396              :         );
    2397              : 
    2398            0 :         let create_ids = (0..create_req.shard_parameters.count.count())
    2399            0 :             .map(|i| TenantShardId {
    2400            0 :                 tenant_id,
    2401            0 :                 shard_number: ShardNumber(i),
    2402            0 :                 shard_count: create_req.shard_parameters.count,
    2403            0 :             })
    2404            0 :             .collect::<Vec<_>>();
    2405              : 
    2406              :         // If the caller specifies a None generation, it means "start from default".  This is different
    2407              :         // to [`Self::tenant_location_config`], where a None generation is used to represent
    2408              :         // an incompletely-onboarded tenant.
    2409            0 :         let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
    2410            0 :             tracing::info!(
    2411            0 :                 "tenant_create: secondary mode, generation is_some={}",
    2412            0 :                 create_req.generation.is_some()
    2413              :             );
    2414            0 :             create_req.generation.map(Generation::new)
    2415              :         } else {
    2416            0 :             tracing::info!(
    2417            0 :                 "tenant_create: not secondary mode, generation is_some={}",
    2418            0 :                 create_req.generation.is_some()
    2419              :             );
    2420            0 :             Some(
    2421            0 :                 create_req
    2422            0 :                     .generation
    2423            0 :                     .map(Generation::new)
    2424            0 :                     .unwrap_or(INITIAL_GENERATION),
    2425            0 :             )
    2426              :         };
    2427              : 
    2428            0 :         let preferred_az_id = {
    2429            0 :             let locked = self.inner.read().unwrap();
    2430              :             // Idempotency: take the existing value if the tenant already exists
    2431            0 :             if let Some(shard) = locked.tenants.get(create_ids.first().unwrap()) {
    2432            0 :                 shard.preferred_az().cloned()
    2433              :             } else {
    2434            0 :                 locked.scheduler.get_az_for_new_tenant()
    2435              :             }
    2436              :         };
    2437              : 
    2438              :         // Ordering: we persist tenant shards before creating them on the pageserver.  This enables a caller
    2439              :         // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
    2440              :         // during the creation, rather than risking leaving orphan objects in S3.
    2441            0 :         let persist_tenant_shards = create_ids
    2442            0 :             .iter()
    2443            0 :             .map(|tenant_shard_id| TenantShardPersistence {
    2444            0 :                 tenant_id: tenant_shard_id.tenant_id.to_string(),
    2445            0 :                 shard_number: tenant_shard_id.shard_number.0 as i32,
    2446            0 :                 shard_count: tenant_shard_id.shard_count.literal() as i32,
    2447            0 :                 shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
    2448            0 :                 generation: initial_generation.map(|g| g.into().unwrap() as i32),
    2449            0 :                 // The pageserver is not known until scheduling happens: we will set this column when
    2450            0 :                 // incrementing the generation the first time we attach to a pageserver.
    2451            0 :                 generation_pageserver: None,
    2452            0 :                 placement_policy: serde_json::to_string(&placement_policy).unwrap(),
    2453            0 :                 config: serde_json::to_string(&create_req.config).unwrap(),
    2454            0 :                 splitting: SplitState::default(),
    2455            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2456            0 :                     .unwrap(),
    2457            0 :                 preferred_az_id: preferred_az_id.as_ref().map(|az| az.to_string()),
    2458            0 :             })
    2459            0 :             .collect();
    2460            0 : 
    2461            0 :         match self
    2462            0 :             .persistence
    2463            0 :             .insert_tenant_shards(persist_tenant_shards)
    2464            0 :             .await
    2465              :         {
    2466            0 :             Ok(_) => {}
    2467              :             Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
    2468              :                 DatabaseErrorKind::UniqueViolation,
    2469              :                 _,
    2470              :             ))) => {
    2471              :                 // Unique key violation: this is probably a retry.  Because the shard count is part of the unique key,
    2472              :                 // if we see a unique key violation it means that the creation request's shard count matches the previous
    2473              :                 // creation's shard count.
    2474            0 :                 tracing::info!(
    2475            0 :                     "Tenant shards already present in database, proceeding with idempotent creation..."
    2476              :                 );
    2477              :             }
    2478              :             // Any other database error is unexpected and a bug.
    2479            0 :             Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
    2480              :         };
    2481              : 
    2482            0 :         let mut schedule_context = ScheduleContext::default();
    2483            0 :         let mut schedule_error = None;
    2484            0 :         let mut response_shards = Vec::new();
    2485            0 :         for tenant_shard_id in create_ids {
    2486            0 :             tracing::info!("Creating shard {tenant_shard_id}...");
    2487              : 
    2488            0 :             let outcome = self
    2489            0 :                 .do_initial_shard_scheduling(
    2490            0 :                     tenant_shard_id,
    2491            0 :                     initial_generation,
    2492            0 :                     &create_req.shard_parameters,
    2493            0 :                     create_req.config.clone(),
    2494            0 :                     placement_policy.clone(),
    2495            0 :                     preferred_az_id.as_ref(),
    2496            0 :                     &mut schedule_context,
    2497            0 :                 )
    2498            0 :                 .await;
    2499              : 
    2500            0 :             match outcome {
    2501            0 :                 InitialShardScheduleOutcome::Scheduled(resp) => response_shards.push(resp),
    2502            0 :                 InitialShardScheduleOutcome::NotScheduled => {}
    2503            0 :                 InitialShardScheduleOutcome::ShardScheduleError(err) => {
    2504            0 :                     schedule_error = Some(err);
    2505            0 :                 }
    2506              :             }
    2507              :         }
    2508              : 
    2509              :         // If we failed to schedule shards, then they are still created in the controller,
    2510              :         // but we return an error to the requester to avoid a silent failure when someone
    2511              :         // tries to e.g. create a tenant whose placement policy requires more nodes than
    2512              :         // are present in the system.  We do this here rather than in the above loop, to
    2513              :         // avoid situations where we only create a subset of shards in the tenant.
    2514            0 :         if let Some(e) = schedule_error {
    2515            0 :             return Err(ApiError::Conflict(format!(
    2516            0 :                 "Failed to schedule shard(s): {e}"
    2517            0 :             )));
    2518            0 :         }
    2519            0 : 
    2520            0 :         let waiters = {
    2521            0 :             let mut locked = self.inner.write().unwrap();
    2522            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    2523            0 :             let config = ReconcilerConfigBuilder::new(ReconcilerPriority::High)
    2524            0 :                 .tenant_creation_hint(true)
    2525            0 :                 .build();
    2526            0 :             tenants
    2527            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    2528            0 :                 .filter_map(|(_shard_id, shard)| {
    2529            0 :                     self.maybe_configured_reconcile_shard(shard, nodes, config)
    2530            0 :                 })
    2531            0 :                 .collect::<Vec<_>>()
    2532            0 :         };
    2533            0 : 
    2534            0 :         Ok((
    2535            0 :             TenantCreateResponse {
    2536            0 :                 shards: response_shards,
    2537            0 :             },
    2538            0 :             waiters,
    2539            0 :         ))
    2540            0 :     }
    2541              : 
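// Sketch of the initial-generation rule from do_tenant_create above, with the Generation and
// PlacementPolicy newtypes elided. The concrete value of INITIAL_GENERATION here is an
// assumption made only for this illustration.
const INITIAL_GENERATION: u32 = 0;

fn initial_generation(secondary_only: bool, requested: Option<u32>) -> Option<u32> {
    if secondary_only {
        // Secondary-only tenants keep whatever generation the caller supplied (possibly None).
        requested
    } else {
        // Any policy that attaches the tenant always starts from a concrete generation.
        Some(requested.unwrap_or(INITIAL_GENERATION))
    }
}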
    2542              :     /// Helper for tenant creation that does the scheduling for an individual shard. Covers both the
    2543              :     /// case of a new tenant and a pre-existing one.
    2544              :     #[allow(clippy::too_many_arguments)]
    2545            0 :     async fn do_initial_shard_scheduling(
    2546            0 :         &self,
    2547            0 :         tenant_shard_id: TenantShardId,
    2548            0 :         initial_generation: Option<Generation>,
    2549            0 :         shard_params: &ShardParameters,
    2550            0 :         config: TenantConfig,
    2551            0 :         placement_policy: PlacementPolicy,
    2552            0 :         preferred_az_id: Option<&AvailabilityZone>,
    2553            0 :         schedule_context: &mut ScheduleContext,
    2554            0 :     ) -> InitialShardScheduleOutcome {
    2555            0 :         let mut locked = self.inner.write().unwrap();
    2556            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2557              : 
    2558              :         use std::collections::btree_map::Entry;
    2559            0 :         match tenants.entry(tenant_shard_id) {
    2560            0 :             Entry::Occupied(mut entry) => {
    2561            0 :                 tracing::info!("Tenant shard {tenant_shard_id} already exists while creating");
    2562              : 
    2563            0 :                 if let Err(err) = entry.get_mut().schedule(scheduler, schedule_context) {
    2564            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(err);
    2565            0 :                 }
    2566              : 
    2567            0 :                 if let Some(node_id) = entry.get().intent.get_attached() {
    2568            0 :                     let generation = entry
    2569            0 :                         .get()
    2570            0 :                         .generation
    2571            0 :                         .expect("Generation is set when in attached mode");
    2572            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2573            0 :                         shard_id: tenant_shard_id,
    2574            0 :                         node_id: *node_id,
    2575            0 :                         generation: generation.into().unwrap(),
    2576            0 :                     })
    2577              :                 } else {
    2578            0 :                     InitialShardScheduleOutcome::NotScheduled
    2579              :                 }
    2580              :             }
    2581            0 :             Entry::Vacant(entry) => {
    2582            0 :                 let state = entry.insert(TenantShard::new(
    2583            0 :                     tenant_shard_id,
    2584            0 :                     ShardIdentity::from_params(tenant_shard_id.shard_number, shard_params),
    2585            0 :                     placement_policy,
    2586            0 :                     preferred_az_id.cloned(),
    2587            0 :                 ));
    2588            0 : 
    2589            0 :                 state.generation = initial_generation;
    2590            0 :                 state.config = config;
    2591            0 :                 if let Err(e) = state.schedule(scheduler, schedule_context) {
    2592            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(e);
    2593            0 :                 }
    2594              : 
    2595              :                 // Only include shards in result if we are attaching: the purpose
    2596              :                 // of the response is to tell the caller where the shards are attached.
    2597            0 :                 if let Some(node_id) = state.intent.get_attached() {
    2598            0 :                     let generation = state
    2599            0 :                         .generation
    2600            0 :                         .expect("Generation is set when in attached mode");
    2601            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2602            0 :                         shard_id: tenant_shard_id,
    2603            0 :                         node_id: *node_id,
    2604            0 :                         generation: generation.into().unwrap(),
    2605            0 :                     })
    2606              :                 } else {
    2607            0 :                     InitialShardScheduleOutcome::NotScheduled
    2608              :                 }
    2609              :             }
    2610              :         }
    2611            0 :     }
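                      : 
                      :     // A minimal std-only sketch of the BTreeMap entry pattern used above, which lets the
                      :     // "shard already exists" and "new shard" paths share a single lookup; the String value
                      :     // is a hypothetical stand-in for a TenantShard.
                      :     //
                      :     //     use std::collections::BTreeMap;
                      :     //     use std::collections::btree_map::Entry;
                      :     //
                      :     //     fn upsert(map: &mut BTreeMap<u32, String>, key: u32) -> &mut String {
                      :     //         match map.entry(key) {
                      :     //             // Pre-existing entry: reuse it (the real code re-schedules the shard).
                      :     //             Entry::Occupied(e) => e.into_mut(),
                      :     //             // New entry: insert it (the real code then schedules the fresh shard).
                      :     //             Entry::Vacant(e) => e.insert(String::from("new")),
                      :     //         }
                      :     //     }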
    2612              : 
    2613              :     /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
    2614              :     /// wait for reconciliation to complete before responding.
    2615            0 :     async fn await_waiters(
    2616            0 :         &self,
    2617            0 :         waiters: Vec<ReconcilerWaiter>,
    2618            0 :         timeout: Duration,
    2619            0 :     ) -> Result<(), ReconcileWaitError> {
    2620            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2621            0 :         for waiter in waiters {
    2622            0 :             let timeout = deadline.duration_since(Instant::now());
    2623            0 :             waiter.wait_timeout(timeout).await?;
    2624              :         }
    2625              : 
    2626            0 :         Ok(())
    2627            0 :     }
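                      : 
                      :     // A std-only sketch of the shared-deadline pattern used above: one overall timeout is
                      :     // turned into a shrinking per-waiter budget. The synchronous sleep is a hypothetical
                      :     // stand-in for `waiter.wait_timeout(..)`.
                      :     //
                      :     //     use std::time::{Duration, Instant};
                      :     //
                      :     //     fn wait_all(work: &[Duration], timeout: Duration) -> bool {
                      :     //         let deadline = Instant::now() + timeout;
                      :     //         for needed in work {
                      :     //             // Remaining budget; saturates to zero once the deadline has passed.
                      :     //             let remaining = deadline.duration_since(Instant::now());
                      :     //             if *needed > remaining {
                      :     //                 return false; // this item would exceed the overall deadline
                      :     //             }
                      :     //             std::thread::sleep(*needed);
                      :     //         }
                      :     //         true
                      :     //     }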
    2628              : 
    2629              :     /// Same as [`Service::await_waiters`], but returns the waiters that are still
    2630              :     /// in progress.
    2631            0 :     async fn await_waiters_remainder(
    2632            0 :         &self,
    2633            0 :         waiters: Vec<ReconcilerWaiter>,
    2634            0 :         timeout: Duration,
    2635            0 :     ) -> Vec<ReconcilerWaiter> {
    2636            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2637            0 :         for waiter in waiters.iter() {
    2638            0 :             let timeout = deadline.duration_since(Instant::now());
    2639            0 :             let _ = waiter.wait_timeout(timeout).await;
    2640              :         }
    2641              : 
    2642            0 :         waiters
    2643            0 :             .into_iter()
    2644            0 :             .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
    2645            0 :             .collect::<Vec<_>>()
    2646            0 :     }
    2647              : 
    2648              :     /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
    2649              :     /// and transform it into either a tenant creation or a series of shard updates.
    2650              :     ///
    2651              :     /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
    2652              :     /// still be returned.
    2653            0 :     fn tenant_location_config_prepare(
    2654            0 :         &self,
    2655            0 :         tenant_id: TenantId,
    2656            0 :         req: TenantLocationConfigRequest,
    2657            0 :     ) -> TenantCreateOrUpdate {
    2658            0 :         let mut updates = Vec::new();
    2659            0 :         let mut locked = self.inner.write().unwrap();
    2660            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    2661            0 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2662              : 
    2663              :         // Use location config mode as an indicator of policy.
    2664            0 :         let placement_policy = match req.config.mode {
    2665            0 :             LocationConfigMode::Detached => PlacementPolicy::Detached,
    2666            0 :             LocationConfigMode::Secondary => PlacementPolicy::Secondary,
    2667              :             LocationConfigMode::AttachedMulti
    2668              :             | LocationConfigMode::AttachedSingle
    2669              :             | LocationConfigMode::AttachedStale => {
    2670            0 :                 if nodes.len() > 1 {
    2671            0 :                     PlacementPolicy::Attached(1)
    2672              :                 } else {
    2673              :                     // Convenience for dev/test: if we just have one pageserver, import
    2674              :                     // tenants into non-HA mode so that scheduling will succeed.
    2675            0 :                     PlacementPolicy::Attached(0)
    2676              :                 }
    2677              :             }
    2678              :         };
    2679              : 
    2680              :         // Ordinarily we do not update scheduling policy, but when making major changes
    2681              :         // like detaching or demoting to secondary-only, we need to force the scheduling
    2682              :         // mode to Active, or the caller's expected outcome (detach it) will not happen.
    2683            0 :         let scheduling_policy = match req.config.mode {
    2684              :             LocationConfigMode::Detached | LocationConfigMode::Secondary => {
    2685              :                 // Special case: when making major changes like detaching or demoting to secondary-only,
    2686              :                 // we need to force the scheduling mode to Active, or nothing will happen.
    2687            0 :                 Some(ShardSchedulingPolicy::Active)
    2688              :             }
    2689              :             LocationConfigMode::AttachedMulti
    2690              :             | LocationConfigMode::AttachedSingle
    2691              :             | LocationConfigMode::AttachedStale => {
    2692              :                 // While attached, continue to respect whatever the existing scheduling mode is.
    2693            0 :                 None
    2694              :             }
    2695              :         };
    2696              : 
    2697            0 :         let mut create = true;
    2698            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2699              :             // Saw an existing shard: this is not a creation
    2700            0 :             create = false;
    2701              : 
    2702              :             // Shards may have initially been created by a Secondary request, where we
    2703              :             // would have left generation as None.
    2704              :             //
    2705              :             // We only update generation the first time we see an attached-mode request,
    2706              :             // and if there is no existing generation set. The caller is responsible for
    2707              :             // ensuring that no non-storage-controller pageserver ever uses a higher
    2708              :             // generation than they passed in here.
    2709              :             use LocationConfigMode::*;
    2710            0 :             let set_generation = match req.config.mode {
    2711            0 :                 AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
    2712            0 :                     req.config.generation.map(Generation::new)
    2713              :                 }
    2714            0 :                 _ => None,
    2715              :             };
    2716              : 
    2717            0 :             updates.push(ShardUpdate {
    2718            0 :                 tenant_shard_id: *shard_id,
    2719            0 :                 placement_policy: placement_policy.clone(),
    2720            0 :                 tenant_config: req.config.tenant_conf.clone(),
    2721            0 :                 generation: set_generation,
    2722            0 :                 scheduling_policy,
    2723            0 :             });
    2724              :         }
    2725              : 
    2726            0 :         if create {
    2727              :             use LocationConfigMode::*;
    2728            0 :             let generation = match req.config.mode {
    2729            0 :                 AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
    2730              :                 // If a caller provided a generation in a non-attached request, ignore it
    2731              :                 // and leave our generation as None: this enables a subsequent update to set
    2732              :                 // the generation when setting an attached mode for the first time.
    2733            0 :                 _ => None,
    2734              :             };
    2735              : 
    2736            0 :             TenantCreateOrUpdate::Create(
    2737            0 :                 // Synthesize a creation request
    2738            0 :                 TenantCreateRequest {
    2739            0 :                     new_tenant_id: tenant_shard_id,
    2740            0 :                     generation,
    2741            0 :                     shard_parameters: ShardParameters {
    2742            0 :                         count: tenant_shard_id.shard_count,
    2743            0 :                         // We only import un-sharded or single-sharded tenants, so stripe
    2744            0 :                         // size can be made up arbitrarily here.
    2745            0 :                         stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE,
    2746            0 :                     },
    2747            0 :                     placement_policy: Some(placement_policy),
    2748            0 :                     config: req.config.tenant_conf,
    2749            0 :                 },
    2750            0 :             )
    2751              :         } else {
    2752            0 :             assert!(!updates.is_empty());
    2753            0 :             TenantCreateOrUpdate::Update(updates)
    2754              :         }
    2755            0 :     }
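                      : 
                      :     // A compact summary of the mapping chosen by the function above:
                      :     //
                      :     //     request mode                  placement policy              scheduling policy
                      :     //     ----------------------------  ----------------------------  -----------------
                      :     //     Detached                      Detached                      forced to Active
                      :     //     Secondary                     Secondary                     forced to Active
                      :     //     AttachedSingle/Multi/Stale    Attached(1), or Attached(0)   left unchanged
                      :     //                                   when only one node is known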
    2756              : 
    2757              :     /// For APIs that might act on tenants with [`PlacementPolicy::Detached`], first check if
    2758              :     /// the tenant is present in memory. If not, load it from the database.  If it is found
    2759              :     /// in neither location, return a NotFound error.
    2760              :     ///
    2761              :     /// Caller must demonstrate they hold a lock guard, as otherwise two callers might try to load
    2762              :     /// it at the same time, or we might race with [`Self::maybe_drop_tenant`].
    2763            0 :     async fn maybe_load_tenant(
    2764            0 :         &self,
    2765            0 :         tenant_id: TenantId,
    2766            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    2767            0 :     ) -> Result<(), ApiError> {
    2768              :         // Check if the tenant is present in memory, and select an AZ to use when loading
    2769              :         // if we will load it.
    2770            0 :         let load_in_az = {
    2771            0 :             let locked = self.inner.read().unwrap();
    2772            0 :             let existing = locked
    2773            0 :                 .tenants
    2774            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    2775            0 :                 .next();
    2776            0 : 
    2777            0 :             // If the tenant is not present in memory, we expect to load it from database,
    2778            0 :             // so let's figure out what AZ to load it into while we have self.inner locked.
    2779            0 :             if existing.is_none() {
    2780            0 :                 locked
    2781            0 :                     .scheduler
    2782            0 :                     .get_az_for_new_tenant()
    2783            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    2784            0 :                         "No AZ with nodes found to load tenant"
    2785            0 :                     )))?
    2786              :             } else {
    2787              :                 // We already have this tenant in memory
    2788            0 :                 return Ok(());
    2789              :             }
    2790              :         };
    2791              : 
    2792            0 :         let tenant_shards = self.persistence.load_tenant(tenant_id).await?;
    2793            0 :         if tenant_shards.is_empty() {
    2794            0 :             return Err(ApiError::NotFound(
    2795            0 :                 anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    2796            0 :             ));
    2797            0 :         }
    2798            0 : 
    2799            0 :         // Update the persistent shards with the AZ that we are about to apply to in-memory state
    2800            0 :         self.persistence
    2801            0 :             .set_tenant_shard_preferred_azs(
    2802            0 :                 tenant_shards
    2803            0 :                     .iter()
    2804            0 :                     .map(|t| {
    2805            0 :                         (
    2806            0 :                             t.get_tenant_shard_id().expect("Corrupt shard in database"),
    2807            0 :                             Some(load_in_az.clone()),
    2808            0 :                         )
    2809            0 :                     })
    2810            0 :                     .collect(),
    2811            0 :             )
    2812            0 :             .await?;
    2813              : 
    2814            0 :         let mut locked = self.inner.write().unwrap();
    2815            0 :         tracing::info!(
    2816            0 :             "Loaded {} shards for tenant {}",
    2817            0 :             tenant_shards.len(),
    2818              :             tenant_id
    2819              :         );
    2820              : 
    2821            0 :         locked.tenants.extend(tenant_shards.into_iter().map(|p| {
    2822            0 :             let intent = IntentState::new(Some(load_in_az.clone()));
    2823            0 :             let shard =
    2824            0 :                 TenantShard::from_persistent(p, intent).expect("Corrupt shard row in database");
    2825            0 : 
    2826            0 :             // Sanity check: when loading on-demand, we should always be loading something Detached
    2827            0 :             debug_assert!(shard.policy == PlacementPolicy::Detached);
    2828            0 :             if shard.policy != PlacementPolicy::Detached {
    2829            0 :                 tracing::error!(
    2830            0 :                     "Tenant shard {} loaded on-demand, but has non-Detached policy {:?}",
    2831              :                     shard.tenant_shard_id,
    2832              :                     shard.policy
    2833              :                 );
    2834            0 :             }
    2835              : 
    2836            0 :             (shard.tenant_shard_id, shard)
    2837            0 :         }));
    2838            0 : 
    2839            0 :         Ok(())
    2840            0 :     }
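                      : 
                      :     // A minimal sketch of the locking pattern used above: a cheap check under the read
                      :     // lock, slow work (the database load) with no lock held, then a write lock to install
                      :     // the result. The string value is a hypothetical stand-in for loaded shard state; the
                      :     // exclusive per-tenant guard held by the caller is what keeps the check-then-insert
                      :     // free of races with concurrent loads or drops.
                      :     //
                      :     //     use std::collections::BTreeMap;
                      :     //     use std::sync::RwLock;
                      :     //
                      :     //     fn load_if_absent(state: &RwLock<BTreeMap<u32, String>>, key: u32) {
                      :     //         if state.read().unwrap().contains_key(&key) {
                      :     //             return; // already in memory
                      :     //         }
                      :     //         let loaded = format!("tenant-{key}"); // stand-in for the database load
                      :     //         state.write().unwrap().insert(key, loaded);
                      :     //     }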
    2841              : 
    2842              :     /// If all shards for a tenant are detached, and in a fully quiescent state (no observed locations on pageservers),
    2843              :     /// and have no reconciler running, then we can drop the tenant from memory.  It will be reloaded on-demand
    2844              :     /// if we are asked to attach it again (see [`Self::maybe_load_tenant`]).
    2845              :     ///
    2846              :     /// Caller must demonstrate they hold a lock guard: otherwise it is unsafe to drop a tenant from
    2847              :     /// memory, since some other function not holding the lock on Self::inner might assume it continues to exist.
    2848            0 :     fn maybe_drop_tenant(
    2849            0 :         &self,
    2850            0 :         tenant_id: TenantId,
    2851            0 :         locked: &mut std::sync::RwLockWriteGuard<ServiceState>,
    2852            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    2853            0 :     ) {
    2854            0 :         let mut tenant_shards = locked.tenants.range(TenantShardId::tenant_range(tenant_id));
    2855            0 :         if tenant_shards.all(|(_id, shard)| {
    2856            0 :             shard.policy == PlacementPolicy::Detached
    2857            0 :                 && shard.reconciler.is_none()
    2858            0 :                 && shard.observed.is_empty()
    2859            0 :         }) {
    2860            0 :             let keys = locked
    2861            0 :                 .tenants
    2862            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    2863            0 :                 .map(|(id, _)| id)
    2864            0 :                 .copied()
    2865            0 :                 .collect::<Vec<_>>();
    2866            0 :             for key in keys {
    2867            0 :                 tracing::info!("Dropping detached tenant shard {} from memory", key);
    2868            0 :                 locked.tenants.remove(&key);
    2869              :             }
    2870            0 :         }
    2871            0 :     }
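                      : 
                      :     // A minimal std-only sketch of the "inspect the whole key range, then remove by
                      :     // collected keys" pattern used above. The (tenant, shard) pairs are hypothetical
                      :     // stand-ins for TenantShardId, and the u64 value for a shard's remaining state.
                      :     //
                      :     //     use std::collections::BTreeMap;
                      :     //
                      :     //     fn drop_if_quiescent(map: &mut BTreeMap<(u32, u32), u64>, tenant: u32) {
                      :     //         let range = (tenant, 0)..=(tenant, u32::MAX);
                      :     //         if map.range(range.clone()).all(|(_, v)| *v == 0) {
                      :     //             // Collect keys first: we cannot remove while iterating the range.
                      :     //             let keys: Vec<(u32, u32)> = map.range(range).map(|(k, _)| *k).collect();
                      :     //             for k in keys {
                      :     //                 map.remove(&k);
                      :     //             }
                      :     //         }
                      :     //     }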
    2872              : 
    2873              :     /// This API is used by the cloud control plane to migrate unsharded tenants that it created
    2874              :     /// directly with pageservers into this service.
    2875              :     ///
    2876              :     /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
    2877              :     /// has attempted to call this API. Failure to abide by this rule may lead to S3 corruption.
    2878              :     /// Think of the first attempt to call this API as a transfer of absolute authority over the
    2879              :     /// tenant's source of generation numbers.
    2880              :     ///
    2881              :     /// The mode in this request provides coarse-grained control of tenants:
    2882              :     /// - Call with mode Attached* to upsert the tenant.
    2883              :     /// - Call with mode Secondary to either onboard a tenant without attaching it, or
    2884              :     ///   to set an existing tenant to PolicyMode::Secondary
    2885              :     /// - Call with mode Detached to switch to PolicyMode::Detached
    2886            0 :     pub(crate) async fn tenant_location_config(
    2887            0 :         &self,
    2888            0 :         tenant_shard_id: TenantShardId,
    2889            0 :         req: TenantLocationConfigRequest,
    2890            0 :     ) -> Result<TenantLocationConfigResponse, ApiError> {
    2891              :         // We require an exclusive lock, because we are updating both persistent and in-memory state
    2892            0 :         let _tenant_lock = trace_exclusive_lock(
    2893            0 :             &self.tenant_op_locks,
    2894            0 :             tenant_shard_id.tenant_id,
    2895            0 :             TenantOperations::LocationConfig,
    2896            0 :         )
    2897            0 :         .await;
    2898              : 
    2899            0 :         let tenant_id = if !tenant_shard_id.is_unsharded() {
    2900            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2901            0 :                 "This API is for importing single-sharded or unsharded tenants"
    2902            0 :             )));
    2903              :         } else {
    2904            0 :             tenant_shard_id.tenant_id
    2905            0 :         };
    2906            0 : 
    2907            0 :         // In case we are waking up a Detached tenant
    2908            0 :         match self.maybe_load_tenant(tenant_id, &_tenant_lock).await {
    2909            0 :             Ok(()) | Err(ApiError::NotFound(_)) => {
    2910            0 :                 // This is a creation or an update
    2911            0 :             }
    2912            0 :             Err(e) => {
    2913            0 :                 return Err(e);
    2914              :             }
    2915              :         };
    2916              : 
    2917              :         // First check if this is a creation or an update
    2918            0 :         let create_or_update = self.tenant_location_config_prepare(tenant_id, req);
    2919            0 : 
    2920            0 :         let mut result = TenantLocationConfigResponse {
    2921            0 :             shards: Vec::new(),
    2922            0 :             stripe_size: None,
    2923            0 :         };
    2924            0 :         let waiters = match create_or_update {
    2925            0 :             TenantCreateOrUpdate::Create(create_req) => {
    2926            0 :                 let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
    2927            0 :                 result.shards = create_resp
    2928            0 :                     .shards
    2929            0 :                     .into_iter()
    2930            0 :                     .map(|s| TenantShardLocation {
    2931            0 :                         node_id: s.node_id,
    2932            0 :                         shard_id: s.shard_id,
    2933            0 :                     })
    2934            0 :                     .collect();
    2935            0 :                 waiters
    2936              :             }
    2937            0 :             TenantCreateOrUpdate::Update(updates) => {
    2938            0 :                 // Persist updates
    2939            0 :                 // Ordering: write to the database before applying changes in-memory, so that
    2940            0 :                 // we will not appear time-travel backwards on a restart.
    2941            0 : 
    2942            0 :                 let mut schedule_context = ScheduleContext::default();
    2943              :                 for ShardUpdate {
    2944            0 :                     tenant_shard_id,
    2945            0 :                     placement_policy,
    2946            0 :                     tenant_config,
    2947            0 :                     generation,
    2948            0 :                     scheduling_policy,
    2949            0 :                 } in &updates
    2950              :                 {
    2951            0 :                     self.persistence
    2952            0 :                         .update_tenant_shard(
    2953            0 :                             TenantFilter::Shard(*tenant_shard_id),
    2954            0 :                             Some(placement_policy.clone()),
    2955            0 :                             Some(tenant_config.clone()),
    2956            0 :                             *generation,
    2957            0 :                             *scheduling_policy,
    2958            0 :                         )
    2959            0 :                         .await?;
    2960              :                 }
    2961              : 
    2962              :                 // Apply updates in-memory
    2963            0 :                 let mut waiters = Vec::new();
    2964            0 :                 {
    2965            0 :                     let mut locked = self.inner.write().unwrap();
    2966            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    2967              : 
    2968              :                     for ShardUpdate {
    2969            0 :                         tenant_shard_id,
    2970            0 :                         placement_policy,
    2971            0 :                         tenant_config,
    2972            0 :                         generation: update_generation,
    2973            0 :                         scheduling_policy,
    2974            0 :                     } in updates
    2975              :                     {
    2976            0 :                         let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    2977            0 :                             tracing::warn!("Shard {tenant_shard_id} removed while updating");
    2978            0 :                             continue;
    2979              :                         };
    2980              : 
    2981              :                         // Update stripe size
    2982            0 :                         if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
    2983            0 :                             result.stripe_size = Some(shard.shard.stripe_size);
    2984            0 :                         }
    2985              : 
    2986            0 :                         shard.policy = placement_policy;
    2987            0 :                         shard.config = tenant_config;
    2988            0 :                         if let Some(generation) = update_generation {
    2989            0 :                             shard.generation = Some(generation);
    2990            0 :                         }
    2991              : 
    2992            0 :                         if let Some(scheduling_policy) = scheduling_policy {
    2993            0 :                             shard.set_scheduling_policy(scheduling_policy);
    2994            0 :                         }
    2995              : 
    2996            0 :                         shard.schedule(scheduler, &mut schedule_context)?;
    2997              : 
    2998            0 :                         let maybe_waiter =
    2999            0 :                             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3000            0 :                         if let Some(waiter) = maybe_waiter {
    3001            0 :                             waiters.push(waiter);
    3002            0 :                         }
    3003              : 
    3004            0 :                         if let Some(node_id) = shard.intent.get_attached() {
    3005            0 :                             result.shards.push(TenantShardLocation {
    3006            0 :                                 shard_id: tenant_shard_id,
    3007            0 :                                 node_id: *node_id,
    3008            0 :                             })
    3009            0 :                         }
    3010              :                     }
    3011              :                 }
    3012            0 :                 waiters
    3013              :             }
    3014              :         };
    3015              : 
    3016            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3017              :             // Do not treat a reconcile error as fatal: we have already applied any requested
    3018              :             // Intent changes, and the reconcile can fail for external reasons like unavailable
    3019              :             // compute notification API.  In these cases, it is important that we do not
    3020              :             // cause the cloud control plane to retry forever on this API.
    3021            0 :             tracing::warn!(
    3022            0 :                 "Failed to reconcile after /location_config: {e}, returning success anyway"
    3023              :             );
    3024            0 :         }
    3025              : 
    3026              :         // Logging the full result is useful because it lets us cross-check what the cloud control
    3027              :         // plane's tenant_shards table should contain.
    3028            0 :         tracing::info!("Complete, returning {result:?}");
    3029              : 
    3030            0 :         Ok(result)
    3031            0 :     }
    3032              : 
    3033            0 :     pub(crate) async fn tenant_config_patch(
    3034            0 :         &self,
    3035            0 :         req: TenantConfigPatchRequest,
    3036            0 :     ) -> Result<(), ApiError> {
    3037            0 :         let _tenant_lock = trace_exclusive_lock(
    3038            0 :             &self.tenant_op_locks,
    3039            0 :             req.tenant_id,
    3040            0 :             TenantOperations::ConfigPatch,
    3041            0 :         )
    3042            0 :         .await;
    3043              : 
    3044            0 :         let tenant_id = req.tenant_id;
    3045            0 :         let patch = req.config;
    3046            0 : 
    3047            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3048              : 
    3049            0 :         let base = {
    3050            0 :             let locked = self.inner.read().unwrap();
    3051            0 :             let shards = locked
    3052            0 :                 .tenants
    3053            0 :                 .range(TenantShardId::tenant_range(req.tenant_id));
    3054            0 : 
    3055            0 :             let mut configs = shards.map(|(_sid, shard)| &shard.config).peekable();
    3056              : 
    3057            0 :             let first = match configs.peek() {
    3058            0 :                 Some(first) => (*first).clone(),
    3059              :                 None => {
    3060            0 :                     return Err(ApiError::NotFound(
    3061            0 :                         anyhow::anyhow!("Tenant {} not found", req.tenant_id).into(),
    3062            0 :                     ));
    3063              :                 }
    3064              :             };
    3065              : 
    3066            0 :             if !configs.all_equal() {
    3067            0 :                 tracing::error!("Tenant configs for {} are mismatched. ", req.tenant_id);
    3068              :                 // This can't happen because we atomically update the database records
    3069              :                 // of all shards to the new value in [`Self::set_tenant_config_and_reconcile`].
    3070            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3071            0 :                     "Tenant configs for {} are mismatched",
    3072            0 :                     req.tenant_id
    3073            0 :                 )));
    3074            0 :             }
    3075            0 : 
    3076            0 :             first
    3077              :         };
    3078              : 
    3079            0 :         let updated_config = base
    3080            0 :             .apply_patch(patch)
    3081            0 :             .map_err(|err| ApiError::BadRequest(anyhow::anyhow!(err)))?;
    3082            0 :         self.set_tenant_config_and_reconcile(tenant_id, updated_config)
    3083            0 :             .await
    3084            0 :     }
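                      : 
                      :     // The cross-shard config check above relies on itertools' `all_equal`. A minimal
                      :     // sketch with plain integers standing in for per-shard TenantConfig values:
                      :     //
                      :     //     use itertools::Itertools;
                      :     //
                      :     //     fn shards_agree(configs: &[u32]) -> bool {
                      :     //         // true for an empty or single-element iterator, false on any mismatch
                      :     //         configs.iter().all_equal()
                      :     //     }
                      :     //
                      :     //     assert!(shards_agree(&[7, 7, 7]));
                      :     //     assert!(!shards_agree(&[7, 8]));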
    3085              : 
    3086            0 :     pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
    3087              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3088            0 :         let _tenant_lock = trace_exclusive_lock(
    3089            0 :             &self.tenant_op_locks,
    3090            0 :             req.tenant_id,
    3091            0 :             TenantOperations::ConfigSet,
    3092            0 :         )
    3093            0 :         .await;
    3094              : 
    3095            0 :         self.maybe_load_tenant(req.tenant_id, &_tenant_lock).await?;
    3096              : 
    3097            0 :         self.set_tenant_config_and_reconcile(req.tenant_id, req.config)
    3098            0 :             .await
    3099            0 :     }
    3100              : 
    3101            0 :     async fn set_tenant_config_and_reconcile(
    3102            0 :         &self,
    3103            0 :         tenant_id: TenantId,
    3104            0 :         config: TenantConfig,
    3105            0 :     ) -> Result<(), ApiError> {
    3106            0 :         self.persistence
    3107            0 :             .update_tenant_shard(
    3108            0 :                 TenantFilter::Tenant(tenant_id),
    3109            0 :                 None,
    3110            0 :                 Some(config.clone()),
    3111            0 :                 None,
    3112            0 :                 None,
    3113            0 :             )
    3114            0 :             .await?;
    3115              : 
    3116            0 :         let waiters = {
    3117            0 :             let mut waiters = Vec::new();
    3118            0 :             let mut locked = self.inner.write().unwrap();
    3119            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    3120            0 :             for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3121            0 :                 shard.config = config.clone();
    3122            0 :                 if let Some(waiter) =
    3123            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3124            0 :                 {
    3125            0 :                     waiters.push(waiter);
    3126            0 :                 }
    3127              :             }
    3128            0 :             waiters
    3129              :         };
    3130              : 
    3131            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3132              :             // Treat this as success because we have stored the configuration.  If e.g.
    3133              :             // a node was unavailable at this time, it should not stop us accepting a
    3134              :             // configuration change.
    3135            0 :             tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
    3136            0 :         }
    3137              : 
    3138            0 :         Ok(())
    3139            0 :     }
    3140              : 
    3141            0 :     pub(crate) fn tenant_config_get(
    3142            0 :         &self,
    3143            0 :         tenant_id: TenantId,
    3144            0 :     ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
    3145            0 :         let config = {
    3146            0 :             let locked = self.inner.read().unwrap();
    3147            0 : 
    3148            0 :             match locked
    3149            0 :                 .tenants
    3150            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3151            0 :                 .next()
    3152              :             {
    3153            0 :                 Some((_tenant_shard_id, shard)) => shard.config.clone(),
    3154              :                 None => {
    3155            0 :                     return Err(ApiError::NotFound(
    3156            0 :                         anyhow::anyhow!("Tenant not found").into(),
    3157            0 :                     ));
    3158              :                 }
    3159              :             }
    3160              :         };
    3161              : 
    3162              :         // Unlike the pageserver, we do not have a set of global defaults: the config is
    3163              :         // entirely per-tenant.  Therefore the distinction between `tenant_specific_overrides`
    3164              :         // and `effective_config` in the response is meaningless, but we retain that syntax
    3165              :         // in order to remain compatible with the pageserver API.
    3166              : 
    3167            0 :         let response = HashMap::from([
    3168              :             (
    3169              :                 "tenant_specific_overrides",
    3170            0 :                 serde_json::to_value(&config)
    3171            0 :                     .context("serializing tenant specific overrides")
    3172            0 :                     .map_err(ApiError::InternalServerError)?,
    3173              :             ),
    3174              :             (
    3175            0 :                 "effective_config",
    3176            0 :                 serde_json::to_value(&config)
    3177            0 :                     .context("serializing effective config")
    3178            0 :                     .map_err(ApiError::InternalServerError)?,
    3179              :             ),
    3180              :         ]);
    3181              : 
    3182            0 :         Ok(response)
    3183            0 :     }
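                      : 
                      :     // An example of the response shape built above, with a hypothetical config value;
                      :     // both keys carry the same config, purely to stay wire-compatible with the
                      :     // pageserver's endpoint:
                      :     //
                      :     //     {
                      :     //       "tenant_specific_overrides": { "pitr_interval": "7 days" },
                      :     //       "effective_config":           { "pitr_interval": "7 days" }
                      :     //     }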
    3184              : 
    3185            0 :     pub(crate) async fn tenant_time_travel_remote_storage(
    3186            0 :         &self,
    3187            0 :         time_travel_req: &TenantTimeTravelRequest,
    3188            0 :         tenant_id: TenantId,
    3189            0 :         timestamp: Cow<'_, str>,
    3190            0 :         done_if_after: Cow<'_, str>,
    3191            0 :     ) -> Result<(), ApiError> {
    3192            0 :         let _tenant_lock = trace_exclusive_lock(
    3193            0 :             &self.tenant_op_locks,
    3194            0 :             tenant_id,
    3195            0 :             TenantOperations::TimeTravelRemoteStorage,
    3196            0 :         )
    3197            0 :         .await;
    3198              : 
    3199            0 :         let node = {
    3200            0 :             let mut locked = self.inner.write().unwrap();
    3201              :             // Just a sanity check to prevent misuse: the API expects that the tenant is fully
    3202              :             // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
    3203              :             // but only at the start of the process, so it's really just to prevent operator
    3204              :             // mistakes.
    3205            0 :             for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
    3206            0 :                 if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
    3207              :                 {
    3208            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3209            0 :                         "We want tenant to be attached in shard with tenant_shard_id={shard_id}"
    3210            0 :                     )));
    3211            0 :                 }
    3212            0 :                 let maybe_attached = shard
    3213            0 :                     .observed
    3214            0 :                     .locations
    3215            0 :                     .iter()
    3216            0 :                     .filter_map(|(node_id, observed_location)| {
    3217            0 :                         observed_location
    3218            0 :                             .conf
    3219            0 :                             .as_ref()
    3220            0 :                             .map(|loc| (node_id, observed_location, loc.mode))
    3221            0 :                     })
    3222            0 :                     .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
    3223            0 :                 if let Some((node_id, _observed_location, mode)) = maybe_attached {
    3224            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3225            0 :                         "We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}"
    3226            0 :                     )));
    3227            0 :                 }
    3228              :             }
    3229            0 :             let scheduler = &mut locked.scheduler;
    3230              :             // Right now we only perform the operation on a single node without parallelization
    3231              :             // TODO fan out the operation to multiple nodes for better performance
    3232            0 :             let node_id = scheduler.any_available_node()?;
    3233            0 :             let node = locked
    3234            0 :                 .nodes
    3235            0 :                 .get(&node_id)
    3236            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3237            0 :             node.clone()
    3238            0 :         };
    3239            0 : 
    3240            0 :         // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
    3241            0 :         let mut counts = time_travel_req
    3242            0 :             .shard_counts
    3243            0 :             .iter()
    3244            0 :             .copied()
    3245            0 :             .collect::<HashSet<_>>()
    3246            0 :             .into_iter()
    3247            0 :             .collect::<Vec<_>>();
    3248            0 :         counts.sort_unstable();
    3249              : 
    3250            0 :         for count in counts {
    3251            0 :             let shard_ids = (0..count.count())
    3252            0 :                 .map(|i| TenantShardId {
    3253            0 :                     tenant_id,
    3254            0 :                     shard_number: ShardNumber(i),
    3255            0 :                     shard_count: count,
    3256            0 :                 })
    3257            0 :                 .collect::<Vec<_>>();
    3258            0 :             for tenant_shard_id in shard_ids {
    3259            0 :                 let client = PageserverClient::new(
    3260            0 :                     node.get_id(),
    3261            0 :                     self.http_client.clone(),
    3262            0 :                     node.base_url(),
    3263            0 :                     self.config.pageserver_jwt_token.as_deref(),
    3264            0 :                 );
    3265            0 : 
    3266            0 :                 tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
    3267              : 
    3268            0 :                 client
    3269            0 :                     .tenant_time_travel_remote_storage(
    3270            0 :                         tenant_shard_id,
    3271            0 :                         &timestamp,
    3272            0 :                         &done_if_after,
    3273            0 :                     )
    3274            0 :                     .await
    3275            0 :                     .map_err(|e| {
    3276            0 :                         ApiError::InternalServerError(anyhow::anyhow!(
    3277            0 :                             "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
    3278            0 :                             node
    3279            0 :                         ))
    3280            0 :                     })?;
    3281              :             }
    3282              :         }
    3283            0 :         Ok(())
    3284            0 :     }
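                      : 
                      :     // A minimal std-only sketch of the shard enumeration above: dedupe the historical
                      :     // shard counts, sort them, then emit one (shard_number, shard_count) pair per shard.
                      :     // Plain u8s are hypothetical stand-ins for the ShardNumber/ShardCount newtypes.
                      :     //
                      :     //     use std::collections::HashSet;
                      :     //
                      :     //     fn all_shards(shard_counts: &[u8]) -> Vec<(u8, u8)> {
                      :     //         let mut counts: Vec<u8> =
                      :     //             shard_counts.iter().copied().collect::<HashSet<_>>().into_iter().collect();
                      :     //         counts.sort_unstable();
                      :     //         counts
                      :     //             .into_iter()
                      :     //             .flat_map(|c| (0..c).map(move |i| (i, c)))
                      :     //             .collect()
                      :     //     }
                      :     //
                      :     //     assert_eq!(all_shards(&[2, 2]), vec![(0, 2), (1, 2)]);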
    3285              : 
    3286            0 :     pub(crate) async fn tenant_secondary_download(
    3287            0 :         &self,
    3288            0 :         tenant_id: TenantId,
    3289            0 :         wait: Option<Duration>,
    3290            0 :     ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
    3291            0 :         let _tenant_lock = trace_shared_lock(
    3292            0 :             &self.tenant_op_locks,
    3293            0 :             tenant_id,
    3294            0 :             TenantOperations::SecondaryDownload,
    3295            0 :         )
    3296            0 :         .await;
    3297              : 
    3298              :         // Acquire lock and yield the collection of shard-node tuples which we will send requests onward to
    3299            0 :         let targets = {
    3300            0 :             let locked = self.inner.read().unwrap();
    3301            0 :             let mut targets = Vec::new();
    3302              : 
    3303            0 :             for (tenant_shard_id, shard) in
    3304            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3305              :             {
    3306            0 :                 for node_id in shard.intent.get_secondary() {
    3307            0 :                     let node = locked
    3308            0 :                         .nodes
    3309            0 :                         .get(node_id)
    3310            0 :                         .expect("Pageservers may not be deleted while referenced");
    3311            0 : 
    3312            0 :                     targets.push((*tenant_shard_id, node.clone()));
    3313            0 :                 }
    3314              :             }
    3315            0 :             targets
    3316            0 :         };
    3317            0 : 
    3318            0 :         // Issue concurrent requests to all shards' locations
    3319            0 :         let mut futs = FuturesUnordered::new();
    3320            0 :         for (tenant_shard_id, node) in targets {
    3321            0 :             let client = PageserverClient::new(
    3322            0 :                 node.get_id(),
    3323            0 :                 self.http_client.clone(),
    3324            0 :                 node.base_url(),
    3325            0 :                 self.config.pageserver_jwt_token.as_deref(),
    3326            0 :             );
    3327            0 :             futs.push(async move {
    3328            0 :                 let result = client
    3329            0 :                     .tenant_secondary_download(tenant_shard_id, wait)
    3330            0 :                     .await;
    3331            0 :                 (result, node, tenant_shard_id)
    3332            0 :             })
    3333              :         }
    3334              : 
    3335              :         // Handle any errors returned by pageservers.  This includes cases like this request racing with
    3336              :         // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
    3337              :         // well as more general cases like 503s, 500s, or timeouts.
    3338            0 :         let mut aggregate_progress = SecondaryProgress::default();
    3339            0 :         let mut aggregate_status: Option<StatusCode> = None;
    3340            0 :         let mut error: Option<mgmt_api::Error> = None;
    3341            0 :         while let Some((result, node, tenant_shard_id)) = futs.next().await {
    3342            0 :             match result {
    3343            0 :                 Err(e) => {
    3344            0 :                     // Secondary downloads are advisory: if one shard's download fails we log it and keep going rather than
    3345            0 :                     // aborting, so that whoever is calling us can proceed with whatever migration they're doing, albeit with
    3346            0 :                     // a slightly less warm cache than they had hoped for.
    3347            0 :                     tracing::warn!("Secondary download error from pageserver {node}: {e}",);
    3348            0 :                     error = Some(e)
    3349              :                 }
    3350            0 :                 Ok((status_code, progress)) => {
    3351            0 :                     tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
    3352            0 :                     aggregate_progress.layers_downloaded += progress.layers_downloaded;
    3353            0 :                     aggregate_progress.layers_total += progress.layers_total;
    3354            0 :                     aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
    3355            0 :                     aggregate_progress.bytes_total += progress.bytes_total;
    3356            0 :                     aggregate_progress.heatmap_mtime =
    3357            0 :                         std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
    3358            0 :                     aggregate_status = match aggregate_status {
    3359            0 :                         None => Some(status_code),
    3360            0 :                         Some(StatusCode::OK) => Some(status_code),
    3361            0 :                         Some(cur) => {
    3362            0 :                             // Other status codes (e.g. 202) -- do not overwrite.
    3363            0 :                             Some(cur)
    3364              :                         }
    3365              :                     };
    3366              :                 }
    3367              :             }
    3368              :         }
    3369              : 
    3370              :         // If any of the shards return 202, indicate our result as 202.
    3371            0 :         match aggregate_status {
    3372              :             None => {
    3373            0 :                 match error {
    3374            0 :                     Some(e) => {
    3375            0 :                         // No successes, and an error: surface it
    3376            0 :                         Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
    3377              :                     }
    3378              :                     None => {
    3379              :                         // No shards found
    3380            0 :                         Err(ApiError::NotFound(
    3381            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    3382            0 :                         ))
    3383              :                     }
    3384              :                 }
    3385              :             }
    3386            0 :             Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
    3387              :         }
    3388            0 :     }
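                      : 
                      :     // A minimal sketch of the FuturesUnordered fan-out used above: one future per target,
                      :     // results consumed in completion order. Doubling an integer is a hypothetical stand-in
                      :     // for the per-shard HTTP call, and summing stands in for merging SecondaryProgress.
                      :     //
                      :     //     use futures::StreamExt;
                      :     //     use futures::stream::FuturesUnordered;
                      :     //
                      :     //     async fn fan_out(targets: Vec<u64>) -> u64 {
                      :     //         let mut futs: FuturesUnordered<_> = targets
                      :     //             .into_iter()
                      :     //             .map(|t| async move { t * 2 })
                      :     //             .collect();
                      :     //         let mut total = 0;
                      :     //         while let Some(partial) = futs.next().await {
                      :     //             total += partial;
                      :     //         }
                      :     //         total
                      :     //     }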
    3389              : 
    3390            0 :     pub(crate) async fn tenant_delete(
    3391            0 :         self: &Arc<Self>,
    3392            0 :         tenant_id: TenantId,
    3393            0 :     ) -> Result<StatusCode, ApiError> {
    3394            0 :         let _tenant_lock =
    3395            0 :             trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
    3396              : 
    3397            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3398              : 
    3399              :         // Detach all shards. This also deletes local pageserver shard data.
    3400            0 :         let (detach_waiters, node) = {
    3401            0 :             let mut detach_waiters = Vec::new();
    3402            0 :             let mut locked = self.inner.write().unwrap();
    3403            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3404            0 :             for (_, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3405              :                 // Update the tenant's intent to remove all attachments
    3406            0 :                 shard.policy = PlacementPolicy::Detached;
    3407            0 :                 shard
    3408            0 :                     .schedule(scheduler, &mut ScheduleContext::default())
    3409            0 :                     .expect("De-scheduling is infallible");
    3410            0 :                 debug_assert!(shard.intent.get_attached().is_none());
    3411            0 :                 debug_assert!(shard.intent.get_secondary().is_empty());
    3412              : 
    3413            0 :                 if let Some(waiter) =
    3414            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3415            0 :                 {
    3416            0 :                     detach_waiters.push(waiter);
    3417            0 :                 }
    3418              :             }
    3419              : 
    3420              :             // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
    3421              :             // was attached, just has to be able to see the S3 content)
    3422            0 :             let node_id = scheduler.any_available_node()?;
    3423            0 :             let node = nodes
    3424            0 :                 .get(&node_id)
    3425            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3426            0 :             (detach_waiters, node.clone())
    3427            0 :         };
    3428            0 : 
    3429            0 :         // This reconcile wait can fail in a few ways:
    3430            0 :         //  A there is a very long queue for the reconciler semaphore
    3431            0 :         //  B some pageserver is failing to handle a detach promptly
    3432            0 :         //  C some pageserver goes offline right at the moment we send it a request.
    3433            0 :         //
    3434            0 :         // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
    3435            0 :         // the next attempt to reconcile will silently skip detaches for an offline node and succeed.  If B happens,
    3436            0 :         // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
    3437            0 :         // deleting the underlying data).
    3438            0 :         self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
    3439            0 :             .await?;
    3440              : 
    3441              :         // Delete the entire tenant (all shards) from remote storage via a random pageserver.
    3442              :         // Passing an unsharded tenant ID will cause the pageserver to remove all remote paths with
    3443              :         // the tenant ID prefix, including all shards (even possibly stale ones).
    3444            0 :         match node
    3445            0 :             .with_client_retries(
    3446            0 :                 |client| async move {
    3447            0 :                     client
    3448            0 :                         .tenant_delete(TenantShardId::unsharded(tenant_id))
    3449            0 :                         .await
    3450            0 :                 },
    3451            0 :                 &self.http_client,
    3452            0 :                 &self.config.pageserver_jwt_token,
    3453            0 :                 1,
    3454            0 :                 3,
    3455            0 :                 RECONCILE_TIMEOUT,
    3456            0 :                 &self.cancel,
    3457            0 :             )
    3458            0 :             .await
    3459            0 :             .unwrap_or(Err(mgmt_api::Error::Cancelled))
    3460              :         {
    3461            0 :             Ok(_) => {}
    3462              :             Err(mgmt_api::Error::Cancelled) => {
    3463            0 :                 return Err(ApiError::ShuttingDown);
    3464              :             }
    3465            0 :             Err(e) => {
    3466            0 :                 // This is unexpected: remote deletion should be infallible, unless the object store
    3467            0 :                 // at large is unavailable.
    3468            0 :                 tracing::error!("Error deleting via node {node}: {e}");
    3469            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    3470              :             }
    3471              :         }
    3472              : 
    3473              :         // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
    3474              :         // our in-memory state and database state.
    3475              : 
    3476              :         // Ordering: we delete persistent state first: if we then
    3477              :         // crash, we will drop the in-memory state.
    3478              : 
    3479              :         // Drop persistent state.
    3480            0 :         self.persistence.delete_tenant(tenant_id).await?;
    3481              : 
    3482              :         // Drop in-memory state
    3483              :         {
    3484            0 :             let mut locked = self.inner.write().unwrap();
    3485            0 :             let (_nodes, tenants, scheduler) = locked.parts_mut();
    3486              : 
    3487              :             // Dereference Scheduler from shards before dropping them
    3488            0 :             for (_tenant_shard_id, shard) in
    3489            0 :                 tenants.range_mut(TenantShardId::tenant_range(tenant_id))
    3490            0 :             {
    3491            0 :                 shard.intent.clear(scheduler);
    3492            0 :             }
    3493              : 
    3494            0 :             tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
    3495            0 :             tracing::info!(
    3496            0 :                 "Deleted tenant {tenant_id}, now have {} tenants",
    3497            0 :                 locked.tenants.len()
    3498              :             );
    3499              :         };
    3500              : 
    3501              :         // Delete the tenant from safekeepers (if needed)
    3502            0 :         self.tenant_delete_safekeepers(tenant_id)
    3503            0 :             .instrument(tracing::info_span!("tenant_delete_safekeepers", %tenant_id))
    3504            0 :             .await?;
    3505              : 
    3506              :         // Success is represented as 404, to imitate the existing pageserver deletion API
    3507            0 :         Ok(StatusCode::NOT_FOUND)
    3508            0 :     }
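
// A hypothetical stand-alone sketch of the crash-safe ordering used in tenant_delete
// above: persistent state is dropped before in-memory state, so a crash between the
// two steps never leaves an in-memory entry without a backing database row. The
// structs are simplified stand-ins, not real controller types.
use std::collections::HashMap;

struct Database { tenants: HashMap<u64, String> }
struct InMemoryState { tenants: HashMap<u64, String> }

fn delete_tenant(db: &mut Database, mem: &mut InMemoryState, tenant_id: u64) {
    // 1. Drop persistent state first.
    db.tenants.remove(&tenant_id);
    // 2. Only then drop in-memory state. If we crash before this step, the next
    //    startup simply won't load the tenant, which is the desired end state.
    mem.tenants.remove(&tenant_id);
}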
    3509              : 
    3510              :     /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
    3511              :     /// for a tenant.  The TenantConfig is passed through to pageservers, whereas this function modifies
     3512              :     /// the tenant's policies (configuration) within the storage controller.
    3513            0 :     pub(crate) async fn tenant_update_policy(
    3514            0 :         &self,
    3515            0 :         tenant_id: TenantId,
    3516            0 :         req: TenantPolicyRequest,
    3517            0 :     ) -> Result<(), ApiError> {
    3518              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3519            0 :         let _tenant_lock = trace_exclusive_lock(
    3520            0 :             &self.tenant_op_locks,
    3521            0 :             tenant_id,
    3522            0 :             TenantOperations::UpdatePolicy,
    3523            0 :         )
    3524            0 :         .await;
    3525              : 
    3526            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3527              : 
    3528            0 :         failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
    3529              : 
    3530              :         let TenantPolicyRequest {
    3531            0 :             placement,
    3532            0 :             mut scheduling,
    3533            0 :         } = req;
    3534              : 
    3535            0 :         if let Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) = placement {
     3536              :             // When someone configures a tenant to detach (or become secondary-only), we force the
     3537              :             // scheduling policy to Active so that the change can take effect.
    3538            0 :             if scheduling.is_none() {
    3539            0 :                 scheduling = Some(ShardSchedulingPolicy::Active);
    3540            0 :             }
    3541            0 :         }
    3542              : 
    3543            0 :         self.persistence
    3544            0 :             .update_tenant_shard(
    3545            0 :                 TenantFilter::Tenant(tenant_id),
    3546            0 :                 placement.clone(),
    3547            0 :                 None,
    3548            0 :                 None,
    3549            0 :                 scheduling,
    3550            0 :             )
    3551            0 :             .await?;
    3552              : 
    3553            0 :         let mut schedule_context = ScheduleContext::default();
    3554            0 :         let mut locked = self.inner.write().unwrap();
    3555            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    3556            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3557            0 :             if let Some(placement) = &placement {
    3558            0 :                 shard.policy = placement.clone();
    3559            0 : 
    3560            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3561            0 :                                "Updated placement policy to {placement:?}");
    3562            0 :             }
    3563              : 
    3564            0 :             if let Some(scheduling) = &scheduling {
    3565            0 :                 shard.set_scheduling_policy(*scheduling);
    3566            0 : 
    3567            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3568            0 :                                "Updated scheduling policy to {scheduling:?}");
    3569            0 :             }
    3570              : 
    3571              :             // In case scheduling is being switched back on, try it now.
    3572            0 :             shard.schedule(scheduler, &mut schedule_context).ok();
    3573            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3574              :         }
    3575              : 
    3576            0 :         Ok(())
    3577            0 :     }
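
// A hypothetical sketch of the policy normalisation above: when the caller asks for a
// Detached or Secondary placement without choosing a scheduling policy, the scheduling
// policy is forced to Active so the change can actually be acted on. The enums are
// simplified stand-ins for PlacementPolicy / ShardSchedulingPolicy.
#[derive(Debug, PartialEq)]
enum Placement { Attached(usize), Secondary, Detached }

#[derive(Debug, PartialEq)]
enum Scheduling { Active, Paused }

fn normalize(placement: &Option<Placement>, scheduling: &mut Option<Scheduling>) {
    if let Some(Placement::Detached | Placement::Secondary) = placement {
        if scheduling.is_none() {
            *scheduling = Some(Scheduling::Active);
        }
    }
}

#[test]
fn detach_forces_active_scheduling() {
    let mut scheduling = None;
    normalize(&Some(Placement::Detached), &mut scheduling);
    assert_eq!(scheduling, Some(Scheduling::Active));
}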
    3578              : 
    3579            0 :     pub(crate) async fn tenant_timeline_create_pageservers(
    3580            0 :         &self,
    3581            0 :         tenant_id: TenantId,
    3582            0 :         mut create_req: TimelineCreateRequest,
    3583            0 :     ) -> Result<TimelineInfo, ApiError> {
    3584            0 :         tracing::info!(
    3585            0 :             "Creating timeline {}/{}",
    3586              :             tenant_id,
    3587              :             create_req.new_timeline_id,
    3588              :         );
    3589              : 
    3590            0 :         self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    3591            0 :             if targets.0.is_empty() {
    3592            0 :                 return Err(ApiError::NotFound(
    3593            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3594            0 :                 ));
    3595            0 :             };
    3596            0 : 
    3597            0 :             let (shard_zero_tid, shard_zero_locations) =
    3598            0 :                 targets.0.pop_first().expect("Must have at least one shard");
    3599            0 :             assert!(shard_zero_tid.is_shard_zero());
    3600              : 
    3601            0 :             async fn create_one(
    3602            0 :                 tenant_shard_id: TenantShardId,
    3603            0 :                 locations: ShardMutationLocations,
    3604            0 :                 http_client: reqwest::Client,
    3605            0 :                 jwt: Option<String>,
    3606            0 :                 create_req: TimelineCreateRequest,
    3607            0 :             ) -> Result<TimelineInfo, ApiError> {
    3608            0 :                 let latest = locations.latest.node;
    3609            0 : 
    3610            0 :                 tracing::info!(
    3611            0 :                     "Creating timeline on shard {}/{}, attached to node {latest} in generation {:?}",
    3612              :                     tenant_shard_id,
    3613              :                     create_req.new_timeline_id,
    3614              :                     locations.latest.generation
    3615              :                 );
    3616              : 
    3617            0 :                 let client =
    3618            0 :                     PageserverClient::new(latest.get_id(), http_client.clone(), latest.base_url(), jwt.as_deref());
    3619              : 
    3620            0 :                 let timeline_info = client
    3621            0 :                     .timeline_create(tenant_shard_id, &create_req)
    3622            0 :                     .await
    3623            0 :                     .map_err(|e| passthrough_api_error(&latest, e))?;
    3624              : 
    3625              :                 // We propagate timeline creations to all attached locations such that a compute
    3626              :                 // for the new timeline is able to start regardless of the current state of the
    3627              :                 // tenant shard reconciliation.
    3628            0 :                 for location in locations.other {
    3629            0 :                     tracing::info!(
    3630            0 :                         "Creating timeline on shard {}/{}, stale attached to node {} in generation {:?}",
    3631              :                         tenant_shard_id,
    3632              :                         create_req.new_timeline_id,
    3633              :                         location.node,
    3634              :                         location.generation
    3635              :                     );
    3636              : 
    3637            0 :                     let client = PageserverClient::new(
    3638            0 :                         location.node.get_id(),
    3639            0 :                         http_client.clone(),
    3640            0 :                         location.node.base_url(),
    3641            0 :                         jwt.as_deref(),
    3642            0 :                     );
    3643              : 
    3644            0 :                     let res = client
    3645            0 :                         .timeline_create(tenant_shard_id, &create_req)
    3646            0 :                         .await;
    3647              : 
    3648            0 :                     if let Err(e) = res {
    3649            0 :                         match e {
    3650            0 :                             mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _) => {
    3651            0 :                                 // Tenant might have been detached from the stale location,
    3652            0 :                                 // so ignore 404s.
    3653            0 :                             },
    3654              :                             _ => {
    3655            0 :                                 return Err(passthrough_api_error(&location.node, e));
    3656              :                             }
    3657              :                         }
    3658            0 :                     }
    3659              :                 }
    3660              : 
    3661            0 :                 Ok(timeline_info)
    3662            0 :             }
    3663              : 
    3664              :             // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
    3665              :             // use whatever LSN that shard picked when creating on subsequent shards.  We arbitrarily use shard zero as the shard
    3666              :             // that will get the first creation request, and propagate the LSN to all the >0 shards.
    3667            0 :             let timeline_info = create_one(
    3668            0 :                 shard_zero_tid,
    3669            0 :                 shard_zero_locations,
    3670            0 :                 self.http_client.clone(),
    3671            0 :                 self.config.pageserver_jwt_token.clone(),
    3672            0 :                 create_req.clone(),
    3673            0 :             )
    3674            0 :             .await?;
    3675              : 
    3676              :             // Propagate the LSN that shard zero picked, if caller didn't provide one
    3677            0 :             match &mut create_req.mode {
    3678            0 :                 models::TimelineCreateRequestMode::Branch { ancestor_start_lsn, .. } if ancestor_start_lsn.is_none() => {
    3679            0 :                     *ancestor_start_lsn = timeline_info.ancestor_lsn;
    3680            0 :                 },
    3681            0 :                 _ => {}
    3682              :             }
    3683              : 
    3684              :             // Create timeline on remaining shards with number >0
    3685            0 :             if !targets.0.is_empty() {
    3686              :                 // If we had multiple shards, issue requests for the remainder now.
    3687            0 :                 let jwt = &self.config.pageserver_jwt_token;
    3688            0 :                 self.tenant_for_shards(
    3689            0 :                     targets
    3690            0 :                         .0
    3691            0 :                         .iter()
    3692            0 :                         .map(|t| (*t.0, t.1.latest.node.clone()))
    3693            0 :                         .collect(),
    3694            0 :                     |tenant_shard_id: TenantShardId, _node: Node| {
    3695            0 :                         let create_req = create_req.clone();
    3696            0 :                         let mutation_locations = targets.0.remove(&tenant_shard_id).unwrap();
    3697            0 :                         Box::pin(create_one(
    3698            0 :                             tenant_shard_id,
    3699            0 :                             mutation_locations,
    3700            0 :                             self.http_client.clone(),
    3701            0 :                             jwt.clone(),
    3702            0 :                             create_req,
    3703            0 :                         ))
    3704            0 :                     },
    3705            0 :                 )
    3706            0 :                 .await?;
    3707            0 :             }
    3708              : 
    3709            0 :             Ok(timeline_info)
    3710            0 :         })
    3711            0 :         .await?
    3712            0 :     }
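
// A hypothetical sketch of the "shard zero first" rule above: the timeline is created
// on shard zero first, and whatever start LSN that shard picked is then propagated to
// every other shard. Types are simplified stand-ins; a real pageserver would choose
// the LSN, here it is faked with a constant.
#[derive(Clone)]
struct CreateReq { ancestor_start_lsn: Option<u64> }

fn create_on_shard(_shard: u32, req: &CreateReq) -> u64 {
    // Stand-in for the pageserver picking an LSN when none was provided.
    req.ancestor_start_lsn.unwrap_or(0x1000)
}

// Assumes `shards` is ordered and shards[0] is shard zero.
fn create_timeline_on_all_shards(shards: &[u32], mut req: CreateReq) -> Vec<(u32, u64)> {
    let mut created = Vec::new();
    // Shard zero goes first and fixes the LSN for everyone else.
    let zero_lsn = create_on_shard(shards[0], &req);
    created.push((shards[0], zero_lsn));
    if req.ancestor_start_lsn.is_none() {
        req.ancestor_start_lsn = Some(zero_lsn);
    }
    for &shard in &shards[1..] {
        created.push((shard, create_on_shard(shard, &req)));
    }
    created
}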
    3713              : 
    3714            0 :     pub(crate) async fn tenant_timeline_create(
    3715            0 :         self: &Arc<Self>,
    3716            0 :         tenant_id: TenantId,
    3717            0 :         create_req: TimelineCreateRequest,
    3718            0 :     ) -> Result<TimelineCreateResponseStorcon, ApiError> {
    3719            0 :         let safekeepers = self.config.timelines_onto_safekeepers;
    3720            0 :         tracing::info!(
    3721              :             %safekeepers,
    3722            0 :             "Creating timeline {}/{}",
    3723              :             tenant_id,
    3724              :             create_req.new_timeline_id,
    3725              :         );
    3726              : 
    3727            0 :         let _tenant_lock = trace_shared_lock(
    3728            0 :             &self.tenant_op_locks,
    3729            0 :             tenant_id,
    3730            0 :             TenantOperations::TimelineCreate,
    3731            0 :         )
    3732            0 :         .await;
    3733            0 :         failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
    3734            0 :         let create_mode = create_req.mode.clone();
    3735              : 
    3736            0 :         let timeline_info = self
    3737            0 :             .tenant_timeline_create_pageservers(tenant_id, create_req)
    3738            0 :             .await?;
    3739              : 
    3740            0 :         let safekeepers = if safekeepers {
    3741            0 :             let res = self
    3742            0 :                 .tenant_timeline_create_safekeepers(tenant_id, &timeline_info, create_mode)
    3743            0 :                 .instrument(tracing::info_span!("timeline_create_safekeepers", %tenant_id, timeline_id=%timeline_info.timeline_id))
    3744            0 :                 .await?;
    3745            0 :             Some(res)
    3746              :         } else {
    3747            0 :             None
    3748              :         };
    3749              : 
    3750            0 :         Ok(TimelineCreateResponseStorcon {
    3751            0 :             timeline_info,
    3752            0 :             safekeepers,
    3753            0 :         })
    3754            0 :     }
    3755              : 
    3756            0 :     pub(crate) async fn tenant_timeline_archival_config(
    3757            0 :         &self,
    3758            0 :         tenant_id: TenantId,
    3759            0 :         timeline_id: TimelineId,
    3760            0 :         req: TimelineArchivalConfigRequest,
    3761            0 :     ) -> Result<(), ApiError> {
    3762            0 :         tracing::info!(
    3763            0 :             "Setting archival config of timeline {tenant_id}/{timeline_id} to '{:?}'",
    3764              :             req.state
    3765              :         );
    3766              : 
    3767            0 :         let _tenant_lock = trace_shared_lock(
    3768            0 :             &self.tenant_op_locks,
    3769            0 :             tenant_id,
    3770            0 :             TenantOperations::TimelineArchivalConfig,
    3771            0 :         )
    3772            0 :         .await;
    3773              : 
    3774            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    3775            0 :             if targets.0.is_empty() {
    3776            0 :                 return Err(ApiError::NotFound(
    3777            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3778            0 :                 ));
    3779            0 :             }
    3780            0 :             async fn config_one(
    3781            0 :                 tenant_shard_id: TenantShardId,
    3782            0 :                 timeline_id: TimelineId,
    3783            0 :                 node: Node,
    3784            0 :                 http_client: reqwest::Client,
    3785            0 :                 jwt: Option<String>,
    3786            0 :                 req: TimelineArchivalConfigRequest,
    3787            0 :             ) -> Result<(), ApiError> {
    3788            0 :                 tracing::info!(
    3789            0 :                     "Setting archival config of timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    3790              :                 );
    3791              : 
    3792            0 :                 let client = PageserverClient::new(node.get_id(),  http_client, node.base_url(), jwt.as_deref());
    3793            0 : 
    3794            0 :                 client
    3795            0 :                     .timeline_archival_config(tenant_shard_id, timeline_id, &req)
    3796            0 :                     .await
    3797            0 :                     .map_err(|e| match e {
    3798            0 :                         mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg) => {
    3799            0 :                             ApiError::PreconditionFailed(msg.into_boxed_str())
    3800              :                         }
    3801            0 :                         _ => passthrough_api_error(&node, e),
    3802            0 :                     })
    3803            0 :             }
    3804              : 
    3805              :             // no shard needs to go first/last; the operation should be idempotent
    3806              :             // TODO: it would be great to ensure that all shards return the same error
    3807            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    3808            0 :             let results = self
    3809            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    3810            0 :                     futures::FutureExt::boxed(config_one(
    3811            0 :                         tenant_shard_id,
    3812            0 :                         timeline_id,
    3813            0 :                         node,
    3814            0 :                         self.http_client.clone(),
    3815            0 :                         self.config.pageserver_jwt_token.clone(),
    3816            0 :                         req.clone(),
    3817            0 :                     ))
    3818            0 :                 })
    3819            0 :                 .await?;
    3820            0 :             assert!(!results.is_empty(), "must have at least one result");
    3821              : 
    3822            0 :             Ok(())
    3823            0 :         }).await?
    3824            0 :     }
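
// A hypothetical sketch of the error mapping above: a 412 (precondition failed) from a
// pageserver is surfaced as a distinct precondition error, while anything else is
// passed through unchanged. Simplified stand-in types, not the real mgmt_api / ApiError enums.
#[derive(Debug, PartialEq)]
enum UpstreamError { Status(u16, String), Other(String) }

#[derive(Debug, PartialEq)]
enum ControllerError { PreconditionFailed(String), Passthrough(String) }

fn map_archival_error(e: UpstreamError) -> ControllerError {
    match e {
        UpstreamError::Status(412, msg) => ControllerError::PreconditionFailed(msg),
        UpstreamError::Status(code, msg) => ControllerError::Passthrough(format!("{code}: {msg}")),
        UpstreamError::Other(msg) => ControllerError::Passthrough(msg),
    }
}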
    3825              : 
    3826            0 :     pub(crate) async fn tenant_timeline_detach_ancestor(
    3827            0 :         &self,
    3828            0 :         tenant_id: TenantId,
    3829            0 :         timeline_id: TimelineId,
    3830            0 :         behavior: Option<DetachBehavior>,
    3831            0 :     ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
    3832            0 :         tracing::info!("Detaching timeline {tenant_id}/{timeline_id}",);
    3833              : 
    3834            0 :         let _tenant_lock = trace_shared_lock(
    3835            0 :             &self.tenant_op_locks,
    3836            0 :             tenant_id,
    3837            0 :             TenantOperations::TimelineDetachAncestor,
    3838            0 :         )
    3839            0 :         .await;
    3840              : 
    3841            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    3842            0 :             if targets.0.is_empty() {
    3843            0 :                 return Err(ApiError::NotFound(
    3844            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3845            0 :                 ));
    3846            0 :             }
    3847              : 
    3848            0 :             async fn detach_one(
    3849            0 :                 tenant_shard_id: TenantShardId,
    3850            0 :                 timeline_id: TimelineId,
    3851            0 :                 node: Node,
    3852            0 :                 http_client: reqwest::Client,
    3853            0 :                 jwt: Option<String>,
    3854            0 :                 behavior: Option<DetachBehavior>,
    3855            0 :             ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
    3856            0 :                 tracing::info!(
    3857            0 :                     "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    3858              :                 );
    3859              : 
    3860            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    3861            0 : 
    3862            0 :                 client
    3863            0 :                     .timeline_detach_ancestor(tenant_shard_id, timeline_id, behavior)
    3864            0 :                     .await
    3865            0 :                     .map_err(|e| {
    3866              :                         use mgmt_api::Error;
    3867              : 
    3868            0 :                         match e {
    3869              :                             // no ancestor (ever)
    3870            0 :                             Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
    3871            0 :                                 "{node}: {}",
    3872            0 :                                 msg.strip_prefix("Conflict: ").unwrap_or(&msg)
    3873            0 :                             )),
    3874              :                             // too many ancestors
    3875            0 :                             Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
    3876            0 :                                 ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
    3877              :                             }
    3878            0 :                             Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, msg) => {
    3879            0 :                                 // avoid turning these into conflicts to remain compatible with
    3880            0 :                                 // pageservers, 500 errors are sadly retryable with timeline ancestor
    3881            0 :                                 // detach
    3882            0 :                                 ApiError::InternalServerError(anyhow::anyhow!("{node}: {msg}"))
    3883              :                             }
    3884              :                             // rest can be mapped as usual
    3885            0 :                             other => passthrough_api_error(&node, other),
    3886              :                         }
    3887            0 :                     })
    3888            0 :                     .map(|res| (tenant_shard_id.shard_number, res))
    3889            0 :             }
    3890              : 
    3891              :             // no shard needs to go first/last; the operation should be idempotent
    3892            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    3893            0 :             let mut results = self
    3894            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    3895            0 :                     futures::FutureExt::boxed(detach_one(
    3896            0 :                         tenant_shard_id,
    3897            0 :                         timeline_id,
    3898            0 :                         node,
    3899            0 :                         self.http_client.clone(),
    3900            0 :                         self.config.pageserver_jwt_token.clone(),
    3901            0 :                         behavior,
    3902            0 :                     ))
    3903            0 :                 })
    3904            0 :                 .await?;
    3905              : 
    3906            0 :             let any = results.pop().expect("we must have at least one response");
    3907            0 : 
    3908            0 :             let mismatching = results
    3909            0 :                 .iter()
    3910            0 :                 .filter(|(_, res)| res != &any.1)
    3911            0 :                 .collect::<Vec<_>>();
    3912            0 :             if !mismatching.is_empty() {
     3913              :                 // This can be hit by races, which should not happen thanks to the operation lock on the control plane.
    3914            0 :                 let matching = results.len() - mismatching.len();
    3915            0 :                 tracing::error!(
    3916              :                     matching,
    3917              :                     compared_against=?any,
    3918              :                     ?mismatching,
    3919            0 :                     "shards returned different results"
    3920              :                 );
    3921              : 
    3922            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!("pageservers returned mixed results for ancestor detach; manual intervention is required.")));
    3923            0 :             }
    3924            0 : 
    3925            0 :             Ok(any.1)
    3926            0 :         }).await?
    3927            0 :     }
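
// A hypothetical sketch of the consistency check above: take one shard's response as
// the reference and fail if any other shard answered differently, since mixed results
// for an ancestor detach require manual intervention.
fn all_shards_agree<T: PartialEq + std::fmt::Debug>(mut results: Vec<T>) -> Result<T, String> {
    let reference = results.pop().expect("at least one shard response");
    let mismatching: Vec<&T> = results.iter().filter(|r| **r != reference).collect();
    if mismatching.is_empty() {
        Ok(reference)
    } else {
        Err(format!("shards returned different results: {mismatching:?} vs {reference:?}"))
    }
}

#[test]
fn mixed_results_are_rejected() {
    assert!(all_shards_agree(vec![1, 1, 2]).is_err());
    assert_eq!(all_shards_agree(vec![7, 7, 7]), Ok(7));
}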
    3928              : 
    3929            0 :     pub(crate) async fn tenant_timeline_block_unblock_gc(
    3930            0 :         &self,
    3931            0 :         tenant_id: TenantId,
    3932            0 :         timeline_id: TimelineId,
    3933            0 :         dir: BlockUnblock,
    3934            0 :     ) -> Result<(), ApiError> {
    3935            0 :         let _tenant_lock = trace_shared_lock(
    3936            0 :             &self.tenant_op_locks,
    3937            0 :             tenant_id,
    3938            0 :             TenantOperations::TimelineGcBlockUnblock,
    3939            0 :         )
    3940            0 :         .await;
    3941              : 
    3942            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    3943            0 :             if targets.0.is_empty() {
    3944            0 :                 return Err(ApiError::NotFound(
    3945            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3946            0 :                 ));
    3947            0 :             }
    3948              : 
    3949            0 :             async fn do_one(
    3950            0 :                 tenant_shard_id: TenantShardId,
    3951            0 :                 timeline_id: TimelineId,
    3952            0 :                 node: Node,
    3953            0 :                 http_client: reqwest::Client,
    3954            0 :                 jwt: Option<String>,
    3955            0 :                 dir: BlockUnblock,
    3956            0 :             ) -> Result<(), ApiError> {
    3957            0 :                 let client = PageserverClient::new(
    3958            0 :                     node.get_id(),
    3959            0 :                     http_client,
    3960            0 :                     node.base_url(),
    3961            0 :                     jwt.as_deref(),
    3962            0 :                 );
    3963            0 : 
    3964            0 :                 client
    3965            0 :                     .timeline_block_unblock_gc(tenant_shard_id, timeline_id, dir)
    3966            0 :                     .await
    3967            0 :                     .map_err(|e| passthrough_api_error(&node, e))
    3968            0 :             }
    3969              : 
    3970              :             // no shard needs to go first/last; the operation should be idempotent
    3971            0 :             let locations = targets
    3972            0 :                 .0
    3973            0 :                 .iter()
    3974            0 :                 .map(|t| (*t.0, t.1.latest.node.clone()))
    3975            0 :                 .collect();
    3976            0 :             self.tenant_for_shards(locations, |tenant_shard_id, node| {
    3977            0 :                 futures::FutureExt::boxed(do_one(
    3978            0 :                     tenant_shard_id,
    3979            0 :                     timeline_id,
    3980            0 :                     node,
    3981            0 :                     self.http_client.clone(),
    3982            0 :                     self.config.pageserver_jwt_token.clone(),
    3983            0 :                     dir,
    3984            0 :                 ))
    3985            0 :             })
    3986            0 :             .await
    3987            0 :         })
    3988            0 :         .await??;
    3989            0 :         Ok(())
    3990            0 :     }
    3991              : 
    3992            0 :     pub(crate) async fn tenant_timeline_lsn_lease(
    3993            0 :         &self,
    3994            0 :         tenant_id: TenantId,
    3995            0 :         timeline_id: TimelineId,
    3996            0 :         lsn: Lsn,
    3997            0 :     ) -> Result<LsnLease, ApiError> {
    3998            0 :         let _tenant_lock = trace_shared_lock(
    3999            0 :             &self.tenant_op_locks,
    4000            0 :             tenant_id,
    4001            0 :             TenantOperations::TimelineLsnLease,
    4002            0 :         )
    4003            0 :         .await;
    4004              : 
    4005            0 :         let targets = {
    4006            0 :             let locked = self.inner.read().unwrap();
    4007            0 :             let mut targets = Vec::new();
    4008            0 : 
     4009            0 :             // The lease applies to the whole tenant: gather every shard
     4010            0 :             // that currently has an attached location.
    4011            0 :             let shards_range = TenantShardId::tenant_range(tenant_id);
    4012              : 
    4013            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4014            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4015            0 :                     let node = locked
    4016            0 :                         .nodes
    4017            0 :                         .get(node_id)
    4018            0 :                         .expect("Pageservers may not be deleted while referenced");
    4019            0 : 
    4020            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4021            0 :                 }
    4022              :             }
    4023            0 :             targets
    4024              :         };
    4025              : 
    4026            0 :         let res = self
    4027            0 :             .tenant_for_shards_api(
    4028            0 :                 targets,
    4029            0 :                 |tenant_shard_id, client| async move {
    4030            0 :                     client
    4031            0 :                         .timeline_lease_lsn(tenant_shard_id, timeline_id, lsn)
    4032            0 :                         .await
    4033            0 :                 },
    4034            0 :                 1,
    4035            0 :                 1,
    4036            0 :                 SHORT_RECONCILE_TIMEOUT,
    4037            0 :                 &self.cancel,
    4038            0 :             )
    4039            0 :             .await;
    4040              : 
    4041            0 :         let mut valid_until = None;
    4042            0 :         for r in res {
    4043            0 :             match r {
    4044            0 :                 Ok(lease) => {
    4045            0 :                     if let Some(ref mut valid_until) = valid_until {
    4046            0 :                         *valid_until = std::cmp::min(*valid_until, lease.valid_until);
    4047            0 :                     } else {
    4048            0 :                         valid_until = Some(lease.valid_until);
    4049            0 :                     }
    4050              :                 }
    4051            0 :                 Err(e) => {
    4052            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    4053              :                 }
    4054              :             }
    4055              :         }
    4056            0 :         Ok(LsnLease {
    4057            0 :             valid_until: valid_until.unwrap_or_else(SystemTime::now),
    4058            0 :         })
    4059            0 :     }
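
// A hypothetical sketch of the lease aggregation above: the tenant-wide lease is only
// as long as the shortest per-shard lease, so the per-shard expiry times are folded
// with `min`, falling back to "now" if there were no shards at all.
use std::time::SystemTime;

fn aggregate_lease_expiry(per_shard: &[SystemTime]) -> SystemTime {
    per_shard
        .iter()
        .copied()
        .reduce(std::cmp::min)
        .unwrap_or_else(SystemTime::now)
}

#[test]
fn shortest_lease_wins() {
    use std::time::Duration;
    let now = SystemTime::now();
    let leases = [now + Duration::from_secs(60), now + Duration::from_secs(30)];
    assert_eq!(aggregate_lease_expiry(&leases), now + Duration::from_secs(30));
}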
    4060              : 
    4061            0 :     pub(crate) async fn tenant_timeline_download_heatmap_layers(
    4062            0 :         &self,
    4063            0 :         tenant_shard_id: TenantShardId,
    4064            0 :         timeline_id: TimelineId,
    4065            0 :         concurrency: Option<usize>,
    4066            0 :         recurse: bool,
    4067            0 :     ) -> Result<(), ApiError> {
    4068            0 :         let _tenant_lock = trace_shared_lock(
    4069            0 :             &self.tenant_op_locks,
    4070            0 :             tenant_shard_id.tenant_id,
    4071            0 :             TenantOperations::DownloadHeatmapLayers,
    4072            0 :         )
    4073            0 :         .await;
    4074              : 
    4075            0 :         let targets = {
    4076            0 :             let locked = self.inner.read().unwrap();
    4077            0 :             let mut targets = Vec::new();
    4078              : 
    4079              :             // If the request got an unsharded tenant id, then apply
    4080              :             // the operation to all shards. Otherwise, apply it to a specific shard.
    4081            0 :             let shards_range = if tenant_shard_id.is_unsharded() {
    4082            0 :                 TenantShardId::tenant_range(tenant_shard_id.tenant_id)
    4083              :             } else {
    4084            0 :                 tenant_shard_id.range()
    4085              :             };
    4086              : 
    4087            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4088            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4089            0 :                     let node = locked
    4090            0 :                         .nodes
    4091            0 :                         .get(node_id)
    4092            0 :                         .expect("Pageservers may not be deleted while referenced");
    4093            0 : 
    4094            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4095            0 :                 }
    4096              :             }
    4097            0 :             targets
    4098            0 :         };
    4099            0 : 
    4100            0 :         self.tenant_for_shards_api(
    4101            0 :             targets,
    4102            0 :             |tenant_shard_id, client| async move {
    4103            0 :                 client
    4104            0 :                     .timeline_download_heatmap_layers(
    4105            0 :                         tenant_shard_id,
    4106            0 :                         timeline_id,
    4107            0 :                         concurrency,
    4108            0 :                         recurse,
    4109            0 :                     )
    4110            0 :                     .await
    4111            0 :             },
    4112            0 :             1,
    4113            0 :             1,
    4114            0 :             SHORT_RECONCILE_TIMEOUT,
    4115            0 :             &self.cancel,
    4116            0 :         )
    4117            0 :         .await;
    4118              : 
    4119            0 :         Ok(())
    4120            0 :     }
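
// A hypothetical sketch of the target selection above: an unsharded tenant id
// addresses every shard of the tenant, while a concrete shard id addresses only
// itself. A shard count of zero stands in for "unsharded"; the struct is a
// simplified stand-in, not the real TenantShardId.
#[derive(Clone, Copy, PartialEq)]
struct ShardId { tenant: u64, number: u8, count: u8 }

fn select_targets(all_attached: &[ShardId], requested: ShardId) -> Vec<ShardId> {
    all_attached
        .iter()
        .copied()
        .filter(|s| {
            s.tenant == requested.tenant
                && (requested.count == 0 || s.number == requested.number)
        })
        .collect()
}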
    4121              : 
    4122              :     /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
    4123              :     ///
    4124              :     /// On success, the returned vector contains exactly the same number of elements as the input `locations`
     4125              :     /// and the returned element at index `i` is the result of `req_fn(locations[i])`.
    4126            0 :     async fn tenant_for_shards<F, R>(
    4127            0 :         &self,
    4128            0 :         locations: Vec<(TenantShardId, Node)>,
    4129            0 :         mut req_fn: F,
    4130            0 :     ) -> Result<Vec<R>, ApiError>
    4131            0 :     where
    4132            0 :         F: FnMut(
    4133            0 :             TenantShardId,
    4134            0 :             Node,
    4135            0 :         )
    4136            0 :             -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
    4137            0 :     {
    4138            0 :         let mut futs = FuturesUnordered::new();
    4139            0 :         let mut results = Vec::with_capacity(locations.len());
    4140              : 
    4141            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4142            0 :             let fut = req_fn(tenant_shard_id, node);
    4143            0 :             futs.push(async move { (idx, fut.await) });
    4144            0 :         }
    4145              : 
    4146            0 :         while let Some((idx, r)) = futs.next().await {
    4147            0 :             results.push((idx, r?));
    4148              :         }
    4149              : 
    4150            0 :         results.sort_by_key(|(idx, _)| *idx);
    4151            0 :         Ok(results.into_iter().map(|(_, r)| r).collect())
    4152            0 :     }
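
// A hypothetical, self-contained sketch of the fan-out pattern above, assuming the
// `futures` and `tokio` crates: spawn one future per input into a FuturesUnordered,
// collect results as they complete, and restore the original input order by sorting
// on the index. The first error aborts the whole batch, as in tenant_for_shards.
use futures::stream::{FuturesUnordered, StreamExt};

async fn fan_out<T, F, Fut>(inputs: Vec<T>, mut req_fn: F) -> Result<Vec<u64>, String>
where
    F: FnMut(T) -> Fut,
    Fut: std::future::Future<Output = Result<u64, String>>,
{
    let mut futs = FuturesUnordered::new();
    for (idx, input) in inputs.into_iter().enumerate() {
        let fut = req_fn(input);
        // Tag each future with its input index so ordering can be restored later.
        futs.push(async move { (idx, fut.await) });
    }
    let mut results = Vec::new();
    while let Some((idx, r)) = futs.next().await {
        results.push((idx, r?));
    }
    results.sort_by_key(|(idx, _)| *idx);
    Ok(results.into_iter().map(|(_, r)| r).collect())
}

#[tokio::main]
async fn main() {
    let doubled = fan_out(vec![1u64, 2, 3], |x| async move { Ok::<u64, String>(x * 2) }).await;
    assert_eq!(doubled, Ok(vec![2, 4, 6]));
}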
    4153              : 
    4154              :     /// Concurrently invoke a pageserver API call on many shards at once.
    4155              :     ///
    4156              :     /// The returned Vec has the same length as the `locations` Vec,
     4157              :     /// and the returned element at index `i` is the result of `op(locations[i])`.
    4158            0 :     pub(crate) async fn tenant_for_shards_api<T, O, F>(
    4159            0 :         &self,
    4160            0 :         locations: Vec<(TenantShardId, Node)>,
    4161            0 :         op: O,
    4162            0 :         warn_threshold: u32,
    4163            0 :         max_retries: u32,
    4164            0 :         timeout: Duration,
    4165            0 :         cancel: &CancellationToken,
    4166            0 :     ) -> Vec<mgmt_api::Result<T>>
    4167            0 :     where
    4168            0 :         O: Fn(TenantShardId, PageserverClient) -> F + Copy,
    4169            0 :         F: std::future::Future<Output = mgmt_api::Result<T>>,
    4170            0 :     {
    4171            0 :         let mut futs = FuturesUnordered::new();
    4172            0 :         let mut results = Vec::with_capacity(locations.len());
    4173              : 
    4174            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4175            0 :             futs.push(async move {
    4176            0 :                 let r = node
    4177            0 :                     .with_client_retries(
    4178            0 :                         |client| op(tenant_shard_id, client),
    4179            0 :                         &self.http_client,
    4180            0 :                         &self.config.pageserver_jwt_token,
    4181            0 :                         warn_threshold,
    4182            0 :                         max_retries,
    4183            0 :                         timeout,
    4184            0 :                         cancel,
    4185            0 :                     )
    4186            0 :                     .await;
    4187            0 :                 (idx, r)
    4188            0 :             });
    4189            0 :         }
    4190              : 
    4191            0 :         while let Some((idx, r)) = futs.next().await {
    4192            0 :             results.push((idx, r.unwrap_or(Err(mgmt_api::Error::Cancelled))));
    4193            0 :         }
    4194              : 
    4195            0 :         results.sort_by_key(|(idx, _)| *idx);
    4196            0 :         results.into_iter().map(|(_, r)| r).collect()
    4197            0 :     }
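
// A hypothetical sketch of the retry wrapper assumed by tenant_for_shards_api: each
// shard's call is retried up to `max_retries` times and the last error is kept if all
// attempts fail, so one failing shard does not abort the others. This is a simplified
// stand-in for Node::with_client_retries, not its real code.
async fn with_retries<T, E, F, Fut>(mut op: F, max_retries: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut attempt = 0;
    loop {
        match op().await {
            Ok(v) => return Ok(v),
            Err(e) if attempt >= max_retries => return Err(e),
            Err(_) => attempt += 1,
        }
    }
}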
    4198              : 
    4199              :     /// Helper for safely working with the shards in a tenant remotely on pageservers, for example
    4200              :     /// when creating and deleting timelines:
    4201              :     /// - Makes sure shards are attached somewhere if they weren't already
    4202              :     /// - Looks up the shards and the nodes where they were most recently attached
    4203              :     /// - Guarantees that after the inner function returns, the shards' generations haven't moved on: this
    4204              :     ///   ensures that the remote operation acted on the most recent generation, and is therefore durable.
    4205            0 :     async fn tenant_remote_mutation<R, O, F>(
    4206            0 :         &self,
    4207            0 :         tenant_id: TenantId,
    4208            0 :         op: O,
    4209            0 :     ) -> Result<R, ApiError>
    4210            0 :     where
    4211            0 :         O: FnOnce(TenantMutationLocations) -> F,
    4212            0 :         F: std::future::Future<Output = R>,
    4213            0 :     {
    4214            0 :         let mutation_locations = {
    4215            0 :             let mut locations = TenantMutationLocations::default();
    4216              : 
    4217              :             // Load the currently attached pageservers for the latest generation of each shard.  This can
    4218              :             // run concurrently with reconciliations, and it is not guaranteed that the node we find here
    4219              :             // will still be the latest when we're done: we will check generations again at the end of
    4220              :             // this function to handle that.
    4221            0 :             let generations = self.persistence.tenant_generations(tenant_id).await?;
    4222              : 
    4223            0 :             if generations
    4224            0 :                 .iter()
    4225            0 :                 .any(|i| i.generation.is_none() || i.generation_pageserver.is_none())
    4226              :             {
    4227            0 :                 let shard_generations = generations
    4228            0 :                     .into_iter()
    4229            0 :                     .map(|i| (i.tenant_shard_id, (i.generation, i.generation_pageserver)))
    4230            0 :                     .collect::<HashMap<_, _>>();
    4231            0 : 
     4232            0 :                 // One or more shards have not been attached to a pageserver.  Check whether this is because the tenant is configured
     4233            0 :                 // to be detached (409: caller should give up), or because it's meant to be attached but isn't yet (503: caller should retry).
    4234            0 :                 let locked = self.inner.read().unwrap();
    4235            0 :                 for (shard_id, shard) in
    4236            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    4237              :                 {
    4238            0 :                     match shard.policy {
    4239              :                         PlacementPolicy::Attached(_) => {
    4240              :                             // This shard is meant to be attached: the caller is not wrong to try and
    4241              :                             // use this function, but we can't service the request right now.
    4242            0 :                             let Some(generation) = shard_generations.get(shard_id) else {
    4243              :                                 // This can only happen if there is a split brain controller modifying the database.  This should
    4244              :                                 // never happen when testing, and if it happens in production we can only log the issue.
    4245            0 :                                 debug_assert!(false);
    4246            0 :                                 tracing::error!(
    4247            0 :                                     "Shard {shard_id} not found in generation state!  Is another rogue controller running?"
    4248              :                                 );
    4249            0 :                                 continue;
    4250              :                             };
    4251            0 :                             let (generation, generation_pageserver) = generation;
    4252            0 :                             if let Some(generation) = generation {
    4253            0 :                                 if generation_pageserver.is_none() {
    4254              :                                     // This is legitimate only in a very narrow window where the shard was only just configured into
    4255              :                                     // Attached mode after being created in Secondary or Detached mode, and it has had its generation
    4256              :                                     // set but not yet had a Reconciler run (reconciler is the only thing that sets generation_pageserver).
    4257            0 :                                     tracing::warn!(
    4258            0 :                                         "Shard {shard_id} generation is set ({generation:?}) but generation_pageserver is None, reconciler not run yet?"
    4259              :                                     );
    4260            0 :                                 }
    4261              :                             } else {
    4262              :                                 // This should never happen: a shard with no generation is only permitted when it was created in some state
    4263              :                                 // other than PlacementPolicy::Attached (and generation is always written to DB before setting Attached in memory)
    4264            0 :                                 debug_assert!(false);
    4265            0 :                                 tracing::error!(
    4266            0 :                                     "Shard {shard_id} generation is None, but it is in PlacementPolicy::Attached mode!"
    4267              :                                 );
    4268            0 :                                 continue;
    4269              :                             }
    4270              :                         }
    4271              :                         PlacementPolicy::Secondary | PlacementPolicy::Detached => {
    4272            0 :                             return Err(ApiError::Conflict(format!(
    4273            0 :                                 "Shard {shard_id} tenant has policy {:?}",
    4274            0 :                                 shard.policy
    4275            0 :                             )));
    4276              :                         }
    4277              :                     }
    4278              :                 }
    4279              : 
    4280            0 :                 return Err(ApiError::ResourceUnavailable(
    4281            0 :                     "One or more shards in tenant is not yet attached".into(),
    4282            0 :                 ));
    4283            0 :             }
    4284            0 : 
    4285            0 :             let locked = self.inner.read().unwrap();
    4286              :             for ShardGenerationState {
    4287            0 :                 tenant_shard_id,
    4288            0 :                 generation,
    4289            0 :                 generation_pageserver,
    4290            0 :             } in generations
    4291              :             {
    4292            0 :                 let node_id = generation_pageserver.expect("We checked for None above");
    4293            0 :                 let node = locked
    4294            0 :                     .nodes
    4295            0 :                     .get(&node_id)
    4296            0 :                     .ok_or(ApiError::Conflict(format!(
    4297            0 :                         "Raced with removal of node {node_id}"
    4298            0 :                     )))?;
    4299            0 :                 let generation = generation.expect("Checked above");
    4300            0 : 
    4301            0 :                 let tenant = locked.tenants.get(&tenant_shard_id);
    4302              : 
    4303              :                 // TODO(vlad): Abstract the logic that finds stale attached locations
    4304              :                 // from observed state into a [`Service`] method.
    4305            0 :                 let other_locations = match tenant {
    4306            0 :                     Some(tenant) => {
    4307            0 :                         let mut other = tenant.attached_locations();
    4308            0 :                         let latest_location_index =
    4309            0 :                             other.iter().position(|&l| l == (node.get_id(), generation));
    4310            0 :                         if let Some(idx) = latest_location_index {
    4311            0 :                             other.remove(idx);
    4312            0 :                         }
    4313              : 
    4314            0 :                         other
    4315              :                     }
    4316            0 :                     None => Vec::default(),
    4317              :                 };
    4318              : 
    4319            0 :                 let location = ShardMutationLocations {
    4320            0 :                     latest: MutationLocation {
    4321            0 :                         node: node.clone(),
    4322            0 :                         generation,
    4323            0 :                     },
    4324            0 :                     other: other_locations
    4325            0 :                         .into_iter()
    4326            0 :                         .filter_map(|(node_id, generation)| {
    4327            0 :                             let node = locked.nodes.get(&node_id)?;
    4328              : 
    4329            0 :                             Some(MutationLocation {
    4330            0 :                                 node: node.clone(),
    4331            0 :                                 generation,
    4332            0 :                             })
    4333            0 :                         })
    4334            0 :                         .collect(),
    4335            0 :                 };
    4336            0 :                 locations.0.insert(tenant_shard_id, location);
    4337            0 :             }
    4338              : 
    4339            0 :             locations
    4340              :         };
    4341              : 
    4342            0 :         let result = op(mutation_locations.clone()).await;
    4343              : 
    4344              :         // Post-check: are all the generations of all the shards the same as they were initially?  This proves that
    4345              :         // our remote operation executed on the latest generation and is therefore persistent.
    4346              :         {
    4347            0 :             let latest_generations = self.persistence.tenant_generations(tenant_id).await?;
    4348            0 :             if latest_generations
    4349            0 :                 .into_iter()
    4350            0 :                 .map(
    4351            0 :                     |ShardGenerationState {
    4352              :                          tenant_shard_id,
    4353              :                          generation,
    4354              :                          generation_pageserver: _,
    4355            0 :                      }| (tenant_shard_id, generation),
    4356            0 :                 )
    4357            0 :                 .collect::<Vec<_>>()
    4358            0 :                 != mutation_locations
    4359            0 :                     .0
    4360            0 :                     .into_iter()
    4361            0 :                     .map(|i| (i.0, Some(i.1.latest.generation)))
    4362            0 :                     .collect::<Vec<_>>()
    4363              :             {
    4364              :                 // We raced with something that incremented the generation, and therefore cannot be
    4365              :                 // confident that our actions are persistent (they might have hit an old generation).
    4366              :                 //
    4367              :                 // This is safe but requires a retry: ask the client to do that by giving them a 503 response.
    4368            0 :                 return Err(ApiError::ResourceUnavailable(
    4369            0 :                     "Tenant attachment changed, please retry".into(),
    4370            0 :                 ));
    4371            0 :             }
    4372            0 :         }
    4373            0 : 
    4374            0 :         Ok(result)
    4375            0 :     }
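                       : 
                       :     // Usage sketch (hypothetical caller; `do_work` is a stand-in, not defined in this file):
                       :     // the post-check above turns a generation race into ApiError::ResourceUnavailable, so
                       :     // callers of this remote-mutation helper should treat that error as retryable rather
                       :     // than fatal, e.g.:
                       :     //
                       :     //     match service
                       :     //         .tenant_remote_mutation(tenant_id, |targets| async move { do_work(targets).await })
                       :     //         .await
                       :     //     {
                       :     //         Err(ApiError::ResourceUnavailable(_)) => { /* surface a 503; the client retries */ }
                       :     //         other => { /* success, or a terminal error */ }
                       :     //     }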
    4376              : 
    4377            0 :     pub(crate) async fn tenant_timeline_delete(
    4378            0 :         self: &Arc<Self>,
    4379            0 :         tenant_id: TenantId,
    4380            0 :         timeline_id: TimelineId,
    4381            0 :     ) -> Result<StatusCode, ApiError> {
    4382            0 :         tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
    4383            0 :         let _tenant_lock = trace_shared_lock(
    4384            0 :             &self.tenant_op_locks,
    4385            0 :             tenant_id,
    4386            0 :             TenantOperations::TimelineDelete,
    4387            0 :         )
    4388            0 :         .await;
    4389              : 
    4390            0 :         let status_code = self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    4391            0 :             if targets.0.is_empty() {
    4392            0 :                 return Err(ApiError::NotFound(
    4393            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4394            0 :                 ));
    4395            0 :             }
    4396            0 : 
    4397            0 :             let (shard_zero_tid, shard_zero_locations) = targets.0.pop_first().expect("Must have at least one shard");
    4398            0 :             assert!(shard_zero_tid.is_shard_zero());
    4399              : 
    4400            0 :             async fn delete_one(
    4401            0 :                 tenant_shard_id: TenantShardId,
    4402            0 :                 timeline_id: TimelineId,
    4403            0 :                 node: Node,
    4404            0 :                 http_client: reqwest::Client,
    4405            0 :                 jwt: Option<String>,
    4406            0 :             ) -> Result<StatusCode, ApiError> {
    4407            0 :                 tracing::info!(
    4408            0 :                     "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4409              :                 );
    4410              : 
    4411            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    4412            0 :                 let res = client
    4413            0 :                     .timeline_delete(tenant_shard_id, timeline_id)
    4414            0 :                     .await;
    4415              : 
    4416            0 :                 match res {
    4417            0 :                     Ok(ok) => Ok(ok),
    4418            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::CONFLICT, _)) => Ok(StatusCode::CONFLICT),
    4419            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg)) => Err(ApiError::ResourceUnavailable(msg.into())),
    4420            0 :                     Err(e) => {
    4421            0 :                         Err(
    4422            0 :                             ApiError::InternalServerError(anyhow::anyhow!(
    4423            0 :                                 "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
    4424            0 :                             ))
    4425            0 :                         )
    4426              :                     }
    4427              :                 }
    4428            0 :             }
    4429              : 
    4430            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4431            0 :             let statuses = self
    4432            0 :                 .tenant_for_shards(locations, |tenant_shard_id: TenantShardId, node: Node| {
    4433            0 :                     Box::pin(delete_one(
    4434            0 :                         tenant_shard_id,
    4435            0 :                         timeline_id,
    4436            0 :                         node,
    4437            0 :                         self.http_client.clone(),
    4438            0 :                         self.config.pageserver_jwt_token.clone(),
    4439            0 :                     ))
    4440            0 :                 })
    4441            0 :                 .await?;
    4442              : 
    4443              :             // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero.
    4444              :             // We return 409 (Conflict) if deletion was already in progress on any of the shards
    4445              :             // and 202 (Accepted) if deletion was not already in progress on any of the shards.
    4446            0 :             if statuses.iter().any(|s| s == &StatusCode::CONFLICT) {
    4447            0 :                 return Ok(StatusCode::CONFLICT);
    4448            0 :             }
    4449            0 : 
    4450            0 :             if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
    4451            0 :                 return Ok(StatusCode::ACCEPTED);
    4452            0 :             }
    4453              : 
     4454              :             // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline is routed
     4455              :             // to shard zero, it gives the more intuitive behavior that a GET returns 404 only once the deletion is fully done.
    4456            0 :             let shard_zero_status = delete_one(
    4457            0 :                 shard_zero_tid,
    4458            0 :                 timeline_id,
    4459            0 :                 shard_zero_locations.latest.node,
    4460            0 :                 self.http_client.clone(),
    4461            0 :                 self.config.pageserver_jwt_token.clone(),
    4462            0 :             )
    4463            0 :             .await?;
    4464            0 :             Ok(shard_zero_status)
    4465            0 :         }).await?;
    4466              : 
    4467            0 :         self.tenant_timeline_delete_safekeepers(tenant_id, timeline_id)
    4468            0 :             .await?;
    4469              : 
    4470            0 :         status_code
    4471            0 :     }
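                       : 
                       :     // Caller-side sketch (hypothetical client; only the semantics come from the code above):
                       :     // 409 means a deletion was already in progress on some shard, 202 means deletion has been
                       :     // accepted but may still be running, and because shard zero is deleted last, a GET on the
                       :     // timeline keeps resolving until the whole deletion has finished, after which it returns 404.
                       :     //
                       :     //     DELETE the timeline            -> 202 (started) or 409 (already in progress)
                       :     //     poll GET on the same timeline  -> 404 once deletion is complete on every shard
                       : 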
     4472              :     /// Use this when you know the TenantId but not a specific shard, and would like the node holding shard 0.
    4473            0 :     pub(crate) async fn tenant_shard0_node(
    4474            0 :         &self,
    4475            0 :         tenant_id: TenantId,
    4476            0 :     ) -> Result<(Node, TenantShardId), ApiError> {
    4477            0 :         let tenant_shard_id = {
    4478            0 :             let locked = self.inner.read().unwrap();
    4479            0 :             let Some((tenant_shard_id, _shard)) = locked
    4480            0 :                 .tenants
    4481            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4482            0 :                 .next()
    4483              :             else {
    4484            0 :                 return Err(ApiError::NotFound(
    4485            0 :                     anyhow::anyhow!("Tenant {tenant_id} not found").into(),
    4486            0 :                 ));
    4487              :             };
    4488              : 
    4489            0 :             *tenant_shard_id
    4490            0 :         };
    4491            0 : 
    4492            0 :         self.tenant_shard_node(tenant_shard_id)
    4493            0 :             .await
    4494            0 :             .map(|node| (node, tenant_shard_id))
    4495            0 :     }
    4496              : 
     4497              :     /// When you need to send an HTTP request to the pageserver that holds a shard of a tenant, this
     4498              :     /// function looks up and returns the node. If the shard isn't found, it returns Err(ApiError::NotFound).
    4499            0 :     pub(crate) async fn tenant_shard_node(
    4500            0 :         &self,
    4501            0 :         tenant_shard_id: TenantShardId,
    4502            0 :     ) -> Result<Node, ApiError> {
    4503            0 :         // Look up in-memory state and maybe use the node from there.
    4504            0 :         {
    4505            0 :             let locked = self.inner.read().unwrap();
    4506            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    4507            0 :                 return Err(ApiError::NotFound(
    4508            0 :                     anyhow::anyhow!("Tenant shard {tenant_shard_id} not found").into(),
    4509            0 :                 ));
    4510              :             };
    4511              : 
    4512            0 :             let Some(intent_node_id) = shard.intent.get_attached() else {
    4513            0 :                 tracing::warn!(
    4514            0 :                     tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
    4515            0 :                     "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
    4516              :                     shard.policy
    4517              :                 );
    4518            0 :                 return Err(ApiError::Conflict(
    4519            0 :                     "Cannot call timeline API on non-attached tenant".to_string(),
    4520            0 :                 ));
    4521              :             };
    4522              : 
    4523            0 :             if shard.reconciler.is_none() {
     4524              :                 // Optimization: while no reconcile is in flight, we may trust our in-memory state
     4525              :                 // to tell us which pageserver to use. Otherwise we fall through and consult the database.
    4526            0 :                 let Some(node) = locked.nodes.get(intent_node_id) else {
    4527              :                     // This should never happen
    4528            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4529            0 :                         "Shard refers to nonexistent node"
    4530            0 :                     )));
    4531              :                 };
    4532            0 :                 return Ok(node.clone());
    4533            0 :             }
    4534              :         };
    4535              : 
    4536              :         // Look up the latest attached pageserver location from the database
    4537              :         // generation state: this will reflect the progress of any ongoing migration.
     4538              :         // Note that it is not guaranteed to _stay_ here; our caller must still handle
    4539              :         // the case where they call through to the pageserver and get a 404.
    4540            0 :         let db_result = self
    4541            0 :             .persistence
    4542            0 :             .tenant_generations(tenant_shard_id.tenant_id)
    4543            0 :             .await?;
    4544              :         let Some(ShardGenerationState {
    4545              :             tenant_shard_id: _,
    4546              :             generation: _,
    4547            0 :             generation_pageserver: Some(node_id),
    4548            0 :         }) = db_result
    4549            0 :             .into_iter()
    4550            0 :             .find(|s| s.tenant_shard_id == tenant_shard_id)
    4551              :         else {
    4552              :             // This can happen if we raced with a tenant deletion or a shard split.  On a retry
    4553              :             // the caller will either succeed (shard split case), get a proper 404 (deletion case),
     4554              :             // or a conflict response (case where the tenant was detached in the background).
    4555            0 :             return Err(ApiError::ResourceUnavailable(
    4556            0 :                 format!("Shard {tenant_shard_id} not found in database, or is not attached").into(),
    4557            0 :             ));
    4558              :         };
    4559            0 :         let locked = self.inner.read().unwrap();
    4560            0 :         let Some(node) = locked.nodes.get(&node_id) else {
    4561              :             // This should never happen
    4562            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4563            0 :                 "Shard refers to nonexistent node"
    4564            0 :             )));
    4565              :         };
    4566              : 
    4567            0 :         Ok(node.clone())
    4568            0 :     }
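                       : 
                       :     // Condensed sketch of the lookup above (`db_generation_pageserver` stands in for the
                       :     // persistence query; real error handling elided): trust the in-memory intent only while
                       :     // no reconciler is running for this shard, otherwise fall back to the database, which
                       :     // tracks the progress of any ongoing migration.
                       :     //
                       :     //     let node_id = if shard.reconciler.is_none() {
                       :     //         in_memory_attached_node_id   // fast path: intent state only
                       :     //     } else {
                       :     //         db_generation_pageserver     // authoritative path during migrations
                       :     //     };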
    4569              : 
    4570            0 :     pub(crate) fn tenant_locate(
    4571            0 :         &self,
    4572            0 :         tenant_id: TenantId,
    4573            0 :     ) -> Result<TenantLocateResponse, ApiError> {
    4574            0 :         let locked = self.inner.read().unwrap();
    4575            0 :         tracing::info!("Locating shards for tenant {tenant_id}");
    4576              : 
    4577            0 :         let mut result = Vec::new();
    4578            0 :         let mut shard_params: Option<ShardParameters> = None;
    4579              : 
    4580            0 :         for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    4581              :         {
    4582            0 :             let node_id =
    4583            0 :                 shard
    4584            0 :                     .intent
    4585            0 :                     .get_attached()
    4586            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    4587            0 :                         "Cannot locate a tenant that is not attached"
    4588            0 :                     )))?;
    4589              : 
    4590            0 :             let node = locked
    4591            0 :                 .nodes
    4592            0 :                 .get(&node_id)
    4593            0 :                 .expect("Pageservers may not be deleted while referenced");
    4594            0 : 
    4595            0 :             result.push(node.shard_location(*tenant_shard_id));
    4596            0 : 
    4597            0 :             match &shard_params {
    4598            0 :                 None => {
    4599            0 :                     shard_params = Some(ShardParameters {
    4600            0 :                         stripe_size: shard.shard.stripe_size,
    4601            0 :                         count: shard.shard.count,
    4602            0 :                     });
    4603            0 :                 }
    4604            0 :                 Some(params) => {
    4605            0 :                     if params.stripe_size != shard.shard.stripe_size {
    4606              :                         // This should never happen.  We enforce at runtime because it's simpler than
    4607              :                         // adding an extra per-tenant data structure to store the things that should be the same
    4608            0 :                         return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4609            0 :                             "Inconsistent shard stripe size parameters!"
    4610            0 :                         )));
    4611            0 :                     }
    4612              :                 }
    4613              :             }
    4614              :         }
    4615              : 
    4616            0 :         if result.is_empty() {
    4617            0 :             return Err(ApiError::NotFound(
    4618            0 :                 anyhow::anyhow!("No shards for this tenant ID found").into(),
    4619            0 :             ));
    4620            0 :         }
    4621            0 :         let shard_params = shard_params.expect("result is non-empty, therefore this is set");
    4622            0 :         tracing::info!(
    4623            0 :             "Located tenant {} with params {:?} on shards {}",
    4624            0 :             tenant_id,
    4625            0 :             shard_params,
    4626            0 :             result
    4627            0 :                 .iter()
    4628            0 :                 .map(|s| format!("{:?}", s))
    4629            0 :                 .collect::<Vec<_>>()
    4630            0 :                 .join(",")
    4631              :         );
    4632              : 
    4633            0 :         Ok(TenantLocateResponse {
    4634            0 :             shards: result,
    4635            0 :             shard_params,
    4636            0 :         })
    4637            0 :     }
    4638              : 
    4639              :     /// Returns None if the input iterator of shards does not include a shard with number=0
    4640            0 :     fn tenant_describe_impl<'a>(
    4641            0 :         &self,
    4642            0 :         shards: impl Iterator<Item = &'a TenantShard>,
    4643            0 :     ) -> Option<TenantDescribeResponse> {
    4644            0 :         let mut shard_zero = None;
    4645            0 :         let mut describe_shards = Vec::new();
    4646              : 
    4647            0 :         for shard in shards {
    4648            0 :             if shard.tenant_shard_id.is_shard_zero() {
    4649            0 :                 shard_zero = Some(shard);
    4650            0 :             }
    4651              : 
    4652            0 :             describe_shards.push(TenantDescribeResponseShard {
    4653            0 :                 tenant_shard_id: shard.tenant_shard_id,
    4654            0 :                 node_attached: *shard.intent.get_attached(),
    4655            0 :                 node_secondary: shard.intent.get_secondary().to_vec(),
    4656            0 :                 last_error: shard
    4657            0 :                     .last_error
    4658            0 :                     .lock()
    4659            0 :                     .unwrap()
    4660            0 :                     .as_ref()
    4661            0 :                     .map(|e| format!("{e}"))
    4662            0 :                     .unwrap_or("".to_string())
    4663            0 :                     .clone(),
    4664            0 :                 is_reconciling: shard.reconciler.is_some(),
    4665            0 :                 is_pending_compute_notification: shard.pending_compute_notification,
    4666            0 :                 is_splitting: matches!(shard.splitting, SplitState::Splitting),
    4667            0 :                 scheduling_policy: shard.get_scheduling_policy(),
    4668            0 :                 preferred_az_id: shard.preferred_az().map(ToString::to_string),
    4669              :             })
    4670              :         }
    4671              : 
    4672            0 :         let shard_zero = shard_zero?;
    4673              : 
    4674            0 :         Some(TenantDescribeResponse {
    4675            0 :             tenant_id: shard_zero.tenant_shard_id.tenant_id,
    4676            0 :             shards: describe_shards,
    4677            0 :             stripe_size: shard_zero.shard.stripe_size,
    4678            0 :             policy: shard_zero.policy.clone(),
    4679            0 :             config: shard_zero.config.clone(),
    4680            0 :         })
    4681            0 :     }
    4682              : 
    4683            0 :     pub(crate) fn tenant_describe(
    4684            0 :         &self,
    4685            0 :         tenant_id: TenantId,
    4686            0 :     ) -> Result<TenantDescribeResponse, ApiError> {
    4687            0 :         let locked = self.inner.read().unwrap();
    4688            0 : 
    4689            0 :         self.tenant_describe_impl(
    4690            0 :             locked
    4691            0 :                 .tenants
    4692            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4693            0 :                 .map(|(_k, v)| v),
    4694            0 :         )
    4695            0 :         .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
    4696            0 :     }
    4697              : 
     4698              :     /// `limit` & `start_after` are pagination parameters. Since we are walking an in-memory map, pagination does not
     4699              :     /// avoid traversing data, it just avoids returning it. This is suitable for our purposes, since our in-memory
     4700              :     /// maps are small enough to traverse quickly; the pagination exists only to avoid serializing huge JSON responses
     4701              :     /// in our external API.
    4702            0 :     pub(crate) fn tenant_list(
    4703            0 :         &self,
    4704            0 :         limit: Option<usize>,
    4705            0 :         start_after: Option<TenantId>,
    4706            0 :     ) -> Vec<TenantDescribeResponse> {
    4707            0 :         let locked = self.inner.read().unwrap();
    4708              : 
     4709              :         // Apply the start_after parameter
    4710            0 :         let shard_range = match start_after {
    4711            0 :             None => locked.tenants.range(..),
    4712            0 :             Some(tenant_id) => locked.tenants.range(
    4713            0 :                 TenantShardId {
    4714            0 :                     tenant_id,
    4715            0 :                     shard_number: ShardNumber(u8::MAX),
    4716            0 :                     shard_count: ShardCount(u8::MAX),
    4717            0 :                 }..,
    4718            0 :             ),
    4719              :         };
    4720              : 
    4721            0 :         let mut result = Vec::new();
    4722            0 :         for (_tenant_id, tenant_shards) in &shard_range.group_by(|(id, _shard)| id.tenant_id) {
    4723            0 :             result.push(
    4724            0 :                 self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
    4725            0 :                     .expect("Groups are always non-empty"),
    4726            0 :             );
    4727              : 
    4728              :             // Enforce `limit` parameter
    4729            0 :             if let Some(limit) = limit {
    4730            0 :                 if result.len() >= limit {
    4731            0 :                     break;
    4732            0 :                 }
    4733            0 :             }
    4734              :         }
    4735              : 
    4736            0 :         result
    4737            0 :     }
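                       : 
                       :     // Pagination sketch (hypothetical caller): page through every tenant by feeding the last
                       :     // tenant_id of each page back in as start_after, stopping on the first short page.
                       :     //
                       :     //     let page_size = 1000;
                       :     //     let mut start_after = None;
                       :     //     loop {
                       :     //         let page = service.tenant_list(Some(page_size), start_after);
                       :     //         start_after = page.last().map(|t| t.tenant_id);
                       :     //         let done = page.len() < page_size;
                       :     //         // ... serialize and return `page` ...
                       :     //         if done { break; }
                       :     //     }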
    4738              : 
    4739              :     #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
    4740              :     async fn abort_tenant_shard_split(
    4741              :         &self,
    4742              :         op: &TenantShardSplitAbort,
    4743              :     ) -> Result<(), TenantShardSplitAbortError> {
    4744              :         // Cleaning up a split:
    4745              :         // - Parent shards are not destroyed during a split, just detached.
    4746              :         // - Failed pageserver split API calls can leave the remote node with just the parent attached,
    4747              :         //   just the children attached, or both.
    4748              :         //
    4749              :         // Therefore our work to do is to:
    4750              :         // 1. Clean up storage controller's internal state to just refer to parents, no children
    4751              :         // 2. Call out to pageservers to ensure that children are detached
    4752              :         // 3. Call out to pageservers to ensure that parents are attached.
    4753              :         //
    4754              :         // Crash safety:
    4755              :         // - If the storage controller stops running during this cleanup *after* clearing the splitting state
    4756              :         //   from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
    4757              :         //   and detach them.
    4758              :         // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
    4759              :         //   from our database, then we will re-enter this cleanup routine on startup.
    4760              : 
    4761              :         let TenantShardSplitAbort {
    4762              :             tenant_id,
    4763              :             new_shard_count,
    4764              :             new_stripe_size,
    4765              :             ..
    4766              :         } = op;
    4767              : 
    4768              :         // First abort persistent state, if any exists.
    4769              :         match self
    4770              :             .persistence
    4771              :             .abort_shard_split(*tenant_id, *new_shard_count)
    4772              :             .await?
    4773              :         {
    4774              :             AbortShardSplitStatus::Aborted => {
    4775              :                 // Proceed to roll back any child shards created on pageservers
    4776              :             }
    4777              :             AbortShardSplitStatus::Complete => {
    4778              :                 // The split completed (we might hit that path if e.g. our database transaction
     4779              :                 // to write the completion landed in the database, but we dropped the connection
    4780              :                 // before seeing the result).
    4781              :                 //
    4782              :                 // We must update in-memory state to reflect the successful split.
    4783              :                 self.tenant_shard_split_commit_inmem(
    4784              :                     *tenant_id,
    4785              :                     *new_shard_count,
    4786              :                     *new_stripe_size,
    4787              :                 );
    4788              :                 return Ok(());
    4789              :             }
    4790              :         }
    4791              : 
    4792              :         // Clean up in-memory state, and accumulate the list of child locations that need detaching
    4793              :         let detach_locations: Vec<(Node, TenantShardId)> = {
    4794              :             let mut detach_locations = Vec::new();
    4795              :             let mut locked = self.inner.write().unwrap();
    4796              :             let (nodes, tenants, scheduler) = locked.parts_mut();
    4797              : 
    4798              :             for (tenant_shard_id, shard) in
    4799              :                 tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
    4800              :             {
    4801              :                 if shard.shard.count == op.new_shard_count {
    4802              :                     // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
    4803              :                     // is infallible, so if we got an error we shouldn't have got that far.
    4804              :                     tracing::warn!(
    4805              :                         "During split abort, child shard {tenant_shard_id} found in-memory"
    4806              :                     );
    4807              :                     continue;
    4808              :                 }
    4809              : 
    4810              :                 // Add the children of this shard to this list of things to detach
    4811              :                 if let Some(node_id) = shard.intent.get_attached() {
    4812              :                     for child_id in tenant_shard_id.split(*new_shard_count) {
    4813              :                         detach_locations.push((
    4814              :                             nodes
    4815              :                                 .get(node_id)
    4816              :                                 .expect("Intent references nonexistent node")
    4817              :                                 .clone(),
    4818              :                             child_id,
    4819              :                         ));
    4820              :                     }
    4821              :                 } else {
    4822              :                     tracing::warn!(
    4823              :                         "During split abort, shard {tenant_shard_id} has no attached location"
    4824              :                     );
    4825              :                 }
    4826              : 
    4827              :                 tracing::info!("Restoring parent shard {tenant_shard_id}");
    4828              : 
    4829              :                 // Drop any intents that refer to unavailable nodes, to enable this abort to proceed even
    4830              :                 // if the original attachment location is offline.
    4831              :                 if let Some(node_id) = shard.intent.get_attached() {
    4832              :                     if !nodes.get(node_id).unwrap().is_available() {
    4833              :                         tracing::info!(
    4834              :                             "Demoting attached intent for {tenant_shard_id} on unavailable node {node_id}"
    4835              :                         );
    4836              :                         shard.intent.demote_attached(scheduler, *node_id);
    4837              :                     }
    4838              :                 }
    4839              :                 for node_id in shard.intent.get_secondary().clone() {
    4840              :                     if !nodes.get(&node_id).unwrap().is_available() {
    4841              :                         tracing::info!(
    4842              :                             "Dropping secondary intent for {tenant_shard_id} on unavailable node {node_id}"
    4843              :                         );
    4844              :                         shard.intent.remove_secondary(scheduler, node_id);
    4845              :                     }
    4846              :                 }
    4847              : 
    4848              :                 shard.splitting = SplitState::Idle;
    4849              :                 if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
    4850              :                     // If this shard can't be scheduled now (perhaps due to offline nodes or
    4851              :                     // capacity issues), that must not prevent us rolling back a split.  In this
    4852              :                     // case it should be eventually scheduled in the background.
    4853              :                     tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
    4854              :                 }
    4855              : 
    4856              :                 self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    4857              :             }
    4858              : 
    4859              :             // We don't expect any new_shard_count shards to exist here, but drop them just in case
    4860            0 :             tenants.retain(|_id, s| s.shard.count != *new_shard_count);
    4861              : 
    4862              :             detach_locations
    4863              :         };
    4864              : 
    4865              :         for (node, child_id) in detach_locations {
    4866              :             if !node.is_available() {
    4867              :                 // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
    4868              :                 // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
    4869              :                 // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
    4870              :                 // them from the node.
    4871              :                 tracing::warn!(
    4872              :                     "Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated."
    4873              :                 );
    4874              :                 continue;
    4875              :             }
    4876              : 
    4877              :             // Detach the remote child.  If the pageserver split API call is still in progress, this call will get
    4878              :             // a 503 and retry, up to our limit.
    4879              :             tracing::info!("Detaching {child_id} on {node}...");
    4880              :             match node
    4881              :                 .with_client_retries(
    4882            0 :                     |client| async move {
    4883            0 :                         let config = LocationConfig {
    4884            0 :                             mode: LocationConfigMode::Detached,
    4885            0 :                             generation: None,
    4886            0 :                             secondary_conf: None,
    4887            0 :                             shard_number: child_id.shard_number.0,
    4888            0 :                             shard_count: child_id.shard_count.literal(),
    4889            0 :                             // Stripe size and tenant config don't matter when detaching
    4890            0 :                             shard_stripe_size: 0,
    4891            0 :                             tenant_conf: TenantConfig::default(),
    4892            0 :                         };
    4893            0 : 
    4894            0 :                         client.location_config(child_id, config, None, false).await
    4895            0 :                     },
    4896              :                     &self.http_client,
    4897              :                     &self.config.pageserver_jwt_token,
    4898              :                     1,
    4899              :                     10,
    4900              :                     Duration::from_secs(5),
    4901              :                     &self.cancel,
    4902              :                 )
    4903              :                 .await
    4904              :             {
    4905              :                 Some(Ok(_)) => {}
    4906              :                 Some(Err(e)) => {
    4907              :                     // We failed to communicate with the remote node.  This is problematic: we may be
    4908              :                     // leaving it with a rogue child shard.
    4909              :                     tracing::warn!(
    4910              :                         "Failed to detach child {child_id} from node {node} during abort"
    4911              :                     );
    4912              :                     return Err(e.into());
    4913              :                 }
    4914              :                 None => {
     4915              :                     // Cancellation: we were shut down or the node went offline. Shutdown is fine, we'll
    4916              :                     // clean up on restart. The node going offline requires a retry.
    4917              :                     return Err(TenantShardSplitAbortError::Unavailable);
    4918              :                 }
    4919              :             };
    4920              :         }
    4921              : 
    4922              :         tracing::info!("Successfully aborted split");
    4923              :         Ok(())
    4924              :     }
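                       : 
                       :     // Recovery-ordering sketch for the abort path above: the persistent abort_shard_split runs
                       :     // before any pageserver calls, so the two crash windows described at the top of the function
                       :     // resolve as follows:
                       :     //
                       :     //     crash before the DB abort -> tenant still marked Splitting -> cleanup is re-entered on
                       :     //                                                                   startup (see the TODO above)
                       :     //     crash after the DB abort  -> children absent from the DB   -> startup_reconcile detaches
                       :     //                                                                   them as garbage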
    4925              : 
    4926              :     /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
    4927              :     /// of the tenant map to reflect the child shards that exist after the split.
    4928            0 :     fn tenant_shard_split_commit_inmem(
    4929            0 :         &self,
    4930            0 :         tenant_id: TenantId,
    4931            0 :         new_shard_count: ShardCount,
    4932            0 :         new_stripe_size: Option<ShardStripeSize>,
    4933            0 :     ) -> (
    4934            0 :         TenantShardSplitResponse,
    4935            0 :         Vec<(TenantShardId, NodeId, ShardStripeSize)>,
    4936            0 :         Vec<ReconcilerWaiter>,
    4937            0 :     ) {
    4938            0 :         let mut response = TenantShardSplitResponse {
    4939            0 :             new_shards: Vec::new(),
    4940            0 :         };
    4941            0 :         let mut child_locations = Vec::new();
    4942            0 :         let mut waiters = Vec::new();
    4943            0 : 
    4944            0 :         {
    4945            0 :             let mut locked = self.inner.write().unwrap();
    4946            0 : 
    4947            0 :             let parent_ids = locked
    4948            0 :                 .tenants
    4949            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4950            0 :                 .map(|(shard_id, _)| *shard_id)
    4951            0 :                 .collect::<Vec<_>>();
    4952            0 : 
    4953            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    4954            0 :             for parent_id in parent_ids {
    4955            0 :                 let child_ids = parent_id.split(new_shard_count);
    4956              : 
    4957            0 :                 let (pageserver, generation, policy, parent_ident, config, preferred_az) = {
    4958            0 :                     let mut old_state = tenants
    4959            0 :                         .remove(&parent_id)
    4960            0 :                         .expect("It was present, we just split it");
    4961            0 : 
    4962            0 :                     // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
    4963            0 :                     // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
    4964            0 :                     // nothing else can clear this.
    4965            0 :                     assert!(matches!(old_state.splitting, SplitState::Splitting));
    4966              : 
    4967            0 :                     let old_attached = old_state.intent.get_attached().unwrap();
    4968            0 :                     old_state.intent.clear(scheduler);
    4969            0 :                     let generation = old_state.generation.expect("Shard must have been attached");
    4970            0 :                     (
    4971            0 :                         old_attached,
    4972            0 :                         generation,
    4973            0 :                         old_state.policy.clone(),
    4974            0 :                         old_state.shard,
    4975            0 :                         old_state.config.clone(),
    4976            0 :                         old_state.preferred_az().cloned(),
    4977            0 :                     )
    4978            0 :                 };
    4979            0 : 
    4980            0 :                 let mut schedule_context = ScheduleContext::default();
    4981            0 :                 for child in child_ids {
    4982            0 :                     let mut child_shard = parent_ident;
    4983            0 :                     child_shard.number = child.shard_number;
    4984            0 :                     child_shard.count = child.shard_count;
    4985            0 :                     if let Some(stripe_size) = new_stripe_size {
    4986            0 :                         child_shard.stripe_size = stripe_size;
    4987            0 :                     }
    4988              : 
    4989            0 :                     let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
    4990            0 :                     child_observed.insert(
    4991            0 :                         pageserver,
    4992            0 :                         ObservedStateLocation {
    4993            0 :                             conf: Some(attached_location_conf(
    4994            0 :                                 generation,
    4995            0 :                                 &child_shard,
    4996            0 :                                 &config,
    4997            0 :                                 &policy,
    4998            0 :                             )),
    4999            0 :                         },
    5000            0 :                     );
    5001            0 : 
    5002            0 :                     let mut child_state =
    5003            0 :                         TenantShard::new(child, child_shard, policy.clone(), preferred_az.clone());
    5004            0 :                     child_state.intent =
    5005            0 :                         IntentState::single(scheduler, Some(pageserver), preferred_az.clone());
    5006            0 :                     child_state.observed = ObservedState {
    5007            0 :                         locations: child_observed,
    5008            0 :                     };
    5009            0 :                     child_state.generation = Some(generation);
    5010            0 :                     child_state.config = config.clone();
    5011            0 : 
    5012            0 :                     // The child's TenantShard::splitting is intentionally left at the default value of Idle,
    5013            0 :                     // as at this point in the split process we have succeeded and this part is infallible:
    5014            0 :                     // we will never need to do any special recovery from this state.
    5015            0 : 
    5016            0 :                     child_locations.push((child, pageserver, child_shard.stripe_size));
    5017              : 
    5018            0 :                     if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
    5019              :                         // This is not fatal, because we've implicitly already got an attached
    5020              :                         // location for the child shard.  Failure here just means we couldn't
    5021              :                         // find a secondary (e.g. because cluster is overloaded).
    5022            0 :                         tracing::warn!("Failed to schedule child shard {child}: {e}");
    5023            0 :                     }
    5024              :                     // In the background, attach secondary locations for the new shards
    5025            0 :                     if let Some(waiter) = self.maybe_reconcile_shard(
    5026            0 :                         &mut child_state,
    5027            0 :                         nodes,
    5028            0 :                         ReconcilerPriority::High,
    5029            0 :                     ) {
    5030            0 :                         waiters.push(waiter);
    5031            0 :                     }
    5032              : 
    5033            0 :                     tenants.insert(child, child_state);
    5034            0 :                     response.new_shards.push(child);
    5035              :                 }
    5036              :             }
    5037            0 :             (response, child_locations, waiters)
    5038            0 :         }
    5039            0 :     }
    5040              : 
    5041            0 :     async fn tenant_shard_split_start_secondaries(
    5042            0 :         &self,
    5043            0 :         tenant_id: TenantId,
    5044            0 :         waiters: Vec<ReconcilerWaiter>,
    5045            0 :     ) {
     5046              :         // Wait for the initial reconcile of child shards; this creates the secondary locations
    5047            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    5048              :             // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
    5049              :             // their secondaries couldn't be attached.
    5050            0 :             tracing::warn!("Failed to reconcile after split: {e}");
    5051            0 :             return;
    5052            0 :         }
    5053              : 
    5054              :         // Take the state lock to discover the attached & secondary intents for all shards
    5055            0 :         let (attached, secondary) = {
    5056            0 :             let locked = self.inner.read().unwrap();
    5057            0 :             let mut attached = Vec::new();
    5058            0 :             let mut secondary = Vec::new();
    5059              : 
    5060            0 :             for (tenant_shard_id, shard) in
    5061            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5062              :             {
    5063            0 :                 let Some(node_id) = shard.intent.get_attached() else {
    5064              :                     // Unexpected.  Race with a PlacementPolicy change?
    5065            0 :                     tracing::warn!(
    5066            0 :                         "No attached node on {tenant_shard_id} immediately after shard split!"
    5067              :                     );
    5068            0 :                     continue;
    5069              :                 };
    5070              : 
    5071            0 :                 let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
    5072              :                     // No secondary location.  Nothing for us to do.
    5073            0 :                     continue;
    5074              :                 };
    5075              : 
    5076            0 :                 let attached_node = locked
    5077            0 :                     .nodes
    5078            0 :                     .get(node_id)
    5079            0 :                     .expect("Pageservers may not be deleted while referenced");
    5080            0 : 
    5081            0 :                 let secondary_node = locked
    5082            0 :                     .nodes
    5083            0 :                     .get(secondary_node_id)
    5084            0 :                     .expect("Pageservers may not be deleted while referenced");
    5085            0 : 
    5086            0 :                 attached.push((*tenant_shard_id, attached_node.clone()));
    5087            0 :                 secondary.push((*tenant_shard_id, secondary_node.clone()));
    5088              :             }
    5089            0 :             (attached, secondary)
    5090            0 :         };
    5091            0 : 
    5092            0 :         if secondary.is_empty() {
    5093              :             // No secondary locations; nothing for us to do
    5094            0 :             return;
    5095            0 :         }
    5096              : 
    5097            0 :         for result in self
    5098            0 :             .tenant_for_shards_api(
    5099            0 :                 attached,
    5100            0 :                 |tenant_shard_id, client| async move {
    5101            0 :                     client.tenant_heatmap_upload(tenant_shard_id).await
    5102            0 :                 },
    5103            0 :                 1,
    5104            0 :                 1,
    5105            0 :                 SHORT_RECONCILE_TIMEOUT,
    5106            0 :                 &self.cancel,
    5107            0 :             )
    5108            0 :             .await
    5109              :         {
    5110            0 :             if let Err(e) = result {
    5111            0 :                 tracing::warn!("Error calling heatmap upload after shard split: {e}");
    5112            0 :                 return;
    5113            0 :             }
    5114              :         }
    5115              : 
    5116            0 :         for result in self
    5117            0 :             .tenant_for_shards_api(
    5118            0 :                 secondary,
    5119            0 :                 |tenant_shard_id, client| async move {
    5120            0 :                     client
    5121            0 :                         .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
    5122            0 :                         .await
    5123            0 :                 },
    5124            0 :                 1,
    5125            0 :                 1,
    5126            0 :                 SHORT_RECONCILE_TIMEOUT,
    5127            0 :                 &self.cancel,
    5128            0 :             )
    5129            0 :             .await
    5130              :         {
    5131            0 :             if let Err(e) = result {
    5132            0 :                 tracing::warn!("Error calling secondary download after shard split: {e}");
    5133            0 :                 return;
    5134            0 :             }
    5135              :         }
    5136            0 :     }
    5137              : 
    5138            0 :     pub(crate) async fn tenant_shard_split(
    5139            0 :         &self,
    5140            0 :         tenant_id: TenantId,
    5141            0 :         split_req: TenantShardSplitRequest,
    5142            0 :     ) -> Result<TenantShardSplitResponse, ApiError> {
    5143              :         // TODO: return 503 if we get stuck waiting for this lock
    5144              :         // (issue https://github.com/neondatabase/neon/issues/7108)
    5145            0 :         let _tenant_lock = trace_exclusive_lock(
    5146            0 :             &self.tenant_op_locks,
    5147            0 :             tenant_id,
    5148            0 :             TenantOperations::ShardSplit,
    5149            0 :         )
    5150            0 :         .await;
    5151              : 
    5152            0 :         let new_shard_count = ShardCount::new(split_req.new_shard_count);
    5153            0 :         let new_stripe_size = split_req.new_stripe_size;
    5154              : 
    5155              :         // Validate the request and construct parameters.  This phase is fallible, but does not require
    5156              :         // rollback on errors, as it does no I/O and mutates no state.
    5157            0 :         let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
    5158            0 :             ShardSplitAction::NoOp(resp) => return Ok(resp),
    5159            0 :             ShardSplitAction::Split(params) => params,
    5160              :         };
    5161              : 
    5162              :         // Execute this split: this phase mutates state and does remote I/O on pageservers.  If it fails,
    5163              :         // we must roll back.
    5164            0 :         let r = self
    5165            0 :             .do_tenant_shard_split(tenant_id, shard_split_params)
    5166            0 :             .await;
    5167              : 
    5168            0 :         let (response, waiters) = match r {
    5169            0 :             Ok(r) => r,
    5170            0 :             Err(e) => {
    5171            0 :                 // Split might be part-done, we must do work to abort it.
    5172            0 :                 tracing::warn!("Enqueuing background abort of split on {tenant_id}");
    5173            0 :                 self.abort_tx
    5174            0 :                     .send(TenantShardSplitAbort {
    5175            0 :                         tenant_id,
    5176            0 :                         new_shard_count,
    5177            0 :                         new_stripe_size,
    5178            0 :                         _tenant_lock,
    5179            0 :                     })
    5180            0 :                     // Ignore error sending: that just means we're shutting down: aborts are ephemeral so it's fine to drop it.
    5181            0 :                     .ok();
    5182            0 :                 return Err(e);
    5183              :             }
    5184              :         };
    5185              : 
    5186              :         // The split is now complete.  As an optimization, we will trigger all the child shards to upload
    5187              :         // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
    5188              :         // for the background heatmap/download interval before secondaries get warm enough to migrate shards
    5189              :         // in [`Self::optimize_all`]
    5190            0 :         self.tenant_shard_split_start_secondaries(tenant_id, waiters)
    5191            0 :             .await;
    5192            0 :         Ok(response)
    5193            0 :     }
    5194              : 
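                      :     /// Validation phase of a shard split: under the service lock, work out which
                      :     /// parent shards will be split and into which children.  Detects retries
                      :     /// (children already exist) and conflicting in-progress splits.  This phase
                      :     /// mutates no state and performs no I/O.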
    5195            0 :     fn prepare_tenant_shard_split(
    5196            0 :         &self,
    5197            0 :         tenant_id: TenantId,
    5198            0 :         split_req: TenantShardSplitRequest,
    5199            0 :     ) -> Result<ShardSplitAction, ApiError> {
    5200            0 :         fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
    5201            0 :             anyhow::anyhow!("failpoint")
    5202            0 :         )));
    5203              : 
    5204            0 :         let mut policy = None;
    5205            0 :         let mut config = None;
    5206            0 :         let mut shard_ident = None;
    5207            0 :         let mut preferred_az_id = None;
    5208              :         // Validate input, and calculate which shards we will create
    5209            0 :         let (old_shard_count, targets) =
    5210              :             {
    5211            0 :                 let locked = self.inner.read().unwrap();
    5212            0 : 
    5213            0 :                 let pageservers = locked.nodes.clone();
    5214            0 : 
    5215            0 :                 let mut targets = Vec::new();
    5216            0 : 
    5217            0 :                 // In case this is a retry, count how many already-split shards we found
    5218            0 :                 let mut children_found = Vec::new();
    5219            0 :                 let mut old_shard_count = None;
    5220              : 
    5221            0 :                 for (tenant_shard_id, shard) in
    5222            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5223              :                 {
    5224            0 :                     match shard.shard.count.count().cmp(&split_req.new_shard_count) {
    5225              :                         Ordering::Equal => {
    5226              :                             // Already split this shard in an earlier attempt
    5227            0 :                             children_found.push(*tenant_shard_id);
    5228            0 :                             continue;
    5229              :                         }
    5230              :                         Ordering::Greater => {
    5231            0 :                             return Err(ApiError::BadRequest(anyhow::anyhow!(
    5232            0 :                                 "Requested count {} but already have shards at count {}",
    5233            0 :                                 split_req.new_shard_count,
    5234            0 :                                 shard.shard.count.count()
    5235            0 :                             )));
    5236              :                         }
    5237            0 :                         Ordering::Less => {
    5238            0 :                             // Fall through: this shard has a lower count than requested,
    5239            0 :                             // so it is a candidate for splitting.
    5240            0 :                         }
    5241            0 :                     }
    5242            0 : 
    5243            0 :                     match old_shard_count {
    5244            0 :                         None => old_shard_count = Some(shard.shard.count),
    5245            0 :                         Some(old_shard_count) => {
    5246            0 :                             if old_shard_count != shard.shard.count {
    5247              :                                 // We may hit this case if a caller asked for two splits to
    5248              :                                 // different sizes, before the first one is complete.
    5249              :                                 // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
    5250              :                                 // of shard_count=1 and shard_count=2 shards in the map.
    5251            0 :                                 return Err(ApiError::Conflict(
    5252            0 :                                     "Cannot split, currently mid-split".to_string(),
    5253            0 :                                 ));
    5254            0 :                             }
    5255              :                         }
    5256              :                     }
    5257            0 :                     if policy.is_none() {
    5258            0 :                         policy = Some(shard.policy.clone());
    5259            0 :                     }
    5260            0 :                     if shard_ident.is_none() {
    5261            0 :                         shard_ident = Some(shard.shard);
    5262            0 :                     }
    5263            0 :                     if config.is_none() {
    5264            0 :                         config = Some(shard.config.clone());
    5265            0 :                     }
    5266            0 :                     if preferred_az_id.is_none() {
    5267            0 :                         preferred_az_id = shard.preferred_az().cloned();
    5268            0 :                     }
    5269              : 
    5270            0 :                     if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
    5271            0 :                         tracing::info!(
    5272            0 :                             "Tenant shard {} already has shard count {}",
    5273              :                             tenant_shard_id,
    5274              :                             split_req.new_shard_count
    5275              :                         );
    5276            0 :                         continue;
    5277            0 :                     }
    5278              : 
    5279            0 :                     let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
    5280            0 :                         anyhow::anyhow!("Cannot split a tenant that is not attached"),
    5281            0 :                     ))?;
    5282              : 
    5283            0 :                     let node = pageservers
    5284            0 :                         .get(&node_id)
    5285            0 :                         .expect("Pageservers may not be deleted while referenced");
    5286            0 : 
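                      :                     // `TenantShardId::split` enumerates the child shard IDs that this
                      :                     // parent will become at the new shard count; these are later checked
                      :                     // against the IDs the pageserver reports back from its split call.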
    5287            0 :                     targets.push(ShardSplitTarget {
    5288            0 :                         parent_id: *tenant_shard_id,
    5289            0 :                         node: node.clone(),
    5290            0 :                         child_ids: tenant_shard_id
    5291            0 :                             .split(ShardCount::new(split_req.new_shard_count)),
    5292            0 :                     });
    5293              :                 }
    5294              : 
    5295            0 :                 if targets.is_empty() {
    5296            0 :                     if children_found.len() == split_req.new_shard_count as usize {
    5297            0 :                         return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
    5298            0 :                             new_shards: children_found,
    5299            0 :                         }));
    5300              :                     } else {
    5301              :                         // No shards found to split, and no existing children found: the
    5302              :                         // tenant doesn't exist at all.
    5303            0 :                         return Err(ApiError::NotFound(
    5304            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    5305            0 :                         ));
    5306              :                     }
    5307            0 :                 }
    5308            0 : 
    5309            0 :                 (old_shard_count, targets)
    5310            0 :             };
    5311            0 : 
    5312            0 :         // unwrap safety: we would have returned above if we didn't find at least one shard to split
    5313            0 :         let old_shard_count = old_shard_count.unwrap();
    5314            0 :         let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
    5315              :             // This ShardIdentity will be used as the template for all children, so this implicitly
    5316              :             // applies the new stripe size to the children.
    5317            0 :             let mut shard_ident = shard_ident.unwrap();
    5318            0 :             if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
    5319            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5320            0 :                     "Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards",
    5321            0 :                     shard_ident.stripe_size
    5322            0 :                 )));
    5323            0 :             }
    5324            0 : 
    5325            0 :             shard_ident.stripe_size = new_stripe_size;
    5326            0 :             tracing::info!("Applied stripe size {}", shard_ident.stripe_size.0);
    5327            0 :             shard_ident
    5328              :         } else {
    5329            0 :             shard_ident.unwrap()
    5330              :         };
    5331            0 :         let policy = policy.unwrap();
    5332            0 :         let config = config.unwrap();
    5333            0 : 
    5334            0 :         Ok(ShardSplitAction::Split(Box::new(ShardSplitParams {
    5335            0 :             old_shard_count,
    5336            0 :             new_shard_count: ShardCount::new(split_req.new_shard_count),
    5337            0 :             new_stripe_size: split_req.new_stripe_size,
    5338            0 :             targets,
    5339            0 :             policy,
    5340            0 :             config,
    5341            0 :             shard_ident,
    5342            0 :             preferred_az_id,
    5343            0 :         })))
    5344            0 :     }
    5345              : 
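                      :     /// Execution phase of a shard split.  Detaches secondary locations, persists
                      :     /// the child shards (which also guards against concurrent splits), asks each
                      :     /// parent's pageserver to perform the split, marks the split complete in the
                      :     /// database, swaps the children into in-memory state, and notifies computes.
                      :     /// An error here can leave the tenant mid-split: the caller is responsible
                      :     /// for enqueuing an abort.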
    5346            0 :     async fn do_tenant_shard_split(
    5347            0 :         &self,
    5348            0 :         tenant_id: TenantId,
    5349            0 :         params: Box<ShardSplitParams>,
    5350            0 :     ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
    5351            0 :         // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
    5352            0 :         // request could occur here, deleting or mutating the tenant.  begin_shard_split checks that the
    5353            0 :         // parent shards exist as expected, but it would be neater to do the above pre-checks within the
    5354            0 :         // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
    5355            0 :         // (https://github.com/neondatabase/neon/issues/6676)
    5356            0 : 
    5357            0 :         let ShardSplitParams {
    5358            0 :             old_shard_count,
    5359            0 :             new_shard_count,
    5360            0 :             new_stripe_size,
    5361            0 :             mut targets,
    5362            0 :             policy,
    5363            0 :             config,
    5364            0 :             shard_ident,
    5365            0 :             preferred_az_id,
    5366            0 :         } = *params;
    5367              : 
    5368              :         // Drop any secondary locations: pageservers do not support splitting these, and in any case the
    5369              :         // end-state for a split tenant will usually be to have secondary locations on different nodes.
    5370              :         // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
    5371              :         // at the time of split.
    5372            0 :         let waiters = {
    5373            0 :             let mut locked = self.inner.write().unwrap();
    5374            0 :             let mut waiters = Vec::new();
    5375            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5376            0 :             for target in &mut targets {
    5377            0 :                 let Some(shard) = tenants.get_mut(&target.parent_id) else {
    5378              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    5379            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5380            0 :                         "Shard {} not found",
    5381            0 :                         target.parent_id
    5382            0 :                     )));
    5383              :                 };
    5384              : 
    5385            0 :                 if shard.intent.get_attached() != &Some(target.node.get_id()) {
    5386              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    5387            0 :                     return Err(ApiError::Conflict(format!(
    5388            0 :                         "Shard {} unexpectedly rescheduled during split",
    5389            0 :                         target.parent_id
    5390            0 :                     )));
    5391            0 :                 }
    5392            0 : 
    5393            0 :                 // Irrespective of PlacementPolicy, clear secondary locations from intent
    5394            0 :                 shard.intent.clear_secondary(scheduler);
    5395              : 
    5396              :                 // Run Reconciler to execute detach of secondary locations.
    5397            0 :                 if let Some(waiter) =
    5398            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    5399            0 :                 {
    5400            0 :                     waiters.push(waiter);
    5401            0 :                 }
    5402              :             }
    5403            0 :             waiters
    5404            0 :         };
    5405            0 :         self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
    5406              : 
    5407              :         // Before creating any new child shards in memory or on the pageservers, persist them: this
    5408              :         // enables us to ensure that we will always be able to clean up if something goes wrong.  This also
    5409              :         // acts as the protection against two concurrent attempts to split: one of them will get a database
    5410              :         // error trying to insert the child shards.
    5411            0 :         let mut child_tsps = Vec::new();
    5412            0 :         for target in &targets {
    5413            0 :             let mut this_child_tsps = Vec::new();
    5414            0 :             for child in &target.child_ids {
    5415            0 :                 let mut child_shard = shard_ident;
    5416            0 :                 child_shard.number = child.shard_number;
    5417            0 :                 child_shard.count = child.shard_count;
    5418            0 : 
    5419            0 :                 tracing::info!(
    5420            0 :                     "Create child shard persistence with stripe size {}",
    5421              :                     shard_ident.stripe_size.0
    5422              :                 );
    5423              : 
    5424            0 :                 this_child_tsps.push(TenantShardPersistence {
    5425            0 :                     tenant_id: child.tenant_id.to_string(),
    5426            0 :                     shard_number: child.shard_number.0 as i32,
    5427            0 :                     shard_count: child.shard_count.literal() as i32,
    5428            0 :                     shard_stripe_size: shard_ident.stripe_size.0 as i32,
    5429            0 :                     // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
    5430            0 :                     // populate the correct generation as part of its transaction, to protect us
    5431            0 :                     // against racing with changes in the state of the parent.
    5432            0 :                     generation: None,
    5433            0 :                     generation_pageserver: Some(target.node.get_id().0 as i64),
    5434            0 :                     placement_policy: serde_json::to_string(&policy).unwrap(),
    5435            0 :                     config: serde_json::to_string(&config).unwrap(),
    5436            0 :                     splitting: SplitState::Splitting,
    5437            0 : 
    5438            0 :                     // Scheduling policies and preferred AZ do not carry through to children
    5439            0 :                     scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    5440            0 :                         .unwrap(),
    5441            0 :                     preferred_az_id: preferred_az_id.as_ref().map(|az| az.0.clone()),
    5442            0 :                 });
    5443            0 :             }
    5444              : 
    5445            0 :             child_tsps.push((target.parent_id, this_child_tsps));
    5446              :         }
    5447              : 
    5448            0 :         if let Err(e) = self
    5449            0 :             .persistence
    5450            0 :             .begin_shard_split(old_shard_count, tenant_id, child_tsps)
    5451            0 :             .await
    5452              :         {
    5453            0 :             match e {
    5454              :                 DatabaseError::Query(diesel::result::Error::DatabaseError(
    5455              :                     DatabaseErrorKind::UniqueViolation,
    5456              :                     _,
    5457              :                 )) => {
    5458              :                     // Inserting a child shard violated a unique constraint: we raced with another call to
    5459              :                     // this function
    5460            0 :                     tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
    5461            0 :                     return Err(ApiError::Conflict("Tenant is already splitting".into()));
    5462              :                 }
    5463            0 :                 _ => return Err(ApiError::InternalServerError(e.into())),
    5464              :             }
    5465            0 :         }
    5466            0 :         fail::fail_point!("shard-split-post-begin", |_| Err(
    5467            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    5468            0 :         ));
    5469              : 
    5470              :         // Now that I have persisted the splitting state, apply it in-memory.  This is infallible, so
    5471              :         // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
    5472              :         // is not set in memory, then it was not persisted.
    5473              :         {
    5474            0 :             let mut locked = self.inner.write().unwrap();
    5475            0 :             for target in &targets {
    5476            0 :                 if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
    5477            0 :                     parent_shard.splitting = SplitState::Splitting;
    5478            0 :                     // Put the observed state to None, to reflect that it is indeterminate once we start the
    5479            0 :                     // split operation.
    5480            0 :                     parent_shard
    5481            0 :                         .observed
    5482            0 :                         .locations
    5483            0 :                         .insert(target.node.get_id(), ObservedStateLocation { conf: None });
    5484            0 :                 }
    5485              :             }
    5486              :         }
    5487              : 
    5488              :         // TODO: issue split calls concurrently (this only matters once we're splitting
    5489              :         // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
    5490              : 
    5491            0 :         for target in &targets {
    5492              :             let ShardSplitTarget {
    5493            0 :                 parent_id,
    5494            0 :                 node,
    5495            0 :                 child_ids,
    5496            0 :             } = target;
    5497            0 :             let client = PageserverClient::new(
    5498            0 :                 node.get_id(),
    5499            0 :                 self.http_client.clone(),
    5500            0 :                 node.base_url(),
    5501            0 :                 self.config.pageserver_jwt_token.as_deref(),
    5502            0 :             );
    5503            0 :             let response = client
    5504            0 :                 .tenant_shard_split(
    5505            0 :                     *parent_id,
    5506            0 :                     TenantShardSplitRequest {
    5507            0 :                         new_shard_count: new_shard_count.literal(),
    5508            0 :                         new_stripe_size,
    5509            0 :                     },
    5510            0 :                 )
    5511            0 :                 .await
    5512            0 :                 .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
    5513              : 
    5514            0 :             fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
    5515            0 :                 "failpoint".to_string()
    5516            0 :             )));
    5517              : 
    5518            0 :             failpoint_support::sleep_millis_async!("shard-split-post-remote-sleep", &self.cancel);
    5519              : 
    5520            0 :             tracing::info!(
    5521            0 :                 "Split {} into {}",
    5522            0 :                 parent_id,
    5523            0 :                 response
    5524            0 :                     .new_shards
    5525            0 :                     .iter()
    5526            0 :                     .map(|s| format!("{:?}", s))
    5527            0 :                     .collect::<Vec<_>>()
    5528            0 :                     .join(",")
    5529              :             );
    5530              : 
    5531            0 :             if &response.new_shards != child_ids {
    5532              :                 // This should never happen: the pageserver should agree with us on how shard splits work.
    5533            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5534            0 :                     "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
    5535            0 :                     parent_id,
    5536            0 :                     response.new_shards,
    5537            0 :                     child_ids
    5538            0 :                 )));
    5539            0 :             }
    5540              :         }
    5541              : 
    5542            0 :         pausable_failpoint!("shard-split-pre-complete");
    5543              : 
    5544              :         // TODO: if the pageserver restarted concurrently with our split API call,
    5545              :         // the actual generation of the child shard might differ from the generation
    5546              :         // we expect it to have.  In order for our in-database generation to end up
    5547              :         // correct, we should carry the child generation back in the response and apply it here
    5548              :         // in complete_shard_split (and apply the correct generation in memory)
    5549              :         // (or, we can carry generation in the request and reject the request if
    5550              :         //  it doesn't match, but that requires more retry logic on this side)
    5551              : 
    5552            0 :         self.persistence
    5553            0 :             .complete_shard_split(tenant_id, old_shard_count, new_shard_count)
    5554            0 :             .await?;
    5555              : 
    5556            0 :         fail::fail_point!("shard-split-post-complete", |_| Err(
    5557            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    5558            0 :         ));
    5559              : 
    5560              :         // Replace all the shards we just split with their children: this phase is infallible.
    5561            0 :         let (response, child_locations, waiters) =
    5562            0 :             self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
    5563            0 : 
    5564            0 :         // Send compute notifications for all the new shards
    5565            0 :         let mut failed_notifications = Vec::new();
    5566            0 :         for (child_id, child_ps, stripe_size) in child_locations {
    5567            0 :             if let Err(e) = self
    5568            0 :                 .compute_hook
    5569            0 :                 .notify(
    5570            0 :                     compute_hook::ShardUpdate {
    5571            0 :                         tenant_shard_id: child_id,
    5572            0 :                         node_id: child_ps,
    5573            0 :                         stripe_size,
    5574            0 :                         preferred_az: preferred_az_id.as_ref().map(Cow::Borrowed),
    5575            0 :                     },
    5576            0 :                     &self.cancel,
    5577            0 :                 )
    5578            0 :                 .await
    5579              :             {
    5580            0 :                 tracing::warn!(
    5581            0 :                     "Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
    5582              :                     child_id,
    5583              :                     child_ps
    5584              :                 );
    5585            0 :                 failed_notifications.push(child_id);
    5586            0 :             }
    5587              :         }
    5588              : 
    5589              :         // If we failed any compute notifications, make a note to retry later.
    5590            0 :         if !failed_notifications.is_empty() {
    5591            0 :             let mut locked = self.inner.write().unwrap();
    5592            0 :             for failed in failed_notifications {
    5593            0 :                 if let Some(shard) = locked.tenants.get_mut(&failed) {
    5594            0 :                     shard.pending_compute_notification = true;
    5595            0 :                 }
    5596              :             }
    5597            0 :         }
    5598              : 
    5599            0 :         Ok((response, waiters))
    5600            0 :     }
    5601              : 
    5602              :     /// A graceful migration: update the preferred node and let optimisation handle the migration
    5603              :     /// in the background (may take a long time as it will fully warm up a location before cutting over)
    5604              :     ///
    5605              :     /// Our external API calls this a 'prewarm=true' migration, but internally it isn't a special prewarm step: it's
    5606              :     /// just a migration that uses the same graceful procedure as our background scheduling optimisations would use.
    5607            0 :     fn tenant_shard_migrate_with_prewarm(
    5608            0 :         &self,
    5609            0 :         migrate_req: &TenantShardMigrateRequest,
    5610            0 :         shard: &mut TenantShard,
    5611            0 :         scheduler: &mut Scheduler,
    5612            0 :         schedule_context: ScheduleContext,
    5613            0 :     ) -> Result<Option<ScheduleOptimization>, ApiError> {
    5614            0 :         shard.set_preferred_node(Some(migrate_req.node_id));
    5615            0 : 
    5616            0 :         // Generate whatever the initial change to the intent is: this could be creation of a secondary, or
    5617            0 :         // cutting over to an existing secondary.  Caller is responsible for validating this before applying it,
    5618            0 :         // e.g. by checking secondary is warm enough.
    5619            0 :         Ok(shard.optimize_attachment(scheduler, &schedule_context))
    5620            0 :     }
    5621              : 
    5622              :     /// Immediate migration: directly update the intent state and kick off a reconciler
    5623            0 :     fn tenant_shard_migrate_immediate(
    5624            0 :         &self,
    5625            0 :         migrate_req: &TenantShardMigrateRequest,
    5626            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    5627            0 :         shard: &mut TenantShard,
    5628            0 :         scheduler: &mut Scheduler,
    5629            0 :     ) -> Result<Option<ReconcilerWaiter>, ApiError> {
    5630            0 :         // Non-graceful migration: update the intent state immediately
    5631            0 :         let old_attached = *shard.intent.get_attached();
    5632            0 :         match shard.policy {
    5633            0 :             PlacementPolicy::Attached(n) => {
    5634            0 :                 // If our new attached node was a secondary, it no longer should be.
    5635            0 :                 shard
    5636            0 :                     .intent
    5637            0 :                     .remove_secondary(scheduler, migrate_req.node_id);
    5638            0 : 
    5639            0 :                 shard
    5640            0 :                     .intent
    5641            0 :                     .set_attached(scheduler, Some(migrate_req.node_id));
    5642              : 
    5643              :                 // If we were already attached to something, demote that to a secondary
    5644            0 :                 if let Some(old_attached) = old_attached {
    5645            0 :                     if n > 0 {
    5646              :                         // Remove other secondaries to make room for the location we'll demote
    5647            0 :                         while shard.intent.get_secondary().len() >= n {
    5648            0 :                             shard.intent.pop_secondary(scheduler);
    5649            0 :                         }
    5650              : 
    5651            0 :                         shard.intent.push_secondary(scheduler, old_attached);
    5652            0 :                     }
    5653            0 :                 }
    5654              :             }
    5655            0 :             PlacementPolicy::Secondary => {
    5656            0 :                 shard.intent.clear(scheduler);
    5657            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    5658            0 :             }
    5659              :             PlacementPolicy::Detached => {
    5660            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5661            0 :                     "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
    5662            0 :                 )));
    5663              :             }
    5664              :         }
    5665              : 
    5666            0 :         tracing::info!("Migrating: new intent {:?}", shard.intent);
    5667            0 :         shard.sequence = shard.sequence.next();
    5668            0 :         shard.set_preferred_node(None); // Abort any in-flight graceful migration
    5669            0 :         Ok(self.maybe_configured_reconcile_shard(
    5670            0 :             shard,
    5671            0 :             nodes,
    5672            0 :             (&migrate_req.migration_config).into(),
    5673            0 :         ))
    5674            0 :     }
    5675              : 
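                      :     /// Migrate a tenant shard's attached location to the node named in the request.
                      :     /// Graceful (`prewarm`) migrations are expressed as a schedule optimisation and
                      :     /// completed in the background; immediate migrations rewrite the intent and
                      :     /// reconcile right away.  Unless the scheduler is overridden, migrations to
                      :     /// unavailable or worse-scoring nodes are rejected.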
    5676            0 :     pub(crate) async fn tenant_shard_migrate(
    5677            0 :         &self,
    5678            0 :         tenant_shard_id: TenantShardId,
    5679            0 :         migrate_req: TenantShardMigrateRequest,
    5680            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    5681              :         // Depending on whether the migration actually changes anything, and whether it's graceful or
    5682              :         // immediate, we might get a different outcome to handle
    5683              :         enum MigrationOutcome {
    5684              :             Optimization(Option<ScheduleOptimization>),
    5685              :             Reconcile(Option<ReconcilerWaiter>),
    5686              :         }
    5687              : 
    5688            0 :         let outcome = {
    5689            0 :             let mut locked = self.inner.write().unwrap();
    5690            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5691              : 
    5692            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    5693            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5694            0 :                     "Node {} not found",
    5695            0 :                     migrate_req.node_id
    5696            0 :                 )));
    5697              :             };
    5698              : 
    5699              :             // Migration to an unavailable node requires the force flag
    5700            0 :             if !node.is_available() {
    5701            0 :                 if migrate_req.migration_config.override_scheduler {
    5702              :                     // Warn but proceed: the caller may intend to manually adjust the placement of
    5703              :                     // a shard even if the node is down, e.g. if intervening during an incident.
    5704            0 :                     tracing::warn!("Forcibly migrating to unavailable node {node}");
    5705              :                 } else {
    5706            0 :                     tracing::warn!("Node {node} is unavailable, refusing migration");
    5707            0 :                     return Err(ApiError::PreconditionFailed(
    5708            0 :                         format!("Node {node} is unavailable").into_boxed_str(),
    5709            0 :                     ));
    5710              :                 }
    5711            0 :             }
    5712              : 
    5713              :             // Calculate the ScheduleContext for this tenant
    5714            0 :             let mut schedule_context = ScheduleContext::default();
    5715            0 :             for (_shard_id, shard) in
    5716            0 :                 tenants.range(TenantShardId::tenant_range(tenant_shard_id.tenant_id))
    5717            0 :             {
    5718            0 :                 schedule_context.avoid(&shard.intent.all_pageservers());
    5719            0 :             }
    5720              : 
    5721              :             // Look up the specific shard we will migrate
    5722            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    5723            0 :                 return Err(ApiError::NotFound(
    5724            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    5725            0 :                 ));
    5726              :             };
    5727              : 
    5728              :             // Migration to a node with unfavorable scheduling score requires a force flag, because it might just
    5729              :             // be migrated back by the optimiser.
    5730            0 :             if let Some(better_node) = shard.find_better_location::<AttachedShardTag>(
    5731            0 :                 scheduler,
    5732            0 :                 &schedule_context,
    5733            0 :                 migrate_req.node_id,
    5734            0 :                 &[],
    5735            0 :             ) {
    5736            0 :                 if !migrate_req.migration_config.override_scheduler {
    5737            0 :                     return Err(ApiError::PreconditionFailed(
    5738            0 :                         "Migration to a worse-scoring node".into(),
    5739            0 :                     ));
    5740              :                 } else {
    5741            0 :                     tracing::info!(
    5742            0 :                         "Migrating to a worse-scoring node {} (optimiser would prefer {better_node})",
    5743              :                         migrate_req.node_id
    5744              :                     );
    5745              :                 }
    5746            0 :             }
    5747              : 
    5748            0 :             if let Some(origin_node_id) = migrate_req.origin_node_id {
    5749            0 :                 if shard.intent.get_attached() != &Some(origin_node_id) {
    5750            0 :                     return Err(ApiError::PreconditionFailed(
    5751            0 :                         format!(
    5752            0 :                             "Migration expected to originate from {} but shard is on {:?}",
    5753            0 :                             origin_node_id,
    5754            0 :                             shard.intent.get_attached()
    5755            0 :                         )
    5756            0 :                         .into(),
    5757            0 :                     ));
    5758            0 :                 }
    5759            0 :             }
    5760              : 
    5761            0 :             if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    5762              :                 // No-op case: we will still proceed to wait for reconciliation in case it is
    5763              :                 // incomplete from an earlier update to the intent.
    5764            0 :                 tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
    5765              : 
    5766              :                 // An instruction to migrate to the currently attached node should
    5767              :                 // cancel any pending graceful migration
    5768            0 :                 shard.set_preferred_node(None);
    5769            0 : 
    5770            0 :                 MigrationOutcome::Reconcile(self.maybe_configured_reconcile_shard(
    5771            0 :                     shard,
    5772            0 :                     nodes,
    5773            0 :                     (&migrate_req.migration_config).into(),
    5774            0 :                 ))
    5775            0 :             } else if migrate_req.migration_config.prewarm {
    5776            0 :                 MigrationOutcome::Optimization(self.tenant_shard_migrate_with_prewarm(
    5777            0 :                     &migrate_req,
    5778            0 :                     shard,
    5779            0 :                     scheduler,
    5780            0 :                     schedule_context,
    5781            0 :                 )?)
    5782              :             } else {
    5783            0 :                 MigrationOutcome::Reconcile(self.tenant_shard_migrate_immediate(
    5784            0 :                     &migrate_req,
    5785            0 :                     nodes,
    5786            0 :                     shard,
    5787            0 :                     scheduler,
    5788            0 :                 )?)
    5789              :             }
    5790              :         };
    5791              : 
    5792              :         // We may need to validate + apply an optimisation, or we may need to just retrieve a reconcile waiter
    5793            0 :         let waiter = match outcome {
    5794            0 :             MigrationOutcome::Optimization(Some(optimization)) => {
    5795              :                 // Validate and apply the optimization -- this would happen anyway in background reconcile loop, but
    5796              :                 // we might as well do it more promptly as this is a direct external request.
    5797            0 :                 let mut validated = self
    5798            0 :                     .optimize_all_validate(vec![(tenant_shard_id, optimization)])
    5799            0 :                     .await;
    5800            0 :                 if let Some((_shard_id, optimization)) = validated.pop() {
    5801            0 :                     let mut locked = self.inner.write().unwrap();
    5802            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    5803            0 :                     let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    5804              :                         // Rare but possible: tenant is removed between generating optimisation and validating it.
    5805            0 :                         return Err(ApiError::NotFound(
    5806            0 :                             anyhow::anyhow!("Tenant shard not found").into(),
    5807            0 :                         ));
    5808              :                     };
    5809              : 
    5810            0 :                     if !shard.apply_optimization(scheduler, optimization) {
    5811              :                         // This can happen but is unusual enough to warn on: something else changed in the shard that made the optimisation stale
    5812              :                         // and therefore not applied.
    5813            0 :                         tracing::warn!(
    5814            0 :                             "Schedule optimisation generated during graceful migration was not applied, shard changed?"
    5815              :                         );
    5816            0 :                     }
    5817            0 :                     self.maybe_configured_reconcile_shard(
    5818            0 :                         shard,
    5819            0 :                         nodes,
    5820            0 :                         (&migrate_req.migration_config).into(),
    5821            0 :                     )
    5822              :                 } else {
    5823            0 :                     None
    5824              :                 }
    5825              :             }
    5826            0 :             MigrationOutcome::Optimization(None) => None,
    5827            0 :             MigrationOutcome::Reconcile(waiter) => waiter,
    5828              :         };
    5829              : 
    5830              :         // Finally, wait for any reconcile we started to complete.  In the case of immediate-mode migrations to cold
    5831              :         // locations, this has a good chance of timing out.
    5832            0 :         if let Some(waiter) = waiter {
    5833            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    5834              :         } else {
    5835            0 :             tracing::info!("Migration is a no-op");
    5836              :         }
    5837              : 
    5838            0 :         Ok(TenantShardMigrateResponse {})
    5839            0 :     }
    5840              : 
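                      :     /// Migrate a tenant shard's secondary location to the node named in the request,
                      :     /// replacing any existing secondaries, then reconcile.  A no-op if that node is
                      :     /// already the sole secondary or is currently the attached location.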
    5841            0 :     pub(crate) async fn tenant_shard_migrate_secondary(
    5842            0 :         &self,
    5843            0 :         tenant_shard_id: TenantShardId,
    5844            0 :         migrate_req: TenantShardMigrateRequest,
    5845            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    5846            0 :         let waiter = {
    5847            0 :             let mut locked = self.inner.write().unwrap();
    5848            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5849              : 
    5850            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    5851            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    5852            0 :                     "Node {} not found",
    5853            0 :                     migrate_req.node_id
    5854            0 :                 )));
    5855              :             };
    5856              : 
    5857            0 :             if !node.is_available() {
    5858              :                 // Warn but proceed: the caller may intend to manually adjust the placement of
    5859              :                 // a shard even if the node is down, e.g. if intervening during an incident.
    5860            0 :                 tracing::warn!("Migrating to unavailable node {node}");
    5861            0 :             }
    5862              : 
    5863            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    5864            0 :                 return Err(ApiError::NotFound(
    5865            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    5866            0 :                 ));
    5867              :             };
    5868              : 
    5869            0 :             if shard.intent.get_secondary().len() == 1
    5870            0 :                 && shard.intent.get_secondary()[0] == migrate_req.node_id
    5871              :             {
    5872            0 :                 tracing::info!(
    5873            0 :                     "Migrating secondary to {node}: intent is unchanged {:?}",
    5874              :                     shard.intent
    5875              :                 );
    5876            0 :             } else if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    5877            0 :                 tracing::info!(
    5878            0 :                     "Migrating secondary to {node}: already attached where we were asked to create a secondary"
    5879              :                 );
    5880              :             } else {
    5881            0 :                 let old_secondaries = shard.intent.get_secondary().clone();
    5882            0 :                 for secondary in old_secondaries {
    5883            0 :                     shard.intent.remove_secondary(scheduler, secondary);
    5884            0 :                 }
    5885              : 
    5886            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    5887            0 :                 shard.sequence = shard.sequence.next();
    5888            0 :                 tracing::info!(
    5889            0 :                     "Migrating secondary to {node}: new intent {:?}",
    5890              :                     shard.intent
    5891              :                 );
    5892              :             }
    5893              : 
    5894            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    5895              :         };
    5896              : 
    5897            0 :         if let Some(waiter) = waiter {
    5898            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    5899              :         } else {
    5900            0 :             tracing::info!("Migration is a no-op");
    5901              :         }
    5902              : 
    5903            0 :         Ok(TenantShardMigrateResponse {})
    5904            0 :     }
    5905              : 
    5906              :     /// 'cancel' in this context means cancel any ongoing reconcile
    5907            0 :     pub(crate) async fn tenant_shard_cancel_reconcile(
    5908            0 :         &self,
    5909            0 :         tenant_shard_id: TenantShardId,
    5910            0 :     ) -> Result<(), ApiError> {
    5911              :         // Take state lock and fire the cancellation token, after which we drop lock and wait for any ongoing reconcile to complete
    5912            0 :         let waiter = {
    5913            0 :             let locked = self.inner.write().unwrap();
    5914            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    5915            0 :                 return Err(ApiError::NotFound(
    5916            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    5917            0 :                 ));
    5918              :             };
    5919              : 
    5920            0 :             let waiter = shard.get_waiter();
    5921            0 :             match waiter {
    5922              :                 None => {
    5923            0 :                     tracing::info!("Shard does not have an ongoing Reconciler");
    5924            0 :                     return Ok(());
    5925              :                 }
    5926            0 :                 Some(waiter) => {
    5927            0 :                     tracing::info!("Cancelling Reconciler");
    5928            0 :                     shard.cancel_reconciler();
    5929            0 :                     waiter
    5930            0 :                 }
    5931            0 :             }
    5932            0 :         };
    5933            0 : 
    5934            0 :         // Cancellation should be prompt.  If this fails we have still done our job of firing the
    5935            0 :         // cancellation token, but by returning an ApiError we will indicate to the caller that
    5936            0 :         // the Reconciler is misbehaving and not respecting the cancellation token
    5937            0 :         self.await_waiters(vec![waiter], SHORT_RECONCILE_TIMEOUT)
    5938            0 :             .await?;
    5939              : 
    5940            0 :         Ok(())
    5941            0 :     }
    5942              : 
    5943              :     /// This is for debug/support only: we simply drop all state for a tenant, without
    5944              :     /// detaching or deleting it on pageservers.
    5945            0 :     pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
    5946            0 :         self.persistence.delete_tenant(tenant_id).await?;
    5947              : 
    5948            0 :         let mut locked = self.inner.write().unwrap();
    5949            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    5950            0 :         let mut shards = Vec::new();
    5951            0 :         for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
    5952            0 :             shards.push(*tenant_shard_id);
    5953            0 :         }
    5954              : 
    5955            0 :         for shard_id in shards {
    5956            0 :             if let Some(mut shard) = tenants.remove(&shard_id) {
    5957            0 :                 shard.intent.clear(scheduler);
    5958            0 :             }
    5959              :         }
    5960              : 
    5961            0 :         Ok(())
    5962            0 :     }
    5963              : 
    5964              :     /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
    5965              :     /// tenant using the highest generation found in remote storage so that it will see the existing data.
    5966            0 :     pub(crate) async fn tenant_import(
    5967            0 :         &self,
    5968            0 :         tenant_id: TenantId,
    5969            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    5970            0 :         // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
    5971            0 :         let maybe_node = {
    5972            0 :             self.inner
    5973            0 :                 .read()
    5974            0 :                 .unwrap()
    5975            0 :                 .nodes
    5976            0 :                 .values()
    5977            0 :                 .find(|n| n.is_available())
    5978            0 :                 .cloned()
    5979              :         };
    5980            0 :         let Some(node) = maybe_node else {
    5981            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
    5982              :         };
    5983              : 
    5984            0 :         let client = PageserverClient::new(
    5985            0 :             node.get_id(),
    5986            0 :             self.http_client.clone(),
    5987            0 :             node.base_url(),
    5988            0 :             self.config.pageserver_jwt_token.as_deref(),
    5989            0 :         );
    5990              : 
    5991            0 :         let scan_result = client
    5992            0 :             .tenant_scan_remote_storage(tenant_id)
    5993            0 :             .await
    5994            0 :             .map_err(|e| passthrough_api_error(&node, e))?;
    5995              : 
    5996              :         // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
    5997            0 :         let Some(shard_count) = scan_result
    5998            0 :             .shards
    5999            0 :             .iter()
    6000            0 :             .map(|s| s.tenant_shard_id.shard_count)
    6001            0 :             .max()
    6002              :         else {
    6003            0 :             return Err(ApiError::NotFound(
    6004            0 :                 anyhow::anyhow!("No shards found").into(),
    6005            0 :             ));
    6006              :         };
    6007              : 
    6008              :         // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
    6009              :         // to give every shard the highest generation observed across all of the scanned shards.
    6010            0 :         let generation = scan_result
    6011            0 :             .shards
    6012            0 :             .iter()
    6013            0 :             .map(|s| s.generation)
    6014            0 :             .max()
    6015            0 :             .expect("We already validated >0 shards");
    6016            0 : 
    6017            0 :         // FIXME: we have no way to recover the shard stripe size from contents of remote storage: this will
    6018            0 :         // only work if they were using the default stripe size.
    6019            0 :         let stripe_size = ShardParameters::DEFAULT_STRIPE_SIZE;
    6020              : 
    6021            0 :         let (response, waiters) = self
    6022            0 :             .do_tenant_create(TenantCreateRequest {
    6023            0 :                 new_tenant_id: TenantShardId::unsharded(tenant_id),
    6024            0 :                 generation,
    6025            0 : 
    6026            0 :                 shard_parameters: ShardParameters {
    6027            0 :                     count: shard_count,
    6028            0 :                     stripe_size,
    6029            0 :                 },
    6030            0 :                 placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
    6031            0 :                 config: TenantConfig::default(),
    6032            0 :             })
    6033            0 :             .await?;
    6034              : 
    6035            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    6036              :             // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
    6037              :             // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
    6038              :             // reconcile, as reconciliation includes notifying compute.
    6039            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
    6040            0 :         }
    6041              : 
    6042            0 :         Ok(response)
    6043            0 :     }
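
The shard-count and generation selection in tenant_import can be illustrated in isolation. The sketch below uses a made-up ScannedShard type in place of the real scan response: the highest shard count wins (a post-split tenant can leave several counts behind in remote storage), and every shard is imported with the highest generation observed.

#[derive(Debug)]
struct ScannedShard {
    shard_count: u8,
    generation: u32,
}

// Returns None when the scan found no shards, matching the NotFound path above.
fn import_parameters(shards: &[ScannedShard]) -> Option<(u8, u32)> {
    let shard_count = shards.iter().map(|s| s.shard_count).max()?;
    let generation = shards.iter().map(|s| s.generation).max()?;
    Some((shard_count, generation))
}

fn main() {
    let scanned = vec![
        ScannedShard { shard_count: 2, generation: 7 },
        ScannedShard { shard_count: 4, generation: 5 },
    ];
    assert_eq!(import_parameters(&scanned), Some((4, 7)));
    assert_eq!(import_parameters(&[]), None);
}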
    6044              : 
    6045              :     /// For debug/support: a full JSON dump of TenantShards.  Returns a response so that
    6046              :     /// we don't have to make TenantShard clonable in the return path.
    6047            0 :     pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    6048            0 :         let serialized = {
    6049            0 :             let locked = self.inner.read().unwrap();
    6050            0 :             let result = locked.tenants.values().collect::<Vec<_>>();
    6051            0 :             serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
    6052              :         };
    6053              : 
    6054            0 :         hyper::Response::builder()
    6055            0 :             .status(hyper::StatusCode::OK)
    6056            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    6057            0 :             .body(hyper::Body::from(serialized))
    6058            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    6059            0 :     }
    6060              : 
    6061              :     /// Check the consistency of in-memory state vs. persistent state, and check that the
    6062              :     /// scheduler's statistics are up to date.
    6063              :     ///
    6064              :     /// These consistency checks expect an **idle** system.  If changes are going on while
    6065              :     /// we run, then we can falsely indicate a consistency issue.  This is sufficient for end-of-test
    6066              :     /// checks, but not suitable for running continuously in the background in the field.
    6067            0 :     pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
    6068            0 :         let (mut expect_nodes, mut expect_shards) = {
    6069            0 :             let locked = self.inner.read().unwrap();
    6070            0 : 
    6071            0 :             locked
    6072            0 :                 .scheduler
    6073            0 :                 .consistency_check(locked.nodes.values(), locked.tenants.values())
    6074            0 :                 .context("Scheduler checks")
    6075            0 :                 .map_err(ApiError::InternalServerError)?;
    6076              : 
    6077            0 :             let expect_nodes = locked
    6078            0 :                 .nodes
    6079            0 :                 .values()
    6080            0 :                 .map(|n| n.to_persistent())
    6081            0 :                 .collect::<Vec<_>>();
    6082            0 : 
    6083            0 :             let expect_shards = locked
    6084            0 :                 .tenants
    6085            0 :                 .values()
    6086            0 :                 .map(|t| t.to_persistent())
    6087            0 :                 .collect::<Vec<_>>();
    6088              : 
    6089              :             // This method can only validate the state of an idle system: if a reconcile is in
    6090              :             // progress, fail out early to avoid giving false errors on state that won't match
    6091              :             // between database and memory until its ReconcileResult is processed.
    6092            0 :             for t in locked.tenants.values() {
    6093            0 :                 if t.reconciler.is_some() {
    6094            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6095            0 :                         "Shard {} reconciliation in progress",
    6096            0 :                         t.tenant_shard_id
    6097            0 :                     )));
    6098            0 :                 }
    6099              :             }
    6100              : 
    6101            0 :             (expect_nodes, expect_shards)
    6102              :         };
    6103              : 
    6104            0 :         let mut nodes = self.persistence.list_nodes().await?;
    6105            0 :         expect_nodes.sort_by_key(|n| n.node_id);
    6106            0 :         nodes.sort_by_key(|n| n.node_id);
    6107              : 
    6108              :         // Errors relating to nodes are deferred so that we don't skip the shard checks below if we have a node error
    6109            0 :         let node_result = if nodes != expect_nodes {
    6110            0 :             tracing::error!("Consistency check failed on nodes.");
    6111            0 :             tracing::error!(
    6112            0 :                 "Nodes in memory: {}",
    6113            0 :                 serde_json::to_string(&expect_nodes)
    6114            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6115              :             );
    6116            0 :             tracing::error!(
    6117            0 :                 "Nodes in database: {}",
    6118            0 :                 serde_json::to_string(&nodes)
    6119            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6120              :             );
    6121            0 :             Err(ApiError::InternalServerError(anyhow::anyhow!(
    6122            0 :                 "Node consistency failure"
    6123            0 :             )))
    6124              :         } else {
    6125            0 :             Ok(())
    6126              :         };
    6127              : 
    6128            0 :         let mut persistent_shards = self.persistence.load_active_tenant_shards().await?;
    6129            0 :         persistent_shards
    6130            0 :             .sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    6131            0 : 
    6132            0 :         expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    6133              : 
    6134              :         // Because JSON contents of persistent tenants might disagree with the fields in current `TenantConfig`
    6135              :         // definition, we will do an encode/decode cycle to ensure any legacy fields are dropped and any new
    6136              :         // fields are added, before doing a comparison.
    6137            0 :         for tsp in &mut persistent_shards {
    6138            0 :             let config: TenantConfig = serde_json::from_str(&tsp.config)
    6139            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?;
    6140            0 :             tsp.config = serde_json::to_string(&config).expect("Encoding config is infallible");
    6141              :         }
    6142              : 
    6143            0 :         if persistent_shards != expect_shards {
    6144            0 :             tracing::error!("Consistency check failed on shards.");
    6145              : 
    6146            0 :             tracing::error!(
    6147            0 :                 "Shards in memory: {}",
    6148            0 :                 serde_json::to_string(&expect_shards)
    6149            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6150              :             );
    6151            0 :             tracing::error!(
    6152            0 :                 "Shards in database: {}",
    6153            0 :                 serde_json::to_string(&persistent_shards)
    6154            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    6155              :             );
    6156              : 
    6157              :             // The full-dump log lines above are useful in testing, but in the field Grafana will
    6158              :             // usually just drop them because they're so large. So we also explicitly log
    6159              :             // just the diffs.
    6160            0 :             let persistent_shards = persistent_shards
    6161            0 :                 .into_iter()
    6162            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    6163            0 :                 .collect::<HashMap<_, _>>();
    6164            0 :             let expect_shards = expect_shards
    6165            0 :                 .into_iter()
    6166            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    6167            0 :                 .collect::<HashMap<_, _>>();
    6168            0 :             for (tenant_shard_id, persistent_tsp) in &persistent_shards {
    6169            0 :                 match expect_shards.get(tenant_shard_id) {
    6170              :                     None => {
    6171            0 :                         tracing::error!(
    6172            0 :                             "Shard {} found in database but not in memory",
    6173              :                             tenant_shard_id
    6174              :                         );
    6175              :                     }
    6176            0 :                     Some(expect_tsp) => {
    6177            0 :                         if expect_tsp != persistent_tsp {
    6178            0 :                             tracing::error!(
    6179            0 :                                 "Shard {} is inconsistent.  In memory: {}, database has: {}",
    6180            0 :                                 tenant_shard_id,
    6181            0 :                                 serde_json::to_string(expect_tsp).unwrap(),
    6182            0 :                                 serde_json::to_string(&persistent_tsp).unwrap()
    6183              :                             );
    6184            0 :                         }
    6185              :                     }
    6186              :                 }
    6187              :             }
    6188              : 
    6189              :             // Having already logged any differences, log any shards that simply aren't present in the database
    6190            0 :             for (tenant_shard_id, memory_tsp) in &expect_shards {
    6191            0 :                 if !persistent_shards.contains_key(tenant_shard_id) {
    6192            0 :                     tracing::error!(
    6193            0 :                         "Shard {} found in memory but not in database: {}",
    6194            0 :                         tenant_shard_id,
    6195            0 :                         serde_json::to_string(memory_tsp)
    6196            0 :                             .map_err(|e| ApiError::InternalServerError(e.into()))?
    6197              :                     );
    6198            0 :                 }
    6199              :             }
    6200              : 
    6201            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6202            0 :                 "Shard consistency failure"
    6203            0 :             )));
    6204            0 :         }
    6205            0 : 
    6206            0 :         node_result
    6207            0 :     }
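
The encode/decode normalization step in consistency_check can be shown with a toy config type. This sketch assumes the serde (with derive) and serde_json crates and a hypothetical TenantConfigSketch struct: round-tripping the persisted JSON through the current struct definition drops legacy fields and fills in newly added ones, so old and new encodings of the same logical config compare equal.

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Default, PartialEq, Debug)]
#[serde(default)]
struct TenantConfigSketch {
    compaction_threshold: u32,
    // A field added after the row was written: defaults when absent in the database JSON.
    new_option: bool,
}

// Parse and re-encode the persisted JSON using the current struct definition.
fn normalize(persisted: &str) -> serde_json::Result<String> {
    let parsed: TenantConfigSketch = serde_json::from_str(persisted)?;
    serde_json::to_string(&parsed)
}

fn main() -> serde_json::Result<()> {
    // Legacy field `old_option` is dropped, missing `new_option` is defaulted.
    let from_db = r#"{"compaction_threshold":10,"old_option":true}"#;
    let in_memory = serde_json::to_string(&TenantConfigSketch {
        compaction_threshold: 10,
        new_option: false,
    })?;
    assert_eq!(normalize(from_db)?, in_memory);
    Ok(())
}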
    6208              : 
    6209              :     /// For debug/support: a JSON dump of the [`Scheduler`].  Returns a response so that
    6210              :     /// we don't have to make the scheduler state clonable in the return path.
    6211            0 :     pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    6212            0 :         let serialized = {
    6213            0 :             let locked = self.inner.read().unwrap();
    6214            0 :             serde_json::to_string(&locked.scheduler)
    6215            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?
    6216              :         };
    6217              : 
    6218            0 :         hyper::Response::builder()
    6219            0 :             .status(hyper::StatusCode::OK)
    6220            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    6221            0 :             .body(hyper::Body::from(serialized))
    6222            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    6223            0 :     }
    6224              : 
    6225              :     /// This is for debug/support only: we simply drop all state for a node, without
    6226              :     /// draining it or cleaning anything up on the pageserver itself.  We do not try to re-schedule any
    6227              :     /// tenants that were on this node.
    6228            0 :     pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
    6229            0 :         self.persistence.delete_node(node_id).await?;
    6230              : 
    6231            0 :         let mut locked = self.inner.write().unwrap();
    6232              : 
    6233            0 :         for shard in locked.tenants.values_mut() {
    6234            0 :             shard.deref_node(node_id);
    6235            0 :             shard.observed.locations.remove(&node_id);
    6236            0 :         }
    6237              : 
    6238            0 :         let mut nodes = (*locked.nodes).clone();
    6239            0 :         nodes.remove(&node_id);
    6240            0 :         locked.nodes = Arc::new(nodes);
    6241            0 :         metrics::METRICS_REGISTRY
    6242            0 :             .metrics_group
    6243            0 :             .storage_controller_pageserver_nodes
    6244            0 :             .set(locked.nodes.len() as i64);
    6245            0 : 
    6246            0 :         locked.scheduler.node_remove(node_id);
    6247            0 : 
    6248            0 :         Ok(())
    6249            0 :     }
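
node_drop updates `locked.nodes` with a copy-on-write pattern: clone the map behind the Arc, edit the copy, and swap in a new Arc, so readers that already cloned the old Arc keep a consistent snapshot. A simplified stand-alone sketch of that idea, with stand-in types rather than the real Node map:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

struct State {
    nodes: Arc<HashMap<u64, String>>,
}

fn remove_node(state: &RwLock<State>, node_id: u64) {
    let mut locked = state.write().unwrap();
    let mut nodes = (*locked.nodes).clone(); // deep copy of the map, not of the Arc
    nodes.remove(&node_id);
    locked.nodes = Arc::new(nodes); // readers holding the old Arc keep the old snapshot
}

fn main() {
    let state = RwLock::new(State {
        nodes: Arc::new(HashMap::from([(1, "ps-1".to_string()), (2, "ps-2".to_string())])),
    });

    // A reader takes a snapshot (clones the Arc) before the removal...
    let snapshot = state.read().unwrap().nodes.clone();
    remove_node(&state, 2);

    // ...and still sees both nodes, while new readers see only one.
    assert_eq!(snapshot.len(), 2);
    assert_eq!(state.read().unwrap().nodes.len(), 1);
}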
    6250              : 
    6251              :     /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense
    6252              :     /// that we don't leave any bad state behind in the storage controller, but unclean
    6253              :     /// in the sense that we are not carefully draining the node.
    6254            0 :     pub(crate) async fn node_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
    6255            0 :         let _node_lock =
    6256            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
    6257              : 
    6258              :         // 1. Atomically update in-memory state:
    6259              :         //    - set the scheduling state to Pause to make subsequent scheduling ops skip it
    6260              :         //    - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
    6261              :         //    - drop the node from the main nodes map, so that when running reconciles complete they do not
    6262              :         //      re-insert references to this node into the ObservedState of shards
    6263              :         //    - drop the node from the scheduler
    6264              :         {
    6265            0 :             let mut locked = self.inner.write().unwrap();
    6266            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6267            0 : 
    6268            0 :             {
    6269            0 :                 let mut nodes_mut = (*nodes).deref().clone();
    6270            0 :                 match nodes_mut.get_mut(&node_id) {
    6271            0 :                     Some(node) => {
    6272            0 :                         // We do not bother setting this in the database, because we're about to delete the row anyway, and
    6273            0 :                         // if we crash it would not be desirable to leave the node paused after a restart.
    6274            0 :                         node.set_scheduling(NodeSchedulingPolicy::Pause);
    6275            0 :                     }
    6276              :                     None => {
    6277            0 :                         tracing::info!(
    6278            0 :                             "Node not found: presuming this is a retry and returning success"
    6279              :                         );
    6280            0 :                         return Ok(());
    6281              :                     }
    6282              :                 }
    6283              : 
    6284            0 :                 *nodes = Arc::new(nodes_mut);
    6285              :             }
    6286              : 
    6287            0 :             for (_tenant_id, mut schedule_context, shards) in
    6288            0 :                 TenantShardContextIterator::new(tenants, ScheduleMode::Normal)
    6289              :             {
    6290            0 :                 for shard in shards {
    6291            0 :                     if shard.deref_node(node_id) {
    6292            0 :                         if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
    6293              :                             // TODO: implement force flag to remove a node even if we can't reschedule
    6294              :                             // a tenant
    6295            0 :                             tracing::error!(
    6296            0 :                                 "Refusing to delete node, shard {} can't be rescheduled: {e}",
    6297              :                                 shard.tenant_shard_id
    6298              :                             );
    6299            0 :                             return Err(e.into());
    6300              :                         } else {
    6301            0 :                             tracing::info!(
    6302            0 :                                 "Rescheduled shard {} away from node during deletion",
    6303              :                                 shard.tenant_shard_id
    6304              :                             )
    6305              :                         }
    6306              : 
    6307            0 :                         self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    6308            0 :                     }
    6309              : 
    6310              :                     // Here we remove an existing observed location for the node we're removing, and it will
    6311              :                     // not be re-added by a reconciler's completion because we filter out removed nodes in
    6312              :                     // process_result.
    6313              :                     //
    6314              :                     // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
    6315              :                     // means any reconciles we spawned will know about the node we're deleting, enabling them
    6316              :                     // to do live migrations if it's still online.
    6317            0 :                     shard.observed.locations.remove(&node_id);
    6318              :                 }
    6319              :             }
    6320              : 
    6321            0 :             scheduler.node_remove(node_id);
    6322            0 : 
    6323            0 :             {
    6324            0 :                 let mut nodes_mut = (**nodes).clone();
    6325            0 :                 if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    6326            0 :                     // Ensure that any reconciler holding an Arc<> to this node will
    6327            0 :                     // drop out when trying to RPC to it (setting Offline state sets the
    6328            0 :                     // cancellation token on the Node object).
    6329            0 :                     removed_node.set_availability(NodeAvailability::Offline);
    6330            0 :                 }
    6331            0 :                 *nodes = Arc::new(nodes_mut);
    6332            0 :                 metrics::METRICS_REGISTRY
    6333            0 :                     .metrics_group
    6334            0 :                     .storage_controller_pageserver_nodes
    6335            0 :                     .set(nodes.len() as i64);
    6336            0 :             }
    6337            0 :         }
    6338            0 : 
    6339            0 :         // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
    6340            0 :         // the removed node, as this column means "The pageserver to which this generation was issued", and
    6341            0 :         // their generations won't get updated until the reconcilers moving them away from this node complete.
    6342            0 :         // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
    6343            0 :         // that exists.
    6344            0 : 
    6345            0 :         // 2. Actually delete the node from the database and from in-memory state
    6346            0 :         tracing::info!("Deleting node from database");
    6347            0 :         self.persistence.delete_node(node_id).await?;
    6348              : 
    6349            0 :         Ok(())
    6350            0 :     }
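
The comment in node_delete about setting Offline so that reconcilers holding an Arc to the node "drop out" is essentially a cancellation-token pattern. A hedged sketch of that idea, assuming tokio (rt, macros, time features) and tokio-util; NodeSketch is a stand-in, not the controller's Node type:

use std::sync::Arc;
use tokio_util::sync::CancellationToken;

struct NodeSketch {
    name: String,
    cancel: CancellationToken,
}

impl NodeSketch {
    // Marking the node offline fires the token, so in-flight work against it gives up.
    fn set_offline(&self) {
        self.cancel.cancel();
    }
}

#[tokio::main]
async fn main() {
    let node = Arc::new(NodeSketch {
        name: "ps-1".to_string(),
        cancel: CancellationToken::new(),
    });

    // A "reconciler" that would talk to the node, but abandons its work once cancelled.
    let for_task = node.clone();
    let task = tokio::spawn(async move {
        tokio::select! {
            _ = for_task.cancel.cancelled() => format!("gave up talking to {}", for_task.name),
            _ = tokio::time::sleep(std::time::Duration::from_secs(60)) => "finished".to_string(),
        }
    });

    node.set_offline();
    assert_eq!(task.await.unwrap(), "gave up talking to ps-1");
}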
    6351              : 
    6352            0 :     pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
    6353            0 :         let nodes = {
    6354            0 :             self.inner
    6355            0 :                 .read()
    6356            0 :                 .unwrap()
    6357            0 :                 .nodes
    6358            0 :                 .values()
    6359            0 :                 .cloned()
    6360            0 :                 .collect::<Vec<_>>()
    6361            0 :         };
    6362            0 : 
    6363            0 :         Ok(nodes)
    6364            0 :     }
    6365              : 
    6366            0 :     pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
    6367            0 :         self.inner
    6368            0 :             .read()
    6369            0 :             .unwrap()
    6370            0 :             .nodes
    6371            0 :             .get(&node_id)
    6372            0 :             .cloned()
    6373            0 :             .ok_or(ApiError::NotFound(
    6374            0 :                 format!("Node {node_id} not registered").into(),
    6375            0 :             ))
    6376            0 :     }
    6377              : 
    6378            0 :     pub(crate) async fn get_node_shards(
    6379            0 :         &self,
    6380            0 :         node_id: NodeId,
    6381            0 :     ) -> Result<NodeShardResponse, ApiError> {
    6382            0 :         let locked = self.inner.read().unwrap();
    6383            0 :         let mut shards = Vec::new();
    6384            0 :         for (tid, tenant) in locked.tenants.iter() {
    6385            0 :             let is_intended_secondary = match (
    6386            0 :                 tenant.intent.get_attached() == &Some(node_id),
    6387            0 :                 tenant.intent.get_secondary().contains(&node_id),
    6388            0 :             ) {
    6389              :                 (true, true) => {
    6390            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6391            0 :                         "{} attached as primary+secondary on the same node",
    6392            0 :                         tid
    6393            0 :                     )));
    6394              :                 }
    6395            0 :                 (true, false) => Some(false),
    6396            0 :                 (false, true) => Some(true),
    6397            0 :                 (false, false) => None,
    6398              :             };
    6399            0 :             let is_observed_secondary = if let Some(ObservedStateLocation { conf: Some(conf) }) =
    6400            0 :                 tenant.observed.locations.get(&node_id)
    6401              :             {
    6402            0 :                 Some(conf.secondary_conf.is_some())
    6403              :             } else {
    6404            0 :                 None
    6405              :             };
    6406            0 :             if is_intended_secondary.is_some() || is_observed_secondary.is_some() {
    6407            0 :                 shards.push(NodeShard {
    6408            0 :                     tenant_shard_id: *tid,
    6409            0 :                     is_intended_secondary,
    6410            0 :                     is_observed_secondary,
    6411            0 :                 });
    6412            0 :             }
    6413              :         }
    6414            0 :         Ok(NodeShardResponse { node_id, shards })
    6415            0 :     }
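
The intent classification in get_node_shards is a small tuple match that is easy to test on its own. A stand-alone sketch: the same node being both the attached location and a secondary for one shard is an invariant violation, and the remaining cases map onto Some(is_secondary) or None.

fn classify(is_attached: bool, is_secondary: bool) -> Result<Option<bool>, &'static str> {
    match (is_attached, is_secondary) {
        (true, true) => Err("attached as primary+secondary on the same node"),
        (true, false) => Ok(Some(false)), // intended as the attached (primary) location
        (false, true) => Ok(Some(true)),  // intended as a secondary location
        (false, false) => Ok(None),       // the node is not in this shard's intent at all
    }
}

fn main() {
    assert_eq!(classify(true, false), Ok(Some(false)));
    assert_eq!(classify(false, true), Ok(Some(true)));
    assert_eq!(classify(false, false), Ok(None));
    assert!(classify(true, true).is_err());
}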
    6416              : 
    6417            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    6418            0 :         self.persistence.get_leader().await
    6419            0 :     }
    6420              : 
    6421            0 :     pub(crate) async fn node_register(
    6422            0 :         &self,
    6423            0 :         register_req: NodeRegisterRequest,
    6424            0 :     ) -> Result<(), ApiError> {
    6425            0 :         let _node_lock = trace_exclusive_lock(
    6426            0 :             &self.node_op_locks,
    6427            0 :             register_req.node_id,
    6428            0 :             NodeOperations::Register,
    6429            0 :         )
    6430            0 :         .await;
    6431              : 
    6432              :         #[derive(PartialEq)]
    6433              :         enum RegistrationStatus {
    6434              :             UpToDate,
    6435              :             NeedUpdate,
    6436              :             Mismatched,
    6437              :             New,
    6438              :         }
    6439              : 
    6440            0 :         let registration_status = {
    6441            0 :             let locked = self.inner.read().unwrap();
    6442            0 :             if let Some(node) = locked.nodes.get(&register_req.node_id) {
    6443            0 :                 if node.registration_match(&register_req) {
    6444            0 :                     if node.need_update(&register_req) {
    6445            0 :                         RegistrationStatus::NeedUpdate
    6446              :                     } else {
    6447            0 :                         RegistrationStatus::UpToDate
    6448              :                     }
    6449              :                 } else {
    6450            0 :                     RegistrationStatus::Mismatched
    6451              :                 }
    6452              :             } else {
    6453            0 :                 RegistrationStatus::New
    6454              :             }
    6455              :         };
    6456              : 
    6457            0 :         match registration_status {
    6458              :             RegistrationStatus::UpToDate => {
    6459            0 :                 tracing::info!(
    6460            0 :                     "Node {} re-registered with matching address and is up to date",
    6461              :                     register_req.node_id
    6462              :                 );
    6463              : 
    6464            0 :                 return Ok(());
    6465              :             }
    6466              :             RegistrationStatus::Mismatched => {
    6467              :                 // TODO: decide if we want to allow modifying node addresses without removing and re-adding
    6468              :                 // the node.  Safest/simplest thing is to refuse it, and usually we deploy with
    6469              :                 // a fixed address through the lifetime of a node.
    6470            0 :                 tracing::warn!(
    6471            0 :                     "Node {} tried to register with different address",
    6472              :                     register_req.node_id
    6473              :                 );
    6474            0 :                 return Err(ApiError::Conflict(
    6475            0 :                     "Node is already registered with different address".to_string(),
    6476            0 :                 ));
    6477              :             }
    6478            0 :             RegistrationStatus::New | RegistrationStatus::NeedUpdate => {
    6479            0 :                 // fallthrough
    6480            0 :             }
    6481            0 :         }
    6482            0 : 
    6483            0 :         // We do not require that a node is actually online when registered (it will start life
    6484            0 :         // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
    6485            0 :         // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
    6486            0 :         // that register themselves with a broken DNS config.  We check only the HTTP hostname, because
    6487            0 :         // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
    6488            0 :         if tokio::net::lookup_host(format!(
    6489            0 :             "{}:{}",
    6490            0 :             register_req.listen_http_addr, register_req.listen_http_port
    6491            0 :         ))
    6492            0 :         .await
    6493            0 :         .is_err()
    6494              :         {
    6495              :             // If we have a transient DNS issue, it's up to the caller to retry their registration.  Because
    6496              :             // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
    6497              :             // we return a soft 503 error, to encourage callers to retry past transient issues.
    6498            0 :             return Err(ApiError::ResourceUnavailable(
    6499            0 :                 format!(
    6500            0 :                     "Node {} tried to register with unknown DNS name '{}'",
    6501            0 :                     register_req.node_id, register_req.listen_http_addr
    6502            0 :                 )
    6503            0 :                 .into(),
    6504            0 :             ));
    6505            0 :         }
    6506            0 : 
    6507            0 :         if self.config.use_https_pageserver_api && register_req.listen_https_port.is_none() {
    6508            0 :             return Err(ApiError::PreconditionFailed(
    6509            0 :                 format!(
    6510            0 :                     "Node {} has no https port, but use_https is enabled",
    6511            0 :                     register_req.node_id
    6512            0 :                 )
    6513            0 :                 .into(),
    6514            0 :             ));
    6515            0 :         }
    6516            0 : 
    6517            0 :         // Ordering: we must persist the new node _before_ adding it to in-memory state.
    6518            0 :         // This ensures that before we use it for anything or expose it via any external
    6519            0 :         // API, it is guaranteed to be available after a restart.
    6520            0 :         let new_node = Node::new(
    6521            0 :             register_req.node_id,
    6522            0 :             register_req.listen_http_addr,
    6523            0 :             register_req.listen_http_port,
    6524            0 :             register_req.listen_https_port,
    6525            0 :             register_req.listen_pg_addr,
    6526            0 :             register_req.listen_pg_port,
    6527            0 :             register_req.availability_zone_id.clone(),
    6528            0 :             self.config.use_https_pageserver_api,
    6529            0 :         );
    6530            0 :         let new_node = match new_node {
    6531            0 :             Ok(new_node) => new_node,
    6532            0 :             Err(error) => return Err(ApiError::InternalServerError(error)),
    6533              :         };
    6534              : 
    6535            0 :         match registration_status {
    6536            0 :             RegistrationStatus::New => self.persistence.insert_node(&new_node).await?,
    6537              :             RegistrationStatus::NeedUpdate => {
    6538            0 :                 self.persistence
    6539            0 :                     .update_node_on_registration(
    6540            0 :                         register_req.node_id,
    6541            0 :                         register_req.listen_https_port,
    6542            0 :                     )
    6543            0 :                     .await?
    6544              :             }
    6545            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    6546              :         }
    6547              : 
    6548            0 :         let mut locked = self.inner.write().unwrap();
    6549            0 :         let mut new_nodes = (*locked.nodes).clone();
    6550            0 : 
    6551            0 :         locked.scheduler.node_upsert(&new_node);
    6552            0 :         new_nodes.insert(register_req.node_id, new_node);
    6553            0 : 
    6554            0 :         locked.nodes = Arc::new(new_nodes);
    6555            0 : 
    6556            0 :         metrics::METRICS_REGISTRY
    6557            0 :             .metrics_group
    6558            0 :             .storage_controller_pageserver_nodes
    6559            0 :             .set(locked.nodes.len() as i64);
    6560            0 : 
    6561            0 :         match registration_status {
    6562              :             RegistrationStatus::New => {
    6563            0 :                 tracing::info!(
    6564            0 :                     "Registered pageserver {} ({}), now have {} pageservers",
    6565            0 :                     register_req.node_id,
    6566            0 :                     register_req.availability_zone_id,
    6567            0 :                     locked.nodes.len()
    6568              :                 );
    6569              :             }
    6570              :             RegistrationStatus::NeedUpdate => {
    6571            0 :                 tracing::info!(
    6572            0 :                     "Re-registered and updated node {} ({})",
    6573              :                     register_req.node_id,
    6574              :                     register_req.availability_zone_id,
    6575              :                 );
    6576              :             }
    6577            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    6578              :         }
    6579            0 :         Ok(())
    6580            0 :     }
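
The DNS pre-check in node_register can be exercised on its own with tokio::net::lookup_host. A small sketch, assuming a tokio runtime with the net, macros and rt features; check_registration_dns is a hypothetical helper, and resolution failures are reported as retryable, mirroring the soft 503 above.

// Resolve the advertised HTTP host:port; a failure is treated as retryable
// rather than accepting a node whose name nobody can resolve.
async fn check_registration_dns(http_addr: &str, http_port: u16) -> Result<(), String> {
    match tokio::net::lookup_host(format!("{http_addr}:{http_port}")).await {
        Ok(_) => Ok(()),
        Err(e) => Err(format!(
            "node advertised unresolvable DNS name '{http_addr}': {e} (caller should retry)"
        )),
    }
}

#[tokio::main]
async fn main() {
    // localhost should always resolve; the .invalid TLD is reserved and never resolves (RFC 2606).
    assert!(check_registration_dns("localhost", 8080).await.is_ok());
    assert!(
        check_registration_dns("does-not-exist.invalid", 8080)
            .await
            .is_err()
    );
}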
    6581              : 
    6582              :     /// Configure in-memory and persistent state of a node as requested
    6583              :     ///
    6584              :     /// Note that this function does not trigger any immediate side effects in response
    6585              :     /// to the changes. That part is handled by [`Self::handle_node_availability_transition`].
    6586            0 :     async fn node_state_configure(
    6587            0 :         &self,
    6588            0 :         node_id: NodeId,
    6589            0 :         availability: Option<NodeAvailability>,
    6590            0 :         scheduling: Option<NodeSchedulingPolicy>,
    6591            0 :         node_lock: &TracingExclusiveGuard<NodeOperations>,
    6592            0 :     ) -> Result<AvailabilityTransition, ApiError> {
    6593            0 :         if let Some(scheduling) = scheduling {
    6594              :             // Scheduling is a persistent part of Node: we must write updates to the database before
    6595              :             // applying them in memory
    6596            0 :             self.persistence
    6597            0 :                 .update_node_scheduling_policy(node_id, scheduling)
    6598            0 :                 .await?;
    6599            0 :         }
    6600              : 
    6601              :         // If we're activating a node, then before setting it active we must reconcile any shard locations
    6602              :         // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
    6603              :         // by calling [`Self::node_activate_reconcile`]
    6604              :         //
    6605              :         // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
    6606              :         // nothing else can mutate its availability while we run.
    6607            0 :         let availability_transition = if let Some(input_availability) = availability.as_ref() {
    6608            0 :             let (activate_node, availability_transition) = {
    6609            0 :                 let locked = self.inner.read().unwrap();
    6610            0 :                 let Some(node) = locked.nodes.get(&node_id) else {
    6611            0 :                     return Err(ApiError::NotFound(
    6612            0 :                         anyhow::anyhow!("Node {} not registered", node_id).into(),
    6613            0 :                     ));
    6614              :                 };
    6615              : 
    6616            0 :                 (
    6617            0 :                     node.clone(),
    6618            0 :                     node.get_availability_transition(input_availability),
    6619            0 :                 )
    6620              :             };
    6621              : 
    6622            0 :             if matches!(availability_transition, AvailabilityTransition::ToActive) {
    6623            0 :                 self.node_activate_reconcile(activate_node, node_lock)
    6624            0 :                     .await?;
    6625            0 :             }
    6626            0 :             availability_transition
    6627              :         } else {
    6628            0 :             AvailabilityTransition::Unchanged
    6629              :         };
    6630              : 
    6631              :         // Apply changes from the request to our in-memory state for the Node
    6632            0 :         let mut locked = self.inner.write().unwrap();
    6633            0 :         let (nodes, _tenants, scheduler) = locked.parts_mut();
    6634            0 : 
    6635            0 :         let mut new_nodes = (**nodes).clone();
    6636              : 
    6637            0 :         let Some(node) = new_nodes.get_mut(&node_id) else {
    6638            0 :             return Err(ApiError::NotFound(
    6639            0 :                 anyhow::anyhow!("Node not registered").into(),
    6640            0 :             ));
    6641              :         };
    6642              : 
    6643            0 :         if let Some(availability) = availability {
    6644            0 :             node.set_availability(availability);
    6645            0 :         }
    6646              : 
    6647            0 :         if let Some(scheduling) = scheduling {
    6648            0 :             node.set_scheduling(scheduling);
    6649            0 :         }
    6650              : 
    6651              :         // Update the scheduler, in case the eligibility of the node for new shards has changed
    6652            0 :         scheduler.node_upsert(node);
    6653            0 : 
    6654            0 :         let new_nodes = Arc::new(new_nodes);
    6655            0 :         locked.nodes = new_nodes;
    6656            0 : 
    6657            0 :         Ok(availability_transition)
    6658            0 :     }
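
Both node_state_configure and node_register follow a "persist first, then mutate memory" ordering: the durable write happens before the in-memory state that other code reads is touched, so a crash between the two steps leaves memory stale (rebuilt at startup) rather than the database stale. A sketch of that ordering, with stand-in types instead of the real persistence layer:

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
enum SchedulingPolicy {
    Active,
    Pause,
}

struct FakeDb {
    rows: HashMap<u64, SchedulingPolicy>,
}

impl FakeDb {
    fn update_policy(&mut self, node_id: u64, policy: SchedulingPolicy) -> Result<(), String> {
        self.rows.insert(node_id, policy);
        Ok(())
    }
}

fn configure_node(
    db: &mut FakeDb,
    in_memory: &mut HashMap<u64, SchedulingPolicy>,
    node_id: u64,
    policy: SchedulingPolicy,
) -> Result<(), String> {
    // 1. Durable write first: if this fails, memory is untouched.
    db.update_policy(node_id, policy)?;
    // 2. Only then update the in-memory view that the rest of the controller reads.
    in_memory.insert(node_id, policy);
    Ok(())
}

fn main() {
    let mut db = FakeDb { rows: HashMap::new() };
    let mut mem = HashMap::new();
    configure_node(&mut db, &mut mem, 1, SchedulingPolicy::Pause).unwrap();
    assert_eq!(db.rows.get(&1), mem.get(&1));
    assert_ne!(mem.get(&1), Some(&SchedulingPolicy::Active));
}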
    6659              : 
    6660              :     /// Handle availability transition of one node
    6661              :     ///
    6662              :     /// Note that you should first call [`Self::node_state_configure`] to update
    6663              :     /// the in-memory state referencing that node. If you need to handle more than one transition
    6664              :     /// consider using [`Self::handle_node_availability_transitions`].
    6665            0 :     async fn handle_node_availability_transition(
    6666            0 :         &self,
    6667            0 :         node_id: NodeId,
    6668            0 :         transition: AvailabilityTransition,
    6669            0 :         _node_lock: &TracingExclusiveGuard<NodeOperations>,
    6670            0 :     ) -> Result<(), ApiError> {
    6671            0 :         // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
    6672            0 :         match transition {
    6673              :             AvailabilityTransition::ToOffline => {
    6674            0 :                 tracing::info!("Node {} transition to offline", node_id);
    6675              : 
    6676            0 :                 let mut locked = self.inner.write().unwrap();
    6677            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    6678            0 : 
    6679            0 :                 let mut tenants_affected: usize = 0;
    6680              : 
    6681            0 :                 for (_tenant_id, mut schedule_context, shards) in
    6682            0 :                     TenantShardContextIterator::new(tenants, ScheduleMode::Normal)
    6683              :                 {
    6684            0 :                     for tenant_shard in shards {
    6685            0 :                         let tenant_shard_id = tenant_shard.tenant_shard_id;
    6686            0 :                         if let Some(observed_loc) =
    6687            0 :                             tenant_shard.observed.locations.get_mut(&node_id)
    6688            0 :                         {
    6689            0 :                             // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
    6690            0 :                             // not assume our knowledge of the node's configuration is accurate until it comes back online
    6691            0 :                             observed_loc.conf = None;
    6692            0 :                         }
    6693              : 
    6694            0 :                         if nodes.len() == 1 {
    6695              :                             // Special case for single-node cluster: there is no point trying to reschedule
    6696              :                             // any tenant shards: avoid doing so, in order to avoid spewing warnings about
    6697              :                             // failures to schedule them.
    6698            0 :                             continue;
    6699            0 :                         }
    6700            0 : 
    6701            0 :                         if !nodes
    6702            0 :                             .values()
    6703            0 :                             .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    6704              :                         {
    6705              :                             // Special case for when all nodes are unavailable and/or unschedulable: there is no point
    6706              :                             // trying to reschedule since there's nowhere else to go. Without this
    6707              :                             // branch we would incorrectly detach tenants in response to node unavailability.
    6708            0 :                             continue;
    6709            0 :                         }
    6710            0 : 
    6711            0 :                         if tenant_shard.intent.demote_attached(scheduler, node_id) {
    6712            0 :                             tenant_shard.sequence = tenant_shard.sequence.next();
    6713            0 : 
    6714            0 :                             match tenant_shard.schedule(scheduler, &mut schedule_context) {
    6715            0 :                                 Err(e) => {
    6716            0 :                                     // It is possible that some tenants will become unschedulable when too many pageservers
    6717            0 :                                     // go offline: in this case there isn't much we can do other than make the issue observable.
    6718            0 :                                     // TODO: give TenantShard a scheduling error attribute to be queried later.
    6719            0 :                                     tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
    6720              :                                 }
    6721              :                                 Ok(()) => {
    6722            0 :                                     if self
    6723            0 :                                         .maybe_reconcile_shard(
    6724            0 :                                             tenant_shard,
    6725            0 :                                             nodes,
    6726            0 :                                             ReconcilerPriority::Normal,
    6727            0 :                                         )
    6728            0 :                                         .is_some()
    6729            0 :                                     {
    6730            0 :                                         tenants_affected += 1;
    6731            0 :                                     };
    6732              :                                 }
    6733              :                             }
    6734            0 :                         }
    6735              :                     }
    6736              :                 }
    6737            0 :                 tracing::info!(
    6738            0 :                     "Launched {} reconciler tasks for tenants affected by node {} going offline",
    6739              :                     tenants_affected,
    6740              :                     node_id
    6741              :                 )
    6742              :             }
    6743              :             AvailabilityTransition::ToActive => {
    6744            0 :                 tracing::info!("Node {} transition to active", node_id);
    6745              : 
    6746            0 :                 let mut locked = self.inner.write().unwrap();
    6747            0 :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    6748              : 
    6749              :                 // When a node comes back online, we must reconcile any tenant that has a None observed
    6750              :                 // location on the node.
    6751            0 :                 for tenant_shard in tenants.values_mut() {
    6752              :                     // If a reconciliation is already in progress, rely on the previous scheduling
    6753              :                     // decision and skip triggering a new reconciliation.
    6754            0 :                     if tenant_shard.reconciler.is_some() {
    6755            0 :                         continue;
    6756            0 :                     }
    6757              : 
    6758            0 :                     if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
    6759            0 :                         if observed_loc.conf.is_none() {
    6760            0 :                             self.maybe_reconcile_shard(
    6761            0 :                                 tenant_shard,
    6762            0 :                                 nodes,
    6763            0 :                                 ReconcilerPriority::Normal,
    6764            0 :                             );
    6765            0 :                         }
    6766            0 :                     }
    6767              :                 }
    6768              : 
    6769              :                 // TODO: in the background, we should balance work back onto this pageserver
    6770              :             }
    6771              :             // No action required for the intermediate unavailable state.
    6772              :             // When we transition into active or offline from the unavailable state,
    6773              :             // the correct handling above will kick in.
    6774              :             AvailabilityTransition::ToWarmingUpFromActive => {
    6775            0 :                 tracing::info!("Node {} transition to unavailable from active", node_id);
    6776              :             }
    6777              :             AvailabilityTransition::ToWarmingUpFromOffline => {
    6778            0 :                 tracing::info!("Node {} transition to unavailable from offline", node_id);
    6779              :             }
    6780              :             AvailabilityTransition::Unchanged => {
    6781            0 :                 tracing::debug!("Node {} no availability change during config", node_id);
    6782              :             }
    6783              :         }
    6784              : 
    6785            0 :         Ok(())
    6786            0 :     }
    6787              : 
    6788              :     /// Handle availability transition for multiple nodes
    6789              :     ///
    6790              :     /// Note that you should first call [`Self::node_state_configure`] for
    6791              :     /// all nodes being handled here for the handling to use fresh in-memory state.
    6792            0 :     async fn handle_node_availability_transitions(
    6793            0 :         &self,
    6794            0 :         transitions: Vec<(
    6795            0 :             NodeId,
    6796            0 :             TracingExclusiveGuard<NodeOperations>,
    6797            0 :             AvailabilityTransition,
    6798            0 :         )>,
    6799            0 :     ) -> Result<(), Vec<(NodeId, ApiError)>> {
    6800            0 :         let mut errors = Vec::default();
    6801            0 :         for (node_id, node_lock, transition) in transitions {
    6802            0 :             let res = self
    6803            0 :                 .handle_node_availability_transition(node_id, transition, &node_lock)
    6804            0 :                 .await;
    6805            0 :             if let Err(err) = res {
    6806            0 :                 errors.push((node_id, err));
    6807            0 :             }
    6808              :         }
    6809              : 
    6810            0 :         if errors.is_empty() {
    6811            0 :             Ok(())
    6812              :         } else {
    6813            0 :             Err(errors)
    6814              :         }
    6815            0 :     }
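
handle_node_availability_transitions processes every node even if some fail, and only reports failure at the end with the full list of per-item errors. The same shape, reduced to a small generic sketch:

fn apply_all<T, E>(
    items: Vec<T>,
    mut apply: impl FnMut(&T) -> Result<(), E>,
) -> Result<(), Vec<(usize, E)>> {
    let mut errors = Vec::new();
    for (idx, item) in items.iter().enumerate() {
        // Keep going on failure: collect the error instead of returning early.
        if let Err(e) = apply(item) {
            errors.push((idx, e));
        }
    }
    if errors.is_empty() { Ok(()) } else { Err(errors) }
}

fn main() {
    let result = apply_all(vec![1, 2, 3, 4], |n| {
        if n % 2 == 0 { Err(format!("{n} is even")) } else { Ok(()) }
    });
    // Items 2 and 4 failed, but all four were processed.
    assert_eq!(result.unwrap_err().len(), 2);
}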
    6816              : 
    6817            0 :     pub(crate) async fn node_configure(
    6818            0 :         &self,
    6819            0 :         node_id: NodeId,
    6820            0 :         availability: Option<NodeAvailability>,
    6821            0 :         scheduling: Option<NodeSchedulingPolicy>,
    6822            0 :     ) -> Result<(), ApiError> {
    6823            0 :         let node_lock =
    6824            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
    6825              : 
    6826            0 :         let transition = self
    6827            0 :             .node_state_configure(node_id, availability, scheduling, &node_lock)
    6828            0 :             .await?;
    6829            0 :         self.handle_node_availability_transition(node_id, transition, &node_lock)
    6830            0 :             .await
    6831            0 :     }
    6832              : 
    6833              :     /// Wrapper around [`Self::node_configure`], used by the HTTP API, which only allows changes
    6834              :     /// while there is no ongoing background operation.
    6835            0 :     pub(crate) async fn external_node_configure(
    6836            0 :         &self,
    6837            0 :         node_id: NodeId,
    6838            0 :         availability: Option<NodeAvailability>,
    6839            0 :         scheduling: Option<NodeSchedulingPolicy>,
    6840            0 :     ) -> Result<(), ApiError> {
    6841            0 :         {
    6842            0 :             let locked = self.inner.read().unwrap();
    6843            0 :             if let Some(op) = locked.ongoing_operation.as_ref().map(|op| op.operation) {
    6844            0 :                 return Err(ApiError::PreconditionFailed(
    6845            0 :                     format!("Ongoing background operation forbids configuring: {op}").into(),
    6846            0 :                 ));
    6847            0 :             }
    6848            0 :         }
    6849            0 : 
    6850            0 :         self.node_configure(node_id, availability, scheduling).await
    6851            0 :     }
    6852              : 
    6853            0 :     pub(crate) async fn start_node_drain(
    6854            0 :         self: &Arc<Self>,
    6855            0 :         node_id: NodeId,
    6856            0 :     ) -> Result<(), ApiError> {
    6857            0 :         let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
    6858            0 :             let locked = self.inner.read().unwrap();
    6859            0 :             let nodes = &locked.nodes;
    6860            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    6861            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    6862            0 :             ))?;
    6863            0 :             let schedulable_nodes_count = nodes
    6864            0 :                 .iter()
    6865            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    6866            0 :                 .count();
    6867            0 : 
    6868            0 :             (
    6869            0 :                 locked
    6870            0 :                     .ongoing_operation
    6871            0 :                     .as_ref()
    6872            0 :                     .map(|ongoing| ongoing.operation),
    6873            0 :                 node.is_available(),
    6874            0 :                 node.get_scheduling(),
    6875            0 :                 schedulable_nodes_count,
    6876            0 :             )
    6877            0 :         };
    6878              : 
    6879            0 :         if let Some(ongoing) = ongoing_op {
    6880            0 :             return Err(ApiError::PreconditionFailed(
    6881            0 :                 format!("Background operation already ongoing for node: {}", ongoing).into(),
    6882            0 :             ));
    6883            0 :         }
    6884            0 : 
    6885            0 :         if !node_available {
    6886            0 :             return Err(ApiError::ResourceUnavailable(
    6887            0 :                 format!("Node {node_id} is currently unavailable").into(),
    6888            0 :             ));
    6889            0 :         }
    6890            0 : 
    6891            0 :         if schedulable_nodes_count == 0 {
    6892            0 :             return Err(ApiError::PreconditionFailed(
    6893            0 :                 "No other schedulable nodes to drain to".into(),
    6894            0 :             ));
    6895            0 :         }
    6896            0 : 
    6897            0 :         match node_policy {
    6898              :             NodeSchedulingPolicy::Active => {
    6899            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
    6900            0 :                     .await?;
    6901              : 
    6902            0 :                 let cancel = self.cancel.child_token();
    6903            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    6904              : 
    6905            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    6906            0 :                     operation: Operation::Drain(Drain { node_id }),
    6907            0 :                     cancel: cancel.clone(),
    6908            0 :                 });
    6909              : 
    6910            0 :                 let span = tracing::info_span!(parent: None, "drain_node", %node_id);
    6911              : 
    6912            0 :                 tokio::task::spawn({
    6913            0 :                     let service = self.clone();
    6914            0 :                     let cancel = cancel.clone();
    6915            0 :                     async move {
    6916            0 :                         let _gate_guard = gate_guard;
    6917            0 : 
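                      :                         // The deferred block below clears the ongoing_operation slot when
                      :                         // this task exits for any reason (success, failure, or cancellation),
                      :                         // and asserts that the slot still held this node's drain operation.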
    6918            0 :                         scopeguard::defer! {
    6919            0 :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    6920            0 : 
    6921            0 :                             if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
    6922            0 :                                 assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
    6923            0 :                             } else {
    6924            0 :                                 panic!("We always remove the same operation")
    6925            0 :                             }
    6926            0 :                         }
    6927            0 : 
    6928            0 :                         tracing::info!("Drain background operation starting");
    6929            0 :                         let res = service.drain_node(node_id, cancel).await;
    6930            0 :                         match res {
    6931              :                             Ok(()) => {
    6932            0 :                                 tracing::info!("Drain background operation completed successfully");
    6933              :                             }
    6934              :                             Err(OperationError::Cancelled) => {
    6935            0 :                                 tracing::info!("Drain background operation was cancelled");
    6936              :                             }
    6937            0 :                             Err(err) => {
    6938            0 :                                 tracing::error!("Drain background operation encountered: {err}")
    6939              :                             }
    6940              :                         }
    6941            0 :                     }
    6942            0 :                 }.instrument(span));
    6943            0 :             }
    6944              :             NodeSchedulingPolicy::Draining => {
    6945            0 :                 return Err(ApiError::Conflict(format!(
    6946            0 :                     "Node {node_id} has drain in progress"
    6947            0 :                 )));
    6948              :             }
    6949            0 :             policy => {
    6950            0 :                 return Err(ApiError::PreconditionFailed(
    6951            0 :                     format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
    6952            0 :                 ));
    6953              :             }
    6954              :         }
    6955              : 
    6956            0 :         Ok(())
    6957            0 :     }
    6958              : 
    6959            0 :     pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
    6960            0 :         let node_available = {
    6961            0 :             let locked = self.inner.read().unwrap();
    6962            0 :             let nodes = &locked.nodes;
    6963            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    6964            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    6965            0 :             ))?;
    6966              : 
    6967            0 :             node.is_available()
    6968            0 :         };
    6969            0 : 
    6970            0 :         if !node_available {
    6971            0 :             return Err(ApiError::ResourceUnavailable(
    6972            0 :                 format!("Node {node_id} is currently unavailable").into(),
    6973            0 :             ));
    6974            0 :         }
    6975              : 
    6976            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    6977            0 :             if let Operation::Drain(drain) = op_handler.operation {
    6978            0 :                 if drain.node_id == node_id {
    6979            0 :                     tracing::info!("Cancelling background drain operation for node {node_id}");
    6980            0 :                     op_handler.cancel.cancel();
    6981            0 :                     return Ok(());
    6982            0 :                 }
    6983            0 :             }
    6984            0 :         }
    6985              : 
    6986            0 :         Err(ApiError::PreconditionFailed(
    6987            0 :             format!("Node {node_id} has no drain in progress").into(),
    6988            0 :         ))
    6989            0 :     }
    6990              : 
    6991            0 :     pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
    6992            0 :         let (ongoing_op, node_available, node_policy, total_nodes_count) = {
    6993            0 :             let locked = self.inner.read().unwrap();
    6994            0 :             let nodes = &locked.nodes;
    6995            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    6996            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    6997            0 :             ))?;
    6998              : 
    6999            0 :             (
    7000            0 :                 locked
    7001            0 :                     .ongoing_operation
    7002            0 :                     .as_ref()
    7003            0 :                     .map(|ongoing| ongoing.operation),
    7004            0 :                 node.is_available(),
    7005            0 :                 node.get_scheduling(),
    7006            0 :                 nodes.len(),
    7007            0 :             )
    7008            0 :         };
    7009              : 
    7010            0 :         if let Some(ongoing) = ongoing_op {
    7011            0 :             return Err(ApiError::PreconditionFailed(
    7012            0 :                 format!("Background operation already ongoing for node: {}", ongoing).into(),
    7013            0 :             ));
    7014            0 :         }
    7015            0 : 
    7016            0 :         if !node_available {
    7017            0 :             return Err(ApiError::ResourceUnavailable(
    7018            0 :                 format!("Node {node_id} is currently unavailable").into(),
    7019            0 :             ));
    7020            0 :         }
    7021            0 : 
    7022            0 :         if total_nodes_count <= 1 {
    7023            0 :             return Err(ApiError::PreconditionFailed(
    7024            0 :                 "No other nodes to fill from".into(),
    7025            0 :             ));
    7026            0 :         }
    7027            0 : 
    7028            0 :         match node_policy {
    7029              :             NodeSchedulingPolicy::Active => {
    7030            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
    7031            0 :                     .await?;
    7032              : 
    7033            0 :                 let cancel = self.cancel.child_token();
    7034            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    7035              : 
    7036            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    7037            0 :                     operation: Operation::Fill(Fill { node_id }),
    7038            0 :                     cancel: cancel.clone(),
    7039            0 :                 });
    7040              : 
    7041            0 :                 let span = tracing::info_span!(parent: None, "fill_node", %node_id);
    7042              : 
    7043            0 :                 tokio::task::spawn({
    7044            0 :                     let service = self.clone();
    7045            0 :                     let cancel = cancel.clone();
    7046            0 :                     async move {
    7047            0 :                         let _gate_guard = gate_guard;
    7048            0 : 
    7049            0 :                         scopeguard::defer! {
    7050            0 :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    7051            0 : 
    7052            0 :                             if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
    7053            0 :                                 assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
    7054            0 :                             } else {
    7055            0 :                                 panic!("We always remove the same operation")
    7056            0 :                             }
    7057            0 :                         }
    7058            0 : 
    7059            0 :                         tracing::info!("Fill background operation starting");
    7060            0 :                         let res = service.fill_node(node_id, cancel).await;
    7061            0 :                         match res {
    7062              :                             Ok(()) => {
    7063            0 :                                 tracing::info!("Fill background operation completed successfully");
    7064              :                             }
    7065              :                             Err(OperationError::Cancelled) => {
    7066            0 :                                 tracing::info!("Fill background operation was cancelled");
    7067              :                             }
    7068            0 :                             Err(err) => {
    7069            0 :                                 tracing::error!("Fill background operation encountered: {err}")
    7070              :                             }
    7071              :                         }
    7072            0 :                     }
    7073            0 :                 }.instrument(span));
    7074            0 :             }
    7075              :             NodeSchedulingPolicy::Filling => {
    7076            0 :                 return Err(ApiError::Conflict(format!(
    7077            0 :                     "Node {node_id} has fill in progress"
    7078            0 :                 )));
    7079              :             }
    7080            0 :             policy => {
    7081            0 :                 return Err(ApiError::PreconditionFailed(
    7082            0 :                     format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
    7083            0 :                 ));
    7084              :             }
    7085              :         }
    7086              : 
    7087            0 :         Ok(())
    7088            0 :     }
    7089              : 
    7090            0 :     pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
    7091            0 :         let node_available = {
    7092            0 :             let locked = self.inner.read().unwrap();
    7093            0 :             let nodes = &locked.nodes;
    7094            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    7095            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    7096            0 :             ))?;
    7097              : 
    7098            0 :             node.is_available()
    7099            0 :         };
    7100            0 : 
    7101            0 :         if !node_available {
    7102            0 :             return Err(ApiError::ResourceUnavailable(
    7103            0 :                 format!("Node {node_id} is currently unavailable").into(),
    7104            0 :             ));
    7105            0 :         }
    7106              : 
    7107            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    7108            0 :             if let Operation::Fill(fill) = op_handler.operation {
    7109            0 :                 if fill.node_id == node_id {
    7110            0 :                     tracing::info!("Cancelling background fill operation for node {node_id}");
    7111            0 :                     op_handler.cancel.cancel();
    7112            0 :                     return Ok(());
    7113            0 :                 }
    7114            0 :             }
    7115            0 :         }
    7116              : 
    7117            0 :         Err(ApiError::PreconditionFailed(
    7118            0 :             format!("Node {node_id} has no fill in progress").into(),
    7119            0 :         ))
    7120            0 :     }
    7121              : 
    7122              :     /// Like [`Self::maybe_configured_reconcile_shard`], but uses the default reconciler
    7123              :     /// configuration
    7124            0 :     fn maybe_reconcile_shard(
    7125            0 :         &self,
    7126            0 :         shard: &mut TenantShard,
    7127            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    7128            0 :         priority: ReconcilerPriority,
    7129            0 :     ) -> Option<ReconcilerWaiter> {
    7130            0 :         self.maybe_configured_reconcile_shard(shard, nodes, ReconcilerConfig::new(priority))
    7131            0 :     }
    7132              : 
    7133              :     /// Before constructing a Reconciler, acquire semaphore units from the appropriate concurrency limit (depends on priority)
    7134            0 :     fn get_reconciler_units(
    7135            0 :         &self,
    7136            0 :         priority: ReconcilerPriority,
    7137            0 :     ) -> Result<ReconcileUnits, TryAcquireError> {
    7138            0 :         let units = match priority {
    7139            0 :             ReconcilerPriority::Normal => self.reconciler_concurrency.clone().try_acquire_owned(),
    7140              :             ReconcilerPriority::High => {
    7141            0 :                 match self
    7142            0 :                     .priority_reconciler_concurrency
    7143            0 :                     .clone()
    7144            0 :                     .try_acquire_owned()
    7145              :                 {
    7146            0 :                     Ok(u) => Ok(u),
    7147              :                     Err(TryAcquireError::NoPermits) => {
    7148              :                         // If the high priority semaphore is exhausted, then high priority tasks may steal units from
    7149              :                         // the normal priority semaphore.
    7150            0 :                         self.reconciler_concurrency.clone().try_acquire_owned()
    7151              :                     }
    7152            0 :                     Err(e) => Err(e),
    7153              :                 }
    7154              :             }
    7155              :         };
    7156              : 
    7157            0 :         units.map(ReconcileUnits::new)
    7158            0 :     }
    7159              : 
    7160              :     /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`].
    7161            0 :     fn maybe_configured_reconcile_shard(
    7162            0 :         &self,
    7163            0 :         shard: &mut TenantShard,
    7164            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    7165            0 :         reconciler_config: ReconcilerConfig,
    7166            0 :     ) -> Option<ReconcilerWaiter> {
    7167            0 :         let reconcile_needed = shard.get_reconcile_needed(nodes);
    7168              : 
    7169            0 :         let reconcile_reason = match reconcile_needed {
    7170            0 :             ReconcileNeeded::No => return None,
    7171            0 :             ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
    7172            0 :             ReconcileNeeded::Yes(reason) => {
    7173            0 :                 // Fall through to try and acquire units for spawning reconciler
    7174            0 :                 reason
    7175              :             }
    7176              :         };
    7177              : 
    7178            0 :         let units = match self.get_reconciler_units(reconciler_config.priority) {
    7179            0 :             Ok(u) => u,
    7180              :             Err(_) => {
    7181            0 :                 tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
    7182            0 :                     "Concurrency limited: enqueued for reconcile later");
    7183            0 :                 if !shard.delayed_reconcile {
    7184            0 :                     match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
    7185            0 :                         Err(TrySendError::Closed(_)) => {
    7186            0 :                             // Weird mid-shutdown case?
    7187            0 :                         }
    7188              :                         Err(TrySendError::Full(_)) => {
    7189              :                             // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
    7190            0 :                             tracing::warn!(
    7191            0 :                                 "Many shards are waiting to reconcile: delayed_reconcile queue is full"
    7192              :                             );
    7193              :                         }
    7194            0 :                         Ok(()) => {
    7195            0 :                             shard.delayed_reconcile = true;
    7196            0 :                         }
    7197              :                     }
    7198            0 :                 }
    7199              : 
    7200              :                 // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
    7201              :                 // number to advance.  When this function is eventually called again and succeeds in getting units,
    7202              :                 // it will spawn a reconciler that makes this waiter complete.
    7203            0 :                 return Some(shard.future_reconcile_waiter());
    7204              :             }
    7205              :         };
    7206              : 
    7207            0 :         let Ok(gate_guard) = self.reconcilers_gate.enter() else {
    7208              :             // Gate closed: we're shutting down, drop out.
    7209            0 :             return None;
    7210              :         };
    7211              : 
    7212            0 :         shard.spawn_reconciler(
    7213            0 :             reconcile_reason,
    7214            0 :             &self.result_tx,
    7215            0 :             nodes,
    7216            0 :             &self.compute_hook,
    7217            0 :             reconciler_config,
    7218            0 :             &self.config,
    7219            0 :             &self.persistence,
    7220            0 :             units,
    7221            0 :             gate_guard,
    7222            0 :             &self.reconcilers_cancel,
    7223            0 :             self.http_client.clone(),
    7224            0 :         )
    7225            0 :     }
    7226              : 
    7227              :     /// Check all tenants for pending reconciliation work, and reconcile those in need.
    7228              :     /// Additionally, reschedule tenants that require it.
    7229              :     ///
    7230              :     /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
    7231              :     /// spawned but some _would_ have been spawned if `reconciler_concurrency` units were
    7232              :     /// available.  A return value of 0 indicates that everything is fully reconciled already.
    7233            0 :     fn reconcile_all(&self) -> usize {
    7234            0 :         let mut locked = self.inner.write().unwrap();
    7235            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    7236            0 :         let pageservers = nodes.clone();
    7237            0 : 
    7238            0 :         // This function is an efficient place to update lazy statistics, since we are walking
    7239            0 :         // all tenants.
    7240            0 :         let mut pending_reconciles = 0;
    7241            0 :         let mut az_violations = 0;
    7242            0 : 
    7243            0 :         // If we find any tenants to drop from memory, stash them to offload after
    7244            0 :         // we're done traversing the map of tenants.
    7245            0 :         let mut drop_detached_tenants = Vec::new();
    7246            0 : 
    7247            0 :         let mut reconciles_spawned = 0;
    7248            0 :         for shard in tenants.values_mut() {
    7249              :             // Accumulate scheduling statistics
    7250            0 :             if let (Some(attached), Some(preferred)) =
    7251            0 :                 (shard.intent.get_attached(), shard.preferred_az())
    7252              :             {
    7253            0 :                 let node_az = nodes
    7254            0 :                     .get(attached)
    7255            0 :                     .expect("Nodes exist if referenced")
    7256            0 :                     .get_availability_zone_id();
    7257            0 :                 if node_az != preferred {
    7258            0 :                     az_violations += 1;
    7259            0 :                 }
    7260            0 :             }
    7261              : 
    7262              :             // Skip checking if this shard is already enqueued for reconciliation
    7263            0 :             if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
    7264              :                 // If there is something delayed, then return a nonzero count so that
    7265              :                 // callers like reconcile_all_now do not incorrectly get the impression
    7266              :                 // that the system is in a quiescent state.
    7267            0 :                 reconciles_spawned = std::cmp::max(1, reconciles_spawned);
    7268            0 :                 pending_reconciles += 1;
    7269            0 :                 continue;
    7270            0 :             }
    7271            0 : 
    7272            0 :             // Eventual consistency: if an earlier reconcile job failed, and the shard is still
    7273            0 :             // dirty, spawn another one
    7274            0 :             if self
    7275            0 :                 .maybe_reconcile_shard(shard, &pageservers, ReconcilerPriority::Normal)
    7276            0 :                 .is_some()
    7277            0 :             {
    7278            0 :                 reconciles_spawned += 1;
    7279            0 :             } else if shard.delayed_reconcile {
    7280            0 :                 // Shard wanted to reconcile but for some reason couldn't.
    7281            0 :                 pending_reconciles += 1;
    7282            0 :             }
    7283              : 
    7284              :             // If this tenant is detached, try dropping it from memory. This is usually done
    7285              :             // proactively in [`Self::process_results`], but we do it here to handle the edge
    7286              :             // case where a reconcile completes while someone else is holding an op lock for the tenant.
    7287            0 :             if shard.tenant_shard_id.shard_number == ShardNumber(0)
    7288            0 :                 && shard.policy == PlacementPolicy::Detached
    7289              :             {
    7290            0 :                 if let Some(guard) = self.tenant_op_locks.try_exclusive(
    7291            0 :                     shard.tenant_shard_id.tenant_id,
    7292            0 :                     TenantOperations::DropDetached,
    7293            0 :                 ) {
    7294            0 :                     drop_detached_tenants.push((shard.tenant_shard_id.tenant_id, guard));
    7295            0 :                 }
    7296            0 :             }
    7297              :         }
    7298              : 
    7299              :         // Some metrics are calculated from SchedulerNode state, update these periodically
    7300            0 :         scheduler.update_metrics();
    7301              : 
    7302              :         // Process any deferred tenant drops
    7303            0 :         for (tenant_id, guard) in drop_detached_tenants {
    7304            0 :             self.maybe_drop_tenant(tenant_id, &mut locked, &guard);
    7305            0 :         }
    7306              : 
    7307            0 :         metrics::METRICS_REGISTRY
    7308            0 :             .metrics_group
    7309            0 :             .storage_controller_schedule_az_violation
    7310            0 :             .set(az_violations as i64);
    7311            0 : 
    7312            0 :         metrics::METRICS_REGISTRY
    7313            0 :             .metrics_group
    7314            0 :             .storage_controller_pending_reconciles
    7315            0 :             .set(pending_reconciles as i64);
    7316            0 : 
    7317            0 :         reconciles_spawned
    7318            0 :     }
    7319              : 
    7320              :     /// `optimize` in this context means identifying shards which have valid scheduled locations, but
    7321              :     /// could be scheduled somewhere better:
    7322              :     /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
    7323              :     ///    * e.g. after a node fails then recovers, to move some work back to it
    7324              :     /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
    7325              :     ///    * e.g. after a shard split, the initial attached locations will all be on the node where
    7326              :     ///      we did the split, but are probably better placed elsewhere.
    7327              :     /// - Creating new secondary locations if it improves the spreading of a sharded tenant
    7328              :     ///    * e.g. after a shard split, some locations will be on the same node (where the split
    7329              :     ///      happened), and will probably be better placed elsewhere.
    7330              :     ///
    7331              :     /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
    7332              :     /// the time of scheduling, this function looks for cases where a better-scoring location is available
    7333              :     /// according to those same soft constraints.
    7334            0 :     async fn optimize_all(&self) -> usize {
    7335              :         // Limit on how many shards' optimizations each call to this function will execute.  Combined
    7336              :         // with the frequency of background calls, this acts as an implicit rate limit that runs a small
    7337              :         // trickle of optimizations in the background, rather than executing a large number in parallel
    7338              :         // when a change occurs.
    7339              :         const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 16;
    7340              : 
    7341              :         // Synchronous prepare: scan shards for possible scheduling optimizations
    7342            0 :         let candidate_work = self.optimize_all_plan();
    7343            0 :         let candidate_work_len = candidate_work.len();
    7344              : 
    7345              :         // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply validation
    7346            0 :         // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply the optimizations
    7347              : 
    7348            0 :         let was_work_filtered = validated_work.len() != candidate_work_len;
    7349            0 : 
    7350            0 :         // Synchronous apply: update the shards' intent states according to validated optimisations
    7351            0 :         let mut reconciles_spawned = 0;
    7352            0 :         let mut optimizations_applied = 0;
    7353            0 :         let mut locked = self.inner.write().unwrap();
    7354            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    7355            0 :         for (tenant_shard_id, optimization) in validated_work {
    7356            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    7357              :                 // Shard was dropped between planning and execution;
    7358            0 :                 continue;
    7359              :             };
    7360            0 :             tracing::info!(tenant_shard_id=%tenant_shard_id, "Applying optimization: {optimization:?}");
    7361            0 :             if shard.apply_optimization(scheduler, optimization) {
    7362            0 :                 optimizations_applied += 1;
    7363            0 :                 if self
    7364            0 :                     .maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal)
    7365            0 :                     .is_some()
    7366            0 :                 {
    7367            0 :                     reconciles_spawned += 1;
    7368            0 :                 }
    7369            0 :             }
    7370              : 
    7371            0 :             if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
    7372            0 :                 break;
    7373            0 :             }
    7374              :         }
    7375              : 
    7376            0 :         if was_work_filtered {
    7377            0 :             // If we filtered any work out during validation, ensure we return a nonzero value to indicate
    7378            0 :             // to callers that the system is not in a truly quiet state, it's going to do some work as soon
    7379            0 :             // as these validations start passing.
    7380            0 :             reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
    7381            0 :         }
    7382              : 
    7383            0 :         reconciles_spawned
    7384            0 :     }
    7385              : 
    7386            0 :     fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
    7387              :         // How many candidate optimizations we will generate before evaluating them for readiness: setting
    7388              :         // this higher than the execution limit gives us a chance to execute some work even if the first
    7389              :         // few optimizations we find are not ready.
    7390              :         const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 64;
    7391              : 
    7392            0 :         let mut work = Vec::new();
    7393            0 :         let mut locked = self.inner.write().unwrap();
    7394            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    7395              : 
    7396              :         // We are going to plan a bunch of optimisations before applying any of them, so the
    7397              :         // utilisation stats on nodes will be effectively stale for every optimisation after the first we
    7398              :         // generate.  To avoid this causing unstable migrations/flapping, it's important that the
    7399              :         // code in TenantShard for finding optimisations uses [`NodeAttachmentSchedulingScore::disregard_utilization`]
    7400              :         // to ignore the utilisation component of the score.
    7401              : 
    7402            0 :         for (_tenant_id, schedule_context, shards) in
    7403            0 :             TenantShardContextIterator::new(tenants, ScheduleMode::Speculative)
    7404              :         {
    7405            0 :             for shard in shards {
    7406            0 :                 if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
    7407            0 :                     break;
    7408            0 :                 }
    7409            0 :                 match shard.get_scheduling_policy() {
    7410            0 :                     ShardSchedulingPolicy::Active => {
    7411            0 :                         // Ok to do optimization
    7412            0 :                     }
    7413            0 :                     ShardSchedulingPolicy::Essential if shard.get_preferred_node().is_some() => {
    7414            0 :                         // Ok to do optimization: we are executing a graceful migration that
    7415            0 :                         // has set preferred_node
    7416            0 :                     }
    7417              :                     ShardSchedulingPolicy::Essential
    7418              :                     | ShardSchedulingPolicy::Pause
    7419              :                     | ShardSchedulingPolicy::Stop => {
    7420              :                         // Policy prevents optimizing this shard.
    7421            0 :                         continue;
    7422              :                     }
    7423              :                 }
    7424              : 
    7425            0 :                 if !matches!(shard.splitting, SplitState::Idle)
    7426            0 :                     || matches!(shard.policy, PlacementPolicy::Detached)
    7427            0 :                     || shard.reconciler.is_some()
    7428              :                 {
    7429              :                     // Do not start any optimizations while another change to the tenant is ongoing: this
    7430              :                     // is not necessary for correctness, but simplifies operations and implicitly throttles
    7431              :                     // optimization changes to happen in a "trickle" over time.
    7432            0 :                     continue;
    7433            0 :                 }
    7434            0 : 
    7435            0 :                 // Fast path: we may quickly identify shards that don't have any possible optimisations
    7436            0 :                 if !shard.maybe_optimizable(scheduler, &schedule_context) {
    7437            0 :                     if cfg!(feature = "testing") {
    7438              :                         // Check that maybe_optimizable doesn't disagree with the actual optimization functions.
    7439              :                         // Only do this in testing builds: the check is not correctness-critical, so we shouldn't
    7440              :                         // risk panicking in prod if it fails, nor spend cycles on it there.
    7441            0 :                         assert!(
    7442            0 :                             shard
    7443            0 :                                 .optimize_attachment(scheduler, &schedule_context)
    7444            0 :                                 .is_none()
    7445            0 :                         );
    7446            0 :                         assert!(
    7447            0 :                             shard
    7448            0 :                                 .optimize_secondary(scheduler, &schedule_context)
    7449            0 :                                 .is_none()
    7450            0 :                         );
    7451            0 :                     }
    7452            0 :                     continue;
    7453            0 :                 }
    7454              : 
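                      :                 // Plan at most one optimization per tenant per pass: as soon as one is
                      :                 // found for any of this tenant's shards, the `break`s below move on to
                      :                 // the next tenant.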
    7455            0 :                 if let Some(optimization) =
    7456              :                     // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
    7457              :                     // its primary location based on soft constraints, cut it over.
    7458            0 :                     shard.optimize_attachment(scheduler, &schedule_context)
    7459              :                 {
    7460            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for attachment: {optimization:?}");
    7461            0 :                     work.push((shard.tenant_shard_id, optimization));
    7462            0 :                     break;
    7463            0 :                 } else if let Some(optimization) =
    7464              :                     // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
    7465              :                     // better placed on another node, based on ScheduleContext, then adjust it.  This
    7466              :                     // covers cases like after a shard split, where we might have too many shards
    7467              :                     // in the same tenant with secondary locations on the node where they originally split.
    7468            0 :                     shard.optimize_secondary(scheduler, &schedule_context)
    7469              :                 {
    7470            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for secondary: {optimization:?}");
    7471            0 :                     work.push((shard.tenant_shard_id, optimization));
    7472            0 :                     break;
    7473            0 :                 }
    7474              :             }
    7475              :         }
    7476              : 
    7477            0 :         work
    7478            0 :     }
    7479              : 
    7480            0 :     async fn optimize_all_validate(
    7481            0 :         &self,
    7482            0 :         candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
    7483            0 :     ) -> Vec<(TenantShardId, ScheduleOptimization)> {
    7484            0 :         // Take a clone of the node map to use outside the lock in async validation phase
    7485            0 :         let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
    7486            0 : 
    7487            0 :         let mut want_secondary_status = Vec::new();
    7488            0 : 
    7489            0 :         // Validate our plans: this is an async phase where we may do I/O to pageservers to
    7490            0 :         // check that the state of locations is acceptable to run the optimization, such as
    7491            0 :         // checking that a secondary location is sufficiently warmed-up to cleanly cut over
    7492            0 :         // in a live migration.
    7493            0 :         let mut validated_work = Vec::new();
    7494            0 :         for (tenant_shard_id, optimization) in candidate_work {
    7495            0 :             match optimization.action {
    7496              :                 ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    7497              :                     old_attached_node_id: _,
    7498            0 :                     new_attached_node_id,
    7499            0 :                 }) => {
    7500            0 :                     match validation_nodes.get(&new_attached_node_id) {
    7501            0 :                         None => {
    7502            0 :                             // Node was dropped between planning and validation
    7503            0 :                         }
    7504            0 :                         Some(node) => {
    7505            0 :                             if !node.is_available() {
    7506            0 :                                 tracing::info!(
    7507            0 :                                     "Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable"
    7508              :                                 );
    7509            0 :                             } else {
    7510            0 :                                 // Accumulate optimizations that require fetching secondary status, so that we can execute these
    7511            0 :                                 // remote API requests concurrently.
    7512            0 :                                 want_secondary_status.push((
    7513            0 :                                     tenant_shard_id,
    7514            0 :                                     node.clone(),
    7515            0 :                                     optimization,
    7516            0 :                                 ));
    7517            0 :                             }
    7518              :                         }
    7519              :                     }
    7520              :                 }
    7521              :                 ScheduleOptimizationAction::ReplaceSecondary(_)
    7522              :                 | ScheduleOptimizationAction::CreateSecondary(_)
    7523              :                 | ScheduleOptimizationAction::RemoveSecondary(_) => {
    7524              :                     // No extra checks needed to manage secondaries: this does not interrupt client access
    7525            0 :                     validated_work.push((tenant_shard_id, optimization))
    7526              :                 }
    7527              :             };
    7528              :         }
    7529              : 
    7530              :         // Call into the pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth
    7531              :         // migration. We do this to avoid spawning a Reconciler that would have to wait minutes or hours for a destination to warm
    7532              :         // up, since that reconciler would hold a precious reconcile semaphore unit the whole time it waited.
    7533            0 :         let results = self
    7534            0 :             .tenant_for_shards_api(
    7535            0 :                 want_secondary_status
    7536            0 :                     .iter()
    7537            0 :                     .map(|i| (i.0, i.1.clone()))
    7538            0 :                     .collect(),
    7539            0 :                 |tenant_shard_id, client| async move {
    7540            0 :                     client.tenant_secondary_status(tenant_shard_id).await
    7541            0 :                 },
    7542            0 :                 1,
    7543            0 :                 1,
    7544            0 :                 SHORT_RECONCILE_TIMEOUT,
    7545            0 :                 &self.cancel,
    7546            0 :             )
    7547            0 :             .await;
    7548              : 
    7549            0 :         for ((tenant_shard_id, node, optimization), secondary_status) in
    7550            0 :             want_secondary_status.into_iter().zip(results.into_iter())
    7551              :         {
    7552            0 :             match secondary_status {
    7553            0 :                 Err(e) => {
    7554            0 :                     tracing::info!(
    7555            0 :                         "Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}"
    7556              :                     );
    7557              :                 }
    7558            0 :                 Ok(progress) => {
    7559              :                     // We require secondary locations to have less than 10GiB of downloads pending before we will use
    7560              :                     // them in an optimization
    7561              :                     const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
    7562              : 
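                      :                     // In the condition below (`&&` binds tighter than `||`), the migration is
                      :                     // skipped if: (a) no heatmap has been uploaded yet, (b) the secondary is
                      :                     // smaller than the threshold but not yet fully downloaded, or (c) more
                      :                     // than the threshold of bytes remains to be downloaded.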
    7563            0 :                     if progress.heatmap_mtime.is_none()
    7564            0 :                         || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
    7565            0 :                             && progress.bytes_downloaded != progress.bytes_total
    7566            0 :                         || progress.bytes_total - progress.bytes_downloaded
    7567            0 :                             > DOWNLOAD_FRESHNESS_THRESHOLD
    7568              :                     {
    7569            0 :                         tracing::info!(
    7570            0 :                             "Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}"
    7571              :                         );
    7572              : 
    7573              :                         #[cfg(feature = "testing")]
    7574            0 :                         if progress.heatmap_mtime.is_none() {
    7575              :                             // No heatmap might mean the attached location has never uploaded one, or that
    7576              :                             // the secondary download hasn't happened yet.  This is relatively unusual in the field,
    7577              :                             // but fairly common in tests.
    7578            0 :                             self.kick_secondary_download(tenant_shard_id).await;
    7579            0 :                         }
    7580              :                     } else {
    7581              :                         // Location looks ready: proceed
    7582            0 :                         tracing::info!(
    7583            0 :                             "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
    7584              :                         );
    7585            0 :                         validated_work.push((tenant_shard_id, optimization))
    7586              :                     }
    7587              :                 }
    7588              :             }
    7589              :         }
    7590              : 
    7591            0 :         validated_work
    7592            0 :     }
    7593              : 
    7594              :     /// Some aspects of scheduling optimisation wait for secondary locations to be warm.  This
    7595              :     /// happens on multi-minute timescales in the field, which is fine because optimisation is meant
    7596              :     /// to be a lazy background thing. However, when testing, it is not practical to wait around, so
    7597              :     /// we have this helper to move things along faster.
    7598              :     #[cfg(feature = "testing")]
    7599            0 :     async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) {
    7600            0 :         let (attached_node, secondaries) = {
    7601            0 :             let locked = self.inner.read().unwrap();
    7602            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    7603            0 :                 tracing::warn!(
    7604            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: not found"
    7605              :                 );
    7606            0 :                 return;
    7607              :             };
    7608              : 
    7609            0 :             let Some(attached) = shard.intent.get_attached() else {
    7610            0 :                 tracing::warn!(
    7611            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: no attached"
    7612              :                 );
    7613            0 :                 return;
    7614              :             };
    7615              : 
    7616            0 :             let secondaries = shard
    7617            0 :                 .intent
    7618            0 :                 .get_secondary()
    7619            0 :                 .iter()
    7620            0 :                 .map(|n| locked.nodes.get(n).unwrap().clone())
    7621            0 :                 .collect::<Vec<_>>();
    7622            0 : 
    7623            0 :             (locked.nodes.get(attached).unwrap().clone(), secondaries)
    7624            0 :         };
    7625            0 : 
    7626            0 :         // Make remote API calls to upload + download heatmaps: we ignore errors because this is just
    7627            0 :         // a 'kick' to let scheduling optimisation run more promptly.
    7628            0 :         match attached_node
    7629            0 :             .with_client_retries(
    7630            0 :                 |client| async move { client.tenant_heatmap_upload(tenant_shard_id).await },
    7631            0 :                 &self.http_client,
    7632            0 :                 &self.config.pageserver_jwt_token,
    7633            0 :                 3,
    7634            0 :                 10,
    7635            0 :                 SHORT_RECONCILE_TIMEOUT,
    7636            0 :                 &self.cancel,
    7637            0 :             )
    7638            0 :             .await
    7639              :         {
    7640            0 :             Some(Err(e)) => {
    7641            0 :                 tracing::info!(
    7642            0 :                     "Failed to upload heatmap from {attached_node} for {tenant_shard_id}: {e}"
    7643              :                 );
    7644              :             }
    7645              :             None => {
    7646            0 :                 tracing::info!(
    7647            0 :                     "Cancelled while uploading heatmap from {attached_node} for {tenant_shard_id}"
    7648              :                 );
    7649              :             }
    7650              :             Some(Ok(_)) => {
    7651            0 :                 tracing::info!(
    7652            0 :                     "Successfully uploaded heatmap from {attached_node} for {tenant_shard_id}"
    7653              :                 );
    7654              :             }
    7655              :         }
    7656              : 
    7657            0 :         for secondary_node in secondaries {
    7658            0 :             match secondary_node
    7659            0 :                 .with_client_retries(
    7660            0 :                     |client| async move {
    7661            0 :                         client
    7662            0 :                             .tenant_secondary_download(
    7663            0 :                                 tenant_shard_id,
    7664            0 :                                 Some(Duration::from_secs(1)),
    7665            0 :                             )
    7666            0 :                             .await
    7667            0 :                     },
    7668            0 :                     &self.http_client,
    7669            0 :                     &self.config.pageserver_jwt_token,
    7670            0 :                     3,
    7671            0 :                     10,
    7672            0 :                     SHORT_RECONCILE_TIMEOUT,
    7673            0 :                     &self.cancel,
    7674            0 :                 )
    7675            0 :                 .await
    7676              :             {
    7677            0 :                 Some(Err(e)) => {
    7678            0 :                     tracing::info!(
    7679            0 :                         "Failed to download heatmap from {secondary_node} for {tenant_shard_id}: {e}"
    7680              :                     );
    7681              :                 }
    7682              :                 None => {
    7683            0 :                     tracing::info!(
    7684            0 :                         "Cancelled while downloading heatmap from {secondary_node} for {tenant_shard_id}"
    7685              :                     );
    7686              :                 }
    7687            0 :                 Some(Ok(progress)) => {
    7688            0 :                     tracing::info!(
    7689            0 :                         "Successfully downloaded heatmap from {secondary_node} for {tenant_shard_id}: {progress:?}"
    7690              :                     );
    7691              :                 }
    7692              :             }
    7693              :         }
    7694            0 :     }
    7695              : 
    7696              :     /// Asynchronously split a tenant that's eligible for automatic splits. At most one tenant will
    7697              :     /// be split per call.
    7698              :     ///
    7699              :     /// Two sets of criteria are used: initial splits and size-based splits (in that order).
    7700              :     /// Initial splits are used to eagerly split unsharded tenants that may be performing initial
    7701              :     /// ingestion, since sharded tenants have significantly better ingestion throughput. Size-based
    7702              :     /// splits are used to bound the maximum shard size and balance out load.
    7703              :     ///
    7704              :     /// Splits are based on max_logical_size, i.e. the logical size of the largest timeline in a
    7705              :     /// tenant. We use this instead of the total logical size because branches will duplicate
    7706              :     /// logical size without actually using more storage. We could also use visible physical size,
    7707              :     /// but this might overestimate tenants that frequently churn branches.
    7708              :     ///
    7709              :     /// Initial splits (initial_split_threshold):
    7710              :     /// * Applies to tenants with 1 shard.
    7711              :     /// * The largest timeline (max_logical_size) exceeds initial_split_threshold.
    7712              :     /// * Splits into initial_split_shards.
    7713              :     ///
    7714              :     /// Size-based splits (split_threshold):
    7715              :     /// * Applies to all tenants.
    7716              :     /// * The largest timeline (max_logical_size) divided by shard count exceeds split_threshold.
    7717              :     /// * Splits such that max_logical_size / shard_count <= split_threshold, in powers of 2.
    7718              :     ///
    7719              :     /// Tenant shards are ordered by descending max_logical_size, first initial split candidates
    7720              :     /// then size-based split candidates. The first matching candidate is split.
    7721              :     ///
    7722              :     /// The shard count is clamped to max_split_shards. If a candidate is eligible for both initial
    7723              :     /// and size-based splits, the largest shard count will be used.
    7724              :     ///
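                      :     /// Illustrative example (hypothetical values, in the same units as the configured
                      :     /// thresholds): with initial_split_threshold = 64 and initial_split_shards = 8, an
                      :     /// unsharded tenant whose largest timeline reaches 100 is split straight to 8 shards.
                      :     /// With split_threshold = 64, a 4-shard tenant whose largest timeline reaches 512
                      :     /// (128 per shard) is split to 8 shards so that 512 / 8 <= 64.
                      :     ///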
    7725              :     /// An unsharded tenant will get DEFAULT_STRIPE_SIZE, regardless of what its ShardIdentity says.
    7726              :     /// A sharded tenant will retain its stripe size, as splits do not allow changing it.
    7727              :     ///
    7728              :     /// TODO: consider spawning multiple splits in parallel: this is only called once every 20
    7729              :     /// seconds, so a large backlog can take a long time, and if a tenant fails to split it will
    7730              :     /// block all other splits.
    7731            0 :     async fn autosplit_tenants(self: &Arc<Self>) {
    7732            0 :         // If max_split_shards is set to 0 or 1, we can't split.
    7733            0 :         let max_split_shards = self.config.max_split_shards;
    7734            0 :         if max_split_shards <= 1 {
    7735            0 :             return;
    7736            0 :         }
    7737            0 : 
    7738            0 :         // If initial_split_shards is set to 0 or 1, disable initial splits.
    7739            0 :         let mut initial_split_threshold = self.config.initial_split_threshold.unwrap_or(0);
    7740            0 :         let initial_split_shards = self.config.initial_split_shards;
    7741            0 :         if initial_split_shards <= 1 {
    7742            0 :             initial_split_threshold = 0;
    7743            0 :         }
    7744              : 
    7745              :         // If neither split_threshold nor initial_split_threshold is set, disable autosplits.
    7746            0 :         let split_threshold = self.config.split_threshold.unwrap_or(0);
    7747            0 :         if split_threshold == 0 && initial_split_threshold == 0 {
    7748            0 :             return;
    7749            0 :         }
    7750            0 : 
    7751            0 :         // Fetch split candidates in prioritized order.
    7752            0 :         //
    7753            0 :         // If initial splits are enabled, fetch eligible tenants first. We prioritize initial splits
    7754            0 :         // over size-based splits, since these are often performing initial ingestion and rely on
    7755            0 :         // splits to improve ingest throughput.
    7756            0 :         let mut candidates = Vec::new();
    7757            0 : 
    7758            0 :         if initial_split_threshold > 0 {
    7759              :             // Initial splits: fetch tenants with 1 shard where the logical size of the largest
    7760              :             // timeline exceeds the initial split threshold.
    7761            0 :             let initial_candidates = self
    7762            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    7763            0 :                     order_by: TenantSorting::MaxLogicalSize,
    7764            0 :                     limit: 10,
    7765            0 :                     where_shards_lt: Some(ShardCount(2)),
    7766            0 :                     where_gt: Some(initial_split_threshold),
    7767            0 :                 })
    7768            0 :                 .await;
    7769            0 :             candidates.extend(initial_candidates);
    7770            0 :         }
    7771              : 
    7772            0 :         if split_threshold > 0 {
    7773              :             // Size-based splits: fetch tenants where the logical size of the largest timeline
    7774              :             // divided by shard count exceeds the split threshold.
    7775              :             //
    7776              :             // max_logical_size is only tracked on shard 0, and contains the total logical size
    7777              :             // across all shards. We have to order and filter by MaxLogicalSizePerShard, i.e.
    7778              :             // max_logical_size / shard_count, such that we only receive tenants that are actually
    7779              :             // eligible for splits. But we still use max_logical_size for later split calculations.
    7780            0 :             let size_candidates = self
    7781            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    7782            0 :                     order_by: TenantSorting::MaxLogicalSizePerShard,
    7783            0 :                     limit: 10,
    7784            0 :                     where_shards_lt: Some(ShardCount(max_split_shards)),
    7785            0 :                     where_gt: Some(split_threshold),
    7786            0 :                 })
    7787            0 :                 .await;
    7788              :             #[cfg(feature = "testing")]
    7789            0 :             assert!(
    7790            0 :                 size_candidates.iter().all(|c| c.id.is_shard_zero()),
    7791            0 :                 "MaxLogicalSizePerShard returned non-zero shard: {size_candidates:?}",
    7792              :             );
    7793            0 :             candidates.extend(size_candidates);
    7794            0 :         }
    7795              : 
    7796              :         // Filter out tenants in a prohibiting scheduling mode.
    7797            0 :         {
    7798            0 :             let state = self.inner.read().unwrap();
    7799            0 :             candidates.retain(|i| {
    7800            0 :                 let policy = state.tenants.get(&i.id).map(|s| s.get_scheduling_policy());
    7801            0 :                 policy == Some(ShardSchedulingPolicy::Active)
    7802            0 :             });
    7803            0 :         }
    7804              : 
    7805              :         // Pick the first candidate to split. This will generally always be the first one in
    7806              :         // candidates, but we defensively skip candidates that end up not actually splitting.
    7807            0 :         let Some((candidate, new_shard_count)) = candidates
    7808            0 :             .into_iter()
    7809            0 :             .filter_map(|candidate| {
    7810            0 :                 let new_shard_count = Self::compute_split_shards(ShardSplitInputs {
    7811            0 :                     shard_count: candidate.id.shard_count,
    7812            0 :                     max_logical_size: candidate.max_logical_size,
    7813            0 :                     split_threshold,
    7814            0 :                     max_split_shards,
    7815            0 :                     initial_split_threshold,
    7816            0 :                     initial_split_shards,
    7817            0 :                 });
    7818            0 :                 new_shard_count.map(|shards| (candidate, shards.count()))
    7819            0 :             })
    7820            0 :             .next()
    7821              :         else {
    7822            0 :             debug!("no split-eligible tenants found");
    7823            0 :             return;
    7824              :         };
    7825              : 
    7826              :         // Retain the stripe size of sharded tenants, as splits don't allow changing it. Otherwise,
    7827              :         // use DEFAULT_STRIPE_SIZE for unsharded tenants -- their stripe size doesn't really matter,
    7828              :         // and if we change the default stripe size we want to use the new default rather than an
    7829              :         // old, persisted stripe size.
    7830            0 :         let new_stripe_size = match candidate.id.shard_count.count() {
    7831            0 :             0 => panic!("invalid shard count 0"),
    7832            0 :             1 => Some(ShardParameters::DEFAULT_STRIPE_SIZE),
    7833            0 :             2.. => None,
    7834              :         };
    7835              : 
    7836              :         // We spawn a task to run this, so it's exactly like some external API client requesting
    7837              :         // it.  We don't want to block the background reconcile loop on this.
    7838            0 :         let old_shard_count = candidate.id.shard_count.count();
    7839            0 :         info!(
    7840            0 :             "auto-splitting tenant {old_shard_count} → {new_shard_count} shards, \
    7841            0 :                 current size {candidate:?} (split_threshold={split_threshold} \
    7842            0 :                 initial_split_threshold={initial_split_threshold})"
    7843              :         );
    7844              : 
    7845            0 :         let this = self.clone();
    7846            0 :         tokio::spawn(
    7847            0 :             async move {
    7848            0 :                 match this
    7849            0 :                     .tenant_shard_split(
    7850            0 :                         candidate.id.tenant_id,
    7851            0 :                         TenantShardSplitRequest {
    7852            0 :                             new_shard_count,
    7853            0 :                             new_stripe_size,
    7854            0 :                         },
    7855            0 :                     )
    7856            0 :                     .await
    7857              :                 {
    7858              :                     Ok(_) => {
    7859            0 :                         info!("successful auto-split {old_shard_count} → {new_shard_count} shards")
    7860              :                     }
    7861            0 :                     Err(err) => error!("auto-split failed: {err}"),
    7862              :                 }
    7863            0 :             }
    7864            0 :             .instrument(info_span!("auto_split", tenant_id=%candidate.id.tenant_id)),
    7865              :         );
    7866            0 :     }
    7867              : 
    7868              :     /// Returns the number of shards to split a tenant into, or None if the tenant shouldn't split,
    7869              :     /// based on the total logical size of the largest timeline summed across all shards. Uses the
    7870              :     /// larger of size-based and initial splits, clamped to max_split_shards.
    7871              :     ///
    7872              :     /// NB: the thresholds are exclusive, since TopTenantShardsRequest uses where_gt.
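                      :     ///
                      :     /// Worked example (hypothetical inputs): with shard_count = 2, max_logical_size = 300,
                      :     /// split_threshold = 64 and max_split_shards = 16, div_ceil(300, 64) = 5 exceeds the
                      :     /// current count of 2, so it is rounded up to the next power of two (8), which is within
                      :     /// max_split_shards; the result is Some(ShardCount(8)).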
    7873           25 :     fn compute_split_shards(inputs: ShardSplitInputs) -> Option<ShardCount> {
    7874           25 :         let ShardSplitInputs {
    7875           25 :             shard_count,
    7876           25 :             max_logical_size,
    7877           25 :             split_threshold,
    7878           25 :             max_split_shards,
    7879           25 :             initial_split_threshold,
    7880           25 :             initial_split_shards,
    7881           25 :         } = inputs;
    7882           25 : 
    7883           25 :         let mut new_shard_count: u8 = shard_count.count();
    7884           25 : 
    7885           25 :         // Size-based splits. Ensures max_logical_size / new_shard_count <= split_threshold, using
    7886           25 :         // power-of-two shard counts.
    7887           25 :         //
    7888           25 :         // If the current shard count is not a power of two, and does not exceed split_threshold,
    7889           25 :         // then we leave it alone rather than forcing a power-of-two split.
    7890           25 :         if split_threshold > 0
    7891           18 :             && max_logical_size.div_ceil(split_threshold) > shard_count.count() as u64
    7892           12 :         {
    7893           12 :             new_shard_count = max_logical_size
    7894           12 :                 .div_ceil(split_threshold)
    7895           12 :                 .checked_next_power_of_two()
    7896           12 :                 .unwrap_or(u8::MAX as u64)
    7897           12 :                 .try_into()
    7898           12 :                 .unwrap_or(u8::MAX);
    7899           13 :         }
    7900              : 
    7901              :         // Initial splits. Use the larger of size-based and initial split shard counts. This only
    7902              :         // applies to unsharded tenants, i.e. changes to initial_split_threshold or
    7903              :         // initial_split_shards are not retroactive for sharded tenants.
    7904           25 :         if initial_split_threshold > 0
    7905           14 :             && shard_count.count() <= 1
    7906           11 :             && max_logical_size > initial_split_threshold
    7907            8 :         {
    7908            8 :             new_shard_count = new_shard_count.max(initial_split_shards);
    7909           17 :         }
    7910              : 
    7911              :         // Clamp to max shards.
    7912           25 :         new_shard_count = new_shard_count.min(max_split_shards);
    7913           25 : 
    7914           25 :         // Don't split if we're not increasing the shard count.
    7915           25 :         if new_shard_count <= shard_count.count() {
    7916           10 :             return None;
    7917           15 :         }
    7918           15 : 
    7919           15 :         Some(ShardCount(new_shard_count))
    7920           25 :     }
    7921              : 
    7922              :     /// Fetches the top tenant shards from every node, in descending order of
    7923              :     /// max logical size. Any node errors will be logged and ignored.
    7924            0 :     async fn get_top_tenant_shards(
    7925            0 :         &self,
    7926            0 :         request: &TopTenantShardsRequest,
    7927            0 :     ) -> Vec<TopTenantShardItem> {
    7928            0 :         let nodes = self
    7929            0 :             .inner
    7930            0 :             .read()
    7931            0 :             .unwrap()
    7932            0 :             .nodes
    7933            0 :             .values()
    7934            0 :             .cloned()
    7935            0 :             .collect_vec();
    7936            0 : 
    7937            0 :         let mut futures = FuturesUnordered::new();
    7938            0 :         for node in nodes {
    7939            0 :             futures.push(async move {
    7940            0 :                 node.with_client_retries(
    7941            0 :                     |client| async move { client.top_tenant_shards(request.clone()).await },
    7942            0 :                     &self.http_client,
    7943            0 :                     &self.config.pageserver_jwt_token,
    7944            0 :                     3,
    7945            0 :                     3,
    7946            0 :                     Duration::from_secs(5),
    7947            0 :                     &self.cancel,
    7948            0 :                 )
    7949            0 :                 .await
    7950            0 :             });
    7951            0 :         }
    7952              : 
    7953            0 :         let mut top = Vec::new();
    7954            0 :         while let Some(output) = futures.next().await {
    7955            0 :             match output {
    7956            0 :                 Some(Ok(response)) => top.extend(response.shards),
    7957            0 :                 Some(Err(mgmt_api::Error::Cancelled)) => {}
    7958            0 :                 Some(Err(err)) => warn!("failed to fetch top tenants: {err}"),
    7959            0 :                 None => {} // node is shutting down
    7960              :             }
    7961              :         }
    7962              : 
    7963            0 :         top.sort_by_key(|i| i.max_logical_size);
    7964            0 :         top.reverse();
    7965            0 :         top
    7966            0 :     }
    7967              : 
    7968              :     /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
    7969              :     /// also wait for any generated Reconcilers to complete.  Calling this until it returns zero should
    7970              :     /// put the system into a quiescent state where future background reconciliations won't do anything.
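                      :     ///
                      :     /// Typical test-side usage is a sketch like `while svc.reconcile_all_now().await? > 0 {}`
                      :     /// (the variable name is illustrative), looping until the system has quiesced.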
    7971            0 :     pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
    7972            0 :         let reconciles_spawned = self.reconcile_all();
    7973            0 :         let reconciles_spawned = if reconciles_spawned == 0 {
    7974              :             // Only optimize when we are otherwise idle
    7975            0 :             self.optimize_all().await
    7976              :         } else {
    7977            0 :             reconciles_spawned
    7978              :         };
    7979              : 
    7980            0 :         let waiters = {
    7981            0 :             let mut waiters = Vec::new();
    7982            0 :             let locked = self.inner.read().unwrap();
    7983            0 :             for (_tenant_shard_id, shard) in locked.tenants.iter() {
    7984            0 :                 if let Some(waiter) = shard.get_waiter() {
    7985            0 :                     waiters.push(waiter);
    7986            0 :                 }
    7987              :             }
    7988            0 :             waiters
    7989            0 :         };
    7990            0 : 
    7991            0 :         let waiter_count = waiters.len();
    7992            0 :         match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    7993            0 :             Ok(()) => {}
    7994            0 :             Err(ReconcileWaitError::Failed(_, reconcile_error))
    7995            0 :                 if matches!(*reconcile_error, ReconcileError::Cancel) =>
    7996            0 :             {
    7997            0 :                 // Ignore reconciler cancel errors: this reconciler might have shut down
    7998            0 :                 // because some other change superseded it.  We will return a nonzero number,
    7999            0 :                 // so the caller knows they might have to call again to quiesce the system.
    8000            0 :             }
    8001            0 :             Err(e) => {
    8002            0 :                 return Err(e);
    8003              :             }
    8004              :         };
    8005              : 
    8006            0 :         tracing::info!(
    8007            0 :             "{} reconciles in reconcile_all, {} waiters",
    8008              :             reconciles_spawned,
    8009              :             waiter_count
    8010              :         );
    8011              : 
    8012            0 :         Ok(std::cmp::max(waiter_count, reconciles_spawned))
    8013            0 :     }
    8014              : 
    8015            0 :     async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
    8016            0 :         // Cancel all on-going reconciles and wait for them to exit the gate.
    8017            0 :         tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
    8018            0 :         self.reconcilers_cancel.cancel();
    8019            0 :         self.reconcilers_gate.close().await;
    8020              : 
    8021              :         // Signal the background loop in [`Service::process_results`] to exit once
    8022              :         // it has processed the results from all the reconciles we cancelled earlier.
    8023            0 :         tracing::info!("{reason}: processing results from previously in-flight reconciles");
    8024            0 :         self.result_tx.send(ReconcileResultRequest::Stop).ok();
    8025            0 :         self.result_tx.closed().await;
    8026            0 :     }
    8027              : 
    8028            0 :     pub async fn shutdown(&self) {
    8029            0 :         self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
    8030            0 :             .await;
    8031              : 
    8032              :         // Background tasks hold gate guards: this notifies them of the cancellation and
    8033              :         // waits for them all to complete.
    8034            0 :         tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
    8035            0 :         self.cancel.cancel();
    8036            0 :         self.gate.close().await;
    8037            0 :     }
    8038              : 
    8039              :     /// Spot check the download lag for a secondary location of a shard.
    8040              :     /// Should be used as a heuristic, since it's not always precise: the
    8041              :     /// secondary might not have downloaded the new heatmap yet and, hence,
    8042              :     /// is not aware of the lag.
    8043              :     ///
    8044              :     /// Returns:
    8045              :     /// * Ok(None) if the lag could not be determined from the status,
    8046              :     /// * Ok(Some(_)) if the lag could be determined,
    8047              :     /// * Err on failures to query the pageserver.
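                      :     ///
                      :     /// Callers should treat `Ok(None)` conservatively; for example, the drain path below
                      :     /// skips migrating a shard when its secondary's lag cannot be determined.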
    8048            0 :     async fn secondary_lag(
    8049            0 :         &self,
    8050            0 :         secondary: &NodeId,
    8051            0 :         tenant_shard_id: TenantShardId,
    8052            0 :     ) -> Result<Option<u64>, mgmt_api::Error> {
    8053            0 :         let nodes = self.inner.read().unwrap().nodes.clone();
    8054            0 :         let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
    8055            0 :             StatusCode::NOT_FOUND,
    8056            0 :             format!("Node with id {} not found", secondary),
    8057            0 :         ))?;
    8058              : 
    8059            0 :         match node
    8060            0 :             .with_client_retries(
    8061            0 :                 |client| async move { client.tenant_secondary_status(tenant_shard_id).await },
    8062            0 :                 &self.http_client,
    8063            0 :                 &self.config.pageserver_jwt_token,
    8064            0 :                 1,
    8065            0 :                 3,
    8066            0 :                 Duration::from_millis(250),
    8067            0 :                 &self.cancel,
    8068            0 :             )
    8069            0 :             .await
    8070              :         {
    8071            0 :             Some(Ok(status)) => match status.heatmap_mtime {
    8072            0 :                 Some(_) => Ok(Some(status.bytes_total - status.bytes_downloaded)),
    8073            0 :                 None => Ok(None),
    8074              :             },
    8075            0 :             Some(Err(e)) => Err(e),
    8076            0 :             None => Err(mgmt_api::Error::Cancelled),
    8077              :         }
    8078            0 :     }
    8079              : 
    8080              :     /// Drain a node by moving the shards attached to it as primaries.
    8081              :     /// This is a long running operation and it should run as a separate Tokio task.
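                      :     ///
                      :     /// Spawning sketch (caller-side, names are illustrative):
                      :     /// `tokio::spawn(async move { service.drain_node(node_id, cancel).await })`.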
    8082            0 :     pub(crate) async fn drain_node(
    8083            0 :         self: &Arc<Self>,
    8084            0 :         node_id: NodeId,
    8085            0 :         cancel: CancellationToken,
    8086            0 :     ) -> Result<(), OperationError> {
    8087              :         const MAX_SECONDARY_LAG_BYTES_DEFAULT: u64 = 256 * 1024 * 1024;
    8088            0 :         let max_secondary_lag_bytes = self
    8089            0 :             .config
    8090            0 :             .max_secondary_lag_bytes
    8091            0 :             .unwrap_or(MAX_SECONDARY_LAG_BYTES_DEFAULT);
    8092              : 
    8093              :         // By default, live migrations are generous about the wait time for getting
    8094              :         // the secondary location up to speed. When draining, give up earlier in order
    8095              :         // to not stall the operation when a cold secondary is encountered.
    8096              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(20);
    8097              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    8098            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    8099            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    8100            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    8101            0 :             .build();
    8102            0 : 
    8103            0 :         let mut waiters = Vec::new();
    8104            0 : 
    8105            0 :         let mut tid_iter = TenantShardIterator::new({
    8106            0 :             let service = self.clone();
    8107            0 :             move |last_inspected_shard: Option<TenantShardId>| {
    8108            0 :                 let locked = &service.inner.read().unwrap();
    8109            0 :                 let tenants = &locked.tenants;
    8110            0 :                 let entry = match last_inspected_shard {
    8111            0 :                     Some(skip_past) => {
    8112            0 :                         // Skip to the last seen tenant shard id
    8113            0 :                         let mut cursor = tenants.iter().skip_while(|(tid, _)| **tid != skip_past);
    8114            0 : 
    8115            0 :                         // Skip past the last seen
    8116            0 :                         cursor.nth(1)
    8117              :                     }
    8118            0 :                     None => tenants.first_key_value(),
    8119              :                 };
    8120              : 
    8121            0 :                 entry.map(|(tid, _)| tid).copied()
    8122            0 :             }
    8123            0 :         });
    8124              : 
    8125            0 :         while !tid_iter.finished() {
    8126            0 :             if cancel.is_cancelled() {
    8127            0 :                 match self
    8128            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8129            0 :                     .await
    8130              :                 {
    8131            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8132            0 :                     Err(err) => {
    8133            0 :                         return Err(OperationError::FinalizeError(
    8134            0 :                             format!(
    8135            0 :                                 "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
    8136            0 :                                 node_id, err
    8137            0 :                             )
    8138            0 :                             .into(),
    8139            0 :                         ));
    8140              :                     }
    8141              :                 }
    8142            0 :             }
    8143            0 : 
    8144            0 :             drain_utils::validate_node_state(&node_id, self.inner.read().unwrap().nodes.clone())?;
    8145              : 
    8146            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    8147            0 :                 let tid = match tid_iter.next() {
    8148            0 :                     Some(tid) => tid,
    8149              :                     None => {
    8150            0 :                         break;
    8151              :                     }
    8152              :                 };
    8153              : 
    8154            0 :                 let tid_drain = TenantShardDrain {
    8155            0 :                     drained_node: node_id,
    8156            0 :                     tenant_shard_id: tid,
    8157            0 :                 };
    8158              : 
    8159            0 :                 let dest_node_id = {
    8160            0 :                     let locked = self.inner.read().unwrap();
    8161            0 : 
    8162            0 :                     match tid_drain
    8163            0 :                         .tenant_shard_eligible_for_drain(&locked.tenants, &locked.scheduler)
    8164              :                     {
    8165            0 :                         Some(node_id) => node_id,
    8166              :                         None => {
    8167            0 :                             continue;
    8168              :                         }
    8169              :                     }
    8170              :                 };
    8171              : 
    8172            0 :                 match self.secondary_lag(&dest_node_id, tid).await {
    8173            0 :                     Ok(Some(lag)) if lag <= max_secondary_lag_bytes => {
    8174            0 :                         // The secondary is reasonably up to date.
    8175            0 :                         // Migrate to it
    8176            0 :                     }
    8177            0 :                     Ok(Some(lag)) => {
    8178            0 :                         tracing::info!(
    8179            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8180            0 :                             "Secondary on node {dest_node_id} is lagging by {lag}. Skipping reconcile."
    8181              :                         );
    8182            0 :                         continue;
    8183              :                     }
    8184              :                     Ok(None) => {
    8185            0 :                         tracing::info!(
    8186            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8187            0 :                             "Could not determine lag for secondary on node {dest_node_id}. Skipping reconcile."
    8188              :                         );
    8189            0 :                         continue;
    8190              :                     }
    8191            0 :                     Err(err) => {
    8192            0 :                         tracing::warn!(
    8193            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8194            0 :                             "Failed to get secondary lag from node {dest_node_id}. Skipping reconcile: {err}"
    8195              :                         );
    8196            0 :                         continue;
    8197              :                     }
    8198              :                 }
    8199              : 
    8200              :                 {
    8201            0 :                     let mut locked = self.inner.write().unwrap();
    8202            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    8203            0 :                     let rescheduled = tid_drain.reschedule_to_secondary(
    8204            0 :                         dest_node_id,
    8205            0 :                         tenants,
    8206            0 :                         scheduler,
    8207            0 :                         nodes,
    8208            0 :                     )?;
    8209              : 
    8210            0 :                     if let Some(tenant_shard) = rescheduled {
    8211            0 :                         let waiter = self.maybe_configured_reconcile_shard(
    8212            0 :                             tenant_shard,
    8213            0 :                             nodes,
    8214            0 :                             reconciler_config,
    8215            0 :                         );
    8216            0 :                         if let Some(some) = waiter {
    8217            0 :                             waiters.push(some);
    8218            0 :                         }
    8219            0 :                     }
    8220              :                 }
    8221              :             }
    8222              : 
    8223            0 :             waiters = self
    8224            0 :                 .await_waiters_remainder(waiters, WAITER_FILL_DRAIN_POLL_TIMEOUT)
    8225            0 :                 .await;
    8226              : 
    8227            0 :             failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel);
    8228              :         }
    8229              : 
    8230            0 :         while !waiters.is_empty() {
    8231            0 :             if cancel.is_cancelled() {
    8232            0 :                 match self
    8233            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8234            0 :                     .await
    8235              :                 {
    8236            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8237            0 :                     Err(err) => {
    8238            0 :                         return Err(OperationError::FinalizeError(
    8239            0 :                             format!(
    8240            0 :                                 "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
    8241            0 :                                 node_id, err
    8242            0 :                             )
    8243            0 :                             .into(),
    8244            0 :                         ));
    8245              :                     }
    8246              :                 }
    8247            0 :             }
    8248            0 : 
    8249            0 :             tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
    8250              : 
    8251            0 :             waiters = self
    8252            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    8253            0 :                 .await;
    8254              :         }
    8255              : 
    8256              :         // At this point we have done the best we could to drain shards from this node.
    8257              :         // Set the node scheduling policy to [`NodeSchedulingPolicy::PauseForRestart`]
    8258              :         // to complete the drain.
    8259            0 :         if let Err(err) = self
    8260            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
    8261            0 :             .await
    8262              :         {
    8263              :             // This is not fatal. Anything that is polling the node scheduling policy to detect
    8264              :             // the end of the drain operations will hang, but all such places should enforce an
    8265              :             // overall timeout. The scheduling policy will be updated upon node re-attach and/or
    8266              :             // by the counterpart fill operation.
    8267            0 :             return Err(OperationError::FinalizeError(
    8268            0 :                 format!(
    8269            0 :                     "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
    8270            0 :                 )
    8271            0 :                 .into(),
    8272            0 :             ));
    8273            0 :         }
    8274            0 : 
    8275            0 :         Ok(())
    8276            0 :     }
    8277              : 
    8278              :     /// Create a node fill plan (pick secondaries to promote), based on:
    8279              :     /// 1. Shards which have a secondary on this node, and this node is in their home AZ, and are currently attached to a node
    8280              :     ///    outside their home AZ, should be migrated back here.
    8281              :     /// 2. If after step 1 we have not migrated enough shards for this node to have its fair share of
    8282              :     ///    attached shards, we will promote more shards from the nodes with the most attached shards, unless
    8283              :     ///    those shards have a home AZ that doesn't match the node we're filling.
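                      :     ///
                      :     /// Illustrative example (hypothetical numbers): if the fill requirement is 6 and step 1
                      :     /// finds 4 shards to move home, step 2 promotes at most 2 more AZ-agnostic shards, taking
                      :     /// them only from nodes whose attached shard count exceeds the expected per-node share.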
    8284            0 :     fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
    8285            0 :         let mut locked = self.inner.write().unwrap();
    8286            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    8287            0 : 
    8288            0 :         let node_az = nodes
    8289            0 :             .get(&node_id)
    8290            0 :             .expect("Node must exist")
    8291            0 :             .get_availability_zone_id()
    8292            0 :             .clone();
    8293            0 : 
    8294            0 :         // The tenant shard IDs that we plan to promote from secondary to attached on this node
    8295            0 :         let mut plan = Vec::new();
    8296            0 : 
    8297            0 :         // Collect shards which do not have a preferred AZ & are eligible for moving in stage 2
    8298            0 :         let mut free_tids_by_node: HashMap<NodeId, Vec<TenantShardId>> = HashMap::new();
    8299            0 : 
    8300            0 :         // Don't respect AZ preferences if there is only one AZ.  This comes up in tests, but it could
    8301            0 :         // conceivably come up in real life if deploying a single-AZ region intentionally.
    8302            0 :         let respect_azs = nodes
    8303            0 :             .values()
    8304            0 :             .map(|n| n.get_availability_zone_id())
    8305            0 :             .unique()
    8306            0 :             .count()
    8307            0 :             > 1;
    8308              : 
    8309              :         // Step 1: collect all shards that we are required to migrate back to this node because their AZ preference
    8310              :         // requires it.
    8311            0 :         for (tsid, tenant_shard) in tenants {
    8312            0 :             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    8313              :                 // Shard doesn't have a secondary on this node, ignore it.
    8314            0 :                 continue;
    8315            0 :             }
    8316            0 : 
    8317            0 :             // AZ check: when filling nodes after a restart, our intent is to move _back_ the
    8318            0 :             // shards which belong on this node, not to promote shards whose scheduling preference
    8319            0 :             // would be on their currently attached node.  So we will avoid promoting shards whose
    8320            0 :             // home AZ doesn't match the AZ of the node we're filling.
    8321            0 :             match tenant_shard.preferred_az() {
    8322            0 :                 _ if !respect_azs => {
    8323            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    8324            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    8325            0 :                     }
    8326              :                 }
    8327              :                 None => {
    8328              :                     // Shard doesn't have an AZ preference: it is eligible to be moved, but we
    8329              :                     // will only do so if our target shard count requires it.
    8330            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    8331            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    8332            0 :                     }
    8333              :                 }
    8334            0 :                 Some(az) if az == &node_az => {
    8335              :                     // This shard's home AZ is equal to the node we're filling: it should
    8336              :                     // be moved back to this node as part of filling, unless its currently
    8337              :                     // attached location is also in its home AZ.
    8338            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    8339            0 :                         if nodes
    8340            0 :                             .get(primary)
    8341            0 :                             .expect("referenced node must exist")
    8342            0 :                             .get_availability_zone_id()
    8343            0 :                             != tenant_shard
    8344            0 :                                 .preferred_az()
    8345            0 :                                 .expect("tenant must have an AZ preference")
    8346              :                         {
    8347            0 :                             plan.push(*tsid)
    8348            0 :                         }
    8349              :                     } else {
    8350            0 :                         plan.push(*tsid)
    8351              :                     }
    8352              :                 }
    8353            0 :                 Some(_) => {
    8354            0 :                     // This shard's home AZ is somewhere other than the node we're filling,
    8355            0 :                     // so it may not be moved back to this node as part of filling.  Ignore it.
    8356            0 :                 }
    8357              :             }
    8358              :         }
    8359              : 
    8360              :         // Step 2: also promote any AZ-agnostic shards as required to achieve the target number of attachments
    8361            0 :         let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
    8362            0 : 
    8363            0 :         let expected_attached = locked.scheduler.expected_attached_shard_count();
    8364            0 :         let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
    8365            0 : 
    8366            0 :         let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
    8367              : 
    8368            0 :         for (node_id, attached) in nodes_by_load {
    8369            0 :             let available = locked.nodes.get(&node_id).is_some_and(|n| n.is_available());
    8370            0 :             if !available {
    8371            0 :                 continue;
    8372            0 :             }
    8373            0 : 
    8374            0 :             if plan.len() >= fill_requirement
    8375            0 :                 || free_tids_by_node.is_empty()
    8376            0 :                 || attached <= expected_attached
    8377              :             {
    8378            0 :                 break;
    8379            0 :             }
    8380            0 : 
    8381            0 :             let can_take = attached - expected_attached;
    8382            0 :             let needed = fill_requirement - plan.len();
    8383            0 :             let mut take = std::cmp::min(can_take, needed);
    8384            0 : 
    8385            0 :             let mut remove_node = false;
    8386            0 :             while take > 0 {
    8387            0 :                 match free_tids_by_node.get_mut(&node_id) {
    8388            0 :                     Some(tids) => match tids.pop() {
    8389            0 :                         Some(tid) => {
    8390            0 :                             let max_promote_for_tenant = std::cmp::max(
    8391            0 :                                 tid.shard_count.count() as usize / locked.nodes.len(),
    8392            0 :                                 1,
    8393            0 :                             );
    8394            0 :                             let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
    8395            0 :                             if *promoted < max_promote_for_tenant {
    8396            0 :                                 plan.push(tid);
    8397            0 :                                 *promoted += 1;
    8398            0 :                                 take -= 1;
    8399            0 :                             }
    8400              :                         }
    8401              :                         None => {
    8402            0 :                             remove_node = true;
    8403            0 :                             break;
    8404              :                         }
    8405              :                     },
    8406              :                     None => {
    8407            0 :                         break;
    8408              :                     }
    8409              :                 }
    8410              :             }
    8411              : 
    8412            0 :             if remove_node {
    8413            0 :                 free_tids_by_node.remove(&node_id);
    8414            0 :             }
    8415              :         }
    8416              : 
    8417            0 :         plan
    8418            0 :     }
    8419              : 
    8420              :     /// Fill a node by promoting its secondaries until the cluster is balanced
    8421              :     /// with regards to attached shard counts. Note that this operation only
    8422              :     /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
    8423              :     /// This is a long running operation and it should run as a separate Tokio task.
    8424            0 :     pub(crate) async fn fill_node(
    8425            0 :         &self,
    8426            0 :         node_id: NodeId,
    8427            0 :         cancel: CancellationToken,
    8428            0 :     ) -> Result<(), OperationError> {
    8429              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(20);
    8430              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    8431            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    8432            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    8433            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    8434            0 :             .build();
    8435            0 : 
    8436            0 :         let mut tids_to_promote = self.fill_node_plan(node_id);
    8437            0 :         let mut waiters = Vec::new();
    8438              : 
    8439              :         // Execute the plan we've composed above. Before applying each move from the plan,
    8440              :         // we validate to ensure that it has not gone stale in the meantime.
    8441            0 :         while !tids_to_promote.is_empty() {
    8442            0 :             if cancel.is_cancelled() {
    8443            0 :                 match self
    8444            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8445            0 :                     .await
    8446              :                 {
    8447            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8448            0 :                     Err(err) => {
    8449            0 :                         return Err(OperationError::FinalizeError(
    8450            0 :                             format!(
    8451            0 :                                 "Failed to finalise fill cancel of {} by setting scheduling policy to Active: {}",
    8452            0 :                                 node_id, err
    8453            0 :                             )
    8454            0 :                             .into(),
    8455            0 :                         ));
    8456              :                     }
    8457              :                 }
    8458            0 :             }
    8459            0 : 
    8460            0 :             {
    8461            0 :                 let mut locked = self.inner.write().unwrap();
    8462            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    8463              : 
    8464            0 :                 let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
    8465            0 :                     format!("node {node_id} was removed").into(),
    8466            0 :                 ))?;
    8467              : 
    8468            0 :                 let current_policy = node.get_scheduling();
    8469            0 :                 if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
    8470              :                     // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
    8471              :                     // about it
    8472            0 :                     return Err(OperationError::NodeStateChanged(
    8473            0 :                         format!("node {node_id} changed state to {current_policy:?}").into(),
    8474            0 :                     ));
    8475            0 :                 }
    8476              : 
    8477            0 :                 while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    8478            0 :                     if let Some(tid) = tids_to_promote.pop() {
    8479            0 :                         if let Some(tenant_shard) = tenants.get_mut(&tid) {
    8480              :                             // If the node being filled is not a secondary anymore,
    8481              :                             // skip the promotion.
    8482            0 :                             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    8483            0 :                                 continue;
    8484            0 :                             }
    8485            0 : 
    8486            0 :                             let previously_attached_to = *tenant_shard.intent.get_attached();
    8487            0 :                             match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
    8488            0 :                                 Err(e) => {
    8489            0 :                                     tracing::warn!(
    8490            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8491            0 :                                         "Scheduling error when filling pageserver {} : {e}", node_id
    8492              :                                     );
    8493              :                                 }
    8494              :                                 Ok(()) => {
    8495            0 :                                     tracing::info!(
    8496            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    8497            0 :                                         "Rescheduled shard while filling node {}: {:?} -> {}",
    8498              :                                         node_id,
    8499              :                                         previously_attached_to,
    8500              :                                         node_id
    8501              :                                     );
    8502              : 
    8503            0 :                                     if let Some(waiter) = self.maybe_configured_reconcile_shard(
    8504            0 :                                         tenant_shard,
    8505            0 :                                         nodes,
    8506            0 :                                         reconciler_config,
    8507            0 :                                     ) {
    8508            0 :                                         waiters.push(waiter);
    8509            0 :                                     }
    8510              :                                 }
    8511              :                             }
    8512            0 :                         }
    8513              :                     } else {
    8514            0 :                         break;
    8515              :                     }
    8516              :                 }
    8517              :             }
    8518              : 
    8519            0 :             waiters = self
    8520            0 :                 .await_waiters_remainder(waiters, WAITER_FILL_DRAIN_POLL_TIMEOUT)
    8521            0 :                 .await;
    8522              :         }
    8523              : 
    8524            0 :         while !waiters.is_empty() {
    8525            0 :             if cancel.is_cancelled() {
    8526            0 :                 match self
    8527            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8528            0 :                     .await
    8529              :                 {
    8530            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    8531            0 :                     Err(err) => {
    8532            0 :                         return Err(OperationError::FinalizeError(
    8533            0 :                             format!(
    8534            0 :                                 "Failed to finalise fill cancel of {} by setting scheduling policy to Active: {}",
    8535            0 :                                 node_id, err
    8536            0 :                             )
    8537            0 :                             .into(),
    8538            0 :                         ));
    8539              :                     }
    8540              :                 }
    8541            0 :             }
    8542            0 : 
    8543            0 :             tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
    8544              : 
    8545            0 :             waiters = self
    8546            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    8547            0 :                 .await;
    8548              :         }
    8549              : 
    8550            0 :         if let Err(err) = self
    8551            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    8552            0 :             .await
    8553              :         {
    8554              :             // This isn't a huge issue since the filling process starts upon request. However, it
    8555              :             // will prevent the next drain from starting. The only case in which this can fail
    8556              :             // is database unavailability. Such a case will require manual intervention.
    8557            0 :             return Err(OperationError::FinalizeError(
    8558            0 :                 format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
    8559            0 :                     .into(),
    8560            0 :             ));
    8561            0 :         }
    8562            0 : 
    8563            0 :         Ok(())
    8564            0 :     }
    8565              : 
    8566              :     /// Updates scrubber metadata health check results.
    8567            0 :     pub(crate) async fn metadata_health_update(
    8568            0 :         &self,
    8569            0 :         update_req: MetadataHealthUpdateRequest,
    8570            0 :     ) -> Result<(), ApiError> {
    8571            0 :         let now = chrono::offset::Utc::now();
    8572            0 :         let (healthy_records, unhealthy_records) = {
    8573            0 :             let locked = self.inner.read().unwrap();
    8574            0 :             let healthy_records = update_req
    8575            0 :                 .healthy_tenant_shards
    8576            0 :                 .into_iter()
    8577            0 :                 // Retain only health records associated with tenant shards managed by storage controller.
    8578            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    8579            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
    8580            0 :                 .collect();
    8581            0 :             let unhealthy_records = update_req
    8582            0 :                 .unhealthy_tenant_shards
    8583            0 :                 .into_iter()
    8584            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    8585            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
    8586            0 :                 .collect();
    8587            0 : 
    8588            0 :             (healthy_records, unhealthy_records)
    8589            0 :         };
    8590            0 : 
    8591            0 :         self.persistence
    8592            0 :             .update_metadata_health_records(healthy_records, unhealthy_records, now)
    8593            0 :             .await?;
    8594            0 :         Ok(())
    8595            0 :     }
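                      : 
                      :     // Illustration of the update above, using hypothetical shard IDs (A, B, C are not from
                      :     // the source): a request naming A and B as healthy and C as unhealthy, where only A and C
                      :     // are managed by this controller, persists (A, healthy, now) and (C, unhealthy, now); B is
                      :     // dropped by the contains_key filter.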
    8596              : 
    8597              :     /// Lists the tenant shards that have unhealthy metadata status.
    8598            0 :     pub(crate) async fn metadata_health_list_unhealthy(
    8599            0 :         &self,
    8600            0 :     ) -> Result<Vec<TenantShardId>, ApiError> {
    8601            0 :         let result = self
    8602            0 :             .persistence
    8603            0 :             .list_unhealthy_metadata_health_records()
    8604            0 :             .await?
    8605            0 :             .iter()
    8606            0 :             .map(|p| p.get_tenant_shard_id().unwrap())
    8607            0 :             .collect();
    8608            0 : 
    8609            0 :         Ok(result)
    8610            0 :     }
    8611              : 
    8612              :     /// Lists the tenant shards that have not been scrubbed for some duration.
    8613            0 :     pub(crate) async fn metadata_health_list_outdated(
    8614            0 :         &self,
    8615            0 :         not_scrubbed_for: Duration,
    8616            0 :     ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
    8617            0 :         let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
    8618            0 :         let result = self
    8619            0 :             .persistence
    8620            0 :             .list_outdated_metadata_health_records(earlier)
    8621            0 :             .await?
    8622            0 :             .into_iter()
    8623            0 :             .map(|record| record.into())
    8624            0 :             .collect();
    8625            0 :         Ok(result)
    8626            0 :     }
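                      : 
                      :     // For illustration: with not_scrubbed_for = Duration::from_secs(24 * 3600), `earlier` is
                      :     // the timestamp 24 hours before now, and the call above presumably returns only the health
                      :     // records whose last scrub time predates that cutoff.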
    8627              : 
    8628            0 :     pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
    8629            0 :         self.inner.read().unwrap().get_leadership_status()
    8630            0 :     }
    8631              : 
    8632            0 :     pub(crate) async fn step_down(&self) -> GlobalObservedState {
    8633            0 :         tracing::info!("Received step down request from peer");
    8634            0 :         failpoint_support::sleep_millis_async!("sleep-on-step-down-handling");
    8635              : 
    8636            0 :         self.inner.write().unwrap().step_down();
    8637            0 :         // TODO: would it make sense to have a time-out for this?
    8638            0 :         self.stop_reconciliations(StopReconciliationsReason::SteppingDown)
    8639            0 :             .await;
    8640              : 
    8641            0 :         let mut global_observed = GlobalObservedState::default();
    8642            0 :         let locked = self.inner.read().unwrap();
    8643            0 :         for (tid, tenant_shard) in locked.tenants.iter() {
    8644            0 :             global_observed
    8645            0 :                 .0
    8646            0 :                 .insert(*tid, tenant_shard.observed.clone());
    8647            0 :         }
    8648              : 
    8649            0 :         global_observed
    8650            0 :     }
    8651              : 
    8652            0 :     pub(crate) async fn update_shards_preferred_azs(
    8653            0 :         &self,
    8654            0 :         req: ShardsPreferredAzsRequest,
    8655            0 :     ) -> Result<ShardsPreferredAzsResponse, ApiError> {
    8656            0 :         let preferred_azs = req.preferred_az_ids.into_iter().collect::<Vec<_>>();
    8657            0 :         let updated = self
    8658            0 :             .persistence
    8659            0 :             .set_tenant_shard_preferred_azs(preferred_azs)
    8660            0 :             .await
    8661            0 :             .map_err(|err| {
    8662            0 :                 ApiError::InternalServerError(anyhow::anyhow!(
    8663            0 :                     "Failed to persist preferred AZs: {err}"
    8664            0 :                 ))
    8665            0 :             })?;
    8666              : 
    8667            0 :         let mut updated_in_mem_and_db = Vec::default();
    8668            0 : 
    8669            0 :         let mut locked = self.inner.write().unwrap();
    8670            0 :         let state = locked.deref_mut();
    8671            0 :         for (tid, az_id) in updated {
    8672            0 :             let shard = state.tenants.get_mut(&tid);
    8673            0 :             if let Some(shard) = shard {
    8674            0 :                 shard.set_preferred_az(&mut state.scheduler, az_id);
    8675            0 :                 updated_in_mem_and_db.push(tid);
    8676            0 :             }
    8677              :         }
    8678              : 
    8679            0 :         Ok(ShardsPreferredAzsResponse {
    8680            0 :             updated: updated_in_mem_and_db,
    8681            0 :         })
    8682            0 :     }
    8683              : }
    8684              : 
    8685              : #[cfg(test)]
    8686              : mod tests {
    8687              :     use super::*;
    8688              : 
    8689              :     /// Tests Service::compute_split_shards. For readability, this specifies sizes in GBs rather
    8690              :     /// than bytes. Note that max_logical_size is the total logical size of the largest timeline
    8691              :     /// summed across all shards.
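                      :     ///
                      :     /// Rough worked example, inferred from the cases below rather than from the implementation:
                      :     /// with split_threshold = 64 and max_logical_size = 500 on a two-shard tenant, the size-based
                      :     /// target is the smallest power of two N with 500 / N <= 64, i.e. N = 8; at 129 the target is
                      :     /// 4, while 128 is a noop because 128 / 2 <= 64 already holds for the existing shards.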
    8692              :     #[test]
    8693            1 :     fn compute_split_shards() {
    8694            1 :         // Size-based split: two shards have a 500 GB timeline, which needs to split into 8 shards
    8695            1 :         // that are <= 64 GB each.
    8696            1 :         assert_eq!(
    8697            1 :             Service::compute_split_shards(ShardSplitInputs {
    8698            1 :                 shard_count: ShardCount(2),
    8699            1 :                 max_logical_size: 500,
    8700            1 :                 split_threshold: 64,
    8701            1 :                 max_split_shards: 16,
    8702            1 :                 initial_split_threshold: 0,
    8703            1 :                 initial_split_shards: 0,
    8704            1 :             }),
    8705            1 :             Some(ShardCount(8))
    8706            1 :         );
    8707              : 
    8708              :         // Size-based split: noop at or below threshold, fires above.
    8709            1 :         assert_eq!(
    8710            1 :             Service::compute_split_shards(ShardSplitInputs {
    8711            1 :                 shard_count: ShardCount(2),
    8712            1 :                 max_logical_size: 127,
    8713            1 :                 split_threshold: 64,
    8714            1 :                 max_split_shards: 16,
    8715            1 :                 initial_split_threshold: 0,
    8716            1 :                 initial_split_shards: 0,
    8717            1 :             }),
    8718            1 :             None,
    8719            1 :         );
    8720            1 :         assert_eq!(
    8721            1 :             Service::compute_split_shards(ShardSplitInputs {
    8722            1 :                 shard_count: ShardCount(2),
    8723            1 :                 max_logical_size: 128,
    8724            1 :                 split_threshold: 64,
    8725            1 :                 max_split_shards: 16,
    8726            1 :                 initial_split_threshold: 0,
    8727            1 :                 initial_split_shards: 0,
    8728            1 :             }),
    8729            1 :             None,
    8730            1 :         );
    8731            1 :         assert_eq!(
    8732            1 :             Service::compute_split_shards(ShardSplitInputs {
    8733            1 :                 shard_count: ShardCount(2),
    8734            1 :                 max_logical_size: 129,
    8735            1 :                 split_threshold: 64,
    8736            1 :                 max_split_shards: 16,
    8737            1 :                 initial_split_threshold: 0,
    8738            1 :                 initial_split_shards: 0,
    8739            1 :             }),
    8740            1 :             Some(ShardCount(4)),
    8741            1 :         );
    8742              : 
    8743              :         // Size-based split: clamped to max_split_shards.
    8744            1 :         assert_eq!(
    8745            1 :             Service::compute_split_shards(ShardSplitInputs {
    8746            1 :                 shard_count: ShardCount(2),
    8747            1 :                 max_logical_size: 10000,
    8748            1 :                 split_threshold: 64,
    8749            1 :                 max_split_shards: 16,
    8750            1 :                 initial_split_threshold: 0,
    8751            1 :                 initial_split_shards: 0,
    8752            1 :             }),
    8753            1 :             Some(ShardCount(16))
    8754            1 :         );
    8755              : 
    8756              :         // Size-based split: tenant already at or beyond max_split_shards is not split.
    8757            1 :         assert_eq!(
    8758            1 :             Service::compute_split_shards(ShardSplitInputs {
    8759            1 :                 shard_count: ShardCount(16),
    8760            1 :                 max_logical_size: 10000,
    8761            1 :                 split_threshold: 64,
    8762            1 :                 max_split_shards: 16,
    8763            1 :                 initial_split_threshold: 0,
    8764            1 :                 initial_split_shards: 0,
    8765            1 :             }),
    8766            1 :             None
    8767            1 :         );
    8768              : 
    8769            1 :         assert_eq!(
    8770            1 :             Service::compute_split_shards(ShardSplitInputs {
    8771            1 :                 shard_count: ShardCount(32),
    8772            1 :                 max_logical_size: 10000,
    8773            1 :                 split_threshold: 64,
    8774            1 :                 max_split_shards: 16,
    8775            1 :                 initial_split_threshold: 0,
    8776            1 :                 initial_split_shards: 0,
    8777            1 :             }),
    8778            1 :             None
    8779            1 :         );
    8780              : 
    8781              :         // Size-based split: a non-power-of-2 shard count is normalized to power-of-2 if it
    8782              :         // exceeds split_threshold (i.e. a 3-shard tenant splits into 8, not 6).
    8783            1 :         assert_eq!(
    8784            1 :             Service::compute_split_shards(ShardSplitInputs {
    8785            1 :                 shard_count: ShardCount(3),
    8786            1 :                 max_logical_size: 320,
    8787            1 :                 split_threshold: 64,
    8788            1 :                 max_split_shards: 16,
    8789            1 :                 initial_split_threshold: 0,
    8790            1 :                 initial_split_shards: 0,
    8791            1 :             }),
    8792            1 :             Some(ShardCount(8))
    8793            1 :         );
    8794              : 
    8795              :         // Size-based split: a non-power-of-2 shard count is not normalized to a power of 2 while
    8796              :         // the existing shards are at or below split_threshold; it splits into 4 once the per-shard size exceeds it.
    8797            1 :         assert_eq!(
    8798            1 :             Service::compute_split_shards(ShardSplitInputs {
    8799            1 :                 shard_count: ShardCount(3),
    8800            1 :                 max_logical_size: 191,
    8801            1 :                 split_threshold: 64,
    8802            1 :                 max_split_shards: 16,
    8803            1 :                 initial_split_threshold: 0,
    8804            1 :                 initial_split_shards: 0,
    8805            1 :             }),
    8806            1 :             None
    8807            1 :         );
    8808            1 :         assert_eq!(
    8809            1 :             Service::compute_split_shards(ShardSplitInputs {
    8810            1 :                 shard_count: ShardCount(3),
    8811            1 :                 max_logical_size: 192,
    8812            1 :                 split_threshold: 64,
    8813            1 :                 max_split_shards: 16,
    8814            1 :                 initial_split_threshold: 0,
    8815            1 :                 initial_split_shards: 0,
    8816            1 :             }),
    8817            1 :             None
    8818            1 :         );
    8819            1 :         assert_eq!(
    8820            1 :             Service::compute_split_shards(ShardSplitInputs {
    8821            1 :                 shard_count: ShardCount(3),
    8822            1 :                 max_logical_size: 193,
    8823            1 :                 split_threshold: 64,
    8824            1 :                 max_split_shards: 16,
    8825            1 :                 initial_split_threshold: 0,
    8826            1 :                 initial_split_shards: 0,
    8827            1 :             }),
    8828            1 :             Some(ShardCount(4))
    8829            1 :         );
    8830              : 
    8831              :         // Initial split: tenant has a 10 GB timeline, split into 4 shards.
    8832            1 :         assert_eq!(
    8833            1 :             Service::compute_split_shards(ShardSplitInputs {
    8834            1 :                 shard_count: ShardCount(1),
    8835            1 :                 max_logical_size: 10,
    8836            1 :                 split_threshold: 0,
    8837            1 :                 max_split_shards: 16,
    8838            1 :                 initial_split_threshold: 8,
    8839            1 :                 initial_split_shards: 4,
    8840            1 :             }),
    8841            1 :             Some(ShardCount(4))
    8842            1 :         );
    8843              : 
    8844              :         // Initial split: 0 ShardCount is equivalent to 1.
    8845            1 :         assert_eq!(
    8846            1 :             Service::compute_split_shards(ShardSplitInputs {
    8847            1 :                 shard_count: ShardCount(0),
    8848            1 :                 max_logical_size: 10,
    8849            1 :                 split_threshold: 0,
    8850            1 :                 max_split_shards: 16,
    8851            1 :                 initial_split_threshold: 8,
    8852            1 :                 initial_split_shards: 4,
    8853            1 :             }),
    8854            1 :             Some(ShardCount(4))
    8855            1 :         );
    8856              : 
    8857              :         // Initial split: at or below threshold is noop.
    8858            1 :         assert_eq!(
    8859            1 :             Service::compute_split_shards(ShardSplitInputs {
    8860            1 :                 shard_count: ShardCount(1),
    8861            1 :                 max_logical_size: 7,
    8862            1 :                 split_threshold: 0,
    8863            1 :                 max_split_shards: 16,
    8864            1 :                 initial_split_threshold: 8,
    8865            1 :                 initial_split_shards: 4,
    8866            1 :             }),
    8867            1 :             None,
    8868            1 :         );
    8869            1 :         assert_eq!(
    8870            1 :             Service::compute_split_shards(ShardSplitInputs {
    8871            1 :                 shard_count: ShardCount(1),
    8872            1 :                 max_logical_size: 8,
    8873            1 :                 split_threshold: 0,
    8874            1 :                 max_split_shards: 16,
    8875            1 :                 initial_split_threshold: 8,
    8876            1 :                 initial_split_shards: 4,
    8877            1 :             }),
    8878            1 :             None,
    8879            1 :         );
    8880            1 :         assert_eq!(
    8881            1 :             Service::compute_split_shards(ShardSplitInputs {
    8882            1 :                 shard_count: ShardCount(1),
    8883            1 :                 max_logical_size: 9,
    8884            1 :                 split_threshold: 0,
    8885            1 :                 max_split_shards: 16,
    8886            1 :                 initial_split_threshold: 8,
    8887            1 :                 initial_split_shards: 4,
    8888            1 :             }),
    8889            1 :             Some(ShardCount(4))
    8890            1 :         );
    8891              : 
    8892              :         // Initial split: an already-sharded tenant is not affected, even if it is above the threshold
    8893              :         // and below initial_split_shards.
    8894            1 :         assert_eq!(
    8895            1 :             Service::compute_split_shards(ShardSplitInputs {
    8896            1 :                 shard_count: ShardCount(2),
    8897            1 :                 max_logical_size: 20,
    8898            1 :                 split_threshold: 0,
    8899            1 :                 max_split_shards: 16,
    8900            1 :                 initial_split_threshold: 8,
    8901            1 :                 initial_split_shards: 4,
    8902            1 :             }),
    8903            1 :             None,
    8904            1 :         );
    8905              : 
    8906              :         // Initial split: clamped to max_split_shards.
    8907            1 :         assert_eq!(
    8908            1 :             Service::compute_split_shards(ShardSplitInputs {
    8909            1 :                 shard_count: ShardCount(1),
    8910            1 :                 max_logical_size: 10,
    8911            1 :                 split_threshold: 0,
    8912            1 :                 max_split_shards: 3,
    8913            1 :                 initial_split_threshold: 8,
    8914            1 :                 initial_split_shards: 4,
    8915            1 :             }),
    8916            1 :             Some(ShardCount(3)),
    8917            1 :         );
    8918              : 
    8919              :         // Initial+size split: tenant eligible for both will use the larger shard count.
    8920            1 :         assert_eq!(
    8921            1 :             Service::compute_split_shards(ShardSplitInputs {
    8922            1 :                 shard_count: ShardCount(1),
    8923            1 :                 max_logical_size: 10,
    8924            1 :                 split_threshold: 64,
    8925            1 :                 max_split_shards: 16,
    8926            1 :                 initial_split_threshold: 8,
    8927            1 :                 initial_split_shards: 4,
    8928            1 :             }),
    8929            1 :             Some(ShardCount(4)),
    8930            1 :         );
    8931            1 :         assert_eq!(
    8932            1 :             Service::compute_split_shards(ShardSplitInputs {
    8933            1 :                 shard_count: ShardCount(1),
    8934            1 :                 max_logical_size: 500,
    8935            1 :                 split_threshold: 64,
    8936            1 :                 max_split_shards: 16,
    8937            1 :                 initial_split_threshold: 8,
    8938            1 :                 initial_split_shards: 4,
    8939            1 :             }),
    8940            1 :             Some(ShardCount(8)),
    8941            1 :         );
    8942              : 
    8943              :         // Initial+size split: sharded tenant is only eligible for size-based split.
    8944            1 :         assert_eq!(
    8945            1 :             Service::compute_split_shards(ShardSplitInputs {
    8946            1 :                 shard_count: ShardCount(2),
    8947            1 :                 max_logical_size: 200,
    8948            1 :                 split_threshold: 64,
    8949            1 :                 max_split_shards: 16,
    8950            1 :                 initial_split_threshold: 8,
    8951            1 :                 initial_split_shards: 8,
    8952            1 :             }),
    8953            1 :             Some(ShardCount(4)),
    8954            1 :         );
    8955              : 
    8956              :         // Initial+size split: uses the larger shard count even with initial_split_threshold above
    8957              :         // split_threshold.
    8958            1 :         assert_eq!(
    8959            1 :             Service::compute_split_shards(ShardSplitInputs {
    8960            1 :                 shard_count: ShardCount(1),
    8961            1 :                 max_logical_size: 10,
    8962            1 :                 split_threshold: 4,
    8963            1 :                 max_split_shards: 16,
    8964            1 :                 initial_split_threshold: 8,
    8965            1 :                 initial_split_shards: 8,
    8966            1 :             }),
    8967            1 :             Some(ShardCount(8)),
    8968            1 :         );
    8969              : 
    8970              :         // Test backwards compatibility with production settings when initial/size-based splits were
    8971              :         // rolled out: a single split into 8 shards at 64 GB. Any already sharded tenants with <8
    8972              :         // shards will split according to split_threshold.
    8973            1 :         assert_eq!(
    8974            1 :             Service::compute_split_shards(ShardSplitInputs {
    8975            1 :                 shard_count: ShardCount(1),
    8976            1 :                 max_logical_size: 65,
    8977            1 :                 split_threshold: 64,
    8978            1 :                 max_split_shards: 8,
    8979            1 :                 initial_split_threshold: 64,
    8980            1 :                 initial_split_shards: 8,
    8981            1 :             }),
    8982            1 :             Some(ShardCount(8)),
    8983            1 :         );
    8984              : 
    8985            1 :         assert_eq!(
    8986            1 :             Service::compute_split_shards(ShardSplitInputs {
    8987            1 :                 shard_count: ShardCount(1),
    8988            1 :                 max_logical_size: 64,
    8989            1 :                 split_threshold: 64,
    8990            1 :                 max_split_shards: 8,
    8991            1 :                 initial_split_threshold: 64,
    8992            1 :                 initial_split_shards: 8,
    8993            1 :             }),
    8994            1 :             None,
    8995            1 :         );
    8996              : 
    8997            1 :         assert_eq!(
    8998            1 :             Service::compute_split_shards(ShardSplitInputs {
    8999            1 :                 shard_count: ShardCount(2),
    9000            1 :                 max_logical_size: 129,
    9001            1 :                 split_threshold: 64,
    9002            1 :                 max_split_shards: 8,
    9003            1 :                 initial_split_threshold: 64,
    9004            1 :                 initial_split_shards: 8,
    9005            1 :             }),
    9006            1 :             Some(ShardCount(4)),
    9007            1 :         );
    9008            1 :     }
    9009              : }
        

Generated by: LCOV version 2.1-beta