LCOV - code coverage report
Current view: top level - storage_controller/src - service.rs (source / functions)
Test:      1e20c4f2b28aa592527961bb32170ebbd2c9172f.info
Test Date: 2025-07-16 12:29:03
Coverage:  Lines: 4.5 % (258 of 5781 hit)    Functions: 0.4 % (2 of 524 hit)

            Line data    Source code
       1              : pub mod chaos_injector;
       2              : pub mod feature_flag;
       3              : pub(crate) mod safekeeper_reconciler;
       4              : mod safekeeper_service;
       5              : mod tenant_shard_iterator;
       6              : 
       7              : use std::borrow::Cow;
       8              : use std::cmp::Ordering;
       9              : use std::collections::{BTreeMap, HashMap, HashSet};
      10              : use std::error::Error;
      11              : use std::num::NonZeroU32;
      12              : use std::ops::{Deref, DerefMut};
      13              : use std::path::PathBuf;
      14              : use std::str::FromStr;
      15              : use std::sync::{Arc, OnceLock};
      16              : use std::time::{Duration, Instant, SystemTime};
      17              : 
      18              : use anyhow::Context;
      19              : use control_plane::storage_controller::{
      20              :     AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
      21              : };
      22              : use diesel::result::DatabaseErrorKind;
      23              : use futures::StreamExt;
      24              : use futures::stream::FuturesUnordered;
      25              : use http_utils::error::ApiError;
      26              : use hyper::Uri;
      27              : use itertools::Itertools;
      28              : use pageserver_api::config::PostHogConfig;
      29              : use pageserver_api::controller_api::{
      30              :     AvailabilityZone, MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability,
      31              :     NodeRegisterRequest, NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy,
      32              :     ShardSchedulingPolicy, ShardsPreferredAzsRequest, ShardsPreferredAzsResponse,
      33              :     SkSchedulingPolicy, TenantCreateRequest, TenantCreateResponse, TenantCreateResponseShard,
      34              :     TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
      35              :     TenantShardMigrateRequest, TenantShardMigrateResponse, TenantTimelineDescribeResponse,
      36              : };
      37              : use pageserver_api::models::{
      38              :     self, DetachBehavior, LocationConfig, LocationConfigListResponse, LocationConfigMode, LsnLease,
      39              :     PageserverUtilization, SecondaryProgress, ShardImportStatus, ShardParameters, TenantConfig,
      40              :     TenantConfigPatchRequest, TenantConfigRequest, TenantLocationConfigRequest,
      41              :     TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
      42              :     TenantShardSplitResponse, TenantSorting, TenantTimeTravelRequest,
      43              :     TimelineArchivalConfigRequest, TimelineCreateRequest, TimelineCreateResponseStorcon,
      44              :     TimelineInfo, TopTenantShardItem, TopTenantShardsRequest,
      45              : };
      46              : use pageserver_api::shard::{
      47              :     DEFAULT_STRIPE_SIZE, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      48              : };
      49              : use pageserver_api::upcall_api::{
      50              :     PutTimelineImportStatusRequest, ReAttachRequest, ReAttachResponse, ReAttachResponseTenant,
      51              :     TimelineImportStatusRequest, ValidateRequest, ValidateResponse, ValidateResponseTenant,
      52              : };
      53              : use pageserver_client::{BlockUnblock, mgmt_api};
      54              : use reqwest::{Certificate, StatusCode};
      55              : use safekeeper_api::models::SafekeeperUtilization;
      56              : use safekeeper_reconciler::SafekeeperReconcilers;
      57              : use tenant_shard_iterator::{TenantShardExclusiveIterator, create_shared_shard_iterator};
      58              : use tokio::sync::TryAcquireError;
      59              : use tokio::sync::mpsc::error::TrySendError;
      60              : use tokio_util::sync::CancellationToken;
      61              : use tracing::{Instrument, debug, error, info, info_span, instrument, warn};
      62              : use utils::completion::Barrier;
      63              : use utils::env;
      64              : use utils::generation::Generation;
      65              : use utils::id::{NodeId, TenantId, TimelineId};
      66              : use utils::lsn::Lsn;
      67              : use utils::shard::ShardIndex;
      68              : use utils::sync::gate::{Gate, GateGuard};
      69              : use utils::{failpoint_support, pausable_failpoint};
      70              : 
      71              : use crate::background_node_operations::{
      72              :     Delete, Drain, Fill, MAX_RECONCILES_PER_OPERATION, Operation, OperationError, OperationHandler,
      73              : };
      74              : use crate::compute_hook::{self, ComputeHook, NotifyError};
      75              : use crate::heartbeater::{Heartbeater, PageserverState, SafekeeperState};
      76              : use crate::id_lock_map::{
      77              :     IdLockMap, TracingExclusiveGuard, trace_exclusive_lock, trace_shared_lock,
      78              : };
      79              : use crate::leadership::Leadership;
      80              : use crate::metrics;
      81              : use crate::node::{AvailabilityTransition, Node};
      82              : use crate::operation_utils::{self, TenantShardDrain};
      83              : use crate::pageserver_client::PageserverClient;
      84              : use crate::peer_client::GlobalObservedState;
      85              : use crate::persistence::split_state::SplitState;
      86              : use crate::persistence::{
      87              :     AbortShardSplitStatus, ControllerPersistence, DatabaseError, DatabaseResult,
      88              :     MetadataHealthPersistence, Persistence, ShardGenerationState, TenantFilter,
      89              :     TenantShardPersistence,
      90              : };
      91              : use crate::reconciler::{
      92              :     ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder, ReconcilerPriority,
      93              :     attached_location_conf,
      94              : };
      95              : use crate::safekeeper::Safekeeper;
      96              : use crate::scheduler::{
      97              :     AttachedShardTag, MaySchedule, ScheduleContext, ScheduleError, ScheduleMode, Scheduler,
      98              : };
      99              : use crate::tenant_shard::{
     100              :     IntentState, MigrateAttachment, ObservedState, ObservedStateDelta, ObservedStateLocation,
     101              :     ReconcileNeeded, ReconcileResult, ReconcileWaitError, ReconcilerStatus, ReconcilerWaiter,
     102              :     ScheduleOptimization, ScheduleOptimizationAction, TenantShard,
     103              : };
     104              : use crate::timeline_import::{
     105              :     FinalizingImport, ImportResult, ShardImportStatuses, TimelineImport,
     106              :     TimelineImportFinalizeError, TimelineImportState, UpcallClient,
     107              : };
     108              : 
     109              : const WAITER_OPERATION_POLL_TIMEOUT: Duration = Duration::from_millis(500);
     110              : 
     111              : // For operations that should be quick, like attaching a new tenant
     112              : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
     113              : 
     114              : // For operations that might be slow, like migrating a tenant with
     115              : // some data in it.
     116              : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     117              : 
     118              : // If we receive a call using Secondary mode initially, it will omit generation.  We will initialize
     119              : // tenant shards into this generation, and as long as it remains in this generation, we will accept
     120              : // input generation from future requests as authoritative.
     121              : const INITIAL_GENERATION: Generation = Generation::new(0);
     122              : 
     123              : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
     124              : /// up on unresponsive pageservers and proceed.
     125              : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     126              : 
     127              : /// How long a node may be unresponsive to heartbeats before we declare it offline.
     128              : /// This must be long enough to cover node restarts as well as normal operations.
     129              : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
     130              : 
     131              : /// How long a node may be unresponsive to heartbeats during start up before we declare it
     132              : /// offline.
     133              : ///
     134              : /// This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
     135              : /// handling of the re-attach response may take a long time and blocks heartbeats from
     136              : /// being handled on the pageserver side.
     137              : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
     138              : 
     139              : /// How often to send heartbeats to registered nodes?
     140              : pub const HEARTBEAT_INTERVAL_DEFAULT: Duration = Duration::from_secs(5);
     141              : 
     142              : /// How long is too long for a reconciliation?
     143              : pub const LONG_RECONCILE_THRESHOLD_DEFAULT: Duration = Duration::from_secs(120);
     144              : 
     145              : #[derive(Clone, strum_macros::Display)]
     146              : enum TenantOperations {
     147              :     Create,
     148              :     LocationConfig,
     149              :     ConfigSet,
     150              :     ConfigPatch,
     151              :     TimeTravelRemoteStorage,
     152              :     Delete,
     153              :     UpdatePolicy,
     154              :     ShardSplit,
     155              :     SecondaryDownload,
     156              :     TimelineCreate,
     157              :     TimelineDelete,
     158              :     AttachHook,
     159              :     TimelineArchivalConfig,
     160              :     TimelineDetachAncestor,
     161              :     TimelineGcBlockUnblock,
     162              :     DropDetached,
     163              :     DownloadHeatmapLayers,
     164              :     TimelineLsnLease,
     165              :     TimelineSafekeeperMigrate,
     166              : }
     167              : 
     168              : #[derive(Clone, strum_macros::Display)]
     169              : enum NodeOperations {
     170              :     Register,
     171              :     Configure,
     172              :     Delete,
     173              :     DeleteTombstone,
     174              : }
     175              : 
     176              : /// The leadership status for the storage controller process.
     177              : /// Allowed transitions are:
     178              : /// 1. Leader -> SteppedDown
     179              : /// 2. Candidate -> Leader
     180              : #[derive(
     181              :     Eq,
     182              :     PartialEq,
     183              :     Copy,
     184              :     Clone,
     185              :     strum_macros::Display,
     186              :     strum_macros::EnumIter,
     187              :     measured::FixedCardinalityLabel,
     188              : )]
     189              : #[strum(serialize_all = "snake_case")]
     190              : pub(crate) enum LeadershipStatus {
     191              :     /// This is the steady state where the storage controller can produce
     192              :     /// side effects in the cluster.
     193              :     Leader,
     194              :     /// We've been notified to step down by another candidate. No reconciliations
     195              :     /// take place in this state.
     196              :     SteppedDown,
     197              :     /// Initial state for a new storage controller instance. Will attempt to assume leadership.
     198              :     #[allow(unused)]
     199              :     Candidate,
     200              : }
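
The allowed transitions documented above (Candidate -> Leader when assuming leadership, Leader -> SteppedDown when another candidate takes over) could be captured in a small validity check. The sketch below is illustrative only and is not part of the measured source; is_valid_transition is a hypothetical helper:

// Illustrative sketch (hypothetical helper, not in the measured source):
// mirrors the transitions documented on LeadershipStatus.
fn is_valid_transition(from: LeadershipStatus, to: LeadershipStatus) -> bool {
    use LeadershipStatus::*;
    matches!(
        (from, to),
        // A freshly started instance assumes leadership.
        (Candidate, Leader)
        // A leader steps down when notified by another candidate.
        | (Leader, SteppedDown)
    )
}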
     201              : 
     202              : enum ShardGenerationValidity {
     203              :     Valid,
     204              :     Mismatched {
     205              :         claimed: Generation,
     206              :         actual: Option<Generation>,
     207              :     },
     208              : }
     209              : 
     210              : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
     211              : pub const PRIORITY_RECONCILER_CONCURRENCY_DEFAULT: usize = 256;
     212              : pub const SAFEKEEPER_RECONCILER_CONCURRENCY_DEFAULT: usize = 32;
     213              : 
     214              : // Number of consecutive reconciliation errors occurring on one shard,
     215              : // after which the shard is ignored when considering whether to run optimizations.
     216              : const MAX_CONSECUTIVE_RECONCILIATION_ERRORS: usize = 5;
     217              : 
     218              : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
     219              : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
     220              : // than they're being pushed onto the queue.
     221              : const MAX_DELAYED_RECONCILES: usize = 10000;
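
As a minimal sketch of the bounded-queue behaviour described above (illustrative only, not part of the measured source; the payload type and function name are made up for the example), a sender can use try_send so that a full channel drops the enqueue attempt instead of growing memory without bound:

// Illustrative sketch only: demonstrates why a finite-size channel bounds memory.
async fn delayed_reconcile_queue_demo() {
    use tokio::sync::mpsc::error::TrySendError;

    // Bounded channel, sized like the delayed-reconcile queue above.
    let (tx, mut rx) = tokio::sync::mpsc::channel::<u64>(MAX_DELAYED_RECONCILES);

    match tx.try_send(42) {
        Ok(()) => {}
        Err(TrySendError::Full(_shard)) => {
            // Queue saturated: drop the request; the shard can be re-enqueued later.
        }
        Err(TrySendError::Closed(_shard)) => {
            // Receiver has shut down.
        }
    }

    // The receiving side drains entries as reconcile units become available.
    let _next: Option<u64> = rx.recv().await;
}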
     222              : 
     223              : // Top level state available to all HTTP handlers
     224              : struct ServiceState {
     225              :     leadership_status: LeadershipStatus,
     226              : 
     227              :     tenants: BTreeMap<TenantShardId, TenantShard>,
     228              : 
     229              :     nodes: Arc<HashMap<NodeId, Node>>,
     230              : 
     231              :     safekeepers: Arc<HashMap<NodeId, Safekeeper>>,
     232              : 
     233              :     safekeeper_reconcilers: SafekeeperReconcilers,
     234              : 
     235              :     scheduler: Scheduler,
     236              : 
     237              :     /// Ongoing background operation on the cluster if any is running.
     238              :     /// Note that only one such operation may run at any given time,
     239              :     /// hence the type choice.
     240              :     ongoing_operation: Option<OperationHandler>,
     241              : 
     242              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     243              :     delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     244              : 
     245              :     /// Tracks ongoing timeline import finalization tasks
     246              :     imports_finalizing: BTreeMap<(TenantId, TimelineId), FinalizingImport>,
     247              : }
     248              : 
     249              : /// Transform an error from a pageserver into an error to return to callers of a storage
     250              : /// controller API.
     251            0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
     252            0 :     match e {
     253            0 :         mgmt_api::Error::SendRequest(e) => {
     254              :             // Presume errors sending requests are connectivity/availability issues
     255            0 :             ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
     256              :         }
     257            0 :         mgmt_api::Error::ReceiveErrorBody(str) => {
     258              :             // Presume errors receiving body are connectivity/availability issues
     259            0 :             ApiError::ResourceUnavailable(
     260            0 :                 format!("{node} error receiving error body: {str}").into(),
     261            0 :             )
     262              :         }
     263            0 :         mgmt_api::Error::ReceiveBody(err) if err.is_decode() => {
     264              :             // Return 500 for decoding errors.
     265            0 :             ApiError::InternalServerError(anyhow::Error::from(err).context("error decoding body"))
     266              :         }
     267            0 :         mgmt_api::Error::ReceiveBody(err) => {
     268              :             // Presume errors receiving body are connectivity/availability issues except for decoding errors
     269            0 :             let src_str = err.source().map(|e| e.to_string()).unwrap_or_default();
     270            0 :             ApiError::ResourceUnavailable(
     271            0 :                 format!("{node} error receiving error body: {err} {src_str}").into(),
     272            0 :             )
     273              :         }
     274            0 :         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
     275            0 :             ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
     276              :         }
     277            0 :         mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
     278            0 :             ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
     279              :         }
     280            0 :         mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
     281            0 :         | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
     282              :             // Auth errors talking to a pageserver are not auth errors for the caller: they are
     283              :             // internal server errors, showing that something is wrong with the pageserver or
     284              :             // storage controller's auth configuration.
     285            0 :             ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
     286              :         }
     287            0 :         mgmt_api::Error::ApiError(status @ StatusCode::TOO_MANY_REQUESTS, msg) => {
     288              :             // Pass through 429 errors: if pageserver is asking us to wait + retry, we in
     289              :             // turn ask our clients to wait + retry
     290            0 :             ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
     291              :         }
     292            0 :         mgmt_api::Error::ApiError(status, msg) => {
     293              :             // Presume general case of pageserver API errors is that we tried to do something
     294              :             // that can't be done right now.
     295            0 :             ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
     296              :         }
     297            0 :         mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
     298            0 :         mgmt_api::Error::Timeout(e) => ApiError::Timeout(e.into()),
     299              :     }
     300            0 : }
     301              : 
     302              : impl ServiceState {
     303            0 :     fn new(
     304            0 :         nodes: HashMap<NodeId, Node>,
     305            0 :         safekeepers: HashMap<NodeId, Safekeeper>,
     306            0 :         tenants: BTreeMap<TenantShardId, TenantShard>,
     307            0 :         scheduler: Scheduler,
     308            0 :         delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     309            0 :         initial_leadership_status: LeadershipStatus,
     310            0 :         reconcilers_cancel: CancellationToken,
     311            0 :     ) -> Self {
     312            0 :         metrics::update_leadership_status(initial_leadership_status);
     313              : 
     314            0 :         Self {
     315            0 :             leadership_status: initial_leadership_status,
     316            0 :             tenants,
     317            0 :             nodes: Arc::new(nodes),
     318            0 :             safekeepers: Arc::new(safekeepers),
     319            0 :             safekeeper_reconcilers: SafekeeperReconcilers::new(reconcilers_cancel),
     320            0 :             scheduler,
     321            0 :             ongoing_operation: None,
     322            0 :             delayed_reconcile_rx,
     323            0 :             imports_finalizing: Default::default(),
     324            0 :         }
     325            0 :     }
     326              : 
     327            0 :     fn parts_mut(
     328            0 :         &mut self,
     329            0 :     ) -> (
     330            0 :         &mut Arc<HashMap<NodeId, Node>>,
     331            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     332            0 :         &mut Scheduler,
     333            0 :     ) {
     334            0 :         (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
     335            0 :     }
     336              : 
     337              :     #[allow(clippy::type_complexity)]
     338            0 :     fn parts_mut_sk(
     339            0 :         &mut self,
     340            0 :     ) -> (
     341            0 :         &mut Arc<HashMap<NodeId, Node>>,
     342            0 :         &mut Arc<HashMap<NodeId, Safekeeper>>,
     343            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     344            0 :         &mut Scheduler,
     345            0 :     ) {
     346            0 :         (
     347            0 :             &mut self.nodes,
     348            0 :             &mut self.safekeepers,
     349            0 :             &mut self.tenants,
     350            0 :             &mut self.scheduler,
     351            0 :         )
     352            0 :     }
     353              : 
     354            0 :     fn get_leadership_status(&self) -> LeadershipStatus {
     355            0 :         self.leadership_status
     356            0 :     }
     357              : 
     358            0 :     fn step_down(&mut self) {
     359            0 :         self.leadership_status = LeadershipStatus::SteppedDown;
     360            0 :         metrics::update_leadership_status(self.leadership_status);
     361            0 :     }
     362              : 
     363            0 :     fn become_leader(&mut self) {
     364            0 :         self.leadership_status = LeadershipStatus::Leader;
     365            0 :         metrics::update_leadership_status(self.leadership_status);
     366            0 :     }
     367              : }
     368              : 
     369              : #[derive(Clone)]
     370              : pub struct Config {
     371              :     // All pageservers managed by one instance of this service must have
     372              :     // the same public key.  This JWT token will be used to authenticate
     373              :     // this service to the pageservers it manages.
     374              :     pub pageserver_jwt_token: Option<String>,
     375              : 
     376              :     // All safekeepers managed by one instance of this service must have
     377              :     // the same public key. This JWT token will be used to authenticate
     378              :     // this service to the safekeepers it manages.
     379              :     pub safekeeper_jwt_token: Option<String>,
     380              : 
     381              :     // This JWT token will be used to authenticate this service to the control plane.
     382              :     pub control_plane_jwt_token: Option<String>,
     383              : 
     384              :     // This JWT token will be used to authenticate with other storage controller instances
     385              :     pub peer_jwt_token: Option<String>,
     386              : 
     387              :     /// Prefix for storage API endpoints of the control plane. We use this prefix to compute
     388              :     /// URLs that we use to send pageserver and safekeeper attachment locations.
     389              :     /// If this is None, the compute hook will assume it is running in a test environment
     390              :     /// and try to invoke neon_local instead.
     391              :     pub control_plane_url: Option<String>,
     392              : 
     393              :     /// Grace period within which a pageserver does not respond to heartbeats, but is still
     394              :     /// considered active. Once the grace period elapses, the next heartbeat failure will
     395              :     /// mark the pageserver offline.
     396              :     pub max_offline_interval: Duration,
     397              : 
     398              :     /// Extended grace period within which pageserver may not respond to heartbeats.
     399              :     /// This extended grace period kicks in after the node has been drained for restart
     400              :     /// and/or upon handling the re-attach request from a node.
     401              :     pub max_warming_up_interval: Duration,
     402              : 
     403              :     /// How many normal-priority Reconcilers may be spawned concurrently
     404              :     pub reconciler_concurrency: usize,
     405              : 
     406              :     /// How many high-priority Reconcilers may be spawned concurrently
     407              :     pub priority_reconciler_concurrency: usize,
     408              : 
     409              :     /// How many safekeeper reconciles may happen concurrently (per safekeeper)
     410              :     pub safekeeper_reconciler_concurrency: usize,
     411              : 
     412              :     /// How many API requests per second to allow per tenant, across all
     413              :     /// tenant-scoped API endpoints. Further API requests queue until ready.
     414              :     pub tenant_rate_limit: NonZeroU32,
     415              : 
     416              :     /// If a tenant shard's largest timeline (max_logical_size) exceeds this value, all tenant
     417              :     /// shards will be split in 2 until they fall below split_threshold (up to max_split_shards).
     418              :     ///
     419              :     /// This will greedily split into as many shards as necessary to fall below split_threshold, as
     420              :     /// powers of 2: if a tenant shard is 7 times larger than split_threshold, it will split into 8
     421              :     /// immediately, rather than first 2 then 4 then 8.
     422              :     ///
     423              :     /// None or 0 disables auto-splitting.
     424              :     ///
     425              :     /// TODO: consider using total logical size of all timelines instead.
     426              :     pub split_threshold: Option<u64>,
     427              : 
     428              :     /// The maximum number of shards a tenant can be split into during autosplits. Does not affect
     429              :     /// manual split requests. 0 or 1 disables autosplits, as we already have 1 shard.
     430              :     pub max_split_shards: u8,
     431              : 
     432              :     /// The size at which an unsharded tenant should initially split. Ingestion is significantly
     433              :     /// faster with multiple shards, so eagerly splitting below split_threshold will typically speed
     434              :     /// up initial ingestion of large tenants.
     435              :     ///
     436              :     /// This should be below split_threshold, but it is not required. If both split_threshold and
     437              :     /// initial_split_threshold qualify, the largest number of target shards will be used.
     438              :     ///
     439              :     /// Does not apply to already sharded tenants: changing initial_split_threshold or
     440              :     /// initial_split_shards is not retroactive for already-sharded tenants.
     441              :     ///
     442              :     /// None or 0 disables initial splits.
     443              :     pub initial_split_threshold: Option<u64>,
     444              : 
     445              :     /// The number of shards to split into when reaching initial_split_threshold. Will
     446              :     /// be clamped to max_split_shards.
     447              :     ///
     448              :     /// 0 or 1 disables initial splits. Has no effect if initial_split_threshold is disabled.
     449              :     pub initial_split_shards: u8,
     450              : 
     451              :     // TODO: make this cfg(feature = "testing")
     452              :     pub neon_local_repo_dir: Option<PathBuf>,
     453              : 
     454              :     // Maximum acceptable download lag for the secondary location
     455              :     // while draining a node. If the secondary location is lagging
     456              :     // by more than the configured amount, then the secondary is not
     457              :     // upgraded to primary.
     458              :     pub max_secondary_lag_bytes: Option<u64>,
     459              : 
     460              :     pub heartbeat_interval: Duration,
     461              : 
     462              :     pub address_for_peers: Option<Uri>,
     463              : 
     464              :     pub start_as_candidate: bool,
     465              : 
     466              :     pub long_reconcile_threshold: Duration,
     467              : 
     468              :     pub use_https_pageserver_api: bool,
     469              : 
     470              :     pub use_https_safekeeper_api: bool,
     471              : 
     472              :     pub ssl_ca_certs: Vec<Certificate>,
     473              : 
     474              :     pub timelines_onto_safekeepers: bool,
     475              : 
     476              :     pub use_local_compute_notifications: bool,
     477              : 
     478              :     /// Number of safekeepers to choose for a timeline when creating it.
     479              :     /// Safekeepers will be chosen from different availability zones.
     480              :     pub timeline_safekeeper_count: usize,
     481              : 
     482              :     /// PostHog integration config
     483              :     pub posthog_config: Option<PostHogConfig>,
     484              : 
     485              :     /// When set, actively checks and initiates heatmap downloads/uploads.
     486              :     pub kick_secondary_downloads: bool,
     487              : 
     488              :     /// Timeout used for HTTP client of split requests. [`Duration::MAX`] if None.
     489              :     pub shard_split_request_timeout: Duration,
     490              : }
     491              : 
     492              : impl From<DatabaseError> for ApiError {
     493            0 :     fn from(err: DatabaseError) -> ApiError {
     494            0 :         match err {
     495            0 :             DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
     496              :             // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
     497              :             DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
     498            0 :                 ApiError::ShuttingDown
     499              :             }
     500            0 :             DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => {
     501            0 :                 ApiError::InternalServerError(anyhow::anyhow!(reason))
     502              :             }
     503            0 :             DatabaseError::Cas(reason) => ApiError::Conflict(reason),
     504              :         }
     505            0 :     }
     506              : }
     507              : 
     508              : enum InitialShardScheduleOutcome {
     509              :     Scheduled(TenantCreateResponseShard),
     510              :     NotScheduled,
     511              :     ShardScheduleError(ScheduleError),
     512              : }
     513              : 
     514              : pub struct Service {
     515              :     inner: Arc<std::sync::RwLock<ServiceState>>,
     516              :     config: Config,
     517              :     persistence: Arc<Persistence>,
     518              :     compute_hook: Arc<ComputeHook>,
     519              :     result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
     520              : 
     521              :     heartbeater_ps: Heartbeater<Node, PageserverState>,
     522              :     heartbeater_sk: Heartbeater<Safekeeper, SafekeeperState>,
     523              : 
     524              :     // Channel for background cleanup from failed operations that require cleanup, such as shard split
     525              :     abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
     526              : 
     527              :     // Locking on a tenant granularity (covers all shards in the tenant):
     528              :     // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
     529              :     // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
     530              :     tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
     531              : 
     532              :     // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
     533              :     // that transition it to/from Active.
     534              :     node_op_locks: IdLockMap<NodeId, NodeOperations>,
     535              : 
     536              :     // Limit how many Reconcilers we will spawn concurrently for normal-priority tasks such as background reconciliations
     537              :     // and reconciliation on startup.
     538              :     reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     539              : 
     540              :     // Limit how many Reconcilers we will spawn concurrently for high-priority tasks such as tenant/timeline CRUD, which
     541              :     // a human user might be waiting for.
     542              :     priority_reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     543              : 
     544              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     545              :     /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
     546              :     ///
     547              :     /// Note that this state logically lives inside ServiceState, but carrying Sender here makes the code simpler
     548              :     /// by avoiding needing a &mut ref to something inside the ServiceState.  This could be optimized to
     549              :     /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
     550              :     delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
     551              : 
     552              :     // Process shutdown will fire this token
     553              :     cancel: CancellationToken,
     554              : 
     555              :     // Child token of [`Service::cancel`] used by reconcilers
     556              :     reconcilers_cancel: CancellationToken,
     557              : 
     558              :     // Background tasks will hold this gate
     559              :     gate: Gate,
     560              : 
     561              :     // Reconcilers background tasks will hold this gate
     562              :     reconcilers_gate: Gate,
     563              : 
     564              :     /// This waits for initial reconciliation with pageservers to complete.  Until this barrier
     565              :     /// passes, it isn't safe to do any actions that mutate tenants.
     566              :     pub(crate) startup_complete: Barrier,
     567              : 
     568              :     /// HTTP client with proper CA certs.
     569              :     http_client: reqwest::Client,
     570              : 
     571              :     /// Handle for the step down background task if one was ever requested
     572              :     step_down_barrier: OnceLock<tokio::sync::watch::Receiver<Option<GlobalObservedState>>>,
     573              : }
     574              : 
     575              : impl From<ReconcileWaitError> for ApiError {
     576            0 :     fn from(value: ReconcileWaitError) -> Self {
     577            0 :         match value {
     578            0 :             ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
     579            0 :             e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
     580            0 :             e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
     581              :         }
     582            0 :     }
     583              : }
     584              : 
     585              : impl From<OperationError> for ApiError {
     586            0 :     fn from(value: OperationError) -> Self {
     587            0 :         match value {
     588            0 :             OperationError::NodeStateChanged(err)
     589            0 :             | OperationError::FinalizeError(err)
     590            0 :             | OperationError::ImpossibleConstraint(err) => {
     591            0 :                 ApiError::InternalServerError(anyhow::anyhow!(err))
     592              :             }
     593            0 :             OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
     594              :         }
     595            0 :     }
     596              : }
     597              : 
     598              : #[allow(clippy::large_enum_variant)]
     599              : enum TenantCreateOrUpdate {
     600              :     Create(TenantCreateRequest),
     601              :     Update(Vec<ShardUpdate>),
     602              : }
     603              : 
     604              : struct ShardSplitParams {
     605              :     old_shard_count: ShardCount,
     606              :     new_shard_count: ShardCount,
     607              :     new_stripe_size: Option<ShardStripeSize>,
     608              :     targets: Vec<ShardSplitTarget>,
     609              :     policy: PlacementPolicy,
     610              :     config: TenantConfig,
     611              :     shard_ident: ShardIdentity,
     612              :     preferred_az_id: Option<AvailabilityZone>,
     613              : }
     614              : 
     615              : // When preparing for a shard split, we may either choose to proceed with the split,
     616              : // or find that the work is already done and return NoOp.
     617              : enum ShardSplitAction {
     618              :     Split(Box<ShardSplitParams>),
     619              :     NoOp(TenantShardSplitResponse),
     620              : }
     621              : 
     622              : // A parent shard which will be split
     623              : struct ShardSplitTarget {
     624              :     parent_id: TenantShardId,
     625              :     node: Node,
     626              :     child_ids: Vec<TenantShardId>,
     627              : }
     628              : 
     629              : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
     630              : /// might not be available.  We therefore use a queue of abort operations processed in the background.
     631              : struct TenantShardSplitAbort {
     632              :     tenant_id: TenantId,
     633              :     /// The target values from the request that failed
     634              :     new_shard_count: ShardCount,
     635              :     new_stripe_size: Option<ShardStripeSize>,
     636              :     /// Until this abort op is complete, no other operations may be done on the tenant
     637              :     _tenant_lock: TracingExclusiveGuard<TenantOperations>,
     638              :     /// The reconciler gate for the duration of the split operation, and any included abort.
     639              :     _gate: GateGuard,
     640              : }
     641              : 
     642              : #[derive(thiserror::Error, Debug)]
     643              : enum TenantShardSplitAbortError {
     644              :     #[error(transparent)]
     645              :     Database(#[from] DatabaseError),
     646              :     #[error(transparent)]
     647              :     Remote(#[from] mgmt_api::Error),
     648              :     #[error("Unavailable")]
     649              :     Unavailable,
     650              : }
     651              : 
     652              : /// Inputs for computing a target shard count for a tenant.
     653              : struct ShardSplitInputs {
     654              :     /// Current shard count.
     655              :     shard_count: ShardCount,
     656              :     /// Total size of largest timeline summed across all shards.
     657              :     max_logical_size: u64,
     658              :     /// Size-based split threshold. Zero if size-based splits are disabled.
     659              :     split_threshold: u64,
     660              :     /// Upper bound on target shards. 0 or 1 disables splits.
     661              :     max_split_shards: u8,
     662              :     /// Initial split threshold. Zero if initial splits are disabled.
     663              :     initial_split_threshold: u64,
     664              :     /// Number of shards for initial splits. 0 or 1 disables initial splits.
     665              :     initial_split_shards: u8,
     666              : }
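
The split policy documented on Config (greedy power-of-two growth, the larger of the size-based and initial-split targets, clamped to max_split_shards) can be illustrated with a small sketch. This is not the controller's actual implementation; the function and its plain-number parameters are hypothetical and only mirror the fields of ShardSplitInputs:

// Illustrative sketch only: one plausible reading of the documented split policy.
fn sketch_target_shard_count(
    is_unsharded: bool,
    max_logical_size: u64,        // largest timeline size, summed across shards
    split_threshold: u64,         // 0 disables size-based splits
    initial_split_threshold: u64, // 0 disables initial splits
    initial_split_shards: u8,
    max_split_shards: u8,
) -> u8 {
    // Size-based target: the smallest power of two that brings the per-shard size
    // below the threshold, e.g. a tenant 7x over the threshold jumps straight to 8.
    let mut by_size: u8 = 1;
    if split_threshold > 0 {
        while u64::from(by_size).saturating_mul(split_threshold) < max_logical_size
            && by_size < 64
        {
            by_size *= 2;
        }
    }

    // Initial-split target applies only to tenants that are not yet sharded.
    let by_initial = if is_unsharded
        && initial_split_threshold > 0
        && max_logical_size > initial_split_threshold
    {
        initial_split_shards.max(1)
    } else {
        1
    };

    // The larger target wins; max_split_shards of 0 or 1 effectively disables splits.
    by_size.max(by_initial).min(max_split_shards.max(1))
}

For example, with split_threshold = 1 GiB, a tenant whose largest timeline totals 7 GiB across shards yields a target of 8 shards, provided max_split_shards allows it.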
     667              : 
     668              : struct ShardUpdate {
     669              :     tenant_shard_id: TenantShardId,
     670              :     placement_policy: PlacementPolicy,
     671              :     tenant_config: TenantConfig,
     672              : 
     673              :     /// If this is None, generation is not updated.
     674              :     generation: Option<Generation>,
     675              : 
     676              :     /// If this is None, scheduling policy is not updated.
     677              :     scheduling_policy: Option<ShardSchedulingPolicy>,
     678              : }
     679              : 
     680              : enum StopReconciliationsReason {
     681              :     ShuttingDown,
     682              :     SteppingDown,
     683              : }
     684              : 
     685              : impl std::fmt::Display for StopReconciliationsReason {
     686            0 :     fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
     687            0 :         let s = match self {
     688            0 :             Self::ShuttingDown => "Shutting down",
     689            0 :             Self::SteppingDown => "Stepping down",
     690              :         };
     691            0 :         write!(writer, "{s}")
     692            0 :     }
     693              : }
     694              : 
     695              : pub(crate) enum ReconcileResultRequest {
     696              :     ReconcileResult(ReconcileResult),
     697              :     Stop,
     698              : }
     699              : 
     700              : #[derive(Clone)]
     701              : struct MutationLocation {
     702              :     node: Node,
     703              :     generation: Generation,
     704              : }
     705              : 
     706              : #[derive(Clone)]
     707              : struct ShardMutationLocations {
     708              :     latest: MutationLocation,
     709              :     other: Vec<MutationLocation>,
     710              : }
     711              : 
     712              : #[derive(Default, Clone)]
     713              : struct TenantMutationLocations(BTreeMap<TenantShardId, ShardMutationLocations>);
     714              : 
     715              : struct ReconcileAllResult {
     716              :     spawned_reconciles: usize,
     717              :     keep_failing_reconciles: usize,
     718              :     has_delayed_reconciles: bool,
     719              : }
     720              : 
     721              : impl ReconcileAllResult {
     722            0 :     fn new(
     723            0 :         spawned_reconciles: usize,
     724            0 :         keep_failing_reconciles: usize,
     725            0 :         has_delayed_reconciles: bool,
     726            0 :     ) -> Self {
     727            0 :         assert!(
     728            0 :             spawned_reconciles >= keep_failing_reconciles,
     729            0 :             "It is impossible to have more keep-failing reconciles than spawned reconciles"
     730              :         );
     731            0 :         Self {
     732            0 :             spawned_reconciles,
     733            0 :             keep_failing_reconciles,
     734            0 :             has_delayed_reconciles,
     735            0 :         }
     736            0 :     }
     737              : 
     738              :     /// We can run optimizations only if we don't have any delayed reconciles and
     739              :     /// all spawned reconciles are also keep-failing reconciles.
     740            0 :     fn can_run_optimizations(&self) -> bool {
     741            0 :         !self.has_delayed_reconciles && self.spawned_reconciles == self.keep_failing_reconciles
     742            0 :     }
     743              : }
     744              : 
     745              : impl Service {
     746            0 :     pub fn get_config(&self) -> &Config {
     747            0 :         &self.config
     748            0 :     }
     749              : 
     750            0 :     pub fn get_http_client(&self) -> &reqwest::Client {
     751            0 :         &self.http_client
     752            0 :     }
     753              : 
     754              :     /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
     755              :     /// view of the world, and determine which pageservers are responsive.
     756              :     #[instrument(skip_all)]
     757              :     async fn startup_reconcile(
     758              :         self: &Arc<Service>,
     759              :         current_leader: Option<ControllerPersistence>,
     760              :         leader_step_down_state: Option<GlobalObservedState>,
     761              :         bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
     762              :             Result<(), (TenantShardId, NotifyError)>,
     763              :         >,
     764              :     ) {
     765              :         // Startup reconciliation does I/O to other services: whether they
     766              :         // are responsive or not, we should aim to finish within our deadline, because:
     767              :         // - If we don't, a k8s readiness hook watching /ready will kill us.
     768              :         // - While we're waiting for startup reconciliation, we are not fully
     769              :         //   available for end user operations like creating/deleting tenants and timelines.
     770              :         //
     771              :         // We set multiple deadlines to break up the time available between the phases of work: this is
     772              :         // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
     773              :         let start_at = Instant::now();
     774              :         let node_scan_deadline = start_at
     775              :             .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
     776              :             .expect("Reconcile timeout is a modest constant");
     777              : 
     778              :         let observed = if let Some(state) = leader_step_down_state {
     779              :             tracing::info!(
     780              :                 "Using observed state received from leader at {}",
     781              :                 current_leader.as_ref().unwrap().address
     782              :             );
     783              : 
     784              :             state
     785              :         } else {
     786              :             self.build_global_observed_state(node_scan_deadline).await
     787              :         };
     788              : 
     789              :         // Accumulate a list of any tenant locations that ought to be detached
     790              :         let mut cleanup = Vec::new();
     791              : 
     792              :         // Send initial heartbeat requests to all nodes loaded from the database
     793              :         let all_nodes = {
     794              :             let locked = self.inner.read().unwrap();
     795              :             locked.nodes.clone()
     796              :         };
     797              :         let (mut nodes_online, mut sks_online) =
     798              :             self.initial_heartbeat_round(all_nodes.keys()).await;
     799              : 
     800              :         // List of tenants for which we will attempt to notify compute of their location at startup
     801              :         let mut compute_notifications = Vec::new();
     802              : 
     803              :         // Populate intent and observed states for all tenants, based on reported state on pageservers
     804              :         tracing::info!("Populating tenant shards' states from initial pageserver scan...");
     805              :         let shard_count = {
     806              :             let mut locked = self.inner.write().unwrap();
     807              :             let (nodes, safekeepers, tenants, scheduler) = locked.parts_mut_sk();
     808              : 
     809              :             // Mark nodes online if they responded to us: nodes are offline by default after a restart.
     810              :             let mut new_nodes = (**nodes).clone();
     811              :             for (node_id, node) in new_nodes.iter_mut() {
     812              :                 if let Some(utilization) = nodes_online.remove(node_id) {
     813              :                     node.set_availability(NodeAvailability::Active(utilization));
     814              :                     scheduler.node_upsert(node);
     815              :                 }
     816              :             }
     817              :             *nodes = Arc::new(new_nodes);
     818              : 
     819              :             let mut new_sks = (**safekeepers).clone();
     820              :             for (node_id, node) in new_sks.iter_mut() {
     821              :                 if let Some((utilization, last_seen_at)) = sks_online.remove(node_id) {
     822              :                     node.set_availability(SafekeeperState::Available {
     823              :                         utilization,
     824              :                         last_seen_at,
     825              :                     });
     826              :                 }
     827              :             }
     828              :             *safekeepers = Arc::new(new_sks);
     829              : 
     830              :             for (tenant_shard_id, observed_state) in observed.0 {
     831              :                 let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
     832              :                     for node_id in observed_state.locations.keys() {
     833              :                         cleanup.push((tenant_shard_id, *node_id));
     834              :                     }
     835              : 
     836              :                     continue;
     837              :                 };
     838              : 
     839              :                 tenant_shard.observed = observed_state;
     840              :             }
     841              : 
     842              :             // Populate each tenant's intent state
     843              :             let mut schedule_context = ScheduleContext::default();
     844              :             for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
     845              :                 if tenant_shard_id.shard_number == ShardNumber(0) {
     846              :                     // Reset scheduling context each time we advance to the next Tenant
     847              :                     schedule_context = ScheduleContext::default();
     848              :                 }
     849              : 
     850              :                 tenant_shard.intent_from_observed(scheduler);
     851              :                 if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
     852              :                     // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
     853              :                     // not enough pageservers are available.  The tenant may well still be available
     854              :                     // to clients.
     855              :                     tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
     856              :                 } else {
     857              :                     // If we're both intending and observed to be attached at a particular node, we will
     858              :                     // emit a compute notification for this. In the case where our observed state does not
     859              :                     // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
     860              :                     if let Some(attached_at) = tenant_shard.stably_attached() {
     861              :                         compute_notifications.push(compute_hook::ShardUpdate {
     862              :                             tenant_shard_id: *tenant_shard_id,
     863              :                             node_id: attached_at,
     864              :                             stripe_size: tenant_shard.shard.stripe_size,
     865              :                             preferred_az: tenant_shard
     866              :                                 .preferred_az()
     867            0 :                                 .map(|az| Cow::Owned(az.clone())),
     868              :                         });
     869              :                     }
     870              :                 }
     871              :             }
     872              : 
     873              :             tenants.len()
     874              :         };
     875              : 
     876              :         // Before making any observable changes to the cluster, persist self
     877              :         // as leader in database and memory.
     878              :         let leadership = Leadership::new(
     879              :             self.persistence.clone(),
     880              :             self.config.clone(),
     881              :             self.cancel.child_token(),
     882              :         );
     883              : 
     884              :         if let Err(e) = leadership.become_leader(current_leader).await {
     885              :             tracing::error!("Failed to persist self as leader: {e}. Aborting start-up ...");
     886              :             std::process::exit(1);
     887              :         }
     888              : 
     889              :         let safekeepers = self.inner.read().unwrap().safekeepers.clone();
     890              :         let sk_schedule_requests =
     891              :             match safekeeper_reconciler::load_schedule_requests(self, &safekeepers).await {
     892              :                 Ok(v) => v,
     893              :                 Err(e) => {
     894              :                     tracing::warn!(
     895              :                         "Failed to load safekeeper pending ops at startup: {e}." // Don't abort for now: " Aborting start-up..."
     896              :                     );
     897              :                     // std::process::exit(1);
     898              :                     Vec::new()
     899              :                 }
     900              :             };
     901              : 
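                      :         // Update in-memory leadership state, start a reconciler task per safekeeper,
                      :         // and enqueue the schedule requests loaded above.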
     902              :         {
     903              :             let mut locked = self.inner.write().unwrap();
     904              :             locked.become_leader();
     905              : 
     906              :             for (sk_id, _sk) in locked.safekeepers.clone().iter() {
     907              :                 locked.safekeeper_reconcilers.start_reconciler(*sk_id, self);
     908              :             }
     909              : 
     910              :             locked
     911              :                 .safekeeper_reconcilers
     912              :                 .schedule_request_vec(sk_schedule_requests);
     913              :         }
     914              : 
     915              :         // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
     916              :         // generation_pageserver in the database.
     917              : 
     918              :         // Emit compute hook notifications for all tenants which are already stably attached.  Other tenants
     919              :         // will emit compute hook notifications when they reconcile.
     920              :         //
     921              :         // Ordering: our calls to notify_attach_background synchronously establish a relative order for these notifications vs. any later
     922              :         // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
     923              :         // calls will be correctly ordered wrt these.
     924              :         //
     925              :         // Concurrency: we call notify_attach_background for all tenants, which will create O(N) tokio tasks, but almost all of them
     926              :         // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so they are very cheap until they acquire a
     927              :         // semaphore unit and start doing I/O.
     928              :         tracing::info!(
     929              :             "Sending {} compute notifications",
     930              :             compute_notifications.len()
     931              :         );
     932              :         self.compute_hook.notify_attach_background(
     933              :             compute_notifications,
     934              :             bg_compute_notify_result_tx.clone(),
     935              :             &self.cancel,
     936              :         );
     937              : 
     938              :         // Finally, now that the service is up and running, launch reconcile operations for any tenants
     939              :         // which require it: under normal circumstances this should only include tenants that were in some
     940              :         // transient state before we restarted, or any tenants whose compute hooks failed above.
     941              :         tracing::info!("Checking for shards in need of reconciliation...");
     942              :         let reconcile_all_result = self.reconcile_all();
     943              :         // We will not wait for these reconciliation tasks to run here: we're now done with startup and
     944              :         // normal operations may proceed.
     945              : 
     946              :         // Clean up any tenants that were found on pageservers but are not known to us.  Do this in the
     947              :         // background because it does not need to complete in order to proceed with other work.
     948              :         if !cleanup.is_empty() {
     949              :             tracing::info!("Cleaning up {} locations in the background", cleanup.len());
     950              :             tokio::task::spawn({
     951              :                 let cleanup_self = self.clone();
     952            0 :                 async move { cleanup_self.cleanup_locations(cleanup).await }
     953              :             });
     954              :         }
     955              : 
     956              :         // Reconcile the timeline imports:
     957              :         // 1. Mark each tenant shard of tenants with an importing timeline as importing.
     958              :         // 2. Finalize the completed imports in the background. This handles the case where
     959              :         //    the previous storage controller instance shut down whilst finalizing imports.
     960              :         let imports = self.persistence.list_timeline_imports().await;
     961              :         match imports {
     962              :             Ok(mut imports) => {
     963              :                 {
     964              :                     let mut locked = self.inner.write().unwrap();
     965              :                     for import in &imports {
     966              :                         locked
     967              :                             .tenants
     968              :                             .range_mut(TenantShardId::tenant_range(import.tenant_id))
     969            0 :                             .for_each(|(_id, shard)| {
     970            0 :                                 shard.importing = TimelineImportState::Importing
     971            0 :                             });
     972              :                     }
     973              :                 }
     974              : 
     975            0 :                 imports.retain(|import| import.is_complete());
     976              :                 tokio::task::spawn({
     977              :                     let finalize_imports_self = self.clone();
     978            0 :                     async move {
     979            0 :                         finalize_imports_self
     980            0 :                             .finalize_timeline_imports(imports)
     981            0 :                             .await
     982            0 :                     }
     983              :                 });
     984              :             }
     985              :             Err(err) => {
     986              :                 tracing::error!("Could not retrieve completed imports from database: {err}");
     987              :             }
     988              :         }
     989              : 
     990              :         let spawned_reconciles = reconcile_all_result.spawned_reconciles;
     991              :         tracing::info!(
     992              :             "Startup complete, spawned {spawned_reconciles} reconciliation tasks ({shard_count} shards total)"
     993              :         );
     994              :     }
     995              : 
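                      :     /// Used before startup completes: heartbeat the given pageserver nodes and all known
                      :     /// safekeepers once, returning utilization for those that reported themselves available.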
     996            0 :     async fn initial_heartbeat_round<'a>(
     997            0 :         &self,
     998            0 :         node_ids: impl Iterator<Item = &'a NodeId>,
     999            0 :     ) -> (
    1000            0 :         HashMap<NodeId, PageserverUtilization>,
    1001            0 :         HashMap<NodeId, (SafekeeperUtilization, Instant)>,
    1002            0 :     ) {
    1003            0 :         assert!(!self.startup_complete.is_ready());
    1004              : 
    1005            0 :         let all_nodes = {
    1006            0 :             let locked = self.inner.read().unwrap();
    1007            0 :             locked.nodes.clone()
    1008              :         };
    1009              : 
    1010            0 :         let mut nodes_to_heartbeat = HashMap::new();
    1011            0 :         for node_id in node_ids {
    1012            0 :             match all_nodes.get(node_id) {
    1013            0 :                 Some(node) => {
    1014            0 :                     nodes_to_heartbeat.insert(*node_id, node.clone());
    1015            0 :                 }
    1016              :                 None => {
    1017            0 :                     tracing::warn!("Node {node_id} was removed during start-up");
    1018              :                 }
    1019              :             }
    1020              :         }
    1021              : 
    1022            0 :         let all_sks = {
    1023            0 :             let locked = self.inner.read().unwrap();
    1024            0 :             locked.safekeepers.clone()
    1025              :         };
    1026              : 
    1027            0 :         tracing::info!("Sending initial heartbeats...");
    1028            0 :         let (res_ps, res_sk) = tokio::join!(
    1029            0 :             self.heartbeater_ps.heartbeat(Arc::new(nodes_to_heartbeat)),
    1030            0 :             self.heartbeater_sk.heartbeat(all_sks)
    1031              :         );
    1032              : 
    1033            0 :         let mut online_nodes = HashMap::new();
    1034            0 :         if let Ok(deltas) = res_ps {
    1035            0 :             for (node_id, status) in deltas.0 {
    1036            0 :                 match status {
    1037            0 :                     PageserverState::Available { utilization, .. } => {
    1038            0 :                         online_nodes.insert(node_id, utilization);
    1039            0 :                     }
    1040            0 :                     PageserverState::Offline => {}
    1041              :                     PageserverState::WarmingUp { .. } => {
    1042            0 :                         unreachable!("Nodes are never marked warming-up during startup reconcile")
    1043              :                     }
    1044              :                 }
    1045              :             }
    1046            0 :         }
    1047              : 
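                      :         // Likewise for safekeepers, also recording when each was last seen.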
    1048            0 :         let mut online_sks = HashMap::new();
    1049            0 :         if let Ok(deltas) = res_sk {
    1050            0 :             for (node_id, status) in deltas.0 {
    1051            0 :                 match status {
    1052              :                     SafekeeperState::Available {
    1053            0 :                         utilization,
    1054            0 :                         last_seen_at,
    1055            0 :                     } => {
    1056            0 :                         online_sks.insert(node_id, (utilization, last_seen_at));
    1057            0 :                     }
    1058            0 :                     SafekeeperState::Offline => {}
    1059              :                 }
    1060              :             }
    1061            0 :         }
    1062              : 
    1063            0 :         (online_nodes, online_sks)
    1064            0 :     }
    1065              : 
    1066              :     /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
    1067              :     ///
    1068              :     /// The result includes only nodes which responded within the deadline.
    1069            0 :     async fn scan_node_locations(
    1070            0 :         &self,
    1071            0 :         deadline: Instant,
    1072            0 :     ) -> HashMap<NodeId, LocationConfigListResponse> {
    1073            0 :         let nodes = {
    1074            0 :             let locked = self.inner.read().unwrap();
    1075            0 :             locked.nodes.clone()
    1076              :         };
    1077              : 
    1078            0 :         let mut node_results = HashMap::new();
    1079              : 
    1080            0 :         let mut node_list_futs = FuturesUnordered::new();
    1081              : 
    1082            0 :         tracing::info!("Scanning shards on {} nodes...", nodes.len());
    1083            0 :         for node in nodes.values() {
    1084            0 :             node_list_futs.push({
    1085            0 :                 async move {
    1086            0 :                     tracing::info!("Scanning shards on node {node}...");
    1087            0 :                     let timeout = Duration::from_secs(5);
    1088            0 :                     let response = node
    1089            0 :                         .with_client_retries(
    1090            0 :                             |client| async move { client.list_location_config().await },
    1091            0 :                             &self.http_client,
    1092            0 :                             &self.config.pageserver_jwt_token,
    1093              :                             1,
    1094              :                             5,
    1095            0 :                             timeout,
    1096            0 :                             &self.cancel,
    1097              :                         )
    1098            0 :                         .await;
    1099            0 :                     (node.get_id(), response)
    1100            0 :                 }
    1101              :             });
    1102              :         }
    1103              : 
    1104              :         loop {
    1105            0 :             let (node_id, result) = tokio::select! {
    1106            0 :                 next = node_list_futs.next() => {
    1107            0 :                     match next {
    1108            0 :                         Some(result) => result,
    1109              :                         None =>{
    1110              :                             // We got results for all our nodes
    1111            0 :                             break;
    1112              :                         }
    1113              : 
    1114              :                     }
    1115              :                 },
    1116            0 :                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
    1117              :                     // Give up waiting for anyone who hasn't responded: we will yield the results that we have
    1118            0 :                     tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
    1119            0 :                     break;
    1120              :                 }
    1121              :             };
    1122              : 
    1123            0 :             let Some(list_response) = result else {
    1124            0 :                 tracing::info!("Shutdown during startup_reconcile");
    1125            0 :                 break;
    1126              :             };
    1127              : 
    1128            0 :             match list_response {
    1129            0 :                 Err(e) => {
    1130            0 :                     tracing::warn!("Could not scan node {} ({e})", node_id);
    1131              :                 }
    1132            0 :                 Ok(listing) => {
    1133            0 :                     node_results.insert(node_id, listing);
    1134            0 :                 }
    1135              :             }
    1136              :         }
    1137              : 
    1138            0 :         node_results
    1139            0 :     }
    1140              : 
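                      :     /// Build a [`GlobalObservedState`] from the location configs reported by each node,
                      :     /// best-effort within the given deadline.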
    1141            0 :     async fn build_global_observed_state(&self, deadline: Instant) -> GlobalObservedState {
    1142            0 :         let node_listings = self.scan_node_locations(deadline).await;
    1143            0 :         let mut observed = GlobalObservedState::default();
    1144              : 
    1145            0 :         for (node_id, location_confs) in node_listings {
    1146            0 :             tracing::info!(
    1147            0 :                 "Received {} shard statuses from pageserver {}",
    1148            0 :                 location_confs.tenant_shards.len(),
    1149              :                 node_id
    1150              :             );
    1151              : 
    1152            0 :             for (tid, location_conf) in location_confs.tenant_shards {
    1153            0 :                 let entry = observed.0.entry(tid).or_default();
    1154            0 :                 entry.locations.insert(
    1155            0 :                     node_id,
    1156            0 :                     ObservedStateLocation {
    1157            0 :                         conf: location_conf,
    1158            0 :                     },
    1159            0 :                 );
    1160            0 :             }
    1161              :         }
    1162              : 
    1163            0 :         observed
    1164            0 :     }
    1165              : 
    1166              :     /// Used during [`Self::startup_reconcile`] and shard splits: detach a list of unknown-to-us
    1167              :     /// tenants from pageservers.
    1168              :     ///
    1169              :     /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
    1170              :     /// tenants, then it is probably something that was incompletely deleted earlier: we will not fight with any
    1171              :     /// other task trying to attach it.
    1172              :     #[instrument(skip_all)]
    1173              :     async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
    1174              :         let nodes = self.inner.read().unwrap().nodes.clone();
    1175              : 
    1176              :         for (tenant_shard_id, node_id) in cleanup {
    1177              :             // A node reported a tenant_shard_id which is unknown to us: detach it.
    1178              :             let Some(node) = nodes.get(&node_id) else {
    1179              :                 // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
    1180              :                 // a location to clean up on a node that has since been removed.
    1181              :                 tracing::info!(
    1182              :                     "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
    1183              :                 );
    1184              :                 continue;
    1185              :             };
    1186              : 
    1187              :             if self.cancel.is_cancelled() {
    1188              :                 break;
    1189              :             }
    1190              : 
    1191              :             let client = PageserverClient::new(
    1192              :                 node.get_id(),
    1193              :                 self.http_client.clone(),
    1194              :                 node.base_url(),
    1195              :                 self.config.pageserver_jwt_token.as_deref(),
    1196              :             );
    1197              :             match client
    1198              :                 .location_config(
    1199              :                     tenant_shard_id,
    1200              :                     LocationConfig {
    1201              :                         mode: LocationConfigMode::Detached,
    1202              :                         generation: None,
    1203              :                         secondary_conf: None,
    1204              :                         shard_number: tenant_shard_id.shard_number.0,
    1205              :                         shard_count: tenant_shard_id.shard_count.literal(),
    1206              :                         shard_stripe_size: 0,
    1207              :                         tenant_conf: models::TenantConfig::default(),
    1208              :                     },
    1209              :                     None,
    1210              :                     false,
    1211              :                 )
    1212              :                 .await
    1213              :             {
    1214              :                 Ok(()) => {
    1215              :                     tracing::info!(
    1216              :                         "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
    1217              :                     );
    1218              :                 }
    1219              :                 Err(e) => {
    1220              :                     // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
    1221              :                     // break anything.
    1222              :                     tracing::error!(
    1223              :                         "Failed to detach unknown shard {tenant_shard_id} on pageserver {node_id}: {e}"
    1224              :                     );
    1225              :                 }
    1226              :             }
    1227              :         }
    1228              :     }
    1229              : 
    1230              :     /// Long running background task that periodically wakes up and looks for shards that need
    1231              :     /// reconciliation.  Reconciliation is fallible, so any reconciliation tasks that fail during
    1232              :     /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
    1233              :     /// for those retries.
    1234              :     #[instrument(skip_all)]
    1235              :     async fn background_reconcile(self: &Arc<Self>) {
    1236              :         self.startup_complete.clone().wait().await;
    1237              : 
    1238              :         const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
    1239              :         let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
    1240              :         while !self.reconcilers_cancel.is_cancelled() {
    1241              :             tokio::select! {
    1242              :               _ = interval.tick() => {
    1243              :                 let reconcile_all_result = self.reconcile_all();
    1244              :                 if reconcile_all_result.can_run_optimizations() {
    1245              :                     // Run optimizer only when we didn't find any other work to do
    1246              :                     self.optimize_all().await;
    1247              :                 }
    1248              :                 // Always attempt autosplits. Sharding is crucial for bulk ingest performance, so we
    1249              :                 // must be responsive when new projects begin ingesting and reach the threshold.
    1250              :                 self.autosplit_tenants().await;
    1251              :             }
    1252              :               _ = self.reconcilers_cancel.cancelled() => return
    1253              :             }
    1254              :         }
    1255              :     }
    1256              :     /// Heartbeat all storage nodes once in a while.
    1257              :     #[instrument(skip_all)]
    1258              :     async fn spawn_heartbeat_driver(self: &Arc<Self>) {
    1259              :         self.startup_complete.clone().wait().await;
    1260              : 
    1261              :         let mut interval = tokio::time::interval(self.config.heartbeat_interval);
    1262              :         while !self.cancel.is_cancelled() {
    1263              :             tokio::select! {
    1264              :               _ = interval.tick() => { }
    1265              :               _ = self.cancel.cancelled() => return
    1266              :             };
    1267              : 
    1268              :             let nodes = {
    1269              :                 let locked = self.inner.read().unwrap();
    1270              :                 locked.nodes.clone()
    1271              :             };
    1272              : 
    1273              :             let safekeepers = {
    1274              :                 let locked = self.inner.read().unwrap();
    1275              :                 locked.safekeepers.clone()
    1276              :             };
    1277              : 
    1278              :             let (res_ps, res_sk) = tokio::join!(
    1279              :                 self.heartbeater_ps.heartbeat(nodes),
    1280              :                 self.heartbeater_sk.heartbeat(safekeepers)
    1281              :             );
    1282              : 
    1283              :             if let Ok(deltas) = res_ps {
    1284              :                 let mut to_handle = Vec::default();
    1285              : 
    1286              :                 for (node_id, state) in deltas.0 {
    1287              :                     let new_availability = match state {
    1288              :                         PageserverState::Available { utilization, .. } => {
    1289              :                             NodeAvailability::Active(utilization)
    1290              :                         }
    1291              :                         PageserverState::WarmingUp { started_at } => {
    1292              :                             NodeAvailability::WarmingUp(started_at)
    1293              :                         }
    1294              :                         PageserverState::Offline => {
    1295              :                             // The node might have been placed in the WarmingUp state
    1296              :                             // while the heartbeat round was on-going. Hence, filter out
    1297              :                             // offline transitions for WarmingUp nodes that are still within
    1298              :                             // their grace period.
    1299              :                             if let Ok(NodeAvailability::WarmingUp(started_at)) = self
    1300              :                                 .get_node(node_id)
    1301              :                                 .await
    1302              :                                 .as_ref()
    1303            0 :                                 .map(|n| n.get_availability())
    1304              :                             {
    1305              :                                 let now = Instant::now();
    1306              :                                 if now - *started_at >= self.config.max_warming_up_interval {
    1307              :                                     NodeAvailability::Offline
    1308              :                                 } else {
    1309              :                                     NodeAvailability::WarmingUp(*started_at)
    1310              :                                 }
    1311              :                             } else {
    1312              :                                 NodeAvailability::Offline
    1313              :                             }
    1314              :                         }
    1315              :                     };
    1316              : 
    1317              :                     let node_lock = trace_exclusive_lock(
    1318              :                         &self.node_op_locks,
    1319              :                         node_id,
    1320              :                         NodeOperations::Configure,
    1321              :                     )
    1322              :                     .await;
    1323              : 
    1324              :                     pausable_failpoint!("heartbeat-pre-node-state-configure");
    1325              : 
    1326              :                     // This is the code path for genuine availability transitions (i.e. a node
    1327              :                     // goes unavailable and/or comes back online).
    1328              :                     let res = self
    1329              :                         .node_state_configure(node_id, Some(new_availability), None, &node_lock)
    1330              :                         .await;
    1331              : 
    1332              :                     match res {
    1333              :                         Ok(transition) => {
    1334              :                             // Keep hold of the lock until the availability transitions
    1335              :                             // have been handled in
    1336              :                             // [`Service::handle_node_availability_transitions`] in order to avoid
    1337              :                             // racing with [`Service::external_node_configure`].
    1338              :                             to_handle.push((node_id, node_lock, transition));
    1339              :                         }
    1340              :                         Err(ApiError::NotFound(_)) => {
    1341              :                             // This should be rare, but legitimate since the heartbeats are done
    1342              :                             // on a snapshot of the nodes.
    1343              :                             tracing::info!("Node {} was not found after heartbeat round", node_id);
    1344              :                         }
    1345              :                         Err(ApiError::ShuttingDown) => {
    1346              :                             // No-op: we're shutting down, no need to try and update any nodes' statuses
    1347              :                         }
    1348              :                         Err(err) => {
    1349              :                             // Transition to active involves reconciling: if a node responds to a heartbeat then
    1350              :                             // becomes unavailable again, we may get an error here.
    1351              :                             tracing::error!(
    1352              :                                 "Failed to update node state {} after heartbeat round: {}",
    1353              :                                 node_id,
    1354              :                                 err
    1355              :                             );
    1356              :                         }
    1357              :                     }
    1358              :                 }
    1359              : 
    1360              :                 // We collected all the transitions above and now we handle them.
    1361              :                 let res = self.handle_node_availability_transitions(to_handle).await;
    1362              :                 if let Err(errs) = res {
    1363              :                     for (node_id, err) in errs {
    1364              :                         match err {
    1365              :                             ApiError::NotFound(_) => {
    1366              :                                 // This should be rare, but legitimate since the heartbeats are done
    1367              :                                 // on a snapshot of the nodes.
    1368              :                                 tracing::info!(
    1369              :                                     "Node {} was not found after heartbeat round",
    1370              :                                     node_id
    1371              :                                 );
    1372              :                             }
    1373              :                             err => {
    1374              :                                 tracing::error!(
    1375              :                                     "Failed to handle availability transition for {} after heartbeat round: {}",
    1376              :                                     node_id,
    1377              :                                     err
    1378              :                                 );
    1379              :                             }
    1380              :                         }
    1381              :                     }
    1382              :                 }
    1383              :             }
    1384              :             if let Ok(deltas) = res_sk {
    1385              :                 let mut to_activate = Vec::new();
    1386              :                 {
    1387              :                     let mut locked = self.inner.write().unwrap();
    1388              :                     let mut safekeepers = (*locked.safekeepers).clone();
    1389              : 
    1390              :                     for (id, state) in deltas.0 {
    1391              :                         let Some(sk) = safekeepers.get_mut(&id) else {
    1392              :                             tracing::info!(
    1393              :                                 "Couldn't update safekeeper state for id {id} from heartbeat={state:?}"
    1394              :                             );
    1395              :                             continue;
    1396              :                         };
    1397              :                         if sk.scheduling_policy() == SkSchedulingPolicy::Activating
    1398              :                             && let SafekeeperState::Available { .. } = state
    1399              :                         {
    1400              :                             to_activate.push(id);
    1401              :                         }
    1402              :                         sk.set_availability(state);
    1403              :                     }
    1404              :                     locked.safekeepers = Arc::new(safekeepers);
    1405              :                 }
    1406              :                 for sk_id in to_activate {
    1407              :                     // TODO this can race with set_scheduling_policy (can create disjoint DB <-> in-memory state)
    1408              :                     tracing::info!("Activating safekeeper {sk_id}");
    1409              :                     match self.persistence.activate_safekeeper(sk_id.0 as i64).await {
    1410              :                         Ok(Some(())) => {}
    1411              :                         Ok(None) => {
    1412              :                             tracing::info!(
    1413              :                                 "safekeeper {sk_id} has been removed from db or has different scheduling policy than active or activating"
    1414              :                             );
    1415              :                         }
    1416              :                         Err(e) => {
    1417              :                             tracing::warn!("couldn't apply activation of {sk_id} to db: {e}");
    1418              :                             continue;
    1419              :                         }
    1420              :                     }
    1421              :                     if let Err(e) = self
    1422              :                         .set_safekeeper_scheduling_policy_in_mem(sk_id, SkSchedulingPolicy::Active)
    1423              :                         .await
    1424              :                     {
    1425              :                         tracing::info!("couldn't activate safekeeper {sk_id} in memory: {e}");
    1426              :                         continue;
    1427              :                     }
    1428              :                     tracing::info!("Activation of safekeeper {sk_id} done");
    1429              :                 }
    1430              :             }
    1431              :         }
    1432              :     }
    1433              : 
    1434              :     /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
    1435              :     /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
    1436              :     /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
    1437              :     /// will indicate that reconciliation is not needed.
    1438              :     #[instrument(skip_all, fields(
    1439              :         seq=%result.sequence,
    1440              :         tenant_id=%result.tenant_shard_id.tenant_id,
    1441              :         shard_id=%result.tenant_shard_id.shard_slug(),
    1442              :     ))]
    1443              :     fn process_result(&self, result: ReconcileResult) {
    1444              :         let mut locked = self.inner.write().unwrap();
    1445              :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    1446              :         let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
    1447              :             // A reconciliation result might race with removing a tenant: drop results for
    1448              :             // tenants that aren't in our map.
    1449              :             return;
    1450              :         };
    1451              : 
    1452              :         // Usually generation should only be updated via this path, so the max() isn't
    1453              :         // needed, but it is used to handle out-of-band updates via e.g. a test hook.
    1454              :         tenant.generation = std::cmp::max(tenant.generation, result.generation);
    1455              : 
    1456              :         // If the reconciler signals that it failed to notify compute, set this state on
    1457              :         // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
    1458              :         tenant.pending_compute_notification = result.pending_compute_notification;
    1459              : 
    1460              :         // Let the TenantShard know it is idle.
    1461              :         tenant.reconcile_complete(result.sequence);
    1462              : 
    1463              :         // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
    1464              :         // make to the tenant
    1465            0 :         let deltas = result.observed_deltas.into_iter().flat_map(|delta| {
    1468            0 :             let node = nodes.get(delta.node_id())?;
    1469              : 
    1470            0 :             if node.is_available() {
    1471            0 :                 return Some(delta);
    1472            0 :             }
    1473              : 
    1474              :             // In case a node became unavailable concurrently with the reconcile, observed
    1475              :             // locations on it are now uncertain. By convention, set them to None in order
    1476              :             // for them to get refreshed when the node comes back online.
    1477            0 :             Some(ObservedStateDelta::Upsert(Box::new((
    1478            0 :                 node.get_id(),
    1479            0 :                 ObservedStateLocation { conf: None },
    1480            0 :             ))))
    1481            0 :         });
    1482              : 
    1483              :         match result.result {
    1484              :             Ok(()) => {
    1485              :                 tenant.consecutive_errors_count = 0;
    1486              :                 tenant.apply_observed_deltas(deltas);
    1487              :                 tenant.waiter.advance(result.sequence);
    1488              :             }
    1489              :             Err(e) => {
    1490              :                 match e {
    1491              :                     ReconcileError::Cancel => {
    1492              :                         tracing::info!("Reconciler was cancelled");
    1493              :                     }
    1494              :                     ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    1495              :                         // This might be due to the reconciler getting cancelled, or it might
    1496              :                         // be due to the `Node` being marked offline.
    1497              :                         tracing::info!("Reconciler cancelled during pageserver API call");
    1498              :                     }
    1499              :                     _ => {
    1500              :                         tracing::warn!("Reconcile error: {}", e);
    1501              :                     }
    1502              :                 }
    1503              : 
    1504              :                 tenant.consecutive_errors_count = tenant.consecutive_errors_count.saturating_add(1);
    1505              : 
    1506              :                 // Ordering: populate last_error before advancing error_seq,
    1507              :                 // so that waiters will see the correct error after waiting.
    1508              :                 tenant.set_last_error(result.sequence, e);
    1509              : 
    1510              :                 // Skip deletions on reconcile failures
    1511              :                 let upsert_deltas =
    1512            0 :                     deltas.filter(|delta| matches!(delta, ObservedStateDelta::Upsert(_)));
    1513              :                 tenant.apply_observed_deltas(upsert_deltas);
    1514              :             }
    1515              :         }
    1516              : 
    1517              :         // If we just finished detaching all shards for a tenant, it might be time to drop it from memory.
    1518              :         if tenant.policy == PlacementPolicy::Detached {
    1519              :             // We may only drop a tenant from memory while holding the exclusive lock on the tenant ID: this protects us
    1520              :             // from concurrent execution wrt a request handler that might expect the tenant to remain in memory for the
    1521              :             // duration of the request.
    1522              :             let guard = self.tenant_op_locks.try_exclusive(
    1523              :                 tenant.tenant_shard_id.tenant_id,
    1524              :                 TenantOperations::DropDetached,
    1525              :             );
    1526              :             if let Some(guard) = guard {
    1527              :                 self.maybe_drop_tenant(tenant.tenant_shard_id.tenant_id, &mut locked, &guard);
    1528              :             }
    1529              :         }
    1530              : 
    1531              :         // Maybe some other work can proceed now that this job finished.
    1532              :         //
    1533              :         // Only bother with this if we have some semaphore units available in the normal-priority semaphore (these
    1534              :         // reconciles are scheduled at [`ReconcilerPriority::Normal`]).
    1535              :         if self.reconciler_concurrency.available_permits() > 0 {
    1536              :             while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
    1537              :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    1538              :                 if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
    1539              :                     shard.delayed_reconcile = false;
    1540              :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    1541              :                 }
    1542              : 
    1543              :                 if self.reconciler_concurrency.available_permits() == 0 {
    1544              :                     break;
    1545              :                 }
    1546              :             }
    1547              :         }
    1548              :     }
    1549              : 
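                      :     /// Result-processing loop: apply reconcile results as they arrive, and mark shards for
                      :     /// compute notification retry when a background notification fails. Runs until shutdown
                      :     /// or the result channel closes.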
    1550            0 :     async fn process_results(
    1551            0 :         &self,
    1552            0 :         mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
    1553            0 :         mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
    1554            0 :             Result<(), (TenantShardId, NotifyError)>,
    1555            0 :         >,
    1556            0 :     ) {
    1557              :         loop {
    1558              :             // Wait for the next result, or for cancellation
    1559            0 :             tokio::select! {
    1560            0 :                 r = result_rx.recv() => {
    1561            0 :                     match r {
    1562            0 :                         Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
    1563            0 :                         None | Some(ReconcileResultRequest::Stop) => {break;}
    1564              :                     }
    1565              :                 }
    1566            0 :                 _ = async{
    1567            0 :                     match bg_compute_hook_result_rx.recv().await {
    1568            0 :                         Some(result) => {
    1569            0 :                             if let Err((tenant_shard_id, notify_error)) = result {
    1570            0 :                                 tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
    1571            0 :                                 let mut locked = self.inner.write().unwrap();
    1572            0 :                                 if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
    1573            0 :                                     shard.pending_compute_notification = true;
    1574            0 :                                 }
    1575              : 
    1576            0 :                             }
    1577              :                         },
    1578              :                         None => {
    1579              :                             // This channel is dead, but we don't want to terminate the outer loop: just wait for shutdown
    1580            0 :                             self.cancel.cancelled().await;
    1581              :                         }
    1582              :                     }
    1583            0 :                 } => {},
    1584            0 :                 _ = self.cancel.cancelled() => {
    1585            0 :                     break;
    1586              :                 }
    1587              :             };
    1588              :         }
    1589            0 :     }
    1590              : 
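                      :     /// Consume shard split abort requests, retrying each until it succeeds or the reconcilers
                      :     /// are cancelled.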
    1591            0 :     async fn process_aborts(
    1592            0 :         &self,
    1593            0 :         mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
    1594            0 :     ) {
    1595              :         loop {
    1596              :             // Wait for the next result, or for cancellation
    1597            0 :             let op = tokio::select! {
    1598            0 :                 r = abort_rx.recv() => {
    1599            0 :                     match r {
    1600            0 :                         Some(op) => {op},
    1601            0 :                         None => {break;}
    1602              :                     }
    1603              :                 }
    1604            0 :                 _ = self.cancel.cancelled() => {
    1605            0 :                     break;
    1606              :                 }
    1607              :             };
    1608              : 
    1609              :             // Retry until shutdown: we must keep this request object alive until it is properly
    1610              :             // processed, as it holds a lock guard that prevents other operations trying to do things
    1611              :             // to the tenant while it is in a weird part-split state.
    1612            0 :             while !self.reconcilers_cancel.is_cancelled() {
    1613            0 :                 match self.abort_tenant_shard_split(&op).await {
    1614            0 :                     Ok(_) => break,
    1615            0 :                     Err(e) => {
    1616            0 :                         tracing::warn!(
    1617            0 :                             "Failed to abort shard split on {}, will retry: {e}",
    1618              :                             op.tenant_id
    1619              :                         );
    1620              : 
    1621              :                         // If a node is unavailable, we hope that it has been properly marked Offline
    1622              :                         // when we retry, so that the abort op will succeed.  If the abort op is failing
    1623              :                         // for some other reason, we will keep retrying forever, or until a human notices
    1624              :                         // and does something about it (either fixing a pageserver or restarting the controller).
    1625            0 :                         tokio::time::timeout(
    1626            0 :                             Duration::from_secs(5),
    1627            0 :                             self.reconcilers_cancel.cancelled(),
    1628            0 :                         )
    1629            0 :                         .await
    1630            0 :                         .ok();
    1631              :                     }
    1632              :                 }
    1633              :             }
    1634              :         }
    1635            0 :     }
    1636              : 
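                      :     /// Construct the [`Service`]: step down any current leader, run database migrations, then
                      :     /// load nodes, safekeepers and tenant shards from the database, aborting any shard splits
                      :     /// that were left in progress.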
    1637            0 :     pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
    1638            0 :         let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
    1639            0 :         let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
    1640              : 
    1641            0 :         let leadership_cancel = CancellationToken::new();
    1642            0 :         let leadership = Leadership::new(persistence.clone(), config.clone(), leadership_cancel);
    1643            0 :         let (leader, leader_step_down_state) = leadership.step_down_current_leader().await?;
    1644              : 
    1645              :         // Apply the migrations **after** the current leader has stepped down
    1646              :         // (or we've given up waiting for it), but **before** reading from the
    1647              :         // database. The only exception is reading the current leader before
    1648              :         // migrating.
    1649            0 :         persistence.migration_run().await?;
    1650              : 
    1651            0 :         tracing::info!("Loading nodes from database...");
    1652            0 :         let nodes = persistence
    1653            0 :             .list_nodes()
    1654            0 :             .await?
    1655            0 :             .into_iter()
    1656            0 :             .map(|x| Node::from_persistent(x, config.use_https_pageserver_api))
    1657            0 :             .collect::<anyhow::Result<Vec<Node>>>()?;
    1658            0 :         let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
    1659            0 :         tracing::info!("Loaded {} nodes from database.", nodes.len());
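                      :         // Publish pageserver node counts (total and HTTPS-enabled) to the metrics registry.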
    1660            0 :         metrics::METRICS_REGISTRY
    1661            0 :             .metrics_group
    1662            0 :             .storage_controller_pageserver_nodes
    1663            0 :             .set(nodes.len() as i64);
    1664            0 :         metrics::METRICS_REGISTRY
    1665            0 :             .metrics_group
    1666            0 :             .storage_controller_https_pageserver_nodes
    1667            0 :             .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    1668              : 
    1669            0 :         tracing::info!("Loading safekeepers from database...");
    1670            0 :         let safekeepers = persistence
    1671            0 :             .list_safekeepers()
    1672            0 :             .await?
    1673            0 :             .into_iter()
    1674            0 :             .map(|skp| {
    1675            0 :                 Safekeeper::from_persistence(
    1676            0 :                     skp,
    1677            0 :                     CancellationToken::new(),
    1678            0 :                     config.use_https_safekeeper_api,
    1679              :                 )
    1680            0 :             })
    1681            0 :             .collect::<anyhow::Result<Vec<_>>>()?;
    1682            0 :         let safekeepers: HashMap<NodeId, Safekeeper> =
    1683            0 :             safekeepers.into_iter().map(|n| (n.get_id(), n)).collect();
    1684            0 :         let count_policy = |policy| {
    1685            0 :             safekeepers
    1686            0 :                 .iter()
    1687            0 :                 .filter(|sk| sk.1.scheduling_policy() == policy)
    1688            0 :                 .count()
    1689            0 :         };
    1690            0 :         let active_sk_count = count_policy(SkSchedulingPolicy::Active);
    1691            0 :         let activating_sk_count = count_policy(SkSchedulingPolicy::Activating);
    1692            0 :         let pause_sk_count = count_policy(SkSchedulingPolicy::Pause);
    1693            0 :         let decom_sk_count = count_policy(SkSchedulingPolicy::Decomissioned);
    1694            0 :         tracing::info!(
    1695            0 :             "Loaded {} safekeepers from database. Active {active_sk_count}, activating {activating_sk_count}, \
    1696            0 :             paused {pause_sk_count}, decommissioned {decom_sk_count}.",
    1697            0 :             safekeepers.len()
    1698              :         );
    1699            0 :         metrics::METRICS_REGISTRY
    1700            0 :             .metrics_group
    1701            0 :             .storage_controller_safekeeper_nodes
    1702            0 :             .set(safekeepers.len() as i64);
    1703            0 :         metrics::METRICS_REGISTRY
    1704            0 :             .metrics_group
    1705            0 :             .storage_controller_https_safekeeper_nodes
    1706            0 :             .set(safekeepers.values().filter(|s| s.has_https_port()).count() as i64);
    1707              : 
    1708            0 :         tracing::info!("Loading shards from database...");
    1709            0 :         let mut tenant_shard_persistence = persistence.load_active_tenant_shards().await?;
    1710            0 :         tracing::info!(
    1711            0 :             "Loaded {} shards from database.",
    1712            0 :             tenant_shard_persistence.len()
    1713              :         );
    1714              : 
    1715              :         // If any shard splits were in progress, reset the database state to abort them
    1716            0 :         let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
    1717            0 :             HashMap::new();
    1718            0 :         for tsp in &mut tenant_shard_persistence {
    1719            0 :             let shard = tsp.get_shard_identity()?;
    1720            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1721            0 :             let entry = tenant_shard_count_min_max
    1722            0 :                 .entry(tenant_shard_id.tenant_id)
    1723            0 :                 .or_insert_with(|| (shard.count, shard.count));
    1724            0 :             entry.0 = std::cmp::min(entry.0, shard.count);
    1725            0 :             entry.1 = std::cmp::max(entry.1, shard.count);
    1726              :         }
    1727              : 
    1728            0 :         for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
    1729            0 :             if count_min != count_max {
    1730              :                 // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
    1731              :                 // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
    1732              :                 // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
    1733            0 :                 tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
    1734            0 :                 let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
    1735              : 
    1736              :                 // We may never see the Complete status here: if the split was complete, we wouldn't have
    1737              :                 // identified this tenant as having mismatching min/max counts.
    1738            0 :                 assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
    1739              : 
    1740              :                 // Clear the splitting status in-memory, to reflect that we just aborted in the database
    1741            0 :                 tenant_shard_persistence.iter_mut().for_each(|tsp| {
    1742              :                     // Set idle split state on those shards that we will retain.
    1743            0 :                     let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
    1744            0 :                     if tsp_tenant_id == tenant_id
    1745            0 :                         && tsp.get_shard_identity().unwrap().count == count_min
    1746            0 :                     {
    1747            0 :                         tsp.splitting = SplitState::Idle;
    1748            0 :                     } else if tsp_tenant_id == tenant_id {
    1749              :                         // Leave the splitting state on the child shards: this will be used next to
    1750              :                         // drop them.
    1751            0 :                         tracing::info!(
    1752            0 :                             "Shard {tsp_tenant_id} will be dropped after shard split abort",
    1753              :                         );
    1754            0 :                     }
    1755            0 :                 });
    1756              : 
    1757              :                 // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
    1758            0 :                 tenant_shard_persistence.retain(|tsp| {
    1759            0 :                     TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
    1760            0 :                         || tsp.splitting == SplitState::Idle
    1761            0 :                 });
    1762            0 :             }
    1763              :         }
    1764              : 
    1765            0 :         let mut tenants = BTreeMap::new();
    1766              : 
    1767            0 :         let mut scheduler = Scheduler::new(nodes.values());
    1768              : 
    1769              :         #[cfg(feature = "testing")]
    1770              :         {
    1771              :             use pageserver_api::controller_api::AvailabilityZone;
    1772              : 
    1773              :             // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
    1774              :             // tests only store the shards, not the nodes.  The nodes will be loaded shortly
    1775              :             // after, when pageservers start up and register.
    1776            0 :             let mut node_ids = HashSet::new();
    1777            0 :             for tsp in &tenant_shard_persistence {
    1778            0 :                 if let Some(node_id) = tsp.generation_pageserver {
    1779            0 :                     node_ids.insert(node_id);
    1780            0 :                 }
    1781              :             }
    1782            0 :             for node_id in node_ids {
    1783            0 :                 tracing::info!("Creating node {} in scheduler for tests", node_id);
    1784            0 :                 let node = Node::new(
    1785            0 :                     NodeId(node_id as u64),
    1786            0 :                     "".to_string(),
    1787              :                     123,
    1788            0 :                     None,
    1789            0 :                     "".to_string(),
    1790              :                     123,
    1791            0 :                     None,
    1792            0 :                     None,
    1793            0 :                     AvailabilityZone("test_az".to_string()),
    1794              :                     false,
    1795              :                 )
    1796            0 :                 .unwrap();
    1797              : 
    1798            0 :                 scheduler.node_upsert(&node);
    1799              :             }
    1800              :         }
    1801            0 :         for tsp in tenant_shard_persistence {
    1802            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1803              : 
    1804              :             // We will populate intent properly later in [`Self::startup_reconcile`]; initially, populate
    1805              :             // it with what we can infer: the node for which a generation was most recently issued.
    1806            0 :             let mut intent = IntentState::new(
    1807            0 :                 tsp.preferred_az_id
    1808            0 :                     .as_ref()
    1809            0 :                     .map(|az| AvailabilityZone(az.clone())),
    1810              :             );
    1811            0 :             if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
    1812              :             {
    1813            0 :                 if nodes.contains_key(&generation_pageserver) {
    1814            0 :                     intent.set_attached(&mut scheduler, Some(generation_pageserver));
    1815            0 :                 } else {
    1816              :                     // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
    1817              :                     // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
    1818              :                     // on different pageservers.
    1819            0 :                     tracing::warn!(
    1820            0 :                         "Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled"
    1821              :                     );
    1822              :                 }
    1823            0 :             }
    1824            0 :             let new_tenant = TenantShard::from_persistent(tsp, intent)?;
    1825              : 
    1826            0 :             tenants.insert(tenant_shard_id, new_tenant);
    1827              :         }
    1828              : 
    1829            0 :         let (startup_completion, startup_complete) = utils::completion::channel();
    1830              : 
    1831              :         // This channel is continuously consumed by process_results, so doesn't need to be very large.
    1832            0 :         let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
    1833            0 :             tokio::sync::mpsc::channel(512);
    1834              : 
    1835            0 :         let (delayed_reconcile_tx, delayed_reconcile_rx) =
    1836            0 :             tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
    1837              : 
    1838            0 :         let cancel = CancellationToken::new();
    1839            0 :         let reconcilers_cancel = cancel.child_token();
    1840              : 
    1841            0 :         let mut http_client = reqwest::Client::builder();
    1842              :         // We intentionally disable the connection pool, so every request will create its own TCP connection.
    1843              :         // It's especially important for heartbeaters to notice more network problems.
    1844              :         //
    1845              :         // TODO: It makes sense to use this client only in heartbeaters and create a second one with
    1846              :         // connection pooling for everything else. But reqwest::Client may create a connection without
    1847              :         // ever using it (it uses hyper's Client under the hood):
    1848              :         // https://github.com/hyperium/hyper-util/blob/d51318df3461d40e5f5e5ca163cb3905ac960209/src/client/legacy/client.rs#L415
    1849              :         //
    1850              :         // Because of a bug in hyper0::Connection::graceful_shutdown such connections hang during
    1851              :         // graceful server shutdown: https://github.com/hyperium/hyper/issues/2730
    1852              :         //
    1853              :         // The bug has been fixed in hyper v1, so keep alive may be enabled only after we migrate to hyper1.
    1854            0 :         http_client = http_client.pool_max_idle_per_host(0);
    1855            0 :         for ssl_ca_cert in &config.ssl_ca_certs {
    1856            0 :             http_client = http_client.add_root_certificate(ssl_ca_cert.clone());
    1857            0 :         }
    1858            0 :         let http_client = http_client.build()?;
    1859              : 
    1860            0 :         let heartbeater_ps = Heartbeater::new(
    1861            0 :             http_client.clone(),
    1862            0 :             config.pageserver_jwt_token.clone(),
    1863            0 :             config.max_offline_interval,
    1864            0 :             config.max_warming_up_interval,
    1865            0 :             cancel.clone(),
    1866              :         );
    1867              : 
    1868            0 :         let heartbeater_sk = Heartbeater::new(
    1869            0 :             http_client.clone(),
    1870            0 :             config.safekeeper_jwt_token.clone(),
    1871            0 :             config.max_offline_interval,
    1872            0 :             config.max_warming_up_interval,
    1873            0 :             cancel.clone(),
    1874              :         );
    1875              : 
    1876            0 :         let initial_leadership_status = if config.start_as_candidate {
    1877            0 :             LeadershipStatus::Candidate
    1878              :         } else {
    1879            0 :             LeadershipStatus::Leader
    1880              :         };
    1881              : 
    1882            0 :         let this = Arc::new(Self {
    1883            0 :             inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
    1884            0 :                 nodes,
    1885            0 :                 safekeepers,
    1886            0 :                 tenants,
    1887            0 :                 scheduler,
    1888            0 :                 delayed_reconcile_rx,
    1889            0 :                 initial_leadership_status,
    1890            0 :                 reconcilers_cancel.clone(),
    1891              :             ))),
    1892            0 :             config: config.clone(),
    1893            0 :             persistence,
    1894            0 :             compute_hook: Arc::new(ComputeHook::new(config.clone())?),
    1895            0 :             result_tx,
    1896            0 :             heartbeater_ps,
    1897            0 :             heartbeater_sk,
    1898            0 :             reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1899            0 :                 config.reconciler_concurrency,
    1900              :             )),
    1901            0 :             priority_reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1902            0 :                 config.priority_reconciler_concurrency,
    1903              :             )),
    1904            0 :             delayed_reconcile_tx,
    1905            0 :             abort_tx,
    1906            0 :             startup_complete: startup_complete.clone(),
    1907            0 :             cancel,
    1908            0 :             reconcilers_cancel,
    1909            0 :             gate: Gate::default(),
    1910            0 :             reconcilers_gate: Gate::default(),
    1911            0 :             tenant_op_locks: Default::default(),
    1912            0 :             node_op_locks: Default::default(),
    1913            0 :             http_client,
    1914            0 :             step_down_barrier: Default::default(),
    1915              :         });
    1916              : 
    1917            0 :         let result_task_this = this.clone();
    1918            0 :         tokio::task::spawn(async move {
    1919              :             // Block shutdown until we're done (we must respect self.cancel)
    1920            0 :             if let Ok(_gate) = result_task_this.gate.enter() {
    1921            0 :                 result_task_this
    1922            0 :                     .process_results(result_rx, bg_compute_notify_result_rx)
    1923            0 :                     .await
    1924            0 :             }
    1925            0 :         });
    1926              : 
    1927            0 :         tokio::task::spawn({
    1928            0 :             let this = this.clone();
    1929            0 :             async move {
    1930              :                 // Block shutdown until we're done (we must respect self.cancel)
    1931            0 :                 if let Ok(_gate) = this.gate.enter() {
    1932            0 :                     this.process_aborts(abort_rx).await
    1933            0 :                 }
    1934            0 :             }
    1935              :         });
    1936              : 
    1937            0 :         tokio::task::spawn({
    1938            0 :             let this = this.clone();
    1939            0 :             async move {
    1940            0 :                 if let Ok(_gate) = this.gate.enter() {
    1941              :                     loop {
    1942            0 :                         tokio::select! {
    1943            0 :                             _ = this.cancel.cancelled() => {
    1944            0 :                                 break;
    1945              :                             },
    1946            0 :                             _ = tokio::time::sleep(Duration::from_secs(60)) => {}
    1947              :                         };
    1948            0 :                         this.tenant_op_locks.housekeeping();
    1949              :                     }
    1950            0 :                 }
    1951            0 :             }
    1952              :         });
    1953              : 
    1954            0 :         tokio::task::spawn({
    1955            0 :             let this = this.clone();
    1956              :             // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
    1957              :             // is done.
    1958            0 :             let startup_completion = startup_completion.clone();
    1959            0 :             async move {
    1960              :                 // Block shutdown until we're done (we must respect self.cancel)
    1961            0 :                 let Ok(_gate) = this.gate.enter() else {
    1962            0 :                     return;
    1963              :                 };
    1964              : 
    1965            0 :                 this.startup_reconcile(leader, leader_step_down_state, bg_compute_notify_result_tx)
    1966            0 :                     .await;
    1967              : 
    1968            0 :                 drop(startup_completion);
    1969            0 :             }
    1970              :         });
    1971              : 
    1972            0 :         tokio::task::spawn({
    1973            0 :             let this = this.clone();
    1974            0 :             let startup_complete = startup_complete.clone();
    1975            0 :             async move {
    1976            0 :                 startup_complete.wait().await;
    1977            0 :                 this.background_reconcile().await;
    1978            0 :             }
    1979              :         });
    1980              : 
    1981            0 :         tokio::task::spawn({
    1982            0 :             let this = this.clone();
    1983            0 :             let startup_complete = startup_complete.clone();
    1984            0 :             async move {
    1985            0 :                 startup_complete.wait().await;
    1986            0 :                 this.spawn_heartbeat_driver().await;
    1987            0 :             }
    1988              :         });
    1989              : 
    1990              :         // Check that there are enough safekeepers configured so that we can create new timelines
    1991            0 :         let test_sk_res_str = match this.safekeepers_for_new_timeline().await {
    1992            0 :             Ok(v) => format!("Ok({v:?})"),
    1993            0 :             Err(v) => format!("Err({v:})"),
    1994              :         };
    1995            0 :         tracing::info!(
    1996              :             timeline_safekeeper_count = config.timeline_safekeeper_count,
    1997              :             timelines_onto_safekeepers = config.timelines_onto_safekeepers,
    1998            0 :             "viability test result (test timeline creation on safekeepers): {test_sk_res_str}",
    1999              :         );
    2000              : 
    2001            0 :         Ok(this)
    2002            0 :     }
    2003              : 
    2004            0 :     pub(crate) async fn attach_hook(
    2005            0 :         &self,
    2006            0 :         attach_req: AttachHookRequest,
    2007            0 :     ) -> anyhow::Result<AttachHookResponse> {
    2008            0 :         let _tenant_lock = trace_exclusive_lock(
    2009            0 :             &self.tenant_op_locks,
    2010            0 :             attach_req.tenant_shard_id.tenant_id,
    2011            0 :             TenantOperations::AttachHook,
    2012            0 :         )
    2013            0 :         .await;
    2014              : 
    2015              :         // This is a test hook.  To enable using it on tenants that were created directly with
    2016              :         // the pageserver API (not via this service), we will auto-create any missing tenant
    2017              :         // shards with default state.
    2018            0 :         let insert = {
    2019            0 :             match self
    2020            0 :                 .maybe_load_tenant(attach_req.tenant_shard_id.tenant_id, &_tenant_lock)
    2021            0 :                 .await
    2022              :             {
    2023            0 :                 Ok(_) => false,
    2024            0 :                 Err(ApiError::NotFound(_)) => true,
    2025            0 :                 Err(e) => return Err(e.into()),
    2026              :             }
    2027              :         };
    2028              : 
    2029            0 :         if insert {
    2030            0 :             let config = attach_req.config.clone().unwrap_or_default();
    2031            0 :             let tsp = TenantShardPersistence {
    2032            0 :                 tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
    2033            0 :                 shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
    2034            0 :                 shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
    2035            0 :                 shard_stripe_size: 0,
    2036            0 :                 generation: attach_req.generation_override.or(Some(0)),
    2037            0 :                 generation_pageserver: None,
    2038            0 :                 placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
    2039            0 :                 config: serde_json::to_string(&config).unwrap(),
    2040            0 :                 splitting: SplitState::default(),
    2041            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2042            0 :                     .unwrap(),
    2043            0 :                 preferred_az_id: None,
    2044            0 :             };
    2045              : 
    2046            0 :             match self.persistence.insert_tenant_shards(vec![tsp]).await {
    2047            0 :                 Err(e) => match e {
    2048              :                     DatabaseError::Query(diesel::result::Error::DatabaseError(
    2049              :                         DatabaseErrorKind::UniqueViolation,
    2050              :                         _,
    2051              :                     )) => {
    2052            0 :                         tracing::info!(
    2053            0 :                             "Raced with another request to insert tenant {}",
    2054              :                             attach_req.tenant_shard_id
    2055              :                         )
    2056              :                     }
    2057            0 :                     _ => return Err(e.into()),
    2058              :                 },
    2059              :                 Ok(()) => {
    2060            0 :                     tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
    2061              : 
    2062            0 :                     let mut shard = TenantShard::new(
    2063            0 :                         attach_req.tenant_shard_id,
    2064            0 :                         ShardIdentity::unsharded(),
    2065            0 :                         PlacementPolicy::Attached(0),
    2066            0 :                         None,
    2067              :                     );
    2068            0 :                     shard.config = config;
    2069              : 
    2070            0 :                     let mut locked = self.inner.write().unwrap();
    2071            0 :                     locked.tenants.insert(attach_req.tenant_shard_id, shard);
    2072            0 :                     tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
    2073              :                 }
    2074              :             }
    2075            0 :         }
    2076              : 
    2077            0 :         let new_generation = if let Some(req_node_id) = attach_req.node_id {
    2078            0 :             let maybe_tenant_conf = {
    2079            0 :                 let locked = self.inner.write().unwrap();
    2080            0 :                 locked
    2081            0 :                     .tenants
    2082            0 :                     .get(&attach_req.tenant_shard_id)
    2083            0 :                     .map(|t| t.config.clone())
    2084              :             };
    2085              : 
    2086            0 :             match maybe_tenant_conf {
    2087            0 :                 Some(conf) => {
    2088            0 :                     let new_generation = self
    2089            0 :                         .persistence
    2090            0 :                         .increment_generation(attach_req.tenant_shard_id, req_node_id)
    2091            0 :                         .await?;
    2092              : 
    2093              :                     // Persist the placement policy update. This is required
    2094              :                     // when reattaching a detached tenant.
    2095            0 :                     self.persistence
    2096            0 :                         .update_tenant_shard(
    2097            0 :                             TenantFilter::Shard(attach_req.tenant_shard_id),
    2098            0 :                             Some(PlacementPolicy::Attached(0)),
    2099            0 :                             Some(conf),
    2100            0 :                             None,
    2101            0 :                             None,
    2102            0 :                         )
    2103            0 :                         .await?;
    2104            0 :                     Some(new_generation)
    2105              :                 }
    2106              :                 None => {
    2107            0 :                     anyhow::bail!("Attach hook handling raced with tenant removal")
    2108              :                 }
    2109              :             }
    2110              :         } else {
    2111            0 :             self.persistence.detach(attach_req.tenant_shard_id).await?;
    2112            0 :             None
    2113              :         };
    2114              : 
    2115            0 :         let mut locked = self.inner.write().unwrap();
    2116            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2117              : 
    2118            0 :         let tenant_shard = tenants
    2119            0 :             .get_mut(&attach_req.tenant_shard_id)
    2120            0 :             .expect("Checked for existence above");
    2121              : 
    2122            0 :         if let Some(new_generation) = new_generation {
    2123            0 :             tenant_shard.generation = Some(new_generation);
    2124            0 :             tenant_shard.policy = PlacementPolicy::Attached(0);
    2125            0 :         } else {
    2126              :             // This is a detach notification.  We must update placement policy to avoid re-attaching
    2127              :             // during background scheduling/reconciliation, or during storage controller restart.
    2128            0 :             assert!(attach_req.node_id.is_none());
    2129            0 :             tenant_shard.policy = PlacementPolicy::Detached;
    2130              :         }
    2131              : 
    2132            0 :         if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
    2133            0 :             tracing::info!(
    2134              :                 tenant_id = %attach_req.tenant_shard_id,
    2135              :                 ps_id = %attaching_pageserver,
    2136              :                 generation = ?tenant_shard.generation,
    2137            0 :                 "issuing",
    2138              :             );
    2139            0 :         } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
    2140            0 :             tracing::info!(
    2141              :                 tenant_id = %attach_req.tenant_shard_id,
    2142              :                 %ps_id,
    2143              :                 generation = ?tenant_shard.generation,
    2144            0 :                 "dropping",
    2145              :             );
    2146              :         } else {
    2147            0 :             tracing::info!(
    2148              :             tenant_id = %attach_req.tenant_shard_id,
    2149            0 :             "no-op: tenant already has no pageserver");
    2150              :         }
    2151            0 :         tenant_shard
    2152            0 :             .intent
    2153            0 :             .set_attached(scheduler, attach_req.node_id);
    2154              : 
    2155            0 :         tracing::info!(
    2156            0 :             "attach_hook: tenant {} set generation {:?}, pageserver {}, config {:?}",
    2157              :             attach_req.tenant_shard_id,
    2158              :             tenant_shard.generation,
    2159              :             // TODO: this is an odd number of 0xf's
    2160            0 :             attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff)),
    2161              :             attach_req.config,
    2162              :         );
    2163              : 
    2164              :         // Trick the reconciler into not doing anything for this tenant: this helps
    2165              :         // tests that manually configure a tenant on the pageserver, and then call this
    2166              :         // attach hook: they don't want background reconciliation to modify what they
    2167              :         // did to the pageserver.
    2168              :         #[cfg(feature = "testing")]
    2169              :         {
    2170            0 :             if let Some(node_id) = attach_req.node_id {
    2171            0 :                 tenant_shard.observed.locations = HashMap::from([(
    2172            0 :                     node_id,
    2173            0 :                     ObservedStateLocation {
    2174            0 :                         conf: Some(attached_location_conf(
    2175            0 :                             tenant_shard.generation.unwrap(),
    2176            0 :                             &tenant_shard.shard,
    2177            0 :                             &tenant_shard.config,
    2178            0 :                             &PlacementPolicy::Attached(0),
    2179            0 :                             tenant_shard.intent.get_secondary().len(),
    2180            0 :                         )),
    2181            0 :                     },
    2182            0 :                 )]);
    2183            0 :             } else {
    2184            0 :                 tenant_shard.observed.locations.clear();
    2185            0 :             }
    2186              :         }
    2187              : 
    2188              :         Ok(AttachHookResponse {
    2189            0 :             generation: attach_req
    2190            0 :                 .node_id
    2191            0 :                 .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
    2192              :         })
    2193            0 :     }
    2194              : 
    2195            0 :     pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
    2196            0 :         let locked = self.inner.read().unwrap();
    2197              : 
    2198            0 :         let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
    2199              : 
    2200              :         InspectResponse {
    2201            0 :             attachment: tenant_shard.and_then(|s| {
    2202            0 :                 s.intent
    2203            0 :                     .get_attached()
    2204            0 :                     .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
    2205            0 :             }),
    2206              :         }
    2207            0 :     }
    2208              : 
    2209              :     // When the availability state of a node transitions to active, we must do a full reconciliation
    2210              :     // of LocationConfigs on that node.  This is because while a node was offline:
    2211              :     // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
    2212              :     // - aborting a tenant shard split might have left rogue child shards behind on this node.
    2213              :     //
    2214              :     // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
    2215              :     // Reconcilers might communicate with the node, and these must not overlap with the work we do in
    2216              :     // this function.
    2217              :     //
    2218              :     // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
    2219              :     // is written for a single node rather than as a batch job for all nodes.
    2220              :     #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
    2221              :     async fn node_activate_reconcile(
    2222              :         &self,
    2223              :         mut node: Node,
    2224              :         _lock: &TracingExclusiveGuard<NodeOperations>,
    2225              :     ) -> Result<(), ApiError> {
    2226              :         // This Node is a mutable local copy: we will set it active so that we can use its
    2227              :         // API client to reconcile with the node.  The Node in [`Self::nodes`] will get updated
    2228              :         // later.
    2229              :         node.set_availability(NodeAvailability::Active(PageserverUtilization::full()));
    2230              : 
    2231              :         let configs = match node
    2232              :             .with_client_retries(
    2233            0 :                 |client| async move { client.list_location_config().await },
    2234              :                 &self.http_client,
    2235              :                 &self.config.pageserver_jwt_token,
    2236              :                 1,
    2237              :                 5,
    2238              :                 SHORT_RECONCILE_TIMEOUT,
    2239              :                 &self.cancel,
    2240              :             )
    2241              :             .await
    2242              :         {
    2243              :             None => {
    2244              :                 // We're shutting down (the Node's cancellation token can't have fired, because
    2245              :                 // we're the only scope that has a reference to it, and we didn't fire it).
    2246              :                 return Err(ApiError::ShuttingDown);
    2247              :             }
    2248              :             Some(Err(e)) => {
    2249              :                 // This node didn't succeed listing its locations: it may not proceed to active state
    2250              :                 // as it is apparently unavailable.
    2251              :                 return Err(ApiError::PreconditionFailed(
    2252              :                     format!("Failed to query node location configs, cannot activate ({e})").into(),
    2253              :                 ));
    2254              :             }
    2255              :             Some(Ok(configs)) => configs,
    2256              :         };
    2257              :         tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
    2258              : 
    2259              :         let mut cleanup = Vec::new();
    2260              :         let mut mismatched_locations = 0;
    2261              :         {
    2262              :             let mut locked = self.inner.write().unwrap();
    2263              : 
    2264              :             for (tenant_shard_id, reported) in configs.tenant_shards {
    2265              :                 let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
    2266              :                     cleanup.push(tenant_shard_id);
    2267              :                     continue;
    2268              :                 };
    2269              : 
    2270              :                 let on_record = &mut tenant_shard
    2271              :                     .observed
    2272              :                     .locations
    2273              :                     .entry(node.get_id())
    2274            0 :                     .or_insert_with(|| ObservedStateLocation { conf: None })
    2275              :                     .conf;
    2276              : 
    2277              :                 // If the location reported by the node does not match our observed state,
    2278              :                 // then we mark it as uncertain and let the background reconciliation loop
    2279              :                 // deal with it.
    2280              :                 //
    2281              :                 // Note that this also covers net new locations reported by the node.
    2282              :                 if *on_record != reported {
    2283              :                     mismatched_locations += 1;
    2284              :                     *on_record = None;
    2285              :                 }
    2286              :             }
    2287              :         }
    2288              : 
    2289              :         if mismatched_locations > 0 {
    2290              :             tracing::info!(
    2291              :                 "Set observed state to None for {mismatched_locations} mismatched locations"
    2292              :             );
    2293              :         }
    2294              : 
    2295              :         for tenant_shard_id in cleanup {
    2296              :             tracing::info!("Detaching {tenant_shard_id}");
    2297              :             match node
    2298              :                 .with_client_retries(
    2299            0 :                     |client| async move {
    2300            0 :                         let config = LocationConfig {
    2301            0 :                             mode: LocationConfigMode::Detached,
    2302            0 :                             generation: None,
    2303            0 :                             secondary_conf: None,
    2304            0 :                             shard_number: tenant_shard_id.shard_number.0,
    2305            0 :                             shard_count: tenant_shard_id.shard_count.literal(),
    2306            0 :                             shard_stripe_size: 0,
    2307            0 :                             tenant_conf: models::TenantConfig::default(),
    2308            0 :                         };
    2309            0 :                         client
    2310            0 :                             .location_config(tenant_shard_id, config, None, false)
    2311            0 :                             .await
    2312            0 :                     },
    2313              :                     &self.http_client,
    2314              :                     &self.config.pageserver_jwt_token,
    2315              :                     1,
    2316              :                     5,
    2317              :                     SHORT_RECONCILE_TIMEOUT,
    2318              :                     &self.cancel,
    2319              :                 )
    2320              :                 .await
    2321              :             {
    2322              :                 None => {
    2323              :                     // We're shutting down (the Node's cancellation token can't have fired, because
    2324              :                     // we're the only scope that has a reference to it, and we didn't fire it).
    2325              :                     return Err(ApiError::ShuttingDown);
    2326              :                 }
    2327              :                 Some(Err(e)) => {
    2328              :                     // Do not let the node proceed to Active state if it is not responsive to requests
    2329              :                     // to detach.  This could happen if e.g. a shutdown bug in the pageserver is preventing
    2330              :                     // detach from completing: we should not let this node back into the set of nodes considered
    2331              :                     // okay for scheduling.
    2332              :                     return Err(ApiError::Conflict(format!(
    2333              :                         "Node {node} failed to detach {tenant_shard_id}: {e}"
    2334              :                     )));
    2335              :                 }
    2336              :                 Some(Ok(_)) => {}
    2337              :             };
    2338              :         }
    2339              : 
    2340              :         Ok(())
    2341              :     }
    2342              : 
    2343            0 :     pub(crate) async fn re_attach(
    2344            0 :         &self,
    2345            0 :         reattach_req: ReAttachRequest,
    2346            0 :     ) -> Result<ReAttachResponse, ApiError> {
    2347            0 :         if let Some(register_req) = reattach_req.register {
    2348            0 :             self.node_register(register_req).await?;
    2349            0 :         }
    2350              : 
    2351              :         // Ordering: we must persist generation number updates before making them visible in the in-memory state
    2352            0 :         let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
    2353              : 
    2354            0 :         tracing::info!(
    2355              :             node_id=%reattach_req.node_id,
    2356            0 :             "Incremented {} tenant shards' generations",
    2357            0 :             incremented_generations.len()
    2358              :         );
    2359              : 
    2360              :         // Apply the updated generation to our in-memory state, and
    2361              :         // discover secondary locations.
    2362            0 :         let mut locked = self.inner.write().unwrap();
    2363            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    2364              : 
    2365            0 :         let mut response = ReAttachResponse {
    2366            0 :             tenants: Vec::new(),
    2367            0 :         };
    2368              : 
    2369              :         // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
    2370              :         // to call location_conf API with an old generation.  Wait for cancellation to complete
    2371              :         // before responding to this request.  Requires well implemented CancellationToken logic
    2372              :         // all the way to where we call location_conf.  Even then, there can still be a location_conf
    2373              :         // request in flight over the network: TODO handle that by making location_conf API refuse
    2374              :         // to go backward in generations.
    2375              : 
    2376              :         // Scan through all shards, applying updates for ones where we updated generation
    2377              :         // and identifying shards that intend to have a secondary location on this node.
    2378            0 :         for (tenant_shard_id, shard) in tenants {
    2379            0 :             if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
    2380            0 :                 let new_gen = *new_gen;
    2381            0 :                 response.tenants.push(ReAttachResponseTenant {
    2382            0 :                     id: *tenant_shard_id,
    2383            0 :                     r#gen: Some(new_gen.into().unwrap()),
    2384            0 :                     // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
    2385            0 :                     // execution.  If a pageserver is restarted during that process, then the reconcile pass will
    2386            0 :                     // fail, and start from scratch, so it doesn't make sense for us to try and preserve
    2387            0 :                     // the stale/multi states at this point.
    2388            0 :                     mode: LocationConfigMode::AttachedSingle,
    2389            0 :                     stripe_size: shard.shard.stripe_size,
    2390            0 :                 });
    2391              : 
    2392            0 :                 shard.generation = std::cmp::max(shard.generation, Some(new_gen));
    2393            0 :                 if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
    2394              :                     // Why can we update `observed` even though we're not sure our response will be received
    2395              :                     // by the pageserver?  Because the pageserver will not proceed with startup until
    2396              :                     // it has processed the response: if it loses it, we'll see another request and increment
    2397              :                     // generation again, avoiding any uncertainty about dirtiness of tenant's state.
    2398            0 :                     if let Some(conf) = observed.conf.as_mut() {
    2399            0 :                         conf.generation = new_gen.into();
    2400            0 :                     }
    2401            0 :                 } else {
    2402            0 :                     // This node has no observed state for the shard: perhaps it was offline
    2403            0 :                     // when the pageserver restarted.  Insert a None, so that the Reconciler
    2404            0 :                     // will be prompted to learn the location's state before it makes changes.
    2405            0 :                     shard
    2406            0 :                         .observed
    2407            0 :                         .locations
    2408            0 :                         .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
    2409            0 :                 }
    2410            0 :             } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
    2411            0 :                 // Ordering: pageserver will not accept /location_config requests until it has
    2412            0 :                 // finished processing the response from re-attach.  So we can update our in-memory state
    2413            0 :                 // now, and be confident that we are not stamping on the result of some later location config.
    2414            0 :                 // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
    2415            0 :                 // so we might update observed state here, and then get over-written by some racing
    2416            0 :                 // ReconcileResult.  The impact is low however, since the state we have set on the pageserver
    2417            0 :                 // matches intent, so worst case if we race then we end up doing a spurious reconcile.
    2418            0 : 
    2419            0 :                 response.tenants.push(ReAttachResponseTenant {
    2420            0 :                     id: *tenant_shard_id,
    2421            0 :                     r#gen: None,
    2422            0 :                     mode: LocationConfigMode::Secondary,
    2423            0 :                     stripe_size: shard.shard.stripe_size,
    2424            0 :                 });
    2425            0 : 
    2426            0 :                 // We must not update observed, because we have no guarantee that our
    2427            0 :                 // response will be received by the pageserver. This could leave it
    2428            0 :                 // falsely dirty, but the resulting reconcile should be idempotent.
    2429            0 :             }
    2430              :         }
    2431              : 
    2432              :         // We consider a node Active once we have composed a re-attach response, but we
    2433              :         // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
    2434              :         // implicitly synchronizes the LocationConfigs on the node.
    2435              :         //
    2436              :         // Setting a node active unblocks any Reconcilers that might write to the location config API,
    2437              :         // but those requests will not be accepted by the node until it has finished processing
    2438              :         // the re-attach response.
    2439              :         //
    2440              :         // Additionally, reset the node's scheduling policy to match the conditional update done
    2441              :         // in [`Persistence::re_attach`].
    2442            0 :         if let Some(node) = nodes.get(&reattach_req.node_id) {
    2443            0 :             let reset_scheduling = matches!(
    2444            0 :                 node.get_scheduling(),
    2445              :                 NodeSchedulingPolicy::PauseForRestart
    2446              :                     | NodeSchedulingPolicy::Draining
    2447              :                     | NodeSchedulingPolicy::Filling
    2448              :                     | NodeSchedulingPolicy::Deleting
    2449              :             );
    2450              : 
    2451            0 :             let mut new_nodes = (**nodes).clone();
    2452            0 :             if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
    2453            0 :                 if reset_scheduling {
    2454            0 :                     node.set_scheduling(NodeSchedulingPolicy::Active);
    2455            0 :                 }
    2456              : 
    2457            0 :                 tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
    2458            0 :                 node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
    2459              : 
    2460            0 :                 scheduler.node_upsert(node);
    2461            0 :                 let new_nodes = Arc::new(new_nodes);
    2462            0 :                 *nodes = new_nodes;
    2463              :             } else {
    2464            0 :                 tracing::error!(
    2465            0 :                     "Reattaching node {} was removed while processing the request",
    2466              :                     reattach_req.node_id
    2467              :                 );
    2468              :             }
    2469            0 :         }
    2470              : 
    2471            0 :         Ok(response)
    2472            0 :     }
    2473              : 
    2474            0 :     pub(crate) async fn validate(
    2475            0 :         &self,
    2476            0 :         validate_req: ValidateRequest,
    2477            0 :     ) -> Result<ValidateResponse, DatabaseError> {
    2478              :         // Fast in-memory check: we may reject validation on anything that doesn't match our
    2479              :         // in-memory generation for a shard
    2480            0 :         let in_memory_result = {
    2481            0 :             let mut in_memory_result = Vec::new();
    2482            0 :             let locked = self.inner.read().unwrap();
    2483            0 :             for req_tenant in validate_req.tenants {
    2484            0 :                 if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
    2485            0 :                     let valid = tenant_shard.generation == Some(Generation::new(req_tenant.r#gen));
    2486            0 :                     tracing::info!(
    2487            0 :                         "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
    2488              :                         req_tenant.id,
    2489              :                         req_tenant.r#gen,
    2490              :                         tenant_shard.generation
    2491              :                     );
    2492              : 
    2493            0 :                     in_memory_result.push((
    2494            0 :                         req_tenant.id,
    2495            0 :                         Generation::new(req_tenant.r#gen),
    2496            0 :                         valid,
    2497            0 :                     ));
    2498              :                 } else {
    2499              :                     // This is legal: for example during a shard split the pageserver may still
    2500              :                     // have deletions in its queue from the old pre-split shard, or after deletion
    2501              :                     // of a tenant that was busy with compaction/gc while being deleted.
    2502            0 :                     tracing::info!(
    2503            0 :                         "Refusing deletion validation for missing shard {}",
    2504              :                         req_tenant.id
    2505              :                     );
    2506              :                 }
    2507              :             }
    2508              : 
    2509            0 :             in_memory_result
    2510              :         };
    2511              : 
    2512              :         // Database calls to confirm validity for anything that passed the in-memory check.  We must do this
    2513              :         // in case of controller split-brain, where some other controller process might have incremented the generation.
    2514            0 :         let db_generations = self
    2515            0 :             .persistence
    2516            0 :             .shard_generations(
    2517            0 :                 in_memory_result
    2518            0 :                     .iter()
    2519            0 :                     .filter_map(|i| if i.2 { Some(&i.0) } else { None }),
    2520              :             )
    2521            0 :             .await?;
    2522            0 :         let db_generations = db_generations.into_iter().collect::<HashMap<_, _>>();
    2523              : 
    2524            0 :         let mut response = ValidateResponse {
    2525            0 :             tenants: Vec::new(),
    2526            0 :         };
    2527            0 :         for (tenant_shard_id, validate_generation, valid) in in_memory_result.into_iter() {
    2528            0 :             let valid = if valid {
    2529            0 :                 let db_generation = db_generations.get(&tenant_shard_id);
    2530            0 :                 db_generation == Some(&Some(validate_generation))
    2531              :             } else {
    2532              :                 // If in-memory state says it's invalid, trust that.  It's always safe to fail a validation, at worst
    2533              :                 // this prevents a pageserver from cleaning up an object in S3.
    2534            0 :                 false
    2535              :             };
    2536              : 
    2537            0 :             response.tenants.push(ValidateResponseTenant {
    2538            0 :                 id: tenant_shard_id,
    2539            0 :                 valid,
    2540            0 :             })
    2541              :         }
    2542              : 
    2543            0 :         Ok(response)
    2544            0 :     }
    2545              : 
    2546            0 :     pub(crate) async fn tenant_create(
    2547            0 :         &self,
    2548            0 :         create_req: TenantCreateRequest,
    2549            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    2550            0 :         let tenant_id = create_req.new_tenant_id.tenant_id;
    2551              : 
    2552              :         // Exclude any concurrent attempts to create/access the same tenant ID
    2553            0 :         let _tenant_lock = trace_exclusive_lock(
    2554            0 :             &self.tenant_op_locks,
    2555            0 :             create_req.new_tenant_id.tenant_id,
    2556            0 :             TenantOperations::Create,
    2557            0 :         )
    2558            0 :         .await;
    2559            0 :         let (response, waiters) = self.do_tenant_create(create_req).await?;
    2560              : 
    2561            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    2562              :             // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
    2563              :             // accept compute notifications while the tenant is still being created.  Reconciliation will
    2564              :             // be retried in the background.
    2565            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
    2566            0 :         }
    2567            0 :         Ok(response)
    2568            0 :     }
    2569              : 
    2570            0 :     pub(crate) async fn do_tenant_create(
    2571            0 :         &self,
    2572            0 :         create_req: TenantCreateRequest,
    2573            0 :     ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
    2574            0 :         let placement_policy = create_req
    2575            0 :             .placement_policy
    2576            0 :             .clone()
    2577              :             // As a default, zero secondaries is convenient for tests that don't choose a policy.
    2578            0 :             .unwrap_or(PlacementPolicy::Attached(0));
    2579              : 
    2580              :         // This service expects to handle sharding itself: it is an error to try and directly create
    2581              :         // a particular shard here.
    2582            0 :         let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
    2583            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2584            0 :                 "Attempted to create a specific shard, this API is for creating the whole tenant"
    2585            0 :             )));
    2586              :         } else {
    2587            0 :             create_req.new_tenant_id.tenant_id
    2588              :         };
    2589              : 
    2590            0 :         tracing::info!(
    2591            0 :             "Creating tenant {}, shard_count={:?}",
    2592              :             create_req.new_tenant_id,
    2593              :             create_req.shard_parameters.count,
    2594              :         );
    2595              : 
    2596            0 :         let create_ids = (0..create_req.shard_parameters.count.count())
    2597            0 :             .map(|i| TenantShardId {
    2598            0 :                 tenant_id,
    2599            0 :                 shard_number: ShardNumber(i),
    2600            0 :                 shard_count: create_req.shard_parameters.count,
    2601            0 :             })
    2602            0 :             .collect::<Vec<_>>();
    2603              : 
    2604              :         // If the caller specifies a None generation, it means "start from default".  This is different
    2605              :         // to [`Self::tenant_location_config`], where a None generation is used to represent
    2606              :         // an incompletely-onboarded tenant.
    2607            0 :         let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
    2608            0 :             tracing::info!(
    2609            0 :                 "tenant_create: secondary mode, generation is_some={}",
    2610            0 :                 create_req.generation.is_some()
    2611              :             );
    2612            0 :             create_req.generation.map(Generation::new)
    2613              :         } else {
    2614            0 :             tracing::info!(
    2615            0 :                 "tenant_create: not secondary mode, generation is_some={}",
    2616            0 :                 create_req.generation.is_some()
    2617              :             );
    2618            0 :             Some(
    2619            0 :                 create_req
    2620            0 :                     .generation
    2621            0 :                     .map(Generation::new)
    2622            0 :                     .unwrap_or(INITIAL_GENERATION),
    2623            0 :             )
    2624              :         };
    2625              : 
    2626            0 :         let preferred_az_id = {
    2627            0 :             let locked = self.inner.read().unwrap();
    2628              :             // Idempotency: take the existing value if the tenant already exists
    2629            0 :             if let Some(shard) = locked.tenants.get(create_ids.first().unwrap()) {
    2630            0 :                 shard.preferred_az().cloned()
    2631              :             } else {
    2632            0 :                 locked.scheduler.get_az_for_new_tenant()
    2633              :             }
    2634              :         };
    2635              : 
    2636              :         // Ordering: we persist tenant shards before creating them on the pageserver.  This enables a caller
    2637              :         // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
    2638              :         // during the creation, rather than risking leaving orphan objects in S3.
    2639            0 :         let persist_tenant_shards = create_ids
    2640            0 :             .iter()
    2641            0 :             .map(|tenant_shard_id| TenantShardPersistence {
    2642            0 :                 tenant_id: tenant_shard_id.tenant_id.to_string(),
    2643            0 :                 shard_number: tenant_shard_id.shard_number.0 as i32,
    2644            0 :                 shard_count: tenant_shard_id.shard_count.literal() as i32,
    2645            0 :                 shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
    2646            0 :                 generation: initial_generation.map(|g| g.into().unwrap() as i32),
    2647              :                 // The pageserver is not known until scheduling happens: we will set this column when
    2648              :                 // incrementing the generation the first time we attach to a pageserver.
    2649            0 :                 generation_pageserver: None,
    2650            0 :                 placement_policy: serde_json::to_string(&placement_policy).unwrap(),
    2651            0 :                 config: serde_json::to_string(&create_req.config).unwrap(),
    2652            0 :                 splitting: SplitState::default(),
    2653            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    2654            0 :                     .unwrap(),
    2655            0 :                 preferred_az_id: preferred_az_id.as_ref().map(|az| az.to_string()),
    2656            0 :             })
    2657            0 :             .collect();
    2658              : 
    2659            0 :         match self
    2660            0 :             .persistence
    2661            0 :             .insert_tenant_shards(persist_tenant_shards)
    2662            0 :             .await
    2663              :         {
    2664            0 :             Ok(_) => {}
    2665              :             Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
    2666              :                 DatabaseErrorKind::UniqueViolation,
    2667              :                 _,
    2668              :             ))) => {
    2669              :                 // Unique key violation: this is probably a retry.  Because the shard count is part of the unique key,
    2670              :                 // if we see a unique key violation it means that the creation request's shard count matches the previous
    2671              :                 // creation's shard count.
    2672            0 :                 tracing::info!(
    2673            0 :                     "Tenant shards already present in database, proceeding with idempotent creation..."
    2674              :                 );
    2675              :             }
    2676              :             // Any other database error is unexpected and a bug.
    2677            0 :             Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
    2678              :         };
    2679              : 
    2680            0 :         let mut schedule_context = ScheduleContext::default();
    2681            0 :         let mut schedule_error = None;
    2682            0 :         let mut response_shards = Vec::new();
    2683            0 :         for tenant_shard_id in create_ids {
    2684            0 :             tracing::info!("Creating shard {tenant_shard_id}...");
    2685              : 
    2686            0 :             let outcome = self
    2687            0 :                 .do_initial_shard_scheduling(
    2688            0 :                     tenant_shard_id,
    2689            0 :                     initial_generation,
    2690            0 :                     create_req.shard_parameters,
    2691            0 :                     create_req.config.clone(),
    2692            0 :                     placement_policy.clone(),
    2693            0 :                     preferred_az_id.as_ref(),
    2694            0 :                     &mut schedule_context,
    2695            0 :                 )
    2696            0 :                 .await;
    2697              : 
    2698            0 :             match outcome {
    2699            0 :                 InitialShardScheduleOutcome::Scheduled(resp) => response_shards.push(resp),
    2700            0 :                 InitialShardScheduleOutcome::NotScheduled => {}
    2701            0 :                 InitialShardScheduleOutcome::ShardScheduleError(err) => {
    2702            0 :                     schedule_error = Some(err);
    2703            0 :                 }
    2704              :             }
    2705              :         }
    2706              : 
    2707              :         // If we failed to schedule shards, then they are still created in the controller,
    2708              :         // but we return an error to the requester to avoid a silent failure when someone
    2709              :         // tries to e.g. create a tenant whose placement policy requires more nodes than
    2710              :         // are present in the system.  We do this here rather than in the above loop, to
    2711              :         // avoid situations where we only create a subset of shards in the tenant.
    2712            0 :         if let Some(e) = schedule_error {
    2713            0 :             return Err(ApiError::Conflict(format!(
    2714            0 :                 "Failed to schedule shard(s): {e}"
    2715            0 :             )));
    2716            0 :         }
    2717              : 
    2718            0 :         let waiters = {
    2719            0 :             let mut locked = self.inner.write().unwrap();
    2720            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    2721            0 :             let config = ReconcilerConfigBuilder::new(ReconcilerPriority::High)
    2722            0 :                 .tenant_creation_hint(true)
    2723            0 :                 .build();
    2724            0 :             tenants
    2725            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    2726            0 :                 .filter_map(|(_shard_id, shard)| {
    2727            0 :                     self.maybe_configured_reconcile_shard(shard, nodes, config)
    2728            0 :                 })
    2729            0 :                 .collect::<Vec<_>>()
    2730              :         };
    2731              : 
    2732            0 :         Ok((
    2733            0 :             TenantCreateResponse {
    2734            0 :                 shards: response_shards,
    2735            0 :             },
    2736            0 :             waiters,
    2737            0 :         ))
    2738            0 :     }
    2739              : 
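Tenant creation fans a single tenant ID out into one shard identity per shard number, all sharing the same shard count. A small self-contained sketch of that enumeration, with a simplified TenantShardId (the u128 tenant ID stand-in is an assumption):

#[derive(Clone, Copy, Debug, PartialEq)]
struct TenantShardId {
    tenant_id: u128, // simplified stand-in for the real TenantId
    shard_number: u8,
    shard_count: u8,
}

/// Mirror of the create_ids loop above: shard numbers run from 0 to count - 1,
/// and every shard carries the same shard_count so its identity is unambiguous.
fn create_ids(tenant_id: u128, shard_count: u8) -> Vec<TenantShardId> {
    (0..shard_count)
        .map(|i| TenantShardId {
            tenant_id,
            shard_number: i,
            shard_count,
        })
        .collect()
}

fn main() {
    // A four-shard tenant yields shard numbers 0..=3, each tagged with shard_count = 4.
    let ids = create_ids(42, 4);
    assert_eq!(ids.len(), 4);
    assert_eq!(ids[3].shard_number, 3);
}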
    2740              :     /// Helper for tenant creation that does the scheduling for an individual shard. Covers both the
    2741              :     /// case of a new tenant and a pre-existing one.
    2742              :     #[allow(clippy::too_many_arguments)]
    2743            0 :     async fn do_initial_shard_scheduling(
    2744            0 :         &self,
    2745            0 :         tenant_shard_id: TenantShardId,
    2746            0 :         initial_generation: Option<Generation>,
    2747            0 :         shard_params: ShardParameters,
    2748            0 :         config: TenantConfig,
    2749            0 :         placement_policy: PlacementPolicy,
    2750            0 :         preferred_az_id: Option<&AvailabilityZone>,
    2751            0 :         schedule_context: &mut ScheduleContext,
    2752            0 :     ) -> InitialShardScheduleOutcome {
    2753            0 :         let mut locked = self.inner.write().unwrap();
    2754            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    2755              : 
    2756              :         use std::collections::btree_map::Entry;
    2757            0 :         match tenants.entry(tenant_shard_id) {
    2758            0 :             Entry::Occupied(mut entry) => {
    2759            0 :                 tracing::info!("Tenant shard {tenant_shard_id} already exists while creating");
    2760              : 
    2761            0 :                 if let Err(err) = entry.get_mut().schedule(scheduler, schedule_context) {
    2762            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(err);
    2763            0 :                 }
    2764              : 
    2765            0 :                 if let Some(node_id) = entry.get().intent.get_attached() {
    2766            0 :                     let generation = entry
    2767            0 :                         .get()
    2768            0 :                         .generation
    2769            0 :                         .expect("Generation is set when in attached mode");
    2770            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2771            0 :                         shard_id: tenant_shard_id,
    2772            0 :                         node_id: *node_id,
    2773            0 :                         generation: generation.into().unwrap(),
    2774            0 :                     })
    2775              :                 } else {
    2776            0 :                     InitialShardScheduleOutcome::NotScheduled
    2777              :                 }
    2778              :             }
    2779            0 :             Entry::Vacant(entry) => {
    2780            0 :                 let state = entry.insert(TenantShard::new(
    2781            0 :                     tenant_shard_id,
    2782            0 :                     ShardIdentity::from_params(tenant_shard_id.shard_number, shard_params),
    2783            0 :                     placement_policy,
    2784            0 :                     preferred_az_id.cloned(),
    2785              :                 ));
    2786              : 
    2787            0 :                 state.generation = initial_generation;
    2788            0 :                 state.config = config;
    2789            0 :                 if let Err(e) = state.schedule(scheduler, schedule_context) {
    2790            0 :                     return InitialShardScheduleOutcome::ShardScheduleError(e);
    2791            0 :                 }
    2792              : 
    2793              :                 // Only include shards in result if we are attaching: the purpose
    2794              :                 // of the response is to tell the caller where the shards are attached.
    2795            0 :                 if let Some(node_id) = state.intent.get_attached() {
    2796            0 :                     let generation = state
    2797            0 :                         .generation
    2798            0 :                         .expect("Generation is set when in attached mode");
    2799            0 :                     InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
    2800            0 :                         shard_id: tenant_shard_id,
    2801            0 :                         node_id: *node_id,
    2802            0 :                         generation: generation.into().unwrap(),
    2803            0 :                     })
    2804              :                 } else {
    2805            0 :                     InitialShardScheduleOutcome::NotScheduled
    2806              :                 }
    2807              :             }
    2808              :         }
    2809            0 :     }
    2810              : 
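The scheduling helper above relies on the BTreeMap entry API so that a retried creation reuses the already-present shard instead of overwriting it, and only reports a location when the shard actually ended up attached. A condensed sketch of that occupied-vs-vacant split, with a simplified Shard type (an assumption):

use std::collections::BTreeMap;
use std::collections::btree_map::Entry;

#[derive(Clone)]
struct Shard {
    attached_node: Option<u64>,
}

/// Insert the shard if it is new, otherwise keep the existing one; either way
/// report where it ended up attached (None if it could not be scheduled yet).
fn schedule_shard(
    tenants: &mut BTreeMap<String, Shard>,
    shard_id: String,
    pick_node: impl Fn() -> Option<u64>,
) -> Option<u64> {
    match tenants.entry(shard_id) {
        // Idempotent retry: the shard already exists, report its current location.
        Entry::Occupied(entry) => entry.get().attached_node,
        // Fresh creation: insert the shard and try to place it on a node.
        Entry::Vacant(entry) => {
            let shard = entry.insert(Shard {
                attached_node: pick_node(),
            });
            shard.attached_node
        }
    }
}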
    2811              :     /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
    2812              :     /// wait for reconciliation to complete before responding.
    2813            0 :     async fn await_waiters(
    2814            0 :         &self,
    2815            0 :         waiters: Vec<ReconcilerWaiter>,
    2816            0 :         timeout: Duration,
    2817            0 :     ) -> Result<(), ReconcileWaitError> {
    2818            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2819            0 :         for waiter in waiters {
    2820            0 :             let timeout = deadline.duration_since(Instant::now());
    2821            0 :             waiter.wait_timeout(timeout).await?;
    2822              :         }
    2823              : 
    2824            0 :         Ok(())
    2825            0 :     }
    2826              : 
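await_waiters budgets one overall deadline across all waiters rather than granting each its own timeout, so the total wait stays bounded even with many shards. A minimal tokio-based sketch of the same pattern, using plain JoinHandles as stand-ins for ReconcilerWaiter (an assumption):

use std::time::{Duration, Instant};
use tokio::task::JoinHandle;

/// Wait for every task, but bound the *total* wall-clock time with a single deadline:
/// each successive wait only gets whatever budget remains.
async fn await_all_with_deadline(
    handles: Vec<JoinHandle<()>>,
    timeout: Duration,
) -> Result<(), &'static str> {
    let deadline = Instant::now() + timeout;
    for handle in handles {
        // saturating_duration_since returns zero once the deadline has passed,
        // so later waiters fail fast instead of extending the overall wait.
        let remaining = deadline.saturating_duration_since(Instant::now());
        tokio::time::timeout(remaining, handle)
            .await
            .map_err(|_| "timed out")?
            .map_err(|_| "task panicked")?;
    }
    Ok(())
}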
    2827              :     /// Same as [`Service::await_waiters`], but returns the waiters which are still
    2828              :     /// in progress
    2829            0 :     async fn await_waiters_remainder(
    2830            0 :         &self,
    2831            0 :         waiters: Vec<ReconcilerWaiter>,
    2832            0 :         timeout: Duration,
    2833            0 :     ) -> Vec<ReconcilerWaiter> {
    2834            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2835            0 :         for waiter in waiters.iter() {
    2836            0 :             let timeout = deadline.duration_since(Instant::now());
    2837            0 :             let _ = waiter.wait_timeout(timeout).await;
    2838              :         }
    2839              : 
    2840            0 :         waiters
    2841            0 :             .into_iter()
    2842            0 :             .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
    2843            0 :             .collect::<Vec<_>>()
    2844            0 :     }
    2845              : 
    2846              :     /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
    2847              :     /// and transform it into either a tenant creation or a series of shard updates.
    2848              :     ///
    2849              :     /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
    2850              :     /// still be returned.
    2851            0 :     fn tenant_location_config_prepare(
    2852            0 :         &self,
    2853            0 :         tenant_id: TenantId,
    2854            0 :         req: TenantLocationConfigRequest,
    2855            0 :     ) -> TenantCreateOrUpdate {
    2856            0 :         let mut updates = Vec::new();
    2857            0 :         let mut locked = self.inner.write().unwrap();
    2858            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    2859            0 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2860              : 
    2861              :         // Use location config mode as an indicator of policy.
    2862            0 :         let placement_policy = match req.config.mode {
    2863            0 :             LocationConfigMode::Detached => PlacementPolicy::Detached,
    2864            0 :             LocationConfigMode::Secondary => PlacementPolicy::Secondary,
    2865              :             LocationConfigMode::AttachedMulti
    2866              :             | LocationConfigMode::AttachedSingle
    2867              :             | LocationConfigMode::AttachedStale => {
    2868            0 :                 if nodes.len() > 1 {
    2869            0 :                     PlacementPolicy::Attached(1)
    2870              :                 } else {
    2871              :                     // Convenience for dev/test: if we just have one pageserver, import
    2872              :                     // tenants into non-HA mode so that scheduling will succeed.
    2873            0 :                     PlacementPolicy::Attached(0)
    2874              :                 }
    2875              :             }
    2876              :         };
    2877              : 
    2878              :         // Ordinarily we do not update scheduling policy, but when making major changes
    2879              :         // like detaching or demoting to secondary-only, we need to force the scheduling
    2880              :         // mode to Active, or the caller's expected outcome (detach it) will not happen.
    2881            0 :         let scheduling_policy = match req.config.mode {
    2882              :             LocationConfigMode::Detached | LocationConfigMode::Secondary => {
    2883              :                 // Special case: when making major changes like detaching or demoting to secondary-only,
    2884              :                 // we need to force the scheduling mode to Active, or nothing will happen.
    2885            0 :                 Some(ShardSchedulingPolicy::Active)
    2886              :             }
    2887              :             LocationConfigMode::AttachedMulti
    2888              :             | LocationConfigMode::AttachedSingle
    2889              :             | LocationConfigMode::AttachedStale => {
    2890              :                 // While attached, continue to respect whatever the existing scheduling mode is.
    2891            0 :                 None
    2892              :             }
    2893              :         };
    2894              : 
    2895            0 :         let mut create = true;
    2896            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2897              :             // Saw an existing shard: this is not a creation
    2898            0 :             create = false;
    2899              : 
    2900              :             // Shards may have initially been created by a Secondary request, where we
    2901              :             // would have left generation as None.
    2902              :             //
    2903              :             // We only update generation the first time we see an attached-mode request,
    2904              :             // and if there is no existing generation set. The caller is responsible for
    2905              :             // ensuring that no non-storage-controller pageserver ever uses a higher
    2906              :             // generation than they passed in here.
    2907              :             use LocationConfigMode::*;
    2908            0 :             let set_generation = match req.config.mode {
    2909            0 :                 AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
    2910            0 :                     req.config.generation.map(Generation::new)
    2911              :                 }
    2912            0 :                 _ => None,
    2913              :             };
    2914              : 
    2915            0 :             updates.push(ShardUpdate {
    2916            0 :                 tenant_shard_id: *shard_id,
    2917            0 :                 placement_policy: placement_policy.clone(),
    2918            0 :                 tenant_config: req.config.tenant_conf.clone(),
    2919            0 :                 generation: set_generation,
    2920            0 :                 scheduling_policy,
    2921            0 :             });
    2922              :         }
    2923              : 
    2924            0 :         if create {
    2925              :             use LocationConfigMode::*;
    2926            0 :             let generation = match req.config.mode {
    2927            0 :                 AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
    2928              :                 // If a caller provided a generation in a non-attached request, ignore it
    2929              :                 // and leave our generation as None: this enables a subsequent update to set
    2930              :                 // the generation when setting an attached mode for the first time.
    2931            0 :                 _ => None,
    2932              :             };
    2933              : 
    2934            0 :             TenantCreateOrUpdate::Create(
    2935            0 :                 // Synthesize a creation request
    2936            0 :                 TenantCreateRequest {
    2937            0 :                     new_tenant_id: tenant_shard_id,
    2938            0 :                     generation,
    2939            0 :                     shard_parameters: ShardParameters {
    2940            0 :                         count: tenant_shard_id.shard_count,
    2941            0 :                         // We only import un-sharded or single-sharded tenants, so stripe
    2942            0 :                         // size can be made up arbitrarily here.
    2943            0 :                         stripe_size: DEFAULT_STRIPE_SIZE,
    2944            0 :                     },
    2945            0 :                     placement_policy: Some(placement_policy),
    2946            0 :                     config: req.config.tenant_conf,
    2947            0 :                 },
    2948            0 :             )
    2949              :         } else {
    2950            0 :             assert!(!updates.is_empty());
    2951            0 :             TenantCreateOrUpdate::Update(updates)
    2952              :         }
    2953            0 :     }
    2954              : 
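The prepare step derives the placement policy purely from the requested location mode and the number of known pageservers: detached and secondary modes map directly, while any attached mode gets one secondary when more than one node exists and none in single-node dev/test setups. A condensed sketch of that mapping with simplified enums (assumptions, not the real definitions):

#[derive(Debug, PartialEq)]
enum LocationConfigMode {
    Detached,
    Secondary,
    AttachedSingle,
    AttachedMulti,
    AttachedStale,
}

#[derive(Debug, PartialEq)]
enum PlacementPolicy {
    Detached,
    Secondary,
    /// Attached with N secondary locations.
    Attached(usize),
}

/// Mode -> policy translation: attached modes get one secondary when there is
/// more than one pageserver, and zero in single-node setups so scheduling succeeds.
fn placement_for_mode(mode: &LocationConfigMode, node_count: usize) -> PlacementPolicy {
    match mode {
        LocationConfigMode::Detached => PlacementPolicy::Detached,
        LocationConfigMode::Secondary => PlacementPolicy::Secondary,
        _ => {
            if node_count > 1 {
                PlacementPolicy::Attached(1)
            } else {
                PlacementPolicy::Attached(0)
            }
        }
    }
}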
    2955              :     /// For APIs that might act on tenants with [`PlacementPolicy::Detached`], first check if
    2956              :     /// the tenant is present in memory. If not, load it from the database.  If it is found
    2957              :     /// in neither location, return a NotFound error.
    2958              :     ///
    2959              :     /// Caller must demonstrate they hold a lock guard, as otherwise two callers might try and load
    2960              :     /// it at the same time, or we might race with [`Self::maybe_drop_tenant`]
    2961            0 :     async fn maybe_load_tenant(
    2962            0 :         &self,
    2963            0 :         tenant_id: TenantId,
    2964            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    2965            0 :     ) -> Result<(), ApiError> {
    2966              :         // Check if the tenant is present in memory, and select an AZ to use when loading
    2967              :         // if we will load it.
    2968            0 :         let load_in_az = {
    2969            0 :             let locked = self.inner.read().unwrap();
    2970            0 :             let existing = locked
    2971            0 :                 .tenants
    2972            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    2973            0 :                 .next();
    2974              : 
    2975              :             // If the tenant is not present in memory, we expect to load it from database,
    2976              :             // so let's figure out what AZ to load it into while we have self.inner locked.
    2977            0 :             if existing.is_none() {
    2978            0 :                 locked
    2979            0 :                     .scheduler
    2980            0 :                     .get_az_for_new_tenant()
    2981            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    2982            0 :                         "No AZ with nodes found to load tenant"
    2983            0 :                     )))?
    2984              :             } else {
    2985              :                 // We already have this tenant in memory
    2986            0 :                 return Ok(());
    2987              :             }
    2988              :         };
    2989              : 
    2990            0 :         let tenant_shards = self.persistence.load_tenant(tenant_id).await?;
    2991            0 :         if tenant_shards.is_empty() {
    2992            0 :             return Err(ApiError::NotFound(
    2993            0 :                 anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    2994            0 :             ));
    2995            0 :         }
    2996              : 
    2997              :         // Update the persistent shards with the AZ that we are about to apply to in-memory state
    2998            0 :         self.persistence
    2999            0 :             .set_tenant_shard_preferred_azs(
    3000            0 :                 tenant_shards
    3001            0 :                     .iter()
    3002            0 :                     .map(|t| {
    3003            0 :                         (
    3004            0 :                             t.get_tenant_shard_id().expect("Corrupt shard in database"),
    3005            0 :                             Some(load_in_az.clone()),
    3006            0 :                         )
    3007            0 :                     })
    3008            0 :                     .collect(),
    3009              :             )
    3010            0 :             .await?;
    3011              : 
    3012            0 :         let mut locked = self.inner.write().unwrap();
    3013            0 :         tracing::info!(
    3014            0 :             "Loaded {} shards for tenant {}",
    3015            0 :             tenant_shards.len(),
    3016              :             tenant_id
    3017              :         );
    3018              : 
    3019            0 :         locked.tenants.extend(tenant_shards.into_iter().map(|p| {
    3020            0 :             let intent = IntentState::new(Some(load_in_az.clone()));
    3021            0 :             let shard =
    3022            0 :                 TenantShard::from_persistent(p, intent).expect("Corrupt shard row in database");
    3023              : 
    3024              :             // Sanity check: when loading on-demand, we should always be loading something Detached
    3025            0 :             debug_assert!(shard.policy == PlacementPolicy::Detached);
    3026            0 :             if shard.policy != PlacementPolicy::Detached {
    3027            0 :                 tracing::error!(
    3028            0 :                     "Tenant shard {} loaded on-demand, but has non-Detached policy {:?}",
    3029              :                     shard.tenant_shard_id,
    3030              :                     shard.policy
    3031              :                 );
    3032            0 :             }
    3033              : 
    3034            0 :             (shard.tenant_shard_id, shard)
    3035            0 :         }));
    3036              : 
    3037            0 :         Ok(())
    3038            0 :     }
    3039              : 
    3040              :     /// If all shards for a tenant are detached, and in a fully quiescent state (no observed locations on pageservers),
    3041              :     /// and have no reconciler running, then we can drop the tenant from memory.  It will be reloaded on-demand
    3042              :     /// if we are asked to attach it again (see [`Self::maybe_load_tenant`]).
    3043              :     ///
    3044              :     /// Caller must demonstrate they hold a lock guard, as otherwise it is unsafe to drop a tenant from
    3045              :     /// memory while some other function, not holding the lock on Self::inner, might assume it continues to exist.
    3046            0 :     fn maybe_drop_tenant(
    3047            0 :         &self,
    3048            0 :         tenant_id: TenantId,
    3049            0 :         locked: &mut std::sync::RwLockWriteGuard<ServiceState>,
    3050            0 :         _guard: &TracingExclusiveGuard<TenantOperations>,
    3051            0 :     ) {
    3052            0 :         let mut tenant_shards = locked.tenants.range(TenantShardId::tenant_range(tenant_id));
    3053            0 :         if tenant_shards.all(|(_id, shard)| {
    3054            0 :             shard.policy == PlacementPolicy::Detached
    3055            0 :                 && shard.reconciler.is_none()
    3056            0 :                 && shard.observed.is_empty()
    3057            0 :         }) {
    3058            0 :             let keys = locked
    3059            0 :                 .tenants
    3060            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3061            0 :                 .map(|(id, _)| id)
    3062            0 :                 .copied()
    3063            0 :                 .collect::<Vec<_>>();
    3064            0 :             for key in keys {
    3065            0 :                 tracing::info!("Dropping detached tenant shard {} from memory", key);
    3066            0 :                 locked.tenants.remove(&key);
    3067              :             }
    3068            0 :         }
    3069            0 :     }
    3070              : 
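maybe_drop_tenant is an all-or-nothing check: the tenant is only evicted from memory when every shard is detached, has no reconciler running, and has no observed locations left on any pageserver. A small sketch of that quiescence gate over a BTreeMap range, with a simplified composite key and Shard struct (both assumptions):

use std::collections::BTreeMap;

struct Shard {
    detached: bool,
    reconciler_running: bool,
    observed_locations: usize,
}

/// Key is (tenant_id, shard_number); drop all of a tenant's shards only if
/// every one of them is fully quiescent.
fn maybe_drop(tenants: &mut BTreeMap<(u32, u8), Shard>, tenant_id: u32) {
    let quiescent = tenants
        .range((tenant_id, 0)..=(tenant_id, u8::MAX))
        .all(|(_, s)| s.detached && !s.reconciler_running && s.observed_locations == 0);

    if quiescent {
        // Collect keys first so we do not mutate the map while iterating it.
        let keys: Vec<_> = tenants
            .range((tenant_id, 0)..=(tenant_id, u8::MAX))
            .map(|(k, _)| *k)
            .collect();
        for key in keys {
            tenants.remove(&key);
        }
    }
}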
    3071              :     /// This API is used by the cloud control plane to migrate unsharded tenants that it created
    3072              :     /// directly with pageservers into this service.
    3073              :     ///
    3074              :     /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
    3075              :     /// has attempted to call this API. Failure to comply with this rule may lead to S3 corruption.
    3076              :     /// Think of the first attempt to call this API as a transfer of absolute authority over the
    3077              :     /// tenant's source of generation numbers.
    3078              :     ///
    3079              :     /// The mode in this request gives coarse-grained control of tenants:
    3080              :     /// - Call with mode Attached* to upsert the tenant.
    3081              :     /// - Call with mode Secondary to either onboard a tenant without attaching it, or
    3082              :     ///   to set an existing tenant to PolicyMode::Secondary
    3083              :     /// - Call with mode Detached to switch to PolicyMode::Detached
    3084            0 :     pub(crate) async fn tenant_location_config(
    3085            0 :         &self,
    3086            0 :         tenant_shard_id: TenantShardId,
    3087            0 :         req: TenantLocationConfigRequest,
    3088            0 :     ) -> Result<TenantLocationConfigResponse, ApiError> {
    3089              :         // We require an exclusive lock, because we are updating both persistent and in-memory state
    3090            0 :         let _tenant_lock = trace_exclusive_lock(
    3091            0 :             &self.tenant_op_locks,
    3092            0 :             tenant_shard_id.tenant_id,
    3093            0 :             TenantOperations::LocationConfig,
    3094            0 :         )
    3095            0 :         .await;
    3096              : 
    3097            0 :         let tenant_id = if !tenant_shard_id.is_unsharded() {
    3098            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    3099            0 :                 "This API is for importing single-sharded or unsharded tenants"
    3100            0 :             )));
    3101              :         } else {
    3102            0 :             tenant_shard_id.tenant_id
    3103              :         };
    3104              : 
    3105              :         // In case we are waking up a Detached tenant
    3106            0 :         match self.maybe_load_tenant(tenant_id, &_tenant_lock).await {
    3107            0 :             Ok(()) | Err(ApiError::NotFound(_)) => {
    3108            0 :                 // This is a creation or an update
    3109            0 :             }
    3110            0 :             Err(e) => {
    3111            0 :                 return Err(e);
    3112              :             }
    3113              :         };
    3114              : 
    3115              :         // First check if this is a creation or an update
    3116            0 :         let create_or_update = self.tenant_location_config_prepare(tenant_id, req);
    3117              : 
    3118            0 :         let mut result = TenantLocationConfigResponse {
    3119            0 :             shards: Vec::new(),
    3120            0 :             stripe_size: None,
    3121            0 :         };
    3122            0 :         let waiters = match create_or_update {
    3123            0 :             TenantCreateOrUpdate::Create(create_req) => {
    3124            0 :                 let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
    3125            0 :                 result.shards = create_resp
    3126            0 :                     .shards
    3127            0 :                     .into_iter()
    3128            0 :                     .map(|s| TenantShardLocation {
    3129            0 :                         node_id: s.node_id,
    3130            0 :                         shard_id: s.shard_id,
    3131            0 :                     })
    3132            0 :                     .collect();
    3133            0 :                 waiters
    3134              :             }
    3135            0 :             TenantCreateOrUpdate::Update(updates) => {
    3136              :                 // Persist updates
    3137              :                 // Ordering: write to the database before applying changes in-memory, so that
    3138              :                 // we will not appear time-travel backwards on a restart.
    3139              : 
    3140            0 :                 let mut schedule_context = ScheduleContext::default();
    3141              :                 for ShardUpdate {
    3142            0 :                     tenant_shard_id,
    3143            0 :                     placement_policy,
    3144            0 :                     tenant_config,
    3145            0 :                     generation,
    3146            0 :                     scheduling_policy,
    3147            0 :                 } in &updates
    3148              :                 {
    3149            0 :                     self.persistence
    3150            0 :                         .update_tenant_shard(
    3151            0 :                             TenantFilter::Shard(*tenant_shard_id),
    3152            0 :                             Some(placement_policy.clone()),
    3153            0 :                             Some(tenant_config.clone()),
    3154            0 :                             *generation,
    3155            0 :                             *scheduling_policy,
    3156            0 :                         )
    3157            0 :                         .await?;
    3158              :                 }
    3159              : 
    3160              :                 // Apply updates in-memory
    3161            0 :                 let mut waiters = Vec::new();
    3162              :                 {
    3163            0 :                     let mut locked = self.inner.write().unwrap();
    3164            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    3165              : 
    3166              :                     for ShardUpdate {
    3167            0 :                         tenant_shard_id,
    3168            0 :                         placement_policy,
    3169            0 :                         tenant_config,
    3170            0 :                         generation: update_generation,
    3171            0 :                         scheduling_policy,
    3172            0 :                     } in updates
    3173              :                     {
    3174            0 :                         let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    3175            0 :                             tracing::warn!("Shard {tenant_shard_id} removed while updating");
    3176            0 :                             continue;
    3177              :                         };
    3178              : 
    3179              :                         // Update stripe size
    3180            0 :                         if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
    3181            0 :                             result.stripe_size = Some(shard.shard.stripe_size);
    3182            0 :                         }
    3183              : 
    3184            0 :                         shard.policy = placement_policy;
    3185            0 :                         shard.config = tenant_config;
    3186            0 :                         if let Some(generation) = update_generation {
    3187            0 :                             shard.generation = Some(generation);
    3188            0 :                         }
    3189              : 
    3190            0 :                         if let Some(scheduling_policy) = scheduling_policy {
    3191            0 :                             shard.set_scheduling_policy(scheduling_policy);
    3192            0 :                         }
    3193              : 
    3194            0 :                         shard.schedule(scheduler, &mut schedule_context)?;
    3195              : 
    3196            0 :                         let maybe_waiter =
    3197            0 :                             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3198            0 :                         if let Some(waiter) = maybe_waiter {
    3199            0 :                             waiters.push(waiter);
    3200            0 :                         }
    3201              : 
    3202            0 :                         if let Some(node_id) = shard.intent.get_attached() {
    3203            0 :                             result.shards.push(TenantShardLocation {
    3204            0 :                                 shard_id: tenant_shard_id,
    3205            0 :                                 node_id: *node_id,
    3206            0 :                             })
    3207            0 :                         }
    3208              :                     }
    3209              :                 }
    3210            0 :                 waiters
    3211              :             }
    3212              :         };
    3213              : 
    3214            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3215              :             // Do not treat a reconcile error as fatal: we have already applied any requested
    3216              :             // Intent changes, and the reconcile can fail for external reasons like unavailable
    3217              :             // compute notification API.  In these cases, it is important that we do not
    3218              :             // cause the cloud control plane to retry forever on this API.
    3219            0 :             tracing::warn!(
    3220            0 :                 "Failed to reconcile after /location_config: {e}, returning success anyway"
    3221              :             );
    3222            0 :         }
    3223              : 
    3224              :         // Logging the full result is useful because it lets us cross-check what the cloud control
    3225              :         // plane's tenant_shards table should contain.
    3226            0 :         tracing::info!("Complete, returning {result:?}");
    3227              : 
    3228            0 :         Ok(result)
    3229            0 :     }
    3230              : 
    3231            0 :     pub(crate) async fn tenant_config_patch(
    3232            0 :         &self,
    3233            0 :         req: TenantConfigPatchRequest,
    3234            0 :     ) -> Result<(), ApiError> {
    3235            0 :         let _tenant_lock = trace_exclusive_lock(
    3236            0 :             &self.tenant_op_locks,
    3237            0 :             req.tenant_id,
    3238            0 :             TenantOperations::ConfigPatch,
    3239            0 :         )
    3240            0 :         .await;
    3241              : 
    3242            0 :         let tenant_id = req.tenant_id;
    3243            0 :         let patch = req.config;
    3244              : 
    3245            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3246              : 
    3247            0 :         let base = {
    3248            0 :             let locked = self.inner.read().unwrap();
    3249            0 :             let shards = locked
    3250            0 :                 .tenants
    3251            0 :                 .range(TenantShardId::tenant_range(req.tenant_id));
    3252              : 
    3253            0 :             let mut configs = shards.map(|(_sid, shard)| &shard.config).peekable();
    3254              : 
    3255            0 :             let first = match configs.peek() {
    3256            0 :                 Some(first) => (*first).clone(),
    3257              :                 None => {
    3258            0 :                     return Err(ApiError::NotFound(
    3259            0 :                         anyhow::anyhow!("Tenant {} not found", req.tenant_id).into(),
    3260            0 :                     ));
    3261              :                 }
    3262              :             };
    3263              : 
    3264            0 :             if !configs.all_equal() {
    3265            0 :                 tracing::error!("Tenant configs for {} are mismatched. ", req.tenant_id);
    3266              :                 // This can't happen because we atomically update the database records
    3267              :                 // of all shards to the new value in [`Self::set_tenant_config_and_reconcile`].
    3268            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3269            0 :                     "Tenant configs for {} are mismatched",
    3270            0 :                     req.tenant_id
    3271            0 :                 )));
    3272            0 :             }
    3273              : 
    3274            0 :             first
    3275              :         };
    3276              : 
    3277            0 :         let updated_config = base
    3278            0 :             .apply_patch(patch)
    3279            0 :             .map_err(|err| ApiError::BadRequest(anyhow::anyhow!(err)))?;
    3280            0 :         self.set_tenant_config_and_reconcile(tenant_id, updated_config)
    3281            0 :             .await
    3282            0 :     }
    3283              : 
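Patching starts from the config of the first shard and insists that every shard of the tenant reports an identical base config, since updates are written to all shards atomically. A tiny sketch of that consistency gate using itertools::all_equal (itertools is already imported by this module; the String config stand-in is an assumption):

use itertools::Itertools;

/// Return the shared base config if all shards agree, otherwise None.
/// A mismatch indicates a bug, because config updates touch every shard of a
/// tenant in one database transaction.
fn shared_base_config(shard_configs: &[String]) -> Option<&String> {
    let mut configs = shard_configs.iter().peekable();
    // Peek rather than next so the first element still takes part in all_equal.
    let first = *configs.peek()?;
    if configs.all_equal() { Some(first) } else { None }
}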
    3284            0 :     pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
    3285              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3286            0 :         let _tenant_lock = trace_exclusive_lock(
    3287            0 :             &self.tenant_op_locks,
    3288            0 :             req.tenant_id,
    3289            0 :             TenantOperations::ConfigSet,
    3290            0 :         )
    3291            0 :         .await;
    3292              : 
    3293            0 :         self.maybe_load_tenant(req.tenant_id, &_tenant_lock).await?;
    3294              : 
    3295            0 :         self.set_tenant_config_and_reconcile(req.tenant_id, req.config)
    3296            0 :             .await
    3297            0 :     }
    3298              : 
    3299            0 :     async fn set_tenant_config_and_reconcile(
    3300            0 :         &self,
    3301            0 :         tenant_id: TenantId,
    3302            0 :         config: TenantConfig,
    3303            0 :     ) -> Result<(), ApiError> {
    3304            0 :         self.persistence
    3305            0 :             .update_tenant_shard(
    3306            0 :                 TenantFilter::Tenant(tenant_id),
    3307            0 :                 None,
    3308            0 :                 Some(config.clone()),
    3309            0 :                 None,
    3310            0 :                 None,
    3311            0 :             )
    3312            0 :             .await?;
    3313              : 
    3314            0 :         let waiters = {
    3315            0 :             let mut waiters = Vec::new();
    3316            0 :             let mut locked = self.inner.write().unwrap();
    3317            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    3318            0 :             for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3319            0 :                 shard.config = config.clone();
    3320            0 :                 if let Some(waiter) =
    3321            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3322            0 :                 {
    3323            0 :                     waiters.push(waiter);
    3324            0 :                 }
    3325              :             }
    3326            0 :             waiters
    3327              :         };
    3328              : 
    3329            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    3330              :             // Treat this as success because we have stored the configuration.  If e.g.
    3331              :             // a node was unavailable at this time, it should not stop us accepting a
    3332              :             // configuration change.
    3333            0 :             tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
    3334            0 :         }
    3335              : 
    3336            0 :         Ok(())
    3337            0 :     }
    3338              : 
    3339            0 :     pub(crate) fn tenant_config_get(
    3340            0 :         &self,
    3341            0 :         tenant_id: TenantId,
    3342            0 :     ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
    3343            0 :         let config = {
    3344            0 :             let locked = self.inner.read().unwrap();
    3345              : 
    3346            0 :             match locked
    3347            0 :                 .tenants
    3348            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3349            0 :                 .next()
    3350              :             {
    3351            0 :                 Some((_tenant_shard_id, shard)) => shard.config.clone(),
    3352              :                 None => {
    3353            0 :                     return Err(ApiError::NotFound(
    3354            0 :                         anyhow::anyhow!("Tenant not found").into(),
    3355            0 :                     ));
    3356              :                 }
    3357              :             }
    3358              :         };
    3359              : 
    3360              :         // Unlike the pageserver, we do not have a set of global defaults: the config is
    3361              :         // entirely per-tenant.  Therefore the distinction between `tenant_specific_overrides`
    3362              :         // and `effective_config` in the response is meaningless, but we retain that syntax
    3363              :         // in order to remain compatible with the pageserver API.
    3364              : 
    3365            0 :         let response = HashMap::from([
    3366              :             (
    3367              :                 "tenant_specific_overrides",
    3368            0 :                 serde_json::to_value(&config)
    3369            0 :                     .context("serializing tenant specific overrides")
    3370            0 :                     .map_err(ApiError::InternalServerError)?,
    3371              :             ),
    3372              :             (
    3373            0 :                 "effective_config",
    3374            0 :                 serde_json::to_value(&config)
    3375            0 :                     .context("serializing effective config")
    3376            0 :                     .map_err(ApiError::InternalServerError)?,
    3377              :             ),
    3378              :         ]);
    3379              : 
    3380            0 :         Ok(response)
    3381            0 :     }
    3382              : 
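tenant_config_get serialises the same per-tenant config under both response keys purely to stay compatible with the pageserver API, since the controller has no global defaults to overlay. A minimal sketch of building that response with serde_json, using an assumed stand-in Config struct:

use std::collections::HashMap;

use serde::Serialize;
use serde_json::Value;

#[derive(Serialize, Clone)]
struct Config {
    // Assumed example field; the real TenantConfig has many optional knobs.
    compaction_threshold: Option<u32>,
}

/// Overrides and effective config are intentionally the same value here,
/// serialised twice only to mirror the pageserver's response shape.
fn config_response(config: &Config) -> Result<HashMap<&'static str, Value>, serde_json::Error> {
    Ok(HashMap::from([
        ("tenant_specific_overrides", serde_json::to_value(config)?),
        ("effective_config", serde_json::to_value(config)?),
    ]))
}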
    3383            0 :     pub(crate) async fn tenant_time_travel_remote_storage(
    3384            0 :         &self,
    3385            0 :         time_travel_req: &TenantTimeTravelRequest,
    3386            0 :         tenant_id: TenantId,
    3387            0 :         timestamp: Cow<'_, str>,
    3388            0 :         done_if_after: Cow<'_, str>,
    3389            0 :     ) -> Result<(), ApiError> {
    3390            0 :         let _tenant_lock = trace_exclusive_lock(
    3391            0 :             &self.tenant_op_locks,
    3392            0 :             tenant_id,
    3393            0 :             TenantOperations::TimeTravelRemoteStorage,
    3394            0 :         )
    3395            0 :         .await;
    3396              : 
    3397            0 :         let node = {
    3398            0 :             let mut locked = self.inner.write().unwrap();
    3399              :             // Just a sanity check to prevent misuse: the API expects that the tenant is fully
    3400              :             // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
    3401              :             // but only at the start of the process, so it's really just to prevent operator
    3402              :             // mistakes.
    3403            0 :             for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
    3404            0 :                 if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
    3405              :                 {
    3406            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3407            0 :                         "We want tenant to be detached in shard with tenant_shard_id={shard_id}"
    3408            0 :                     )));
    3409            0 :                 }
    3410            0 :                 let maybe_attached = shard
    3411            0 :                     .observed
    3412            0 :                     .locations
    3413            0 :                     .iter()
    3414            0 :                     .filter_map(|(node_id, observed_location)| {
    3415            0 :                         observed_location
    3416            0 :                             .conf
    3417            0 :                             .as_ref()
    3418            0 :                             .map(|loc| (node_id, observed_location, loc.mode))
    3419            0 :                     })
    3420            0 :                     .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
    3421            0 :                 if let Some((node_id, _observed_location, mode)) = maybe_attached {
    3422            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3423            0 :                         "We observed a non-detached location (mode={mode:?}) on node_id={node_id} for shard with tenant_shard_id={shard_id}"
    3424            0 :                     )));
    3425            0 :                 }
    3426              :             }
    3427            0 :             let scheduler = &mut locked.scheduler;
    3428              :             // Right now we only perform the operation on a single node without parallelization
    3429              :             // TODO fan out the operation to multiple nodes for better performance
    3430            0 :             let node_id = scheduler.any_available_node()?;
    3431            0 :             let node = locked
    3432            0 :                 .nodes
    3433            0 :                 .get(&node_id)
    3434            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3435            0 :             node.clone()
    3436              :         };
    3437              : 
    3438              :         // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
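                      :         // Deduplicate the requested shard counts and sort them so that each historical shard
                      :         // count is processed exactly once, in ascending order.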
    3439            0 :         let mut counts = time_travel_req
    3440            0 :             .shard_counts
    3441            0 :             .iter()
    3442            0 :             .copied()
    3443            0 :             .collect::<HashSet<_>>()
    3444            0 :             .into_iter()
    3445            0 :             .collect::<Vec<_>>();
    3446            0 :         counts.sort_unstable();
    3447              : 
    3448            0 :         for count in counts {
    3449            0 :             let shard_ids = (0..count.count())
    3450            0 :                 .map(|i| TenantShardId {
    3451            0 :                     tenant_id,
    3452            0 :                     shard_number: ShardNumber(i),
    3453            0 :                     shard_count: count,
    3454            0 :                 })
    3455            0 :                 .collect::<Vec<_>>();
    3456            0 :             for tenant_shard_id in shard_ids {
    3457            0 :                 let client = PageserverClient::new(
    3458            0 :                     node.get_id(),
    3459            0 :                     self.http_client.clone(),
    3460            0 :                     node.base_url(),
    3461            0 :                     self.config.pageserver_jwt_token.as_deref(),
    3462              :                 );
    3463              : 
    3464            0 :                 tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
    3465              : 
    3466            0 :                 client
    3467            0 :                     .tenant_time_travel_remote_storage(
    3468            0 :                         tenant_shard_id,
    3469            0 :                         &timestamp,
    3470            0 :                         &done_if_after,
    3471            0 :                     )
    3472            0 :                     .await
    3473            0 :                     .map_err(|e| {
    3474            0 :                         ApiError::InternalServerError(anyhow::anyhow!(
    3475            0 :                             "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
    3476            0 :                             node
    3477            0 :                         ))
    3478            0 :                     })?;
    3479              :             }
    3480              :         }
    3481            0 :         Ok(())
    3482            0 :     }
    3483              : 
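                      :     /// Issue secondary download requests to every secondary location of the tenant's shards,
                      :     /// aggregating the per-shard progress and status codes into a single response.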
    3484            0 :     pub(crate) async fn tenant_secondary_download(
    3485            0 :         &self,
    3486            0 :         tenant_id: TenantId,
    3487            0 :         wait: Option<Duration>,
    3488            0 :     ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
    3489            0 :         let _tenant_lock = trace_shared_lock(
    3490            0 :             &self.tenant_op_locks,
    3491            0 :             tenant_id,
    3492            0 :             TenantOperations::SecondaryDownload,
    3493            0 :         )
    3494            0 :         .await;
    3495              : 
    3496              :         // Acquire the lock and build the collection of shard-node tuples to which we will send requests
    3497            0 :         let targets = {
    3498            0 :             let locked = self.inner.read().unwrap();
    3499            0 :             let mut targets = Vec::new();
    3500              : 
    3501            0 :             for (tenant_shard_id, shard) in
    3502            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3503              :             {
    3504            0 :                 for node_id in shard.intent.get_secondary() {
    3505            0 :                     let node = locked
    3506            0 :                         .nodes
    3507            0 :                         .get(node_id)
    3508            0 :                         .expect("Pageservers may not be deleted while referenced");
    3509            0 : 
    3510            0 :                     targets.push((*tenant_shard_id, node.clone()));
    3511            0 :                 }
    3512              :             }
    3513            0 :             targets
    3514              :         };
    3515              : 
    3516              :         // Issue concurrent requests to all shards' locations
    3517            0 :         let mut futs = FuturesUnordered::new();
    3518            0 :         for (tenant_shard_id, node) in targets {
    3519            0 :             let client = PageserverClient::new(
    3520            0 :                 node.get_id(),
    3521            0 :                 self.http_client.clone(),
    3522            0 :                 node.base_url(),
    3523            0 :                 self.config.pageserver_jwt_token.as_deref(),
    3524              :             );
    3525            0 :             futs.push(async move {
    3526            0 :                 let result = client
    3527            0 :                     .tenant_secondary_download(tenant_shard_id, wait)
    3528            0 :                     .await;
    3529            0 :                 (result, node, tenant_shard_id)
    3530            0 :             })
    3531              :         }
    3532              : 
    3533              :         // Handle any errors returned by pageservers.  This includes cases like this request racing with
    3534              :         // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
    3535              :         // well as more general cases like 503s, 500s, or timeouts.
    3536            0 :         let mut aggregate_progress = SecondaryProgress::default();
    3537            0 :         let mut aggregate_status: Option<StatusCode> = None;
    3538            0 :         let mut error: Option<mgmt_api::Error> = None;
    3539            0 :         while let Some((result, node, tenant_shard_id)) = futs.next().await {
    3540            0 :             match result {
    3541            0 :                 Err(e) => {
    3542              :                     // Secondary downloads are advisory: if one shard's download fails, we log it and carry on. As long
    3543              :                     // as at least one shard succeeds we still report overall progress, so that whoever is calling us will
    3544              :                     // proceed with whatever migration they're doing, albeit with a slightly less warm cache than they had hoped for.
    3545            0 :                     tracing::warn!("Secondary download error from pageserver {node}: {e}",);
    3546            0 :                     error = Some(e)
    3547              :                 }
    3548            0 :                 Ok((status_code, progress)) => {
    3549            0 :                     tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
    3550            0 :                     aggregate_progress.layers_downloaded += progress.layers_downloaded;
    3551            0 :                     aggregate_progress.layers_total += progress.layers_total;
    3552            0 :                     aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
    3553            0 :                     aggregate_progress.bytes_total += progress.bytes_total;
    3554            0 :                     aggregate_progress.heatmap_mtime =
    3555            0 :                         std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
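                      :                     // Status aggregation: an OK may be overwritten by a later non-OK status
                      :                     // (e.g. 202), but once a non-OK status is recorded it is kept.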
    3556            0 :                     aggregate_status = match aggregate_status {
    3557            0 :                         None => Some(status_code),
    3558            0 :                         Some(StatusCode::OK) => Some(status_code),
    3559            0 :                         Some(cur) => {
    3560              :                             // Other status codes (e.g. 202) -- do not overwrite.
    3561            0 :                             Some(cur)
    3562              :                         }
    3563              :                     };
    3564              :                 }
    3565              :             }
    3566              :         }
    3567              : 
    3568              :         // If any of the shards return 202, indicate our result as 202.
    3569            0 :         match aggregate_status {
    3570              :             None => {
    3571            0 :                 match error {
    3572            0 :                     Some(e) => {
    3573              :                         // No successes, and an error: surface it
    3574            0 :                         Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
    3575              :                     }
    3576              :                     None => {
    3577              :                         // No shards found
    3578            0 :                         Err(ApiError::NotFound(
    3579            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    3580            0 :                         ))
    3581              :                     }
    3582              :                 }
    3583              :             }
    3584            0 :             Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
    3585              :         }
    3586            0 :     }
    3587              : 
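                      :     /// Delete a tenant everywhere: detach all shards, delete remote data via an arbitrary
                      :     /// pageserver, then drop the controller's database and in-memory state (and safekeeper
                      :     /// timelines, if configured).  Success is reported as 404 to mirror the pageserver API.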
    3588            0 :     pub(crate) async fn tenant_delete(
    3589            0 :         self: &Arc<Self>,
    3590            0 :         tenant_id: TenantId,
    3591            0 :     ) -> Result<StatusCode, ApiError> {
    3592            0 :         let _tenant_lock =
    3593            0 :             trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
    3594              : 
    3595            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3596              : 
    3597              :         // Detach all shards. This also deletes local pageserver shard data.
    3598            0 :         let (detach_waiters, node) = {
    3599            0 :             let mut detach_waiters = Vec::new();
    3600            0 :             let mut locked = self.inner.write().unwrap();
    3601            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3602            0 :             for (_, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3603              :                 // Update the tenant's intent to remove all attachments
    3604            0 :                 shard.policy = PlacementPolicy::Detached;
    3605            0 :                 shard
    3606            0 :                     .schedule(scheduler, &mut ScheduleContext::default())
    3607            0 :                     .expect("De-scheduling is infallible");
    3608            0 :                 debug_assert!(shard.intent.get_attached().is_none());
    3609            0 :                 debug_assert!(shard.intent.get_secondary().is_empty());
    3610              : 
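                      :                 // Kick off a high-priority reconcile to perform the detach, and collect the
                      :                 // waiter so that we can await completion below.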
    3611            0 :                 if let Some(waiter) =
    3612            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    3613            0 :                 {
    3614            0 :                     detach_waiters.push(waiter);
    3615            0 :                 }
    3616              :             }
    3617              : 
    3618              :             // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
    3619              :             // was attached, just has to be able to see the S3 content)
    3620            0 :             let node_id = scheduler.any_available_node()?;
    3621            0 :             let node = nodes
    3622            0 :                 .get(&node_id)
    3623            0 :                 .expect("Pageservers may not be deleted while lock is active");
    3624            0 :             (detach_waiters, node.clone())
    3625              :         };
    3626              : 
    3627              :         // This reconcile wait can fail in a few ways:
    3628              :         //  A there is a very long queue for the reconciler semaphore
    3629              :         //  B some pageserver is failing to handle a detach promptly
    3630              :         //  C some pageserver goes offline right at the moment we send it a request.
    3631              :         //
    3632              :         // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
    3633              :         // the next attempt to reconcile will silently skip detaches for an offline node and succeed.  If B happens,
    3634              :         // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
    3635              :         // deleting the underlying data).
    3636            0 :         self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
    3637            0 :             .await?;
    3638              : 
    3639              :         // Delete the entire tenant (all shards) from remote storage via a random pageserver.
    3640              :         // Passing an unsharded tenant ID will cause the pageserver to remove all remote paths with
    3641              :         // the tenant ID prefix, including all shards (even possibly stale ones).
    3642            0 :         match node
    3643            0 :             .with_client_retries(
    3644            0 :                 |client| async move {
    3645            0 :                     client
    3646            0 :                         .tenant_delete(TenantShardId::unsharded(tenant_id))
    3647            0 :                         .await
    3648            0 :                 },
    3649            0 :                 &self.http_client,
    3650            0 :                 &self.config.pageserver_jwt_token,
    3651              :                 1,
    3652              :                 3,
    3653              :                 RECONCILE_TIMEOUT,
    3654            0 :                 &self.cancel,
    3655              :             )
    3656            0 :             .await
    3657            0 :             .unwrap_or(Err(mgmt_api::Error::Cancelled))
    3658              :         {
    3659            0 :             Ok(_) => {}
    3660              :             Err(mgmt_api::Error::Cancelled) => {
    3661            0 :                 return Err(ApiError::ShuttingDown);
    3662              :             }
    3663            0 :             Err(e) => {
    3664              :                 // This is unexpected: remote deletion should be infallible, unless the object store
    3665              :                 // at large is unavailable.
    3666            0 :                 tracing::error!("Error deleting via node {node}: {e}");
    3667            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    3668              :             }
    3669              :         }
    3670              : 
    3671              :         // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
    3672              :         // our in-memory state and database state.
    3673              : 
    3674              :         // Ordering: delete persistent state first.  If we crash after that, the in-memory
    3675              :         // state is lost with the process anyway, and the tenant will not be reloaded on restart.
    3676              : 
    3677              :         // Drop persistent state.
    3678            0 :         self.persistence.delete_tenant(tenant_id).await?;
    3679              : 
    3680              :         // Drop in-memory state
    3681              :         {
    3682            0 :             let mut locked = self.inner.write().unwrap();
    3683            0 :             let (_nodes, tenants, scheduler) = locked.parts_mut();
    3684              : 
    3685              :             // Dereference Scheduler from shards before dropping them
    3686            0 :             for (_tenant_shard_id, shard) in
    3687            0 :                 tenants.range_mut(TenantShardId::tenant_range(tenant_id))
    3688            0 :             {
    3689            0 :                 shard.intent.clear(scheduler);
    3690            0 :             }
    3691              : 
    3692            0 :             tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
    3693            0 :             tracing::info!(
    3694            0 :                 "Deleted tenant {tenant_id}, now have {} tenants",
    3695            0 :                 locked.tenants.len()
    3696              :             );
    3697              :         };
    3698              : 
    3699              :         // Delete the tenant from safekeepers (if needed)
    3700            0 :         self.tenant_delete_safekeepers(tenant_id)
    3701            0 :             .instrument(tracing::info_span!("tenant_delete_safekeepers", %tenant_id))
    3702            0 :             .await?;
    3703              : 
    3704              :         // Success is represented as 404, to imitate the existing pageserver deletion API
    3705            0 :         Ok(StatusCode::NOT_FOUND)
    3706            0 :     }
    3707              : 
    3708              :     /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
    3709              :     /// for a tenant.  The TenantConfig is passed through to pageservers, whereas this function modifies
    3710              :     /// the tenant's policies (placement and scheduling) within the storage controller.
    3711            0 :     pub(crate) async fn tenant_update_policy(
    3712            0 :         &self,
    3713            0 :         tenant_id: TenantId,
    3714            0 :         req: TenantPolicyRequest,
    3715            0 :     ) -> Result<(), ApiError> {
    3716              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    3717            0 :         let _tenant_lock = trace_exclusive_lock(
    3718            0 :             &self.tenant_op_locks,
    3719            0 :             tenant_id,
    3720            0 :             TenantOperations::UpdatePolicy,
    3721            0 :         )
    3722            0 :         .await;
    3723              : 
    3724            0 :         self.maybe_load_tenant(tenant_id, &_tenant_lock).await?;
    3725              : 
    3726            0 :         failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
    3727              : 
    3728              :         let TenantPolicyRequest {
    3729            0 :             placement,
    3730            0 :             mut scheduling,
    3731            0 :         } = req;
    3732              : 
    3733            0 :         if let Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) = placement {
    3734              :             // When someone configures a tenant to detach or to become secondary-only, we force the
    3735              :             // scheduling policy to Active so that the change takes effect.
    3736            0 :             if scheduling.is_none() {
    3737            0 :                 scheduling = Some(ShardSchedulingPolicy::Active);
    3738            0 :             }
    3739            0 :         }
    3740              : 
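                      :         // Persist the policy changes first so that they survive a restart; the in-memory
                      :         // state is updated below under the service lock.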
    3741            0 :         self.persistence
    3742            0 :             .update_tenant_shard(
    3743            0 :                 TenantFilter::Tenant(tenant_id),
    3744            0 :                 placement.clone(),
    3745            0 :                 None,
    3746            0 :                 None,
    3747            0 :                 scheduling,
    3748            0 :             )
    3749            0 :             .await?;
    3750              : 
    3751            0 :         let mut schedule_context = ScheduleContext::default();
    3752            0 :         let mut locked = self.inner.write().unwrap();
    3753            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    3754            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    3755            0 :             if let Some(placement) = &placement {
    3756            0 :                 shard.policy = placement.clone();
    3757              : 
    3758            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3759            0 :                                "Updated placement policy to {placement:?}");
    3760            0 :             }
    3761              : 
    3762            0 :             if let Some(scheduling) = &scheduling {
    3763            0 :                 shard.set_scheduling_policy(*scheduling);
    3764              : 
    3765            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    3766            0 :                                "Updated scheduling policy to {scheduling:?}");
    3767            0 :             }
    3768              : 
    3769              :             // In case scheduling is being switched back on, try it now.
    3770            0 :             shard.schedule(scheduler, &mut schedule_context).ok();
    3771            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    3772              :         }
    3773              : 
    3774            0 :         Ok(())
    3775            0 :     }
    3776              : 
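                      :     /// Create a timeline on the pageservers for all shards of a tenant.  Shard zero is created
                      :     /// first so that it picks the start LSN and generates initdb; the remaining shards then
                      :     /// reuse those values.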
    3777            0 :     pub(crate) async fn tenant_timeline_create_pageservers(
    3778            0 :         &self,
    3779            0 :         tenant_id: TenantId,
    3780            0 :         mut create_req: TimelineCreateRequest,
    3781            0 :     ) -> Result<TimelineInfo, ApiError> {
    3782            0 :         tracing::info!(
    3783            0 :             "Creating timeline {}/{}",
    3784              :             tenant_id,
    3785              :             create_req.new_timeline_id,
    3786              :         );
    3787              : 
    3788            0 :         self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    3789            0 :             if targets.0.is_empty() {
    3790            0 :                 return Err(ApiError::NotFound(
    3791            0 :                     anyhow::anyhow!("Tenant not found").into(),
    3792            0 :                 ));
    3793            0 :             };
    3794              : 
    3795            0 :             let (shard_zero_tid, shard_zero_locations) =
    3796            0 :                 targets.0.pop_first().expect("Must have at least one shard");
    3797            0 :             assert!(shard_zero_tid.is_shard_zero());
    3798              : 
    3799            0 :             async fn create_one(
    3800            0 :                 tenant_shard_id: TenantShardId,
    3801            0 :                 locations: ShardMutationLocations,
    3802            0 :                 http_client: reqwest::Client,
    3803            0 :                 jwt: Option<String>,
    3804            0 :                 mut create_req: TimelineCreateRequest,
    3805            0 :             ) -> Result<TimelineInfo, ApiError> {
    3806            0 :                 let latest = locations.latest.node;
    3807              : 
    3808            0 :                 tracing::info!(
    3809            0 :                     "Creating timeline on shard {}/{}, attached to node {latest} in generation {:?}",
    3810              :                     tenant_shard_id,
    3811              :                     create_req.new_timeline_id,
    3812              :                     locations.latest.generation
    3813              :                 );
    3814              : 
    3815            0 :                 let client =
    3816            0 :                     PageserverClient::new(latest.get_id(), http_client.clone(), latest.base_url(), jwt.as_deref());
    3817              : 
    3818            0 :                 let timeline_info = client
    3819            0 :                     .timeline_create(tenant_shard_id, &create_req)
    3820            0 :                     .await
    3821            0 :                     .map_err(|e| passthrough_api_error(&latest, e))?;
    3822              : 
    3823              :                 // If we are going to create the timeline on some stale locations for shard 0, then ask them to re-use
    3824              :                 // the initdb generated by the latest location, rather than generating their own.  This avoids racing uploads
    3825              :                 // of initdb to S3 which might not be binary-identical if different pageservers have different postgres binaries.
    3826            0 :                 if tenant_shard_id.is_shard_zero() {
    3827            0 :                     if let models::TimelineCreateRequestMode::Bootstrap { existing_initdb_timeline_id, .. } = &mut create_req.mode {
    3828            0 :                         *existing_initdb_timeline_id = Some(create_req.new_timeline_id);
    3829            0 :                     }
    3830            0 :                 }
    3831              : 
    3832              :                 // We propagate timeline creations to all attached locations such that a compute
    3833              :                 // for the new timeline is able to start regardless of the current state of the
    3834              :                 // tenant shard reconciliation.
    3835            0 :                 for location in locations.other {
    3836            0 :                     tracing::info!(
    3837            0 :                         "Creating timeline on shard {}/{}, stale attached to node {} in generation {:?}",
    3838              :                         tenant_shard_id,
    3839              :                         create_req.new_timeline_id,
    3840              :                         location.node,
    3841              :                         location.generation
    3842              :                     );
    3843              : 
    3844            0 :                     let client = PageserverClient::new(
    3845            0 :                         location.node.get_id(),
    3846            0 :                         http_client.clone(),
    3847            0 :                         location.node.base_url(),
    3848            0 :                         jwt.as_deref(),
    3849              :                     );
    3850              : 
    3851            0 :                     let res = client
    3852            0 :                         .timeline_create(tenant_shard_id, &create_req)
    3853            0 :                         .await;
    3854              : 
    3855            0 :                     if let Err(e) = res {
    3856            0 :                         match e {
    3857            0 :                             mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _) => {
    3858            0 :                                 // Tenant might have been detached from the stale location,
    3859            0 :                                 // so ignore 404s.
    3860            0 :                             },
    3861              :                             _ => {
    3862            0 :                                 return Err(passthrough_api_error(&location.node, e));
    3863              :                             }
    3864              :                         }
    3865            0 :                     }
    3866              :                 }
    3867              : 
    3868            0 :                 Ok(timeline_info)
    3869            0 :             }
    3870              : 
    3871              :             // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
    3872              :             // use whatever LSN that shard picked when creating on subsequent shards.  We arbitrarily use shard zero as the shard
    3873              :             // that will get the first creation request, and propagate the LSN to all the >0 shards.
    3874              :             //
    3875              :             // This also enables non-zero shards to use the initdb that shard 0 generated and uploaded to S3, rather than
    3876              :             // independently generating their own initdb.  This guarantees that shards cannot end up with different initial
    3877              :             // states if e.g. they have different postgres binary versions.
    3878            0 :             let timeline_info = create_one(
    3879            0 :                 shard_zero_tid,
    3880            0 :                 shard_zero_locations,
    3881            0 :                 self.http_client.clone(),
    3882            0 :                 self.config.pageserver_jwt_token.clone(),
    3883            0 :                 create_req.clone(),
    3884            0 :             )
    3885            0 :             .await?;
    3886              : 
    3887              :             // Update the create request for shards >= 1
    3888            0 :             match &mut create_req.mode {
    3889            0 :                 models::TimelineCreateRequestMode::Branch { ancestor_start_lsn, .. } if ancestor_start_lsn.is_none() => {
    3890            0 :                     // Propagate the LSN that shard zero picked, if caller didn't provide one
    3891            0 :                     *ancestor_start_lsn = timeline_info.ancestor_lsn;
    3892            0 :                 },
    3893            0 :                 models::TimelineCreateRequestMode::Bootstrap { existing_initdb_timeline_id, .. } => {
    3894              :                     // For shards >= 0, do not run initdb: use the one that shard 0 uploaded to S3
    3895              :                 // For shards >= 1, do not run initdb: use the one that shard 0 uploaded to S3
    3896              :                 }
    3897            0 :                 _ => {}
    3898              :             }
    3899              : 
    3900              :             // Create timeline on remaining shards with number >0
    3901            0 :             if !targets.0.is_empty() {
    3902              :                 // If we had multiple shards, issue requests for the remainder now.
    3903            0 :                 let jwt = &self.config.pageserver_jwt_token;
    3904            0 :                 self.tenant_for_shards(
    3905            0 :                     targets
    3906            0 :                         .0
    3907            0 :                         .iter()
    3908            0 :                         .map(|t| (*t.0, t.1.latest.node.clone()))
    3909            0 :                         .collect(),
    3910            0 :                     |tenant_shard_id: TenantShardId, _node: Node| {
    3911            0 :                         let create_req = create_req.clone();
    3912            0 :                         let mutation_locations = targets.0.remove(&tenant_shard_id).unwrap();
    3913            0 :                         Box::pin(create_one(
    3914            0 :                             tenant_shard_id,
    3915            0 :                             mutation_locations,
    3916            0 :                             self.http_client.clone(),
    3917            0 :                             jwt.clone(),
    3918            0 :                             create_req,
    3919            0 :                         ))
    3920            0 :                     },
    3921              :                 )
    3922            0 :                 .await?;
    3923            0 :             }
    3924              : 
    3925            0 :             Ok(timeline_info)
    3926            0 :         })
    3927            0 :         .await?
    3928            0 :     }
    3929              : 
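                      :     /// Create a timeline: first on the pageservers (see
                      :     /// [`Self::tenant_timeline_create_pageservers`]), then either record a pending import or
                      :     /// create the timeline on the safekeepers, depending on the request mode and configuration.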
    3930            0 :     pub(crate) async fn tenant_timeline_create(
    3931            0 :         self: &Arc<Self>,
    3932            0 :         tenant_id: TenantId,
    3933            0 :         create_req: TimelineCreateRequest,
    3934            0 :     ) -> Result<TimelineCreateResponseStorcon, ApiError> {
    3935            0 :         let safekeepers = self.config.timelines_onto_safekeepers;
    3936            0 :         let timeline_id = create_req.new_timeline_id;
    3937              : 
    3938            0 :         tracing::info!(
    3939            0 :             mode=%create_req.mode_tag(),
    3940              :             %safekeepers,
    3941            0 :             "Creating timeline {}/{}",
    3942              :             tenant_id,
    3943              :             timeline_id,
    3944              :         );
    3945              : 
    3946            0 :         let _tenant_lock = trace_shared_lock(
    3947            0 :             &self.tenant_op_locks,
    3948            0 :             tenant_id,
    3949            0 :             TenantOperations::TimelineCreate,
    3950            0 :         )
    3951            0 :         .await;
    3952            0 :         failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
    3953            0 :         let is_import = create_req.is_import();
    3954            0 :         let read_only = matches!(
    3955            0 :             create_req.mode,
    3956              :             models::TimelineCreateRequestMode::Branch {
    3957              :                 read_only: true,
    3958              :                 ..
    3959              :             }
    3960              :         );
    3961              : 
    3962            0 :         if is_import {
    3963              :             // Ensure that no shard split is ongoing.
    3964              :             // [`Self::tenant_shard_split`] holds the exclusive tenant lock
    3965              :             // for the duration of the split, but here we handle the case
    3966              :             // where we restarted and the split is being aborted.
    3967            0 :             let locked = self.inner.read().unwrap();
    3968            0 :             let splitting = locked
    3969            0 :                 .tenants
    3970            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3971            0 :                 .any(|(_id, shard)| shard.splitting != SplitState::Idle);
    3972              : 
    3973            0 :             if splitting {
    3974            0 :                 return Err(ApiError::Conflict("Tenant is splitting shard".to_string()));
    3975            0 :             }
    3976            0 :         }
    3977              : 
    3978            0 :         let timeline_info = self
    3979            0 :             .tenant_timeline_create_pageservers(tenant_id, create_req)
    3980            0 :             .await?;
    3981              : 
    3982            0 :         let selected_safekeepers = if is_import {
    3983            0 :             let shards = {
    3984            0 :                 let locked = self.inner.read().unwrap();
    3985            0 :                 locked
    3986            0 :                     .tenants
    3987            0 :                     .range(TenantShardId::tenant_range(tenant_id))
    3988            0 :                     .map(|(ts_id, _)| ts_id.to_index())
    3989            0 :                     .collect::<Vec<_>>()
    3990              :             };
    3991              : 
    3992            0 :             if !shards
    3993            0 :                 .iter()
    3994            0 :                 .map(|shard_index| shard_index.shard_count)
    3995            0 :                 .all_equal()
    3996              :             {
    3997            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3998            0 :                     "Inconsistent shard count"
    3999            0 :                 )));
    4000            0 :             }
    4001              : 
    4002            0 :             let import = TimelineImport {
    4003            0 :                 tenant_id,
    4004            0 :                 timeline_id,
    4005            0 :                 shard_statuses: ShardImportStatuses::new(shards),
    4006            0 :             };
    4007              : 
    4008            0 :             let inserted = self
    4009            0 :                 .persistence
    4010            0 :                 .insert_timeline_import(import.to_persistent())
    4011            0 :                 .await
    4012            0 :                 .context("timeline import insert")
    4013            0 :                 .map_err(ApiError::InternalServerError)?;
    4014              : 
    4015              :             // Set the importing flag on the tenant shards
    4016            0 :             self.inner
    4017            0 :                 .write()
    4018            0 :                 .unwrap()
    4019            0 :                 .tenants
    4020            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    4021            0 :                 .for_each(|(_id, shard)| shard.importing = TimelineImportState::Importing);
    4022              : 
    4023            0 :             match inserted {
    4024              :                 true => {
    4025            0 :                     tracing::info!(%tenant_id, %timeline_id, "Inserted timeline import");
    4026              :                 }
    4027              :                 false => {
    4028            0 :                     tracing::info!(%tenant_id, %timeline_id, "Timeline import entry already present");
    4029              :                 }
    4030              :             }
    4031              : 
    4032            0 :             None
    4033            0 :         } else if safekeepers || read_only {
    4034              :             // Note that for imported timelines, we do not create the timeline on the safekeepers
    4035              :             // straight away. Instead, we do it once the import has finalized, so that we know what
    4036              :             // start LSN to provide for the safekeepers. This is done in
    4037              :             // [`Self::finalize_timeline_import`].
    4038            0 :             let res = self
    4039            0 :                 .tenant_timeline_create_safekeepers(tenant_id, &timeline_info, read_only)
    4040            0 :                 .instrument(tracing::info_span!("timeline_create_safekeepers", %tenant_id, timeline_id=%timeline_info.timeline_id))
    4041            0 :                 .await?;
    4042            0 :             Some(res)
    4043              :         } else {
    4044            0 :             None
    4045              :         };
    4046              : 
    4047            0 :         Ok(TimelineCreateResponseStorcon {
    4048            0 :             timeline_info,
    4049            0 :             safekeepers: selected_safekeepers,
    4050            0 :         })
    4051            0 :     }
    4052              : 
    4053              :     #[instrument(skip_all, fields(
    4054              :         tenant_id=%req.tenant_shard_id.tenant_id,
    4055              :         shard_id=%req.tenant_shard_id.shard_slug(),
    4056              :         timeline_id=%req.timeline_id,
    4057              :     ))]
    4058              :     pub(crate) async fn handle_timeline_shard_import_progress(
    4059              :         self: &Arc<Self>,
    4060              :         req: TimelineImportStatusRequest,
    4061              :     ) -> Result<ShardImportStatus, ApiError> {
    4062              :         let validity = self
    4063              :             .validate_shard_generation(req.tenant_shard_id, req.generation)
    4064              :             .await?;
    4065              :         match validity {
    4066              :             ShardGenerationValidity::Valid => {
    4067              :                 // fallthrough
    4068              :             }
    4069              :             ShardGenerationValidity::Mismatched { claimed, actual } => {
    4070              :                 tracing::info!(
    4071              :                     claimed=?claimed.into(),
    4072            0 :                     actual=?actual.and_then(|g| g.into()),
    4073              :                     "Rejecting import progress fetch from stale generation"
    4074              :                 );
    4075              : 
    4076              :                 return Err(ApiError::BadRequest(anyhow::anyhow!("Invalid generation")));
    4077              :             }
    4078              :         }
    4079              : 
    4080              :         let maybe_import = self
    4081              :             .persistence
    4082              :             .get_timeline_import(req.tenant_shard_id.tenant_id, req.timeline_id)
    4083              :             .await?;
    4084              : 
    4085            0 :         let import = maybe_import.ok_or_else(|| {
    4086            0 :             ApiError::NotFound(
    4087            0 :                 format!(
    4088            0 :                     "import for {}/{} not found",
    4089            0 :                     req.tenant_shard_id.tenant_id, req.timeline_id
    4090            0 :                 )
    4091            0 :                 .into(),
    4092            0 :             )
    4093            0 :         })?;
    4094              : 
    4095              :         import
    4096              :             .shard_statuses
    4097              :             .0
    4098              :             .get(&req.tenant_shard_id.to_index())
    4099              :             .cloned()
    4100            0 :             .ok_or_else(|| {
    4101            0 :                 ApiError::NotFound(
    4102            0 :                     format!("shard {} not found", req.tenant_shard_id.shard_slug()).into(),
    4103            0 :                 )
    4104            0 :             })
    4105              :     }
    4106              : 
    4107              :     #[instrument(skip_all, fields(
    4108              :         tenant_id=%req.tenant_shard_id.tenant_id,
    4109              :         shard_id=%req.tenant_shard_id.shard_slug(),
    4110              :         timeline_id=%req.timeline_id,
    4111              :     ))]
    4112              :     pub(crate) async fn handle_timeline_shard_import_progress_upcall(
    4113              :         self: &Arc<Self>,
    4114              :         req: PutTimelineImportStatusRequest,
    4115              :     ) -> Result<(), ApiError> {
    4116              :         let validity = self
    4117              :             .validate_shard_generation(req.tenant_shard_id, req.generation)
    4118              :             .await?;
    4119              :         match validity {
    4120              :             ShardGenerationValidity::Valid => {
    4121              :                 // fallthrough
    4122              :             }
    4123              :             ShardGenerationValidity::Mismatched { claimed, actual } => {
    4124              :                 tracing::info!(
    4125              :                     claimed=?claimed.into(),
    4126            0 :                     actual=?actual.and_then(|g| g.into()),
    4127              :                     "Rejecting import progress update from stale generation"
    4128              :                 );
    4129              : 
    4130              :                 return Err(ApiError::PreconditionFailed("Invalid generation".into()));
    4131              :             }
    4132              :         }
    4133              : 
    4134              :         let res = self
    4135              :             .persistence
    4136              :             .update_timeline_import(req.tenant_shard_id, req.timeline_id, req.status)
    4137              :             .await;
    4138              :         let timeline_import = match res {
    4139              :             Ok(Ok(Some(timeline_import))) => timeline_import,
    4140              :             Ok(Ok(None)) => {
    4141              :                 // Idempotency: we've already seen and handled this update.
    4142              :                 return Ok(());
    4143              :             }
    4144              :             Ok(Err(logical_err)) => {
    4145              :                 return Err(logical_err.into());
    4146              :             }
    4147              :             Err(db_err) => {
    4148              :                 return Err(db_err.into());
    4149              :             }
    4150              :         };
    4151              : 
    4152              :         tracing::info!(
    4153              :             tenant_id=%req.tenant_shard_id.tenant_id,
    4154              :             timeline_id=%req.timeline_id,
    4155              :             shard_id=%req.tenant_shard_id.shard_slug(),
    4156              :             "Updated timeline import status to: {timeline_import:?}");
    4157              : 
    4158              :         if timeline_import.is_complete() {
    4159              :             tokio::task::spawn({
    4160              :                 let this = self.clone();
    4161            0 :                 async move { this.finalize_timeline_import(timeline_import).await }
    4162              :             });
    4163              :         }
    4164              : 
    4165              :         Ok(())
    4166              :     }
    4167              : 
    4168              :     /// Check that a provided generation for some tenant shard is the most recent one.
    4169              :     ///
    4170              :     /// Validate against the in-memory state first and, if that passes, against the
    4171              :     /// database state, which is authoritative.
    4172            0 :     async fn validate_shard_generation(
    4173            0 :         self: &Arc<Self>,
    4174            0 :         tenant_shard_id: TenantShardId,
    4175            0 :         generation: Generation,
    4176            0 :     ) -> Result<ShardGenerationValidity, ApiError> {
    4177              :         {
    4178            0 :             let locked = self.inner.read().unwrap();
    4179            0 :             let tenant_shard =
    4180            0 :                 locked
    4181            0 :                     .tenants
    4182            0 :                     .get(&tenant_shard_id)
    4183            0 :                     .ok_or(ApiError::InternalServerError(anyhow::anyhow!(
    4184            0 :                         "{} shard not found",
    4185            0 :                         tenant_shard_id
    4186            0 :                     )))?;
    4187              : 
    4188            0 :             if tenant_shard.generation != Some(generation) {
    4189            0 :                 return Ok(ShardGenerationValidity::Mismatched {
    4190            0 :                     claimed: generation,
    4191            0 :                     actual: tenant_shard.generation,
    4192            0 :                 });
    4193            0 :             }
    4194              :         }
    4195              : 
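                      :         // The in-memory check passed; now confirm against the database, which is authoritative.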
    4196            0 :         let mut db_generations = self
    4197            0 :             .persistence
    4198            0 :             .shard_generations(std::iter::once(&tenant_shard_id))
    4199            0 :             .await?;
    4200            0 :         let (_tid, db_generation) =
    4201            0 :             db_generations
    4202            0 :                 .pop()
    4203            0 :                 .ok_or(ApiError::InternalServerError(anyhow::anyhow!(
    4204            0 :                     "{} shard not found",
    4205            0 :                     tenant_shard_id
    4206            0 :                 )))?;
    4207              : 
    4208            0 :         if db_generation != Some(generation) {
    4209            0 :             return Ok(ShardGenerationValidity::Mismatched {
    4210            0 :                 claimed: generation,
    4211            0 :                 actual: db_generation,
    4212            0 :             });
    4213            0 :         }
    4214              : 
    4215            0 :         Ok(ShardGenerationValidity::Valid)
    4216            0 :     }
    4217              : 
    4218              :     /// Finalize the import of a timeline
    4219              :     ///
    4220              :     /// This method should be called once all shards have reported that the import is complete.
    4221              :     /// Firstly, it polls the post import timeline activation endpoint exposed by the pageserver.
    4222              :     /// First, it polls the post-import timeline activation endpoint exposed by the pageserver.
    4223              :     /// Once the timeline is active on all shards, the timeline is also created on the
    4224              :     /// safekeepers. Finally, it notifies cplane of the import completion (whether failed or
    4225              :     /// successful) and removes the import from the database and from in-memory state.
    4226              :     ///
    4227              :     /// If this method gets preempted by shutdown, it will be called again at start-up (ongoing
    4228              :     /// imports are stored in the database).
    4229              :     /// # Cancel-Safety
    4230              :     /// Not cancel safe.
    4231              :     /// If the caller stops polling, the import will not be removed from
    4232              :     /// [`ServiceState::imports_finalizing`].
    4233              :     #[instrument(skip_all, fields(
    4234              :         tenant_id=%import.tenant_id,
    4235              :         timeline_id=%import.timeline_id,
    4236              :     ))]
    4237              : 
    4238              :     async fn finalize_timeline_import(
    4239              :         self: &Arc<Self>,
    4240              :         import: TimelineImport,
    4241              :     ) -> Result<(), TimelineImportFinalizeError> {
    4242              :         let tenant_timeline = (import.tenant_id, import.timeline_id);
    4243              : 
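                      :         // Register this finalization in `imports_finalizing` with a gate and a cancellation
                      :         // token so that it can be cancelled externally; the entry is removed once we finish.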
    4244              :         let (_finalize_import_guard, cancel) = {
    4245              :             let mut locked = self.inner.write().unwrap();
    4246              :             let gate = Gate::default();
    4247              :             let cancel = CancellationToken::default();
    4248              : 
    4249              :             let guard = gate.enter().unwrap();
    4250              : 
    4251              :             locked.imports_finalizing.insert(
    4252              :                 tenant_timeline,
    4253              :                 FinalizingImport {
    4254              :                     gate,
    4255              :                     cancel: cancel.clone(),
    4256              :                 },
    4257              :             );
    4258              : 
    4259              :             (guard, cancel)
    4260              :         };
    4261              : 
    4262              :         let res = tokio::select! {
    4263              :             res = self.finalize_timeline_import_impl(import) => {
    4264              :                 res
    4265              :             },
    4266              :             _ = cancel.cancelled() => {
    4267              :                 Err(TimelineImportFinalizeError::Cancelled)
    4268              :             }
    4269              :         };
    4270              : 
    4271              :         let mut locked = self.inner.write().unwrap();
    4272              :         locked.imports_finalizing.remove(&tenant_timeline);
    4273              : 
    4274              :         res
    4275              :     }
    4276              : 
    4277            0 :     async fn finalize_timeline_import_impl(
    4278            0 :         self: &Arc<Self>,
    4279            0 :         import: TimelineImport,
    4280            0 :     ) -> Result<(), TimelineImportFinalizeError> {
    4281            0 :         tracing::info!("Finalizing timeline import");
    4282              : 
    4283            0 :         pausable_failpoint!("timeline-import-pre-cplane-notification");
    4284              : 
    4285            0 :         let tenant_id = import.tenant_id;
    4286            0 :         let timeline_id = import.timeline_id;
    4287              : 
    4288            0 :         let import_error = import.completion_error();
    4289            0 :         match import_error {
    4290            0 :             Some(err) => {
    4291            0 :                 self.notify_cplane_and_delete_import(tenant_id, timeline_id, Err(err))
    4292            0 :                     .await?;
    4293            0 :                 tracing::warn!("Timeline import completed with shard errors");
    4294            0 :                 Ok(())
    4295              :             }
    4296            0 :             None => match self.activate_timeline_post_import(&import).await {
    4297            0 :                 Ok(timeline_info) => {
    4298            0 :                     tracing::info!("Post import timeline activation complete");
    4299              : 
    4300            0 :                     if self.config.timelines_onto_safekeepers {
    4301              :                         // Now that we know the start LSN of this timeline, create it on the
    4302              :                         // safekeepers.
    4303            0 :                         self.tenant_timeline_create_safekeepers_until_success(
    4304            0 :                             import.tenant_id,
    4305            0 :                             timeline_info,
    4306            0 :                         )
    4307            0 :                         .await?;
    4308            0 :                     }
    4309              : 
    4310            0 :                     self.notify_cplane_and_delete_import(tenant_id, timeline_id, Ok(()))
    4311            0 :                         .await?;
    4312              : 
    4313            0 :                     tracing::info!("Timeline import completed successfully");
    4314            0 :                     Ok(())
    4315              :                 }
    4316              :                 Err(TimelineImportFinalizeError::ShuttingDown) => {
    4317              :                     // We got pre-empted by shut down and will resume after the restart.
    4318            0 :                     Err(TimelineImportFinalizeError::ShuttingDown)
    4319              :                 }
    4320            0 :                 Err(err) => {
    4321              :                     // Any finalize error apart from shut down is permanent and requires us to notify
    4322              :                     // cplane such that it can clean up.
    4323            0 :                     tracing::error!("Import finalize failed with permanent error: {err}");
    4324            0 :                     self.notify_cplane_and_delete_import(
    4325            0 :                         tenant_id,
    4326            0 :                         timeline_id,
    4327            0 :                         Err(err.to_string()),
    4328            0 :                     )
    4329            0 :                     .await?;
    4330            0 :                     Err(err)
    4331              :                 }
    4332              :             },
    4333              :         }
    4334            0 :     }
    4335              : 
    4336            0 :     async fn notify_cplane_and_delete_import(
    4337            0 :         self: &Arc<Self>,
    4338            0 :         tenant_id: TenantId,
    4339            0 :         timeline_id: TimelineId,
    4340            0 :         import_result: ImportResult,
    4341            0 :     ) -> Result<(), TimelineImportFinalizeError> {
    4342            0 :         let import_failed = import_result.is_err();
    4343            0 :         tracing::info!(%import_failed, "Notifying cplane of import completion");
    4344              : 
    4345            0 :         let client = UpcallClient::new(self.get_config(), self.cancel.child_token());
    4346            0 :         client
    4347            0 :             .notify_import_complete(tenant_id, timeline_id, import_result)
    4348            0 :             .await
    4349            0 :             .map_err(|_err| TimelineImportFinalizeError::ShuttingDown)?;
    4350              : 
    4351            0 :         if let Err(err) = self
    4352            0 :             .persistence
    4353            0 :             .delete_timeline_import(tenant_id, timeline_id)
    4354            0 :             .await
    4355              :         {
    4356            0 :             tracing::warn!("Failed to delete timeline import entry from database: {err}");
    4357            0 :         }
    4358              : 
    4359            0 :         self.inner
    4360            0 :             .write()
    4361            0 :             .unwrap()
    4362            0 :             .tenants
    4363            0 :             .range_mut(TenantShardId::tenant_range(tenant_id))
    4364            0 :             .for_each(|(_id, shard)| shard.importing = TimelineImportState::Idle);
    4365              : 
    4366            0 :         Ok(())
    4367            0 :     }
    4368              : 
    4369              :     /// Activate an imported timeline on all shards once the import is complete.
    4370              :     /// Returns the [`TimelineInfo`] reported by shard zero.
    4371            0 :     async fn activate_timeline_post_import(
    4372            0 :         self: &Arc<Self>,
    4373            0 :         import: &TimelineImport,
    4374            0 :     ) -> Result<TimelineInfo, TimelineImportFinalizeError> {
    4375              :         const TIMELINE_ACTIVATE_TIMEOUT: Duration = Duration::from_millis(128);
    4376              : 
    4377            0 :         let mut shards_to_activate: HashSet<ShardIndex> =
    4378            0 :             import.shard_statuses.0.keys().cloned().collect();
    4379            0 :         let mut shard_zero_timeline_info = None;
    4380              : 
    4381            0 :         while !shards_to_activate.is_empty() {
    4382            0 :             if self.cancel.is_cancelled() {
    4383            0 :                 return Err(TimelineImportFinalizeError::ShuttingDown);
    4384            0 :             }
    4385              : 
    4386            0 :             let targets = {
    4387            0 :                 let locked = self.inner.read().unwrap();
    4388            0 :                 let mut targets = Vec::new();
    4389              : 
    4390            0 :                 for (tenant_shard_id, shard) in locked
    4391            0 :                     .tenants
    4392            0 :                     .range(TenantShardId::tenant_range(import.tenant_id))
    4393              :                 {
    4394            0 :                     if !import
    4395            0 :                         .shard_statuses
    4396            0 :                         .0
    4397            0 :                         .contains_key(&tenant_shard_id.to_index())
    4398              :                     {
    4399            0 :                         return Err(TimelineImportFinalizeError::MismatchedShards(
    4400            0 :                             tenant_shard_id.to_index(),
    4401            0 :                         ));
    4402            0 :                     }
    4403              : 
    4404            0 :                     if let Some(node_id) = shard.intent.get_attached() {
    4405            0 :                         let node = locked
    4406            0 :                             .nodes
    4407            0 :                             .get(node_id)
    4408            0 :                             .expect("Pageservers may not be deleted while referenced");
    4409            0 :                         targets.push((*tenant_shard_id, node.clone()));
    4410            0 :                     }
    4411              :                 }
    4412              : 
    4413            0 :                 targets
    4414              :             };
    4415              : 
    4416            0 :             let targeted_tenant_shards: Vec<_> = targets.iter().map(|(tid, _node)| *tid).collect();
    4417              : 
    4418            0 :             let results = self
    4419            0 :                 .tenant_for_shards_api(
    4420            0 :                     targets,
    4421            0 :                     |tenant_shard_id, client| async move {
    4422            0 :                         client
    4423            0 :                             .activate_post_import(
    4424            0 :                                 tenant_shard_id,
    4425            0 :                                 import.timeline_id,
    4426            0 :                                 TIMELINE_ACTIVATE_TIMEOUT,
    4427            0 :                             )
    4428            0 :                             .await
    4429            0 :                     },
    4430              :                     1,
    4431              :                     1,
    4432              :                     SHORT_RECONCILE_TIMEOUT,
    4433            0 :                     &self.cancel,
    4434              :                 )
    4435            0 :                 .await;
    4436              : 
    4437            0 :             let mut failed = 0;
    4438            0 :             for (tid, (_, result)) in targeted_tenant_shards.iter().zip(results.into_iter()) {
    4439            0 :                 match result {
    4440            0 :                     Ok(ok) => {
    4441            0 :                         if tid.is_shard_zero() {
    4442            0 :                             shard_zero_timeline_info = Some(ok);
    4443            0 :                         }
    4444              : 
    4445            0 :                         shards_to_activate.remove(&tid.to_index());
    4446              :                     }
    4447            0 :                     Err(_err) => {
    4448            0 :                         failed += 1;
    4449            0 :                     }
    4450              :                 }
    4451              :             }
    4452              : 
    4453            0 :             if failed > 0 {
    4454            0 :                 tracing::info!(
    4455            0 :                     "Failed to activate timeline on {failed} shards post import. Will retry"
    4456              :                 );
    4457            0 :             }
    4458              : 
    4459            0 :             tokio::select! {
    4460            0 :                 _ = tokio::time::sleep(Duration::from_millis(250)) => {},
    4461            0 :                 _ = self.cancel.cancelled() => {
    4462            0 :                     return Err(TimelineImportFinalizeError::ShuttingDown);
    4463              :                 }
    4464              :             }
    4465              :         }
    4466              : 
    4467            0 :         Ok(shard_zero_timeline_info.expect("All shards replied"))
    4468            0 :     }
    4469              : 
    4470            0 :     async fn finalize_timeline_imports(self: &Arc<Self>, imports: Vec<TimelineImport>) {
    4471            0 :         futures::future::join_all(
    4472            0 :             imports
    4473            0 :                 .into_iter()
    4474            0 :                 .map(|import| self.finalize_timeline_import(import)),
    4475              :         )
    4476            0 :         .await;
    4477            0 :     }
    4478              : 
    4479              :     /// Delete a timeline import if it exists
    4480              :     ///
    4481              :     /// Firstly, delete the entry from the database. Any updates
    4482              :     /// from pageservers after the deletion will fail with a 404, so the
    4483              :     /// import cannot progress into the finalizing state if it is not there already.
    4484              :     /// Secondly, cancel the finalization if one is in progress.
    4485            0 :     pub(crate) async fn maybe_delete_timeline_import(
    4486            0 :         self: &Arc<Self>,
    4487            0 :         tenant_id: TenantId,
    4488            0 :         timeline_id: TimelineId,
    4489            0 :     ) -> Result<(), DatabaseError> {
    4490            0 :         let tenant_has_ongoing_import = {
    4491            0 :             let locked = self.inner.read().unwrap();
    4492            0 :             locked
    4493            0 :                 .tenants
    4494            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    4495            0 :                 .any(|(_tid, shard)| shard.importing == TimelineImportState::Importing)
    4496              :         };
    4497              : 
    4498            0 :         if !tenant_has_ongoing_import {
    4499            0 :             return Ok(());
    4500            0 :         }
    4501              : 
    4502            0 :         self.persistence
    4503            0 :             .delete_timeline_import(tenant_id, timeline_id)
    4504            0 :             .await?;
    4505              : 
    4506            0 :         let maybe_finalizing = {
    4507            0 :             let mut locked = self.inner.write().unwrap();
    4508            0 :             locked.imports_finalizing.remove(&(tenant_id, timeline_id))
    4509              :         };
    4510              : 
    4511            0 :         if let Some(finalizing) = maybe_finalizing {
    4512            0 :             finalizing.cancel.cancel();
    4513            0 :             finalizing.gate.close().await;
    4514            0 :         }
    4515              : 
    4516            0 :         Ok(())
    4517            0 :     }
    4518              : 
    4519            0 :     pub(crate) async fn tenant_timeline_archival_config(
    4520            0 :         &self,
    4521            0 :         tenant_id: TenantId,
    4522            0 :         timeline_id: TimelineId,
    4523            0 :         req: TimelineArchivalConfigRequest,
    4524            0 :     ) -> Result<(), ApiError> {
    4525            0 :         tracing::info!(
    4526            0 :             "Setting archival config of timeline {tenant_id}/{timeline_id} to '{:?}'",
    4527              :             req.state
    4528              :         );
    4529              : 
    4530            0 :         let _tenant_lock = trace_shared_lock(
    4531            0 :             &self.tenant_op_locks,
    4532            0 :             tenant_id,
    4533            0 :             TenantOperations::TimelineArchivalConfig,
    4534            0 :         )
    4535            0 :         .await;
    4536              : 
    4537            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4538            0 :             if targets.0.is_empty() {
    4539            0 :                 return Err(ApiError::NotFound(
    4540            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4541            0 :                 ));
    4542            0 :             }
    4543            0 :             async fn config_one(
    4544            0 :                 tenant_shard_id: TenantShardId,
    4545            0 :                 timeline_id: TimelineId,
    4546            0 :                 node: Node,
    4547            0 :                 http_client: reqwest::Client,
    4548            0 :                 jwt: Option<String>,
    4549            0 :                 req: TimelineArchivalConfigRequest,
    4550            0 :             ) -> Result<(), ApiError> {
    4551            0 :                 tracing::info!(
    4552            0 :                     "Setting archival config of timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4553              :                 );
    4554              : 
    4555            0 :                 let client = PageserverClient::new(node.get_id(),  http_client, node.base_url(), jwt.as_deref());
    4556              : 
    4557            0 :                 client
    4558            0 :                     .timeline_archival_config(tenant_shard_id, timeline_id, &req)
    4559            0 :                     .await
    4560            0 :                     .map_err(|e| match e {
    4561            0 :                         mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg) => {
    4562            0 :                             ApiError::PreconditionFailed(msg.into_boxed_str())
    4563              :                         }
    4564            0 :                         _ => passthrough_api_error(&node, e),
    4565            0 :                     })
    4566            0 :             }
    4567              : 
    4568              :             // no shard needs to go first/last; the operation should be idempotent
    4569              :             // TODO: it would be great to ensure that all shards return the same error
    4570            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4571            0 :             let results = self
    4572            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    4573            0 :                     futures::FutureExt::boxed(config_one(
    4574            0 :                         tenant_shard_id,
    4575            0 :                         timeline_id,
    4576            0 :                         node,
    4577            0 :                         self.http_client.clone(),
    4578            0 :                         self.config.pageserver_jwt_token.clone(),
    4579            0 :                         req.clone(),
    4580            0 :                     ))
    4581            0 :                 })
    4582            0 :                 .await?;
    4583            0 :             assert!(!results.is_empty(), "must have at least one result");
    4584              : 
    4585            0 :             Ok(())
    4586            0 :         }).await?
    4587            0 :     }
    4588              : 
    4589            0 :     pub(crate) async fn tenant_timeline_detach_ancestor(
    4590            0 :         &self,
    4591            0 :         tenant_id: TenantId,
    4592            0 :         timeline_id: TimelineId,
    4593            0 :         behavior: Option<DetachBehavior>,
    4594            0 :     ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
    4595            0 :         tracing::info!("Detaching timeline {tenant_id}/{timeline_id}",);
    4596              : 
    4597            0 :         let _tenant_lock = trace_shared_lock(
    4598            0 :             &self.tenant_op_locks,
    4599            0 :             tenant_id,
    4600            0 :             TenantOperations::TimelineDetachAncestor,
    4601            0 :         )
    4602            0 :         .await;
    4603              : 
    4604            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4605            0 :             if targets.0.is_empty() {
    4606            0 :                 return Err(ApiError::NotFound(
    4607            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4608            0 :                 ));
    4609            0 :             }
    4610              : 
    4611            0 :             async fn detach_one(
    4612            0 :                 tenant_shard_id: TenantShardId,
    4613            0 :                 timeline_id: TimelineId,
    4614            0 :                 node: Node,
    4615            0 :                 http_client: reqwest::Client,
    4616            0 :                 jwt: Option<String>,
    4617            0 :                 behavior: Option<DetachBehavior>,
    4618            0 :             ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
    4619            0 :                 tracing::info!(
    4620            0 :                     "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    4621              :                 );
    4622              : 
    4623            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    4624              : 
    4625            0 :                 client
    4626            0 :                     .timeline_detach_ancestor(tenant_shard_id, timeline_id, behavior)
    4627            0 :                     .await
    4628            0 :                     .map_err(|e| {
    4629              :                         use mgmt_api::Error;
    4630              : 
    4631            0 :                         match e {
    4632              :                             // no ancestor (ever)
    4633            0 :                             Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
    4634            0 :                                 "{node}: {}",
    4635            0 :                                 msg.strip_prefix("Conflict: ").unwrap_or(&msg)
    4636            0 :                             )),
    4637              :                             // too many ancestors
    4638            0 :                             Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
    4639            0 :                                 ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
    4640              :                             }
    4641            0 :                             Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, msg) => {
    4642              :                                 // Avoid turning these into conflicts, to remain compatible with
    4643              :                                 // pageservers; unfortunately, 500 errors are retryable for timeline
    4644              :                                 // ancestor detach.
    4645            0 :                                 ApiError::InternalServerError(anyhow::anyhow!("{node}: {msg}"))
    4646              :                             }
    4647              :                             // rest can be mapped as usual
    4648            0 :                             other => passthrough_api_error(&node, other),
    4649              :                         }
    4650            0 :                     })
    4651            0 :                     .map(|res| (tenant_shard_id.shard_number, res))
    4652            0 :             }
    4653              : 
    4654              :             // no shard needs to go first/last; the operation should be idempotent
    4655            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    4656            0 :             let mut results = self
    4657            0 :                 .tenant_for_shards(locations, |tenant_shard_id, node| {
    4658            0 :                     futures::FutureExt::boxed(detach_one(
    4659            0 :                         tenant_shard_id,
    4660            0 :                         timeline_id,
    4661            0 :                         node,
    4662            0 :                         self.http_client.clone(),
    4663            0 :                         self.config.pageserver_jwt_token.clone(),
    4664            0 :                         behavior,
    4665            0 :                     ))
    4666            0 :                 })
    4667            0 :                 .await?;
    4668              : 
    4669            0 :             let any = results.pop().expect("we must have at least one response");
    4670              : 
    4671            0 :             let mismatching = results
    4672            0 :                 .iter()
    4673            0 :                 .filter(|(_, res)| res != &any.1)
    4674            0 :                 .collect::<Vec<_>>();
    4675            0 :             if !mismatching.is_empty() {
    4676              :                 // this can be hit by races, which should not happen because of the operation lock on cplane
    4677            0 :                 let matching = results.len() - mismatching.len();
    4678            0 :                 tracing::error!(
    4679              :                     matching,
    4680              :                     compared_against=?any,
    4681              :                     ?mismatching,
    4682            0 :                     "shards returned different results"
    4683              :                 );
    4684              : 
    4685            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!("pageservers returned mixed results for ancestor detach; manual intervention is required.")));
    4686            0 :             }
    4687              : 
    4688            0 :             Ok(any.1)
    4689            0 :         }).await?
    4690            0 :     }
    4691              : 
    4692            0 :     pub(crate) async fn tenant_timeline_block_unblock_gc(
    4693            0 :         &self,
    4694            0 :         tenant_id: TenantId,
    4695            0 :         timeline_id: TimelineId,
    4696            0 :         dir: BlockUnblock,
    4697            0 :     ) -> Result<(), ApiError> {
    4698            0 :         let _tenant_lock = trace_shared_lock(
    4699            0 :             &self.tenant_op_locks,
    4700            0 :             tenant_id,
    4701            0 :             TenantOperations::TimelineGcBlockUnblock,
    4702            0 :         )
    4703            0 :         .await;
    4704              : 
    4705            0 :         self.tenant_remote_mutation(tenant_id, move |targets| async move {
    4706            0 :             if targets.0.is_empty() {
    4707            0 :                 return Err(ApiError::NotFound(
    4708            0 :                     anyhow::anyhow!("Tenant not found").into(),
    4709            0 :                 ));
    4710            0 :             }
    4711              : 
    4712            0 :             async fn do_one(
    4713            0 :                 tenant_shard_id: TenantShardId,
    4714            0 :                 timeline_id: TimelineId,
    4715            0 :                 node: Node,
    4716            0 :                 http_client: reqwest::Client,
    4717            0 :                 jwt: Option<String>,
    4718            0 :                 dir: BlockUnblock,
    4719            0 :             ) -> Result<(), ApiError> {
    4720            0 :                 let client = PageserverClient::new(
    4721            0 :                     node.get_id(),
    4722            0 :                     http_client,
    4723            0 :                     node.base_url(),
    4724            0 :                     jwt.as_deref(),
    4725              :                 );
    4726              : 
    4727            0 :                 client
    4728            0 :                     .timeline_block_unblock_gc(tenant_shard_id, timeline_id, dir)
    4729            0 :                     .await
    4730            0 :                     .map_err(|e| passthrough_api_error(&node, e))
    4731            0 :             }
    4732              : 
    4733              :             // no shard needs to go first/last; the operation should be idempotent
    4734            0 :             let locations = targets
    4735            0 :                 .0
    4736            0 :                 .iter()
    4737            0 :                 .map(|t| (*t.0, t.1.latest.node.clone()))
    4738            0 :                 .collect();
    4739            0 :             self.tenant_for_shards(locations, |tenant_shard_id, node| {
    4740            0 :                 futures::FutureExt::boxed(do_one(
    4741            0 :                     tenant_shard_id,
    4742            0 :                     timeline_id,
    4743            0 :                     node,
    4744            0 :                     self.http_client.clone(),
    4745            0 :                     self.config.pageserver_jwt_token.clone(),
    4746            0 :                     dir,
    4747            0 :                 ))
    4748            0 :             })
    4749            0 :             .await
    4750            0 :         })
    4751            0 :         .await??;
    4752            0 :         Ok(())
    4753            0 :     }
    4754              : 
    4755            0 :     pub(crate) async fn tenant_timeline_lsn_lease(
    4756            0 :         &self,
    4757            0 :         tenant_id: TenantId,
    4758            0 :         timeline_id: TimelineId,
    4759            0 :         lsn: Lsn,
    4760            0 :     ) -> Result<LsnLease, ApiError> {
    4761            0 :         let _tenant_lock = trace_shared_lock(
    4762            0 :             &self.tenant_op_locks,
    4763            0 :             tenant_id,
    4764            0 :             TenantOperations::TimelineLsnLease,
    4765            0 :         )
    4766            0 :         .await;
    4767              : 
    4768            0 :         let mut retry_if_not_attached = false;
    4769            0 :         let targets = {
    4770            0 :             let locked = self.inner.read().unwrap();
    4771            0 :             let mut targets = Vec::new();
    4772              : 
    4773              :             // The request addresses the tenant as a whole, so apply the
    4774              :             // lease operation to every shard of the tenant.
    4775            0 :             let shards_range = TenantShardId::tenant_range(tenant_id);
    4776              : 
    4777            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4778            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4779            0 :                     let node = locked
    4780            0 :                         .nodes
    4781            0 :                         .get(node_id)
    4782            0 :                         .expect("Pageservers may not be deleted while referenced");
    4783              : 
    4784            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4785              : 
    4786            0 :                     if let Some(location) = shard.observed.locations.get(node_id) {
    4787            0 :                         if let Some(ref conf) = location.conf {
    4788            0 :                             if conf.mode != LocationConfigMode::AttachedSingle
    4789            0 :                                 && conf.mode != LocationConfigMode::AttachedMulti
    4790            0 :                             {
    4791            0 :                                 // If the shard is attached as secondary, we need to retry if 404.
    4792            0 :                                 retry_if_not_attached = true;
    4793            0 :                             }
    4794              :                             // If the shard is attached as primary, we should succeed.
    4795            0 :                         } else {
    4796            0 :                             // Location conf is not available yet, retry if 404.
    4797            0 :                             retry_if_not_attached = true;
    4798            0 :                         }
    4799            0 :                     } else {
    4800            0 :                         // The shard is not attached to the intended pageserver yet, retry if 404.
    4801            0 :                         retry_if_not_attached = true;
    4802            0 :                     }
    4803            0 :                 }
    4804              :             }
    4805            0 :             targets
    4806              :         };
    4807              : 
    4808            0 :         let res = self
    4809            0 :             .tenant_for_shards_api(
    4810            0 :                 targets,
    4811            0 :                 |tenant_shard_id, client| async move {
    4812            0 :                     client
    4813            0 :                         .timeline_lease_lsn(tenant_shard_id, timeline_id, lsn)
    4814            0 :                         .await
    4815            0 :                 },
    4816              :                 1,
    4817              :                 1,
    4818              :                 SHORT_RECONCILE_TIMEOUT,
    4819            0 :                 &self.cancel,
    4820              :             )
    4821            0 :             .await;
    4822              : 
    4823            0 :         let mut valid_until = None;
    4824            0 :         for (node, r) in res {
    4825            0 :             match r {
    4826            0 :                 Ok(lease) => {
    4827            0 :                     if let Some(ref mut valid_until) = valid_until {
    4828            0 :                         *valid_until = std::cmp::min(*valid_until, lease.valid_until);
    4829            0 :                     } else {
    4830            0 :                         valid_until = Some(lease.valid_until);
    4831            0 :                     }
    4832              :                 }
    4833              :                 Err(mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, _))
    4834            0 :                     if retry_if_not_attached =>
    4835              :                 {
    4836              :                     // This is expected if the attach is not finished yet. Return 503 so that the client can retry.
    4837            0 :                     return Err(ApiError::ResourceUnavailable(
    4838            0 :                         format!(
    4839            0 :                             "Timeline is not attached to the pageserver {} yet, please retry",
    4840            0 :                             node.get_id()
    4841            0 :                         )
    4842            0 :                         .into(),
    4843            0 :                     ));
    4844              :                 }
    4845            0 :                 Err(e) => {
    4846            0 :                     return Err(passthrough_api_error(&node, e));
    4847              :                 }
    4848              :             }
    4849              :         }
    4850            0 :         Ok(LsnLease {
    4851            0 :             valid_until: valid_until.unwrap_or_else(SystemTime::now),
    4852            0 :         })
    4853            0 :     }
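
// Illustrative sketch (not part of service.rs): the lease returned by
// `tenant_timeline_lsn_lease` is only as strong as the shortest per-shard lease,
// so the shard responses above are folded with `min`. The type here is a
// simplified stand-in for the per-shard `LsnLease` responses.
use std::time::SystemTime;

fn min_valid_until(per_shard: impl IntoIterator<Item = SystemTime>) -> SystemTime {
    per_shard
        .into_iter()
        .min()
        // No shard responses: fall back to "now", matching the fallback above.
        .unwrap_or_else(SystemTime::now)
}
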
    4854              : 
    4855            0 :     pub(crate) async fn tenant_timeline_download_heatmap_layers(
    4856            0 :         &self,
    4857            0 :         tenant_shard_id: TenantShardId,
    4858            0 :         timeline_id: TimelineId,
    4859            0 :         concurrency: Option<usize>,
    4860            0 :         recurse: bool,
    4861            0 :     ) -> Result<(), ApiError> {
    4862            0 :         let _tenant_lock = trace_shared_lock(
    4863            0 :             &self.tenant_op_locks,
    4864            0 :             tenant_shard_id.tenant_id,
    4865            0 :             TenantOperations::DownloadHeatmapLayers,
    4866            0 :         )
    4867            0 :         .await;
    4868              : 
    4869            0 :         let targets = {
    4870            0 :             let locked = self.inner.read().unwrap();
    4871            0 :             let mut targets = Vec::new();
    4872              : 
    4873              :             // If the request got an unsharded tenant id, then apply
    4874              :             // the operation to all shards. Otherwise, apply it to a specific shard.
    4875            0 :             let shards_range = if tenant_shard_id.is_unsharded() {
    4876            0 :                 TenantShardId::tenant_range(tenant_shard_id.tenant_id)
    4877              :             } else {
    4878            0 :                 tenant_shard_id.range()
    4879              :             };
    4880              : 
    4881            0 :             for (tenant_shard_id, shard) in locked.tenants.range(shards_range) {
    4882            0 :                 if let Some(node_id) = shard.intent.get_attached() {
    4883            0 :                     let node = locked
    4884            0 :                         .nodes
    4885            0 :                         .get(node_id)
    4886            0 :                         .expect("Pageservers may not be deleted while referenced");
    4887            0 : 
    4888            0 :                     targets.push((*tenant_shard_id, node.clone()));
    4889            0 :                 }
    4890              :             }
    4891            0 :             targets
    4892              :         };
    4893              : 
    4894            0 :         self.tenant_for_shards_api(
    4895            0 :             targets,
    4896            0 :             |tenant_shard_id, client| async move {
    4897            0 :                 client
    4898            0 :                     .timeline_download_heatmap_layers(
    4899            0 :                         tenant_shard_id,
    4900            0 :                         timeline_id,
    4901            0 :                         concurrency,
    4902            0 :                         recurse,
    4903            0 :                     )
    4904            0 :                     .await
    4905            0 :             },
    4906              :             1,
    4907              :             1,
    4908              :             SHORT_RECONCILE_TIMEOUT,
    4909            0 :             &self.cancel,
    4910              :         )
    4911            0 :         .await;
    4912              : 
    4913            0 :         Ok(())
    4914            0 :     }
    4915              : 
    4916              :     /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
    4917              :     ///
    4918              :     /// On success, the returned vector contains exactly the same number of elements as the input `locations`
    4919              :     /// and the returned element at index `i` is the result of `req_fn(locations[i])`.
    4920            0 :     async fn tenant_for_shards<F, R>(
    4921            0 :         &self,
    4922            0 :         locations: Vec<(TenantShardId, Node)>,
    4923            0 :         mut req_fn: F,
    4924            0 :     ) -> Result<Vec<R>, ApiError>
    4925            0 :     where
    4926            0 :         F: FnMut(
    4927            0 :             TenantShardId,
    4928            0 :             Node,
    4929            0 :         )
    4930            0 :             -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
    4931            0 :     {
    4932            0 :         let mut futs = FuturesUnordered::new();
    4933            0 :         let mut results = Vec::with_capacity(locations.len());
    4934              : 
    4935            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4936            0 :             let fut = req_fn(tenant_shard_id, node);
    4937            0 :             futs.push(async move { (idx, fut.await) });
    4938              :         }
    4939              : 
    4940            0 :         while let Some((idx, r)) = futs.next().await {
    4941            0 :             results.push((idx, r?));
    4942              :         }
    4943              : 
    4944            0 :         results.sort_by_key(|(idx, _)| *idx);
    4945            0 :         Ok(results.into_iter().map(|(_, r)| r).collect())
    4946            0 :     }
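
// Illustrative, self-contained sketch (not part of service.rs) of the fan-out
// pattern used by `tenant_for_shards`: futures complete in arbitrary order on a
// `FuturesUnordered`, so each result is tagged with its input index and sorted
// back into input order before returning. `double` is a hypothetical stand-in
// for the per-shard request closure.
use futures::stream::{FuturesUnordered, StreamExt};

async fn fan_out_in_order(inputs: Vec<u32>) -> Vec<u32> {
    let mut futs = FuturesUnordered::new();
    for (idx, v) in inputs.into_iter().enumerate() {
        futs.push(async move { (idx, double(v).await) });
    }

    let mut results = Vec::new();
    while let Some((idx, r)) = futs.next().await {
        results.push((idx, r));
    }

    // Restore the ordering of `inputs`, which completion order does not preserve.
    results.sort_by_key(|(idx, _)| *idx);
    results.into_iter().map(|(_, r)| r).collect()
}

async fn double(v: u32) -> u32 {
    v * 2
}
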
    4947              : 
    4948              :     /// Concurrently invoke a pageserver API call on many shards at once.
    4949              :     ///
    4950              :     /// The returned Vec has the same length as the `locations` Vec,
    4951              :     /// and the returned element at index `i` is the result of `op(locations[i])`.
    4952            0 :     pub(crate) async fn tenant_for_shards_api<T, O, F>(
    4953            0 :         &self,
    4954            0 :         locations: Vec<(TenantShardId, Node)>,
    4955            0 :         op: O,
    4956            0 :         warn_threshold: u32,
    4957            0 :         max_retries: u32,
    4958            0 :         timeout: Duration,
    4959            0 :         cancel: &CancellationToken,
    4960            0 :     ) -> Vec<(Node, mgmt_api::Result<T>)>
    4961            0 :     where
    4962            0 :         O: Fn(TenantShardId, PageserverClient) -> F + Copy,
    4963            0 :         F: std::future::Future<Output = mgmt_api::Result<T>>,
    4964            0 :     {
    4965            0 :         let mut futs = FuturesUnordered::new();
    4966            0 :         let mut results = Vec::with_capacity(locations.len());
    4967              : 
    4968            0 :         for (idx, (tenant_shard_id, node)) in locations.into_iter().enumerate() {
    4969            0 :             futs.push(async move {
    4970            0 :                 let r = node
    4971            0 :                     .with_client_retries(
    4972            0 :                         |client| op(tenant_shard_id, client),
    4973            0 :                         &self.http_client,
    4974            0 :                         &self.config.pageserver_jwt_token,
    4975            0 :                         warn_threshold,
    4976            0 :                         max_retries,
    4977            0 :                         timeout,
    4978            0 :                         cancel,
    4979              :                     )
    4980            0 :                     .await;
    4981            0 :                 (idx, node, r)
    4982            0 :             });
    4983              :         }
    4984              : 
    4985            0 :         while let Some((idx, node, r)) = futs.next().await {
    4986            0 :             results.push((idx, node, r.unwrap_or(Err(mgmt_api::Error::Cancelled))));
    4987            0 :         }
    4988              : 
    4989            0 :         results.sort_by_key(|(idx, _, _)| *idx);
    4990            0 :         results.into_iter().map(|(_, node, r)| (node, r)).collect()
    4991            0 :     }
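
// Note on the two helpers above: `tenant_for_shards` aborts on the first shard
// error (the `r?` in its collection loop) and surfaces it as a single `ApiError`,
// whereas `tenant_for_shards_api` retries each call via `with_client_retries` and
// always returns one `mgmt_api::Result` per input location, leaving per-shard
// error handling to the caller.
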
    4992              : 
    4993              :     /// Helper for safely working with the shards in a tenant remotely on pageservers, for example
    4994              :     /// when creating and deleting timelines:
    4995              :     /// - Makes sure shards are attached somewhere if they weren't already
    4996              :     /// - Looks up the shards and the nodes where they were most recently attached
    4997              :     /// - Guarantees that after the inner function returns, the shards' generations haven't moved on: this
    4998              :     ///   ensures that the remote operation acted on the most recent generation, and is therefore durable.
    4999            0 :     async fn tenant_remote_mutation<R, O, F>(
    5000            0 :         &self,
    5001            0 :         tenant_id: TenantId,
    5002            0 :         op: O,
    5003            0 :     ) -> Result<R, ApiError>
    5004            0 :     where
    5005            0 :         O: FnOnce(TenantMutationLocations) -> F,
    5006            0 :         F: std::future::Future<Output = R>,
    5007            0 :     {
    5008            0 :         let mutation_locations = {
    5009            0 :             let mut locations = TenantMutationLocations::default();
    5010              : 
    5011              :             // Load the currently attached pageservers for the latest generation of each shard.  This can
    5012              :             // run concurrently with reconciliations, and it is not guaranteed that the node we find here
    5013              :             // will still be the latest when we're done: we will check generations again at the end of
    5014              :             // this function to handle that.
    5015            0 :             let generations = self.persistence.tenant_generations(tenant_id).await?;
    5016              : 
    5017            0 :             if generations
    5018            0 :                 .iter()
    5019            0 :                 .any(|i| i.generation.is_none() || i.generation_pageserver.is_none())
    5020              :             {
    5021            0 :                 let shard_generations = generations
    5022            0 :                     .into_iter()
    5023            0 :                     .map(|i| (i.tenant_shard_id, (i.generation, i.generation_pageserver)))
    5024            0 :                     .collect::<HashMap<_, _>>();
    5025              : 
    5026              :                 // One or more shards has not been attached to a pageserver.  Check if this is because it's configured
    5027              :                 // to be detached (409: caller should give up), or because it's meant to be attached but isn't yet (503: caller should retry)
    5028            0 :                 let locked = self.inner.read().unwrap();
    5029            0 :                 for (shard_id, shard) in
    5030            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5031              :                 {
    5032            0 :                     match shard.policy {
    5033              :                         PlacementPolicy::Attached(_) => {
    5034              :                             // This shard is meant to be attached: the caller is not wrong to try and
    5035              :                             // use this function, but we can't service the request right now.
    5036            0 :                             let Some(generation) = shard_generations.get(shard_id) else {
    5037              :                                 // This can only happen if there is a split brain controller modifying the database.  This should
    5038              :                                 // never happen when testing, and if it happens in production we can only log the issue.
    5039            0 :                                 debug_assert!(false);
    5040            0 :                                 tracing::error!(
    5041            0 :                                     "Shard {shard_id} not found in generation state!  Is another rogue controller running?"
    5042              :                                 );
    5043            0 :                                 continue;
    5044              :                             };
    5045            0 :                             let (generation, generation_pageserver) = generation;
    5046            0 :                             if let Some(generation) = generation {
    5047            0 :                                 if generation_pageserver.is_none() {
    5048              :                                     // This is legitimate only in a very narrow window where the shard was only just configured into
    5049              :                                     // Attached mode after being created in Secondary or Detached mode, and it has had its generation
    5050              :                                     // set but not yet had a Reconciler run (reconciler is the only thing that sets generation_pageserver).
    5051            0 :                                     tracing::warn!(
    5052            0 :                                         "Shard {shard_id} generation is set ({generation:?}) but generation_pageserver is None, reconciler not run yet?"
    5053              :                                     );
    5054            0 :                                 }
    5055              :                             } else {
    5056              :                                 // This should never happen: a shard with no generation is only permitted when it was created in some state
    5057              :                                 // other than PlacementPolicy::Attached (and generation is always written to DB before setting Attached in memory)
    5058            0 :                                 debug_assert!(false);
    5059            0 :                                 tracing::error!(
    5060            0 :                                     "Shard {shard_id} generation is None, but it is in PlacementPolicy::Attached mode!"
    5061              :                                 );
    5062            0 :                                 continue;
    5063              :                             }
    5064              :                         }
    5065              :                         PlacementPolicy::Secondary | PlacementPolicy::Detached => {
    5066            0 :                             return Err(ApiError::Conflict(format!(
    5067            0 :                                 "Shard {shard_id} tenant has policy {:?}",
    5068            0 :                                 shard.policy
    5069            0 :                             )));
    5070              :                         }
    5071              :                     }
    5072              :                 }
    5073              : 
    5074            0 :                 return Err(ApiError::ResourceUnavailable(
    5075            0 :                     "One or more shards in tenant is not yet attached".into(),
    5076            0 :                 ));
    5077            0 :             }
    5078              : 
    5079            0 :             let locked = self.inner.read().unwrap();
    5080              :             for ShardGenerationState {
    5081            0 :                 tenant_shard_id,
    5082            0 :                 generation,
    5083            0 :                 generation_pageserver,
    5084            0 :             } in generations
    5085              :             {
    5086            0 :                 let node_id = generation_pageserver.expect("We checked for None above");
    5087            0 :                 let node = locked
    5088            0 :                     .nodes
    5089            0 :                     .get(&node_id)
    5090            0 :                     .ok_or(ApiError::Conflict(format!(
    5091            0 :                         "Raced with removal of node {node_id}"
    5092            0 :                     )))?;
    5093            0 :                 let generation = generation.expect("Checked above");
    5094              : 
    5095            0 :                 let tenant = locked.tenants.get(&tenant_shard_id);
    5096              : 
    5097              :                 // TODO(vlad): Abstract the logic that finds stale attached locations
    5098              :                 // from observed state into a [`Service`] method.
    5099            0 :                 let other_locations = match tenant {
    5100            0 :                     Some(tenant) => {
    5101            0 :                         let mut other = tenant.attached_locations();
    5102            0 :                         let latest_location_index =
    5103            0 :                             other.iter().position(|&l| l == (node.get_id(), generation));
    5104            0 :                         if let Some(idx) = latest_location_index {
    5105            0 :                             other.remove(idx);
    5106            0 :                         }
    5107              : 
    5108            0 :                         other
    5109              :                     }
    5110            0 :                     None => Vec::default(),
    5111              :                 };
    5112              : 
    5113            0 :                 let location = ShardMutationLocations {
    5114            0 :                     latest: MutationLocation {
    5115            0 :                         node: node.clone(),
    5116            0 :                         generation,
    5117            0 :                     },
    5118            0 :                     other: other_locations
    5119            0 :                         .into_iter()
    5120            0 :                         .filter_map(|(node_id, generation)| {
    5121            0 :                             let node = locked.nodes.get(&node_id)?;
    5122              : 
    5123            0 :                             Some(MutationLocation {
    5124            0 :                                 node: node.clone(),
    5125            0 :                                 generation,
    5126            0 :                             })
    5127            0 :                         })
    5128            0 :                         .collect(),
    5129              :                 };
    5130            0 :                 locations.0.insert(tenant_shard_id, location);
    5131              :             }
    5132              : 
    5133            0 :             locations
    5134              :         };
    5135              : 
    5136            0 :         let result = op(mutation_locations.clone()).await;
    5137              : 
    5138              :         // Post-check: are all the generations of all the shards the same as they were initially?  This proves that
    5139              :         // our remote operation executed on the latest generation and is therefore persistent.
    5140              :         {
    5141            0 :             let latest_generations = self.persistence.tenant_generations(tenant_id).await?;
    5142            0 :             if latest_generations
    5143            0 :                 .into_iter()
    5144            0 :                 .map(
    5145              :                     |ShardGenerationState {
    5146              :                          tenant_shard_id,
    5147              :                          generation,
    5148              :                          generation_pageserver: _,
    5149            0 :                      }| (tenant_shard_id, generation),
    5150              :                 )
    5151            0 :                 .collect::<Vec<_>>()
    5152            0 :                 != mutation_locations
    5153            0 :                     .0
    5154            0 :                     .into_iter()
    5155            0 :                     .map(|i| (i.0, Some(i.1.latest.generation)))
    5156            0 :                     .collect::<Vec<_>>()
    5157              :             {
    5158              :                 // We raced with something that incremented the generation, and therefore cannot be
    5159              :                 // confident that our actions are persistent (they might have hit an old generation).
    5160              :                 //
    5161              :                 // This is safe but requires a retry: ask the client to do that by giving them a 503 response.
    5162            0 :                 return Err(ApiError::ResourceUnavailable(
    5163            0 :                     "Tenant attachment changed, please retry".into(),
    5164            0 :                 ));
    5165            0 :             }
    5166              :         }
    5167              : 
    5168            0 :         Ok(result)
    5169            0 :     }
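
// Illustrative sketch (not part of service.rs) of the check that makes
// `tenant_remote_mutation` safe: snapshot the shard generations, run the remote
// operation, then re-read and compare. If any generation advanced, the operation
// may have landed on a stale attachment and the caller is asked to retry.
// Shard/generation types are simplified stand-ins; the real reads are async DB calls.
async fn mutate_with_generation_check<G, F, Fut, R>(
    read_generations: G,
    op: F,
) -> Result<R, &'static str>
where
    G: Fn() -> Vec<(u32, u64)>, // (shard index, generation)
    F: FnOnce() -> Fut,
    Fut: std::future::Future<Output = R>,
{
    let before = read_generations();

    // Run the caller-provided remote mutation.
    let result = op().await;

    // Post-check: if any shard's generation moved while we were running, the
    // mutation may not be durable, so report a retryable failure instead.
    if read_generations() != before {
        return Err("tenant attachment changed, please retry");
    }

    Ok(result)
}
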
    5170              : 
    5171            0 :     pub(crate) async fn tenant_timeline_delete(
    5172            0 :         self: &Arc<Self>,
    5173            0 :         tenant_id: TenantId,
    5174            0 :         timeline_id: TimelineId,
    5175            0 :     ) -> Result<StatusCode, ApiError> {
    5176            0 :         tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
    5177            0 :         let _tenant_lock = trace_shared_lock(
    5178            0 :             &self.tenant_op_locks,
    5179            0 :             tenant_id,
    5180            0 :             TenantOperations::TimelineDelete,
    5181            0 :         )
    5182            0 :         .await;
    5183              : 
    5184            0 :         let status_code = self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
    5185            0 :             if targets.0.is_empty() {
    5186            0 :                 return Err(ApiError::NotFound(
    5187            0 :                     anyhow::anyhow!("Tenant not found").into(),
    5188            0 :                 ));
    5189            0 :             }
    5190              : 
    5191            0 :             let (shard_zero_tid, shard_zero_locations) = targets.0.pop_first().expect("Must have at least one shard");
    5192            0 :             assert!(shard_zero_tid.is_shard_zero());
    5193              : 
    5194            0 :             async fn delete_one(
    5195            0 :                 tenant_shard_id: TenantShardId,
    5196            0 :                 timeline_id: TimelineId,
    5197            0 :                 node: Node,
    5198            0 :                 http_client: reqwest::Client,
    5199            0 :                 jwt: Option<String>,
    5200            0 :             ) -> Result<StatusCode, ApiError> {
    5201            0 :                 tracing::info!(
    5202            0 :                     "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    5203              :                 );
    5204              : 
    5205            0 :                 let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());
    5206            0 :                 let res = client
    5207            0 :                     .timeline_delete(tenant_shard_id, timeline_id)
    5208            0 :                     .await;
    5209              : 
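                      :                 // Map the per-shard pageserver response: success and 409 Conflict pass through
                      :                 // as Ok (409 means a deletion was already running on that shard); 412 "Requested
                      :                 // tenant is missing" and 503 become retryable ResourceUnavailable errors;
                      :                 // anything else is reported as an internal error.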
    5210            0 :                 match res {
    5211            0 :                     Ok(ok) => Ok(ok),
    5212            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::CONFLICT, _)) => Ok(StatusCode::CONFLICT),
    5213            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg)) if msg.contains("Requested tenant is missing") => {
    5214            0 :                         Err(ApiError::ResourceUnavailable("Tenant migration in progress".into()))
    5215              :                     },
    5216            0 :                     Err(mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg)) => Err(ApiError::ResourceUnavailable(msg.into())),
    5217            0 :                     Err(e) => {
    5218            0 :                         Err(
    5219            0 :                             ApiError::InternalServerError(anyhow::anyhow!(
    5220            0 :                                 "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
    5221            0 :                             ))
    5222            0 :                         )
    5223              :                     }
    5224              :                 }
    5225            0 :             }
    5226              : 
    5227            0 :             let locations = targets.0.iter().map(|t| (*t.0, t.1.latest.node.clone())).collect();
    5228            0 :             let statuses = self
    5229            0 :                 .tenant_for_shards(locations, |tenant_shard_id: TenantShardId, node: Node| {
    5230            0 :                     Box::pin(delete_one(
    5231            0 :                         tenant_shard_id,
    5232            0 :                         timeline_id,
    5233            0 :                         node,
    5234            0 :                         self.http_client.clone(),
    5235            0 :                         self.config.pageserver_jwt_token.clone(),
    5236            0 :                     ))
    5237            0 :                 })
    5238            0 :                 .await?;
    5239              : 
    5240              :             // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero.
    5241              :             // We return 409 (Conflict) if deletion was already in progress on any of the shards
    5242              :             // and 202 (Accepted) if deletion was not already in progress on any of the shards.
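                      :             // If every shard > 0 reports 404 Not Found, we fall through and delete shard
                      :             // zero below, returning its status verbatim.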
    5243            0 :             if statuses.iter().any(|s| s == &StatusCode::CONFLICT) {
    5244            0 :                 return Ok(StatusCode::CONFLICT);
    5245            0 :             }
    5246              : 
    5247            0 :             if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
    5248            0 :                 return Ok(StatusCode::ACCEPTED);
    5249            0 :             }
    5250              : 
    5251              :             // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
    5252              :             // to shard zero, deleting it last gives the more intuitive behavior that a GET starts returning 404 only once the deletion is complete.
    5253            0 :             let shard_zero_status = delete_one(
    5254            0 :                 shard_zero_tid,
    5255            0 :                 timeline_id,
    5256            0 :                 shard_zero_locations.latest.node,
    5257            0 :                 self.http_client.clone(),
    5258            0 :                 self.config.pageserver_jwt_token.clone(),
    5259            0 :             )
    5260            0 :             .await?;
    5261            0 :             Ok(shard_zero_status)
    5262            0 :         }).await?;
    5263              : 
    5264            0 :         self.tenant_timeline_delete_safekeepers(tenant_id, timeline_id)
    5265            0 :             .await?;
    5266              : 
    5267            0 :         status_code
    5268            0 :     }
    5269              :     /// When you know the TenantId but not a specific shard, and would like to get the node holding shard 0.
    5270            0 :     pub(crate) async fn tenant_shard0_node(
    5271            0 :         &self,
    5272            0 :         tenant_id: TenantId,
    5273            0 :     ) -> Result<(Node, TenantShardId), ApiError> {
    5274            0 :         let tenant_shard_id = {
    5275            0 :             let locked = self.inner.read().unwrap();
    5276            0 :             let Some((tenant_shard_id, _shard)) = locked
    5277            0 :                 .tenants
    5278            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5279            0 :                 .next()
    5280              :             else {
    5281            0 :                 return Err(ApiError::NotFound(
    5282            0 :                     anyhow::anyhow!("Tenant {tenant_id} not found").into(),
    5283            0 :                 ));
    5284              :             };
    5285              : 
    5286            0 :             *tenant_shard_id
    5287              :         };
    5288              : 
    5289            0 :         self.tenant_shard_node(tenant_shard_id)
    5290            0 :             .await
    5291            0 :             .map(|node| (node, tenant_shard_id))
    5292            0 :     }
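                      :
                      :     // A minimal usage sketch for the helper above (hypothetical caller code: `service`,
                      :     // `http_client` and `jwt` are stand-ins for illustration), mirroring the client
                      :     // construction used by delete_one further up:
                      :     //
                      :     //     let (node, shard_zero_id) = service.tenant_shard0_node(tenant_id).await?;
                      :     //     let client = PageserverClient::new(node.get_id(), http_client, node.base_url(), jwt.as_deref());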
    5293              : 
    5294              :     /// When you need to send an HTTP request to the pageserver that holds a shard of a tenant, this
    5295              :     /// function looks up and returns the node. If the shard isn't found, returns Err(ApiError::NotFound).
    5296            0 :     pub(crate) async fn tenant_shard_node(
    5297            0 :         &self,
    5298            0 :         tenant_shard_id: TenantShardId,
    5299            0 :     ) -> Result<Node, ApiError> {
    5300              :         // Look up in-memory state and maybe use the node from there.
    5301              :         {
    5302            0 :             let locked = self.inner.read().unwrap();
    5303            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    5304            0 :                 return Err(ApiError::NotFound(
    5305            0 :                     anyhow::anyhow!("Tenant shard {tenant_shard_id} not found").into(),
    5306            0 :                 ));
    5307              :             };
    5308              : 
    5309            0 :             let Some(intent_node_id) = shard.intent.get_attached() else {
    5310            0 :                 tracing::warn!(
    5311            0 :                     tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
    5312            0 :                     "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
    5313              :                     shard.policy
    5314              :                 );
    5315            0 :                 return Err(ApiError::Conflict(
    5316            0 :                     "Cannot call timeline API on non-attached tenant".to_string(),
    5317            0 :                 ));
    5318              :             };
    5319              : 
    5320            0 :             if shard.reconciler.is_none() {
    5321              :                 // Optimization: while no reconcile is in flight, we may trust our in-memory state
    5322              :                 // to tell us which pageserver to use. Otherwise we will fall through and hit the database
    5323            0 :                 let Some(node) = locked.nodes.get(intent_node_id) else {
    5324              :                     // This should never happen
    5325            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5326            0 :                         "Shard refers to nonexistent node"
    5327            0 :                     )));
    5328              :                 };
    5329            0 :                 return Ok(node.clone());
    5330            0 :             }
    5331              :         };
    5332              : 
    5333              :         // Look up the latest attached pageserver location from the database
    5334              :         // generation state: this will reflect the progress of any ongoing migration.
    5335              :         // Note that it is not guaranteed to _stay_ here; our caller must still handle
    5336              :         // the case where they call through to the pageserver and get a 404.
    5337            0 :         let db_result = self
    5338            0 :             .persistence
    5339            0 :             .tenant_generations(tenant_shard_id.tenant_id)
    5340            0 :             .await?;
    5341              :         let Some(ShardGenerationState {
    5342              :             tenant_shard_id: _,
    5343              :             generation: _,
    5344            0 :             generation_pageserver: Some(node_id),
    5345            0 :         }) = db_result
    5346            0 :             .into_iter()
    5347            0 :             .find(|s| s.tenant_shard_id == tenant_shard_id)
    5348              :         else {
    5349              :             // This can happen if we raced with a tenant deletion or a shard split.  On a retry
    5350              :             // the caller will either succeed (shard split case), get a proper 404 (deletion case),
    5351              :             // or a conflict response (case where tenant was detached in background)
    5352            0 :             return Err(ApiError::ResourceUnavailable(
    5353            0 :                 format!("Shard {tenant_shard_id} not found in database, or is not attached").into(),
    5354            0 :             ));
    5355              :         };
    5356            0 :         let locked = self.inner.read().unwrap();
    5357            0 :         let Some(node) = locked.nodes.get(&node_id) else {
    5358              :             // This should never happen
    5359            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5360            0 :                 "Shard refers to nonexistent node"
    5361            0 :             )));
    5362              :         };
    5363              : 
    5364            0 :         Ok(node.clone())
    5365            0 :     }
    5366              : 
    5367            0 :     pub(crate) fn tenant_locate(
    5368            0 :         &self,
    5369            0 :         tenant_id: TenantId,
    5370            0 :     ) -> Result<TenantLocateResponse, ApiError> {
    5371            0 :         let locked = self.inner.read().unwrap();
    5372            0 :         tracing::info!("Locating shards for tenant {tenant_id}");
    5373              : 
    5374            0 :         let mut result = Vec::new();
    5375            0 :         let mut shard_params: Option<ShardParameters> = None;
    5376              : 
    5377            0 :         for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5378              :         {
    5379            0 :             let node_id =
    5380            0 :                 shard
    5381            0 :                     .intent
    5382            0 :                     .get_attached()
    5383            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    5384            0 :                         "Cannot locate a tenant that is not attached"
    5385            0 :                     )))?;
    5386              : 
    5387            0 :             let node = locked
    5388            0 :                 .nodes
    5389            0 :                 .get(&node_id)
    5390            0 :                 .expect("Pageservers may not be deleted while referenced");
    5391              : 
    5392            0 :             result.push(node.shard_location(*tenant_shard_id));
    5393              : 
    5394            0 :             match &shard_params {
    5395            0 :                 None => {
    5396            0 :                     shard_params = Some(ShardParameters {
    5397            0 :                         stripe_size: shard.shard.stripe_size,
    5398            0 :                         count: shard.shard.count,
    5399            0 :                     });
    5400            0 :                 }
    5401            0 :                 Some(params) => {
    5402            0 :                     if params.stripe_size != shard.shard.stripe_size {
    5403              :                         // This should never happen.  We enforce at runtime because it's simpler than
    5404              :                         // adding an extra per-tenant data structure to store the things that should be the same
    5405            0 :                         return Err(ApiError::InternalServerError(anyhow::anyhow!(
    5406            0 :                             "Inconsistent shard stripe size parameters!"
    5407            0 :                         )));
    5408            0 :                     }
    5409              :                 }
    5410              :             }
    5411              :         }
    5412              : 
    5413            0 :         if result.is_empty() {
    5414            0 :             return Err(ApiError::NotFound(
    5415            0 :                 anyhow::anyhow!("No shards for this tenant ID found").into(),
    5416            0 :             ));
    5417            0 :         }
    5418            0 :         let shard_params = shard_params.expect("result is non-empty, therefore this is set");
    5419            0 :         tracing::info!(
    5420            0 :             "Located tenant {} with params {:?} on shards {}",
    5421              :             tenant_id,
    5422              :             shard_params,
    5423            0 :             result
    5424            0 :                 .iter()
    5425            0 :                 .map(|s| format!("{s:?}"))
    5426            0 :                 .collect::<Vec<_>>()
    5427            0 :                 .join(",")
    5428              :         );
    5429              : 
    5430            0 :         Ok(TenantLocateResponse {
    5431            0 :             shards: result,
    5432            0 :             shard_params,
    5433            0 :         })
    5434            0 :     }
    5435              : 
    5436              :     /// Returns None if the input iterator of shards does not include a shard with number=0
    5437            0 :     fn tenant_describe_impl<'a>(
    5438            0 :         &self,
    5439            0 :         shards: impl Iterator<Item = &'a TenantShard>,
    5440            0 :     ) -> Option<TenantDescribeResponse> {
    5441            0 :         let mut shard_zero = None;
    5442            0 :         let mut describe_shards = Vec::new();
    5443              : 
    5444            0 :         for shard in shards {
    5445            0 :             if shard.tenant_shard_id.is_shard_zero() {
    5446            0 :                 shard_zero = Some(shard);
    5447            0 :             }
    5448              : 
    5449            0 :             describe_shards.push(TenantDescribeResponseShard {
    5450            0 :                 tenant_shard_id: shard.tenant_shard_id,
    5451            0 :                 node_attached: *shard.intent.get_attached(),
    5452            0 :                 node_secondary: shard.intent.get_secondary().to_vec(),
    5453            0 :                 last_error: shard
    5454            0 :                     .last_error
    5455            0 :                     .lock()
    5456            0 :                     .unwrap()
    5457            0 :                     .as_ref()
    5458            0 :                     .map(|e| format!("{e}"))
    5459            0 :                     .unwrap_or("".to_string())
    5460            0 :                     .clone(),
    5461            0 :                 is_reconciling: shard.reconciler.is_some(),
    5462            0 :                 is_pending_compute_notification: shard.pending_compute_notification,
    5463            0 :                 is_splitting: matches!(shard.splitting, SplitState::Splitting),
    5464            0 :                 is_importing: shard.importing == TimelineImportState::Importing,
    5465            0 :                 scheduling_policy: shard.get_scheduling_policy(),
    5466            0 :                 preferred_az_id: shard.preferred_az().map(ToString::to_string),
    5467              :             })
    5468              :         }
    5469              : 
    5470            0 :         let shard_zero = shard_zero?;
    5471              : 
    5472            0 :         Some(TenantDescribeResponse {
    5473            0 :             tenant_id: shard_zero.tenant_shard_id.tenant_id,
    5474            0 :             shards: describe_shards,
    5475            0 :             stripe_size: shard_zero.shard.stripe_size,
    5476            0 :             policy: shard_zero.policy.clone(),
    5477            0 :             config: shard_zero.config.clone(),
    5478            0 :         })
    5479            0 :     }
    5480              : 
    5481            0 :     pub(crate) fn tenant_describe(
    5482            0 :         &self,
    5483            0 :         tenant_id: TenantId,
    5484            0 :     ) -> Result<TenantDescribeResponse, ApiError> {
    5485            0 :         let locked = self.inner.read().unwrap();
    5486              : 
    5487            0 :         self.tenant_describe_impl(
    5488            0 :             locked
    5489            0 :                 .tenants
    5490            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5491            0 :                 .map(|(_k, v)| v),
    5492              :         )
    5493            0 :         .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
    5494            0 :     }
    5495              : 
    5496              :     /* BEGIN_HADRON */
    5497            0 :     pub(crate) async fn tenant_timeline_describe(
    5498            0 :         &self,
    5499            0 :         tenant_id: TenantId,
    5500            0 :         timeline_id: TimelineId,
    5501            0 :     ) -> Result<TenantTimelineDescribeResponse, ApiError> {
    5502            0 :         self.tenant_remote_mutation(tenant_id, |locations| async move {
    5503            0 :             if locations.0.is_empty() {
    5504            0 :                 return Err(ApiError::NotFound(
    5505            0 :                     anyhow::anyhow!("Tenant not found").into(),
    5506            0 :                 ));
    5507            0 :             };
    5508              : 
    5509            0 :             let locations: Vec<(TenantShardId, Node)> = locations
    5510            0 :                 .0
    5511            0 :                 .iter()
    5512            0 :                 .map(|t| (*t.0, t.1.latest.node.clone()))
    5513            0 :                 .collect();
    5514            0 :             let mut futs = FuturesUnordered::new();
    5515              : 
    5516            0 :             for (shard_id, node) in locations {
    5517            0 :                 futs.push({
    5518            0 :                     async move {
    5519            0 :                         let result = node
    5520            0 :                             .with_client_retries(
    5521            0 :                                 |client| async move {
    5522            0 :                                     client
    5523            0 :                                         .tenant_timeline_describe(&shard_id, &timeline_id)
    5524            0 :                                         .await
    5525            0 :                                 },
    5526            0 :                                 &self.http_client,
    5527            0 :                                 &self.config.pageserver_jwt_token,
    5528              :                                 3,
    5529              :                                 3,
    5530            0 :                                 Duration::from_secs(30),
    5531            0 :                                 &self.cancel,
    5532              :                             )
    5533            0 :                             .await;
    5534            0 :                         (result, shard_id, node.get_id())
    5535            0 :                     }
    5536              :                 });
    5537              :             }
    5538              : 
    5539            0 :             let mut results: Vec<TimelineInfo> = Vec::new();
    5540            0 :             while let Some((result, tenant_shard_id, node_id)) = futs.next().await {
    5541            0 :                 match result {
    5542            0 :                     Some(Ok(timeline_info)) => results.push(timeline_info),
    5543            0 :                     Some(Err(e)) => {
    5544            0 :                         tracing::warn!(
    5545            0 :                             "Failed to describe tenant {} timeline {} for pageserver {}: {e}",
    5546              :                             tenant_shard_id,
    5547              :                             timeline_id,
    5548              :                             node_id,
    5549              :                         );
    5550            0 :                         return Err(ApiError::ResourceUnavailable(format!("{e}").into()));
    5551              :                     }
    5552            0 :                     None => return Err(ApiError::Cancelled),
    5553              :                 }
    5554              :             }
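                      :             // The tenant-level image_consistent_lsn is the minimum across all shards; if any
                      :             // shard cannot report one, the tenant-level value is left as None.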
    5555            0 :             let mut image_consistent_lsn: Option<Lsn> = Some(Lsn::MAX);
    5556            0 :             for timeline_info in &results {
    5557            0 :                 if let Some(tline_image_consistent_lsn) = timeline_info.image_consistent_lsn {
    5558            0 :                     image_consistent_lsn = Some(std::cmp::min(
    5559            0 :                         image_consistent_lsn.unwrap(),
    5560            0 :                         tline_image_consistent_lsn,
    5561            0 :                     ));
    5562            0 :                 } else {
    5563            0 :                     tracing::warn!(
    5564            0 :                         "Timeline {} on shard {} does not have image consistent lsn",
    5565              :                         timeline_info.timeline_id,
    5566              :                         timeline_info.tenant_id
    5567              :                     );
    5568            0 :                     image_consistent_lsn = None;
    5569            0 :                     break;
    5570              :                 }
    5571              :             }
    5572              : 
    5573            0 :             Ok(TenantTimelineDescribeResponse {
    5574            0 :                 shards: results,
    5575            0 :                 image_consistent_lsn,
    5576            0 :             })
    5577            0 :         })
    5578            0 :         .await?
    5579            0 :     }
    5580              :     /* END_HADRON */
    5581              : 
    5582              :     /// limit & offset are pagination parameters. Since we are walking an in-memory HashMap, `offset` does not
    5583              :     /// avoid traversing data, it just avoids returning it. This is suitable for our purposes, since our in-memory
    5584              :     /// maps are small enough to traverse fast; our pagination is just there to avoid serializing huge JSON responses
    5585              :     /// in our external API.
    5586            0 :     pub(crate) fn tenant_list(
    5587            0 :         &self,
    5588            0 :         limit: Option<usize>,
    5589            0 :         start_after: Option<TenantId>,
    5590            0 :     ) -> Vec<TenantDescribeResponse> {
    5591            0 :         let locked = self.inner.read().unwrap();
    5592              : 
    5593              :         // Apply start_after parameter
    5594            0 :         let shard_range = match start_after {
    5595            0 :             None => locked.tenants.range(..),
    5596            0 :             Some(tenant_id) => locked.tenants.range(
    5597            0 :                 TenantShardId {
    5598            0 :                     tenant_id,
    5599            0 :                     shard_number: ShardNumber(u8::MAX),
    5600            0 :                     shard_count: ShardCount(u8::MAX),
    5601            0 :                 }..,
    5602              :             ),
    5603              :         };
    5604              : 
    5605            0 :         let mut result = Vec::new();
    5606            0 :         for (_tenant_id, tenant_shards) in &shard_range.group_by(|(id, _shard)| id.tenant_id) {
    5607            0 :             result.push(
    5608            0 :                 self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
    5609            0 :                     .expect("Groups are always non-empty"),
    5610              :             );
    5611              : 
    5612              :             // Enforce `limit` parameter
    5613            0 :             if let Some(limit) = limit {
    5614            0 :                 if result.len() >= limit {
    5615            0 :                     break;
    5616            0 :                 }
    5617            0 :             }
    5618              :         }
    5619              : 
    5620            0 :         result
    5621            0 :     }
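                      :
                      :     // A minimal pagination sketch over tenant_list (hypothetical caller code; `service`
                      :     // and the page size are assumptions for illustration): keep passing the last returned
                      :     // tenant_id as start_after until a page comes back empty.
                      :     //
                      :     //     let mut start_after = None;
                      :     //     loop {
                      :     //         let page = service.tenant_list(Some(1000), start_after);
                      :     //         let Some(last) = page.last() else { break };
                      :     //         start_after = Some(last.tenant_id);
                      :     //         // ... use `page` ...
                      :     //     }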
    5622              : 
    5623              :     #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
    5624              :     async fn abort_tenant_shard_split(
    5625              :         &self,
    5626              :         op: &TenantShardSplitAbort,
    5627              :     ) -> Result<(), TenantShardSplitAbortError> {
    5628              :         // Cleaning up a split:
    5629              :         // - Parent shards are not destroyed during a split, just detached.
    5630              :         // - Failed pageserver split API calls can leave the remote node with just the parent attached,
    5631              :         //   just the children attached, or both.
    5632              :         //
    5633              :         // Therefore our work to do is to:
    5634              :         // 1. Clean up storage controller's internal state to just refer to parents, no children
    5635              :         // 2. Call out to pageservers to ensure that children are detached
    5636              :         // 3. Call out to pageservers to ensure that parents are attached.
    5637              :         //
    5638              :         // Crash safety:
    5639              :         // - If the storage controller stops running during this cleanup *after* clearing the splitting state
    5640              :         //   from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
    5641              :         //   and detach them.
    5642              :         // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
    5643              :         //   from our database, then we will re-enter this cleanup routine on startup.
    5644              : 
    5645              :         let TenantShardSplitAbort {
    5646              :             tenant_id,
    5647              :             new_shard_count,
    5648              :             new_stripe_size,
    5649              :             ..
    5650              :         } = op;
    5651              : 
    5652              :         // First abort persistent state, if any exists.
    5653              :         match self
    5654              :             .persistence
    5655              :             .abort_shard_split(*tenant_id, *new_shard_count)
    5656              :             .await?
    5657              :         {
    5658              :             AbortShardSplitStatus::Aborted => {
    5659              :                 // Proceed to roll back any child shards created on pageservers
    5660              :             }
    5661              :             AbortShardSplitStatus::Complete => {
    5662              :                 // The split completed (we might hit that path if e.g. our database transaction
    5663              :                 // to write the completion landed in the database, but we dropped connection
    5664              :                 // before seeing the result).
    5665              :                 //
    5666              :                 // We must update in-memory state to reflect the successful split.
    5667              :                 self.tenant_shard_split_commit_inmem(
    5668              :                     *tenant_id,
    5669              :                     *new_shard_count,
    5670              :                     *new_stripe_size,
    5671              :                 );
    5672              :                 return Ok(());
    5673              :             }
    5674              :         }
    5675              : 
    5676              :         // Clean up in-memory state, and accumulate the list of child locations that need detaching
    5677              :         let detach_locations: Vec<(Node, TenantShardId)> = {
    5678              :             let mut detach_locations = Vec::new();
    5679              :             let mut locked = self.inner.write().unwrap();
    5680              :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5681              : 
    5682              :             for (tenant_shard_id, shard) in
    5683              :                 tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
    5684              :             {
    5685              :                 if shard.shard.count == op.new_shard_count {
    5686              :                     // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
    5687              :                     // is infallible, so if we got an error we shouldn't have got that far.
    5688              :                     tracing::warn!(
    5689              :                         "During split abort, child shard {tenant_shard_id} found in-memory"
    5690              :                     );
    5691              :                     continue;
    5692              :                 }
    5693              : 
    5694              :                 // Add the children of this shard to this list of things to detach
    5695              :                 if let Some(node_id) = shard.intent.get_attached() {
    5696              :                     for child_id in tenant_shard_id.split(*new_shard_count) {
    5697              :                         detach_locations.push((
    5698              :                             nodes
    5699              :                                 .get(node_id)
    5700              :                                 .expect("Intent references nonexistent node")
    5701              :                                 .clone(),
    5702              :                             child_id,
    5703              :                         ));
    5704              :                     }
    5705              :                 } else {
    5706              :                     tracing::warn!(
    5707              :                         "During split abort, shard {tenant_shard_id} has no attached location"
    5708              :                     );
    5709              :                 }
    5710              : 
    5711              :                 tracing::info!("Restoring parent shard {tenant_shard_id}");
    5712              : 
    5713              :                 // Drop any intents that refer to unavailable nodes, to enable this abort to proceed even
    5714              :                 // if the original attachment location is offline.
    5715              :                 if let Some(node_id) = shard.intent.get_attached() {
    5716              :                     if !nodes.get(node_id).unwrap().is_available() {
    5717              :                         tracing::info!(
    5718              :                             "Demoting attached intent for {tenant_shard_id} on unavailable node {node_id}"
    5719              :                         );
    5720              :                         shard.intent.demote_attached(scheduler, *node_id);
    5721              :                     }
    5722              :                 }
    5723              :                 for node_id in shard.intent.get_secondary().clone() {
    5724              :                     if !nodes.get(&node_id).unwrap().is_available() {
    5725              :                         tracing::info!(
    5726              :                             "Dropping secondary intent for {tenant_shard_id} on unavailable node {node_id}"
    5727              :                         );
    5728              :                         shard.intent.remove_secondary(scheduler, node_id);
    5729              :                     }
    5730              :                 }
    5731              : 
    5732              :                 shard.splitting = SplitState::Idle;
    5733              :                 if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
    5734              :                     // If this shard can't be scheduled now (perhaps due to offline nodes or
    5735              :                     // capacity issues), that must not prevent us rolling back a split.  In this
    5736              :                     // case it should be eventually scheduled in the background.
    5737              :                     tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
    5738              :                 }
    5739              : 
    5740              :                 self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High);
    5741              :             }
    5742              : 
    5743              :             // We don't expect any new_shard_count shards to exist here, but drop them just in case
    5744              :             tenants
    5745            0 :                 .retain(|id, s| !(id.tenant_id == *tenant_id && s.shard.count == *new_shard_count));
    5746              : 
    5747              :             detach_locations
    5748              :         };
    5749              : 
    5750              :         for (node, child_id) in detach_locations {
    5751              :             if !node.is_available() {
    5752              :                 // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
    5753              :                 // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
    5754              :                 // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
    5755              :                 // them from the node.
    5756              :                 tracing::warn!(
    5757              :                     "Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated."
    5758              :                 );
    5759              :                 continue;
    5760              :             }
    5761              : 
    5762              :             // Detach the remote child.  If the pageserver split API call is still in progress, this call will get
    5763              :             // a 503 and retry, up to our limit.
    5764              :             tracing::info!("Detaching {child_id} on {node}...");
    5765              :             match node
    5766              :                 .with_client_retries(
    5767            0 :                     |client| async move {
    5768            0 :                         let config = LocationConfig {
    5769            0 :                             mode: LocationConfigMode::Detached,
    5770            0 :                             generation: None,
    5771            0 :                             secondary_conf: None,
    5772            0 :                             shard_number: child_id.shard_number.0,
    5773            0 :                             shard_count: child_id.shard_count.literal(),
    5774            0 :                             // Stripe size and tenant config don't matter when detaching
    5775            0 :                             shard_stripe_size: 0,
    5776            0 :                             tenant_conf: TenantConfig::default(),
    5777            0 :                         };
    5778              : 
    5779            0 :                         client.location_config(child_id, config, None, false).await
    5780            0 :                     },
    5781              :                     &self.http_client,
    5782              :                     &self.config.pageserver_jwt_token,
    5783              :                     1,
    5784              :                     10,
    5785              :                     Duration::from_secs(5),
    5786              :                     &self.reconcilers_cancel,
    5787              :                 )
    5788              :                 .await
    5789              :             {
    5790              :                 Some(Ok(_)) => {}
    5791              :                 Some(Err(e)) => {
    5792              :                     // We failed to communicate with the remote node.  This is problematic: we may be
    5793              :                     // leaving it with a rogue child shard.
    5794              :                     tracing::warn!(
    5795              :                         "Failed to detach child {child_id} from node {node} during abort"
    5796              :                     );
    5797              :                     return Err(e.into());
    5798              :                 }
    5799              :                 None => {
    5800              :                     // Cancellation: we were shut down or the node went offline. Shutdown is fine; we'll
    5801              :                     // clean up on restart. The node going offline requires a retry.
    5802              :                     return Err(TenantShardSplitAbortError::Unavailable);
    5803              :                 }
    5804              :             };
    5805              :         }
    5806              : 
    5807              :         tracing::info!("Successfully aborted split");
    5808              :         Ok(())
    5809              :     }
    5810              : 
    5811              :     /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
    5812              :     /// of the tenant map to reflect the child shards that exist after the split.
    5813            0 :     fn tenant_shard_split_commit_inmem(
    5814            0 :         &self,
    5815            0 :         tenant_id: TenantId,
    5816            0 :         new_shard_count: ShardCount,
    5817            0 :         new_stripe_size: Option<ShardStripeSize>,
    5818            0 :     ) -> (
    5819            0 :         TenantShardSplitResponse,
    5820            0 :         Vec<(TenantShardId, NodeId, ShardStripeSize)>,
    5821            0 :         Vec<ReconcilerWaiter>,
    5822            0 :     ) {
    5823            0 :         let mut response = TenantShardSplitResponse {
    5824            0 :             new_shards: Vec::new(),
    5825            0 :         };
    5826            0 :         let mut child_locations = Vec::new();
    5827            0 :         let mut waiters = Vec::new();
    5828              : 
    5829              :         {
    5830            0 :             let mut locked = self.inner.write().unwrap();
    5831              : 
    5832            0 :             let parent_ids = locked
    5833            0 :                 .tenants
    5834            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    5835            0 :                 .map(|(shard_id, _)| *shard_id)
    5836            0 :                 .collect::<Vec<_>>();
    5837              : 
    5838            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    5839            0 :             for parent_id in parent_ids {
    5840            0 :                 let child_ids = parent_id.split(new_shard_count);
    5841              : 
    5842              :                 let (
    5843            0 :                     pageserver,
    5844            0 :                     generation,
    5845            0 :                     policy,
    5846            0 :                     parent_ident,
    5847            0 :                     config,
    5848            0 :                     preferred_az,
    5849            0 :                     secondary_count,
    5850              :                 ) = {
    5851            0 :                     let mut old_state = tenants
    5852            0 :                         .remove(&parent_id)
    5853            0 :                         .expect("It was present, we just split it");
    5854              : 
    5855              :                     // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
    5856              :                     // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
    5857              :                     // nothing else can clear this.
    5858            0 :                     assert!(matches!(old_state.splitting, SplitState::Splitting));
    5859              : 
    5860            0 :                     let old_attached = old_state.intent.get_attached().unwrap();
    5861            0 :                     old_state.intent.clear(scheduler);
    5862            0 :                     let generation = old_state.generation.expect("Shard must have been attached");
    5863            0 :                     (
    5864            0 :                         old_attached,
    5865            0 :                         generation,
    5866            0 :                         old_state.policy.clone(),
    5867            0 :                         old_state.shard,
    5868            0 :                         old_state.config.clone(),
    5869            0 :                         old_state.preferred_az().cloned(),
    5870            0 :                         old_state.intent.get_secondary().len(),
    5871            0 :                     )
    5872              :                 };
    5873              : 
    5874            0 :                 let mut schedule_context = ScheduleContext::default();
    5875            0 :                 for child in child_ids {
    5876            0 :                     let mut child_shard = parent_ident;
    5877            0 :                     child_shard.number = child.shard_number;
    5878            0 :                     child_shard.count = child.shard_count;
    5879            0 :                     if let Some(stripe_size) = new_stripe_size {
    5880            0 :                         child_shard.stripe_size = stripe_size;
    5881            0 :                     }
    5882              : 
    5883            0 :                     let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
    5884            0 :                     child_observed.insert(
    5885            0 :                         pageserver,
    5886            0 :                         ObservedStateLocation {
    5887            0 :                             conf: Some(attached_location_conf(
    5888            0 :                                 generation,
    5889            0 :                                 &child_shard,
    5890            0 :                                 &config,
    5891            0 :                                 &policy,
    5892            0 :                                 secondary_count,
    5893            0 :                             )),
    5894            0 :                         },
    5895              :                     );
    5896              : 
    5897            0 :                     let mut child_state =
    5898            0 :                         TenantShard::new(child, child_shard, policy.clone(), preferred_az.clone());
    5899            0 :                     child_state.intent =
    5900            0 :                         IntentState::single(scheduler, Some(pageserver), preferred_az.clone());
    5901            0 :                     child_state.observed = ObservedState {
    5902            0 :                         locations: child_observed,
    5903            0 :                     };
    5904            0 :                     child_state.generation = Some(generation);
    5905            0 :                     child_state.config = config.clone();
    5906              : 
    5907              :                     // The child's TenantShard::splitting is intentionally left at the default value of Idle,
    5908              :                     // as at this point in the split process we have succeeded and this part is infallible:
    5909              :                     // we will never need to do any special recovery from this state.
    5910              : 
    5911            0 :                     child_locations.push((child, pageserver, child_shard.stripe_size));
    5912              : 
    5913            0 :                     if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
    5914              :                         // This is not fatal, because we've implicitly already got an attached
    5915              :                         // location for the child shard.  Failure here just means we couldn't
    5916              :                         // find a secondary (e.g. because cluster is overloaded).
    5917            0 :                         tracing::warn!("Failed to schedule child shard {child}: {e}");
    5918            0 :                     }
    5919              :                     // In the background, attach secondary locations for the new shards
    5920            0 :                     if let Some(waiter) = self.maybe_reconcile_shard(
    5921            0 :                         &mut child_state,
    5922            0 :                         nodes,
    5923            0 :                         ReconcilerPriority::High,
    5924            0 :                     ) {
    5925            0 :                         waiters.push(waiter);
    5926            0 :                     }
    5927              : 
    5928            0 :                     tenants.insert(child, child_state);
    5929            0 :                     response.new_shards.push(child);
    5930              :                 }
    5931              :             }
    5932            0 :             (response, child_locations, waiters)
    5933              :         }
    5934            0 :     }
    5935              : 
    5936            0 :     async fn tenant_shard_split_start_secondaries(
    5937            0 :         &self,
    5938            0 :         tenant_id: TenantId,
    5939            0 :         waiters: Vec<ReconcilerWaiter>,
    5940            0 :     ) {
    5941              :         // Wait for initial reconcile of child shards, this creates the secondary locations
    5942            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    5943              :             // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
    5944              :             // their secondaries couldn't be attached.
    5945            0 :             tracing::warn!("Failed to reconcile after split: {e}");
    5946            0 :             return;
    5947            0 :         }
    5948              : 
    5949              :         // Take the state lock to discover the attached & secondary intents for all shards
    5950            0 :         let (attached, secondary) = {
    5951            0 :             let locked = self.inner.read().unwrap();
    5952            0 :             let mut attached = Vec::new();
    5953            0 :             let mut secondary = Vec::new();
    5954              : 
    5955            0 :             for (tenant_shard_id, shard) in
    5956            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    5957              :             {
    5958            0 :                 let Some(node_id) = shard.intent.get_attached() else {
    5959              :                     // Unexpected.  Race with a PlacementPolicy change?
    5960            0 :                     tracing::warn!(
    5961            0 :                         "No attached node on {tenant_shard_id} immediately after shard split!"
    5962              :                     );
    5963            0 :                     continue;
    5964              :                 };
    5965              : 
    5966            0 :                 let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
    5967              :                     // No secondary location.  Nothing for us to do.
    5968            0 :                     continue;
    5969              :                 };
    5970              : 
    5971            0 :                 let attached_node = locked
    5972            0 :                     .nodes
    5973            0 :                     .get(node_id)
    5974            0 :                     .expect("Pageservers may not be deleted while referenced");
    5975              : 
    5976            0 :                 let secondary_node = locked
    5977            0 :                     .nodes
    5978            0 :                     .get(secondary_node_id)
    5979            0 :                     .expect("Pageservers may not be deleted while referenced");
    5980              : 
    5981            0 :                 attached.push((*tenant_shard_id, attached_node.clone()));
    5982            0 :                 secondary.push((*tenant_shard_id, secondary_node.clone()));
    5983              :             }
    5984            0 :             (attached, secondary)
    5985              :         };
    5986              : 
    5987            0 :         if secondary.is_empty() {
    5988              :             // No secondary locations; nothing for us to do
    5989            0 :             return;
    5990            0 :         }
    5991              : 
    5992            0 :         for (_, result) in self
    5993            0 :             .tenant_for_shards_api(
    5994            0 :                 attached,
    5995            0 :                 |tenant_shard_id, client| async move {
    5996            0 :                     client.tenant_heatmap_upload(tenant_shard_id).await
    5997            0 :                 },
    5998              :                 1,
    5999              :                 1,
    6000              :                 SHORT_RECONCILE_TIMEOUT,
    6001            0 :                 &self.cancel,
    6002              :             )
    6003            0 :             .await
    6004              :         {
    6005            0 :             if let Err(e) = result {
    6006            0 :                 tracing::warn!("Error calling heatmap upload after shard split: {e}");
    6007            0 :                 return;
    6008            0 :             }
    6009              :         }
    6010              : 
    6011            0 :         for (_, result) in self
    6012            0 :             .tenant_for_shards_api(
    6013            0 :                 secondary,
    6014            0 :                 |tenant_shard_id, client| async move {
    6015            0 :                     client
    6016            0 :                         .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
    6017            0 :                         .await
    6018            0 :                 },
    6019              :                 1,
    6020              :                 1,
    6021              :                 SHORT_RECONCILE_TIMEOUT,
    6022            0 :                 &self.cancel,
    6023              :             )
    6024            0 :             .await
    6025              :         {
    6026            0 :             if let Err(e) = result {
    6027            0 :                 tracing::warn!("Error calling secondary download after shard split: {e}");
    6028            0 :                 return;
    6029            0 :             }
    6030              :         }
    6031            0 :     }
    6032              : 
    6033            0 :     pub(crate) async fn tenant_shard_split(
    6034            0 :         &self,
    6035            0 :         tenant_id: TenantId,
    6036            0 :         split_req: TenantShardSplitRequest,
    6037            0 :     ) -> Result<TenantShardSplitResponse, ApiError> {
    6038              :         // TODO: return 503 if we get stuck waiting for this lock
    6039              :         // (issue https://github.com/neondatabase/neon/issues/7108)
    6040            0 :         let _tenant_lock = trace_exclusive_lock(
    6041            0 :             &self.tenant_op_locks,
    6042            0 :             tenant_id,
    6043            0 :             TenantOperations::ShardSplit,
    6044            0 :         )
    6045            0 :         .await;
    6046              : 
    6047            0 :         let _gate = self
    6048            0 :             .reconcilers_gate
    6049            0 :             .enter()
    6050            0 :             .map_err(|_| ApiError::ShuttingDown)?;
    6051              : 
    6052              :         // Timeline imports on the pageserver side can't handle shard-splits.
    6053              :         // If the tenant is importing a timeline, don't shard-split it.
    6054            0 :         match self
    6055            0 :             .persistence
    6056            0 :             .is_tenant_importing_timeline(tenant_id)
    6057            0 :             .await
    6058              :         {
    6059            0 :             Ok(importing) => {
    6060            0 :                 if importing {
    6061            0 :                     return Err(ApiError::Conflict(
    6062            0 :                         "Cannot shard split during timeline import".to_string(),
    6063            0 :                     ));
    6064            0 :                 }
    6065              :             }
    6066            0 :             Err(err) => {
    6067            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6068            0 :                     "Failed to check for running imports: {err}"
    6069            0 :                 )));
    6070              :             }
    6071              :         }
    6072              : 
    6073            0 :         let new_shard_count = ShardCount::new(split_req.new_shard_count);
    6074            0 :         let new_stripe_size = split_req.new_stripe_size;
    6075              : 
    6076              :         // Validate the request and construct parameters.  This phase is fallible, but does not require
    6077              :         // rollback on errors, as it does no I/O and mutates no state.
    6078            0 :         let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
    6079            0 :             ShardSplitAction::NoOp(resp) => return Ok(resp),
    6080            0 :             ShardSplitAction::Split(params) => params,
    6081              :         };
    6082              : 
    6083              :         // Execute this split: this phase mutates state and does remote I/O on pageservers.  If it fails,
    6084              :         // we must roll back.
    6085            0 :         let r = self
    6086            0 :             .do_tenant_shard_split(tenant_id, shard_split_params)
    6087            0 :             .await;
    6088              : 
    6089            0 :         let (response, waiters) = match r {
    6090            0 :             Ok(r) => r,
    6091            0 :             Err(e) => {
    6092              :                 // The split might be partially done; we must do work to abort it.
    6093            0 :                 tracing::warn!("Enqueuing background abort of split on {tenant_id}");
    6094            0 :                 self.abort_tx
    6095            0 :                     .send(TenantShardSplitAbort {
    6096            0 :                         tenant_id,
    6097            0 :                         new_shard_count,
    6098            0 :                         new_stripe_size,
    6099            0 :                         _tenant_lock,
    6100            0 :                         _gate,
    6101            0 :                     })
    6102              :                     // Ignore send errors: that just means we're shutting down, and aborts are ephemeral, so it's fine to drop it.
    6103            0 :                     .ok();
    6104            0 :                 return Err(e);
    6105              :             }
    6106              :         };
    6107              : 
    6108              :         // The split is now complete.  As an optimization, we will trigger all the child shards to upload
    6109              :         // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
    6110              :         // for the background heatmap/download interval before secondaries get warm enough to migrate shards
    6111              :         // in [`Self::optimize_all`]
    6112            0 :         self.tenant_shard_split_start_secondaries(tenant_id, waiters)
    6113            0 :             .await;
    6114            0 :         Ok(response)
    6115            0 :     }
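
If do_tenant_shard_split fails partway, the error path above does not try to roll back inline: it enqueues a TenantShardSplitAbort message (carrying the tenant lock and gate guard) for a background task and returns the error. A minimal sketch of that enqueue-an-abort-on-failure pattern, with simplified stand-in types rather than the real storage controller internals:

    use std::sync::mpsc;

    // Simplified stand-in for TenantShardSplitAbort; the real message also carries
    // the tenant operation lock and a gate guard so the background abort runs
    // under the same exclusivity as the failed split.
    struct SplitAbort {
        tenant: u64,
        new_shard_count: u8,
    }

    fn split_or_enqueue_abort(
        tenant: u64,
        new_shard_count: u8,
        abort_tx: &mpsc::Sender<SplitAbort>,
        do_split: impl Fn() -> Result<Vec<u64>, String>,
    ) -> Result<Vec<u64>, String> {
        match do_split() {
            Ok(children) => Ok(children),
            Err(e) => {
                // The split may be part-done: hand cleanup to a background worker.
                // A send error only means we are shutting down, and aborts are
                // ephemeral, so it is fine to drop it.
                abort_tx.send(SplitAbort { tenant, new_shard_count }).ok();
                Err(e)
            }
        }
    }

    fn main() {
        let (tx, rx) = mpsc::channel();
        let result = split_or_enqueue_abort(7, 4, &tx, || Err("pageserver unreachable".into()));
        assert!(result.is_err());
        if let Ok(abort) = rx.try_recv() {
            println!("will abort split of tenant {} to {} shards", abort.tenant, abort.new_shard_count);
        }
    }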
    6116              : 
    6117            0 :     fn prepare_tenant_shard_split(
    6118            0 :         &self,
    6119            0 :         tenant_id: TenantId,
    6120            0 :         split_req: TenantShardSplitRequest,
    6121            0 :     ) -> Result<ShardSplitAction, ApiError> {
    6122            0 :         fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
    6123            0 :             anyhow::anyhow!("failpoint")
    6124            0 :         )));
    6125              : 
    6126            0 :         let mut policy = None;
    6127            0 :         let mut config = None;
    6128            0 :         let mut shard_ident = None;
    6129            0 :         let mut preferred_az_id = None;
    6130              :         // Validate input, and calculate which shards we will create
    6131            0 :         let (old_shard_count, targets) =
    6132              :             {
    6133            0 :                 let locked = self.inner.read().unwrap();
    6134              : 
    6135            0 :                 let pageservers = locked.nodes.clone();
    6136              : 
    6137            0 :                 let mut targets = Vec::new();
    6138              : 
    6139              :                 // In case this is a retry, count how many already-split shards we found
    6140            0 :                 let mut children_found = Vec::new();
    6141            0 :                 let mut old_shard_count = None;
    6142              : 
    6143            0 :                 for (tenant_shard_id, shard) in
    6144            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    6145              :                 {
    6146            0 :                     match shard.shard.count.count().cmp(&split_req.new_shard_count) {
    6147              :                         Ordering::Equal => {
    6148              :                             // Already split this shard
    6149            0 :                             children_found.push(*tenant_shard_id);
    6150            0 :                             continue;
    6151              :                         }
    6152              :                         Ordering::Greater => {
    6153            0 :                             return Err(ApiError::BadRequest(anyhow::anyhow!(
    6154            0 :                                 "Requested count {} but already have shards at count {}",
    6155            0 :                                 split_req.new_shard_count,
    6156            0 :                                 shard.shard.count.count()
    6157            0 :                             )));
    6158              :                         }
    6159            0 :                         Ordering::Less => {
    6160            0 :                             // Fall through: this shard has a lower count than requested,
    6161            0 :                             // so it is a candidate for splitting.
    6162            0 :                         }
    6163              :                     }
    6164              : 
    6165            0 :                     match old_shard_count {
    6166            0 :                         None => old_shard_count = Some(shard.shard.count),
    6167            0 :                         Some(old_shard_count) => {
    6168            0 :                             if old_shard_count != shard.shard.count {
    6169              :                                 // We may hit this case if a caller asked for two splits to
    6170              :                                 // different sizes before the first one is complete,
    6171              :                                 // e.g. 1->2 then 2->4, where the 2->4 call arrives while we have
    6172              :                                 // a mixture of shard_count=1 and shard_count=2 shards in the map.
    6173            0 :                                 return Err(ApiError::Conflict(
    6174            0 :                                     "Cannot split, currently mid-split".to_string(),
    6175            0 :                                 ));
    6176            0 :                             }
    6177              :                         }
    6178              :                     }
    6179            0 :                     if policy.is_none() {
    6180            0 :                         policy = Some(shard.policy.clone());
    6181            0 :                     }
    6182            0 :                     if shard_ident.is_none() {
    6183            0 :                         shard_ident = Some(shard.shard);
    6184            0 :                     }
    6185            0 :                     if config.is_none() {
    6186            0 :                         config = Some(shard.config.clone());
    6187            0 :                     }
    6188            0 :                     if preferred_az_id.is_none() {
    6189            0 :                         preferred_az_id = shard.preferred_az().cloned();
    6190            0 :                     }
    6191              : 
    6192            0 :                     if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
    6193            0 :                         tracing::info!(
    6194            0 :                             "Tenant shard {} already has shard count {}",
    6195              :                             tenant_shard_id,
    6196              :                             split_req.new_shard_count
    6197              :                         );
    6198            0 :                         continue;
    6199            0 :                     }
    6200              : 
    6201            0 :                     let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
    6202            0 :                         anyhow::anyhow!("Cannot split a tenant that is not attached"),
    6203            0 :                     ))?;
    6204              : 
    6205            0 :                     let node = pageservers
    6206            0 :                         .get(&node_id)
    6207            0 :                         .expect("Pageservers may not be deleted while referenced");
    6208              : 
    6209            0 :                     targets.push(ShardSplitTarget {
    6210            0 :                         parent_id: *tenant_shard_id,
    6211            0 :                         node: node.clone(),
    6212            0 :                         child_ids: tenant_shard_id
    6213            0 :                             .split(ShardCount::new(split_req.new_shard_count)),
    6214            0 :                     });
    6215              :                 }
    6216              : 
    6217            0 :                 if targets.is_empty() {
    6218            0 :                     if children_found.len() == split_req.new_shard_count as usize {
    6219            0 :                         return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
    6220            0 :                             new_shards: children_found,
    6221            0 :                         }));
    6222              :                     } else {
    6223              :                         // No shards found to split, and no existing children found: the
    6224              :                         // tenant doesn't exist at all.
    6225            0 :                         return Err(ApiError::NotFound(
    6226            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    6227            0 :                         ));
    6228              :                     }
    6229            0 :                 }
    6230              : 
    6231            0 :                 (old_shard_count, targets)
    6232              :             };
    6233              : 
    6234              :         // unwrap safety: we would have returned above if we didn't find at least one shard to split
    6235            0 :         let old_shard_count = old_shard_count.unwrap();
    6236            0 :         let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
    6237              :             // This ShardIdentity will be used as the template for all children, so this implicitly
    6238              :             // applies the new stripe size to the children.
    6239            0 :             let mut shard_ident = shard_ident.unwrap();
    6240            0 :             if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
    6241            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6242            0 :                     "Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards",
    6243            0 :                     shard_ident.stripe_size
    6244            0 :                 )));
    6245            0 :             }
    6246              : 
    6247            0 :             shard_ident.stripe_size = new_stripe_size;
    6248            0 :             tracing::info!("Applied stripe size {}", shard_ident.stripe_size.0);
    6249            0 :             shard_ident
    6250              :         } else {
    6251            0 :             shard_ident.unwrap()
    6252              :         };
    6253            0 :         let policy = policy.unwrap();
    6254            0 :         let config = config.unwrap();
    6255              : 
    6256            0 :         Ok(ShardSplitAction::Split(Box::new(ShardSplitParams {
    6257            0 :             old_shard_count,
    6258            0 :             new_shard_count: ShardCount::new(split_req.new_shard_count),
    6259            0 :             new_stripe_size: split_req.new_stripe_size,
    6260            0 :             targets,
    6261            0 :             policy,
    6262            0 :             config,
    6263            0 :             shard_ident,
    6264            0 :             preferred_az_id,
    6265            0 :         })))
    6266            0 :     }
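
The validation above classifies each existing shard purely by comparing its shard count with the requested count, which is what makes the operation retryable: children from an earlier partial split are recognised rather than treated as an error. A simplified model of that comparison, using plain u8 counts instead of ShardCount:

    use std::cmp::Ordering;

    enum ShardDisposition {
        AlreadyChild,   // count == requested: a child created by an earlier (partial) split
        SplitCandidate, // count < requested: a parent that still needs to be split
    }

    fn classify(existing_count: u8, requested_count: u8) -> Result<ShardDisposition, String> {
        match existing_count.cmp(&requested_count) {
            Ordering::Equal => Ok(ShardDisposition::AlreadyChild),
            Ordering::Less => Ok(ShardDisposition::SplitCandidate),
            // A shard that is already more finely split than requested means the
            // caller asked to "split" to a smaller count, which is rejected.
            Ordering::Greater => Err(format!(
                "Requested count {requested_count} but already have shards at count {existing_count}"
            )),
        }
    }

    fn main() {
        assert!(matches!(classify(1, 4), Ok(ShardDisposition::SplitCandidate)));
        assert!(matches!(classify(4, 4), Ok(ShardDisposition::AlreadyChild)));
        assert!(classify(8, 4).is_err());
    }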
    6267              : 
    6268            0 :     async fn do_tenant_shard_split(
    6269            0 :         &self,
    6270            0 :         tenant_id: TenantId,
    6271            0 :         params: Box<ShardSplitParams>,
    6272            0 :     ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
    6273              :         // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
    6274              :         // request could occur here, deleting or mutating the tenant.  begin_shard_split checks that the
    6275              :         // parent shards exist as expected, but it would be neater to do the above pre-checks within the
    6276              :         // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
    6277              :         // (https://github.com/neondatabase/neon/issues/6676)
    6278              : 
    6279              :         let ShardSplitParams {
    6280            0 :             old_shard_count,
    6281            0 :             new_shard_count,
    6282            0 :             new_stripe_size,
    6283            0 :             mut targets,
    6284            0 :             policy,
    6285            0 :             config,
    6286            0 :             shard_ident,
    6287            0 :             preferred_az_id,
    6288            0 :         } = *params;
    6289              : 
    6290              :         // Drop any secondary locations: pageservers do not support splitting these, and in any case the
    6291              :         // end-state for a split tenant will usually be to have secondary locations on different nodes.
    6292              :         // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
    6293              :         // at the time of split.
    6294            0 :         let waiters = {
    6295            0 :             let mut locked = self.inner.write().unwrap();
    6296            0 :             let mut waiters = Vec::new();
    6297            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6298            0 :             for target in &mut targets {
    6299            0 :                 let Some(shard) = tenants.get_mut(&target.parent_id) else {
    6300              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    6301            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6302            0 :                         "Shard {} not found",
    6303            0 :                         target.parent_id
    6304            0 :                     )));
    6305              :                 };
    6306              : 
    6307            0 :                 if shard.intent.get_attached() != &Some(target.node.get_id()) {
    6308              :                     // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
    6309            0 :                     return Err(ApiError::Conflict(format!(
    6310            0 :                         "Shard {} unexpectedly rescheduled during split",
    6311            0 :                         target.parent_id
    6312            0 :                     )));
    6313            0 :                 }
    6314              : 
    6315              :                 // Irrespective of PlacementPolicy, clear secondary locations from intent
    6316            0 :                 shard.intent.clear_secondary(scheduler);
    6317              : 
    6318              :                 // Run Reconciler to execute the detach of secondary locations.
    6319            0 :                 if let Some(waiter) =
    6320            0 :                     self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    6321            0 :                 {
    6322            0 :                     waiters.push(waiter);
    6323            0 :                 }
    6324              :             }
    6325            0 :             waiters
    6326              :         };
    6327            0 :         self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
    6328              : 
    6329              :         // Before creating any new child shards in memory or on the pageservers, persist them: this
    6330              :         // enables us to ensure that we will always be able to clean up if something goes wrong.  This also
    6331              :         // acts as the protection against two concurrent attempts to split: one of them will get a database
    6332              :         // error trying to insert the child shards.
    6333            0 :         let mut child_tsps = Vec::new();
    6334            0 :         for target in &targets {
    6335            0 :             let mut this_child_tsps = Vec::new();
    6336            0 :             for child in &target.child_ids {
    6337            0 :                 let mut child_shard = shard_ident;
    6338            0 :                 child_shard.number = child.shard_number;
    6339            0 :                 child_shard.count = child.shard_count;
    6340              : 
    6341            0 :                 tracing::info!(
    6342            0 :                     "Create child shard persistence with stripe size {}",
    6343              :                     shard_ident.stripe_size.0
    6344              :                 );
    6345              : 
    6346            0 :                 this_child_tsps.push(TenantShardPersistence {
    6347            0 :                     tenant_id: child.tenant_id.to_string(),
    6348            0 :                     shard_number: child.shard_number.0 as i32,
    6349            0 :                     shard_count: child.shard_count.literal() as i32,
    6350            0 :                     shard_stripe_size: shard_ident.stripe_size.0 as i32,
    6351              :                     // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
    6352              :                     // populate the correct generation as part of its transaction, to protect us
    6353              :                     // against racing with changes in the state of the parent.
    6354            0 :                     generation: None,
    6355            0 :                     generation_pageserver: Some(target.node.get_id().0 as i64),
    6356            0 :                     placement_policy: serde_json::to_string(&policy).unwrap(),
    6357            0 :                     config: serde_json::to_string(&config).unwrap(),
    6358            0 :                     splitting: SplitState::Splitting,
    6359              : 
    6360              :                     // Scheduling policies reset to default for children; the parent's preferred AZ carries through
    6361            0 :                     scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    6362            0 :                         .unwrap(),
    6363            0 :                     preferred_az_id: preferred_az_id.as_ref().map(|az| az.0.clone()),
    6364              :                 });
    6365              :             }
    6366              : 
    6367            0 :             child_tsps.push((target.parent_id, this_child_tsps));
    6368              :         }
    6369              : 
    6370            0 :         if let Err(e) = self
    6371            0 :             .persistence
    6372            0 :             .begin_shard_split(old_shard_count, tenant_id, child_tsps)
    6373            0 :             .await
    6374              :         {
    6375            0 :             match e {
    6376              :                 DatabaseError::Query(diesel::result::Error::DatabaseError(
    6377              :                     DatabaseErrorKind::UniqueViolation,
    6378              :                     _,
    6379              :                 )) => {
    6380              :                     // Inserting a child shard violated a unique constraint: we raced with another call to
    6381              :                     // this function
    6382            0 :                     tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
    6383            0 :                     return Err(ApiError::Conflict("Tenant is already splitting".into()));
    6384              :                 }
    6385            0 :                 _ => return Err(ApiError::InternalServerError(e.into())),
    6386              :             }
    6387            0 :         }
    6388            0 :         fail::fail_point!("shard-split-post-begin", |_| Err(
    6389            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    6390              :         ));
    6391              : 
    6392              :         // Now that we have persisted the splitting state, apply it in memory.  This is infallible, so
    6393              :         // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
    6394              :         // is not set in memory, then it was not persisted.
    6395              :         {
    6396            0 :             let mut locked = self.inner.write().unwrap();
    6397            0 :             for target in &targets {
    6398            0 :                 if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
    6399            0 :                     parent_shard.splitting = SplitState::Splitting;
    6400            0 :                     // Set the observed state to None, to reflect that it is indeterminate once we start the
    6401            0 :                     // split operation.
    6402            0 :                     parent_shard
    6403            0 :                         .observed
    6404            0 :                         .locations
    6405            0 :                         .insert(target.node.get_id(), ObservedStateLocation { conf: None });
    6406            0 :                 }
    6407              :             }
    6408              :         }
    6409              : 
    6410              :         // TODO: issue split calls concurrently (this only matters once we're splitting
    6411              :         // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
    6412              : 
    6413              :         // HADRON: set a timeout for splitting individual shards on pageservers.
    6414              :         // Currently we do not retry, because it's not clear whether the pageserver can handle
    6415              :         // partially split shards correctly.
    6416            0 :         let shard_split_timeout =
    6417            0 :             if let Some(env::DeploymentMode::Local) = env::get_deployment_mode() {
    6418            0 :                 Duration::from_secs(30)
    6419              :             } else {
    6420            0 :                 self.config.shard_split_request_timeout
    6421              :             };
    6422            0 :         let mut http_client_builder = reqwest::ClientBuilder::new()
    6423            0 :             .pool_max_idle_per_host(0)
    6424            0 :             .timeout(shard_split_timeout);
    6425              : 
    6426            0 :         for ssl_ca_cert in &self.config.ssl_ca_certs {
    6427            0 :             http_client_builder = http_client_builder.add_root_certificate(ssl_ca_cert.clone());
    6428            0 :         }
    6429            0 :         let http_client = http_client_builder
    6430            0 :             .build()
    6431            0 :             .expect("Failed to construct HTTP client");
    6432            0 :         for target in &targets {
    6433              :             let ShardSplitTarget {
    6434            0 :                 parent_id,
    6435            0 :                 node,
    6436            0 :                 child_ids,
    6437            0 :             } = target;
    6438              : 
    6439            0 :             let client = PageserverClient::new(
    6440            0 :                 node.get_id(),
    6441            0 :                 http_client.clone(),
    6442            0 :                 node.base_url(),
    6443            0 :                 self.config.pageserver_jwt_token.as_deref(),
    6444              :             );
    6445              : 
    6446            0 :             let response = client
    6447            0 :                 .tenant_shard_split(
    6448            0 :                     *parent_id,
    6449            0 :                     TenantShardSplitRequest {
    6450            0 :                         new_shard_count: new_shard_count.literal(),
    6451            0 :                         new_stripe_size,
    6452            0 :                     },
    6453            0 :                 )
    6454            0 :                 .await
    6455            0 :                 .map_err(|e| ApiError::Conflict(format!("Failed to split {parent_id}: {e}")))?;
    6456              : 
    6457            0 :             fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
    6458            0 :                 "failpoint".to_string()
    6459            0 :             )));
    6460              : 
    6461            0 :             failpoint_support::sleep_millis_async!(
    6462              :                 "shard-split-post-remote-sleep",
    6463            0 :                 &self.reconcilers_cancel
    6464              :             );
    6465              : 
    6466            0 :             tracing::info!(
    6467            0 :                 "Split {} into {}",
    6468              :                 parent_id,
    6469            0 :                 response
    6470            0 :                     .new_shards
    6471            0 :                     .iter()
    6472            0 :                     .map(|s| format!("{s:?}"))
    6473            0 :                     .collect::<Vec<_>>()
    6474            0 :                     .join(",")
    6475              :             );
    6476              : 
    6477            0 :             if &response.new_shards != child_ids {
    6478              :                 // This should never happen: the pageserver should agree with us on how shard splits work.
    6479            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    6480            0 :                     "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
    6481            0 :                     parent_id,
    6482            0 :                     response.new_shards,
    6483            0 :                     child_ids
    6484            0 :                 )));
    6485            0 :             }
    6486              :         }
    6487              : 
    6488            0 :         fail::fail_point!("shard-split-pre-complete", |_| Err(ApiError::Conflict(
    6489            0 :             "failpoint".to_string()
    6490            0 :         )));
    6491              : 
    6492            0 :         pausable_failpoint!("shard-split-pre-complete-pause");
    6493              : 
    6494              :         // TODO: if the pageserver restarted concurrently with our split API call,
    6495              :         // the actual generation of the child shard might differ from the generation
    6496              :         // we expect it to have.  In order for our in-database generation to end up
    6497              :         // correct, we should carry the child generation back in the response and apply it here
    6498              :         // in complete_shard_split (and apply the correct generation in memory)
    6499              :         // (or, we can carry generation in the request and reject the request if
    6500              :         //  it doesn't match, but that requires more retry logic on this side)
    6501              : 
    6502            0 :         self.persistence
    6503            0 :             .complete_shard_split(tenant_id, old_shard_count, new_shard_count)
    6504            0 :             .await?;
    6505              : 
    6506            0 :         fail::fail_point!("shard-split-post-complete", |_| Err(
    6507            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    6508              :         ));
    6509              : 
    6510              :         // Replace all the shards we just split with their children: this phase is infallible.
    6511            0 :         let (response, child_locations, waiters) =
    6512            0 :             self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
    6513              : 
    6514              :         // Notify all pageservers to detach and clean up the old shards because they will no longer
    6515              :         // be needed. This is best-effort: if it fails, it will be cleaned up on a subsequent
    6516              :         // pageserver re-attach/startup.
    6517            0 :         let shards_to_cleanup = targets
    6518            0 :             .iter()
    6519            0 :             .map(|target| (target.parent_id, target.node.get_id()))
    6520            0 :             .collect();
    6521            0 :         self.cleanup_locations(shards_to_cleanup).await;
    6522              : 
    6523              :         // Send compute notifications for all the new shards
    6524            0 :         let mut failed_notifications = Vec::new();
    6525            0 :         for (child_id, child_ps, stripe_size) in child_locations {
    6526            0 :             if let Err(e) = self
    6527            0 :                 .compute_hook
    6528            0 :                 .notify_attach(
    6529            0 :                     compute_hook::ShardUpdate {
    6530            0 :                         tenant_shard_id: child_id,
    6531            0 :                         node_id: child_ps,
    6532            0 :                         stripe_size,
    6533            0 :                         preferred_az: preferred_az_id.as_ref().map(Cow::Borrowed),
    6534            0 :                     },
    6535            0 :                     &self.reconcilers_cancel,
    6536            0 :                 )
    6537            0 :                 .await
    6538              :             {
    6539            0 :                 tracing::warn!(
    6540            0 :                     "Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
    6541              :                     child_id,
    6542              :                     child_ps
    6543              :                 );
    6544            0 :                 failed_notifications.push(child_id);
    6545            0 :             }
    6546              :         }
    6547              : 
    6548              :         // If we failed any compute notifications, make a note to retry later.
    6549            0 :         if !failed_notifications.is_empty() {
    6550            0 :             let mut locked = self.inner.write().unwrap();
    6551            0 :             for failed in failed_notifications {
    6552            0 :                 if let Some(shard) = locked.tenants.get_mut(&failed) {
    6553            0 :                     shard.pending_compute_notification = true;
    6554            0 :                 }
    6555              :             }
    6556            0 :         }
    6557              : 
    6558            0 :         Ok((response, waiters))
    6559            0 :     }
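
The overall ordering in do_tenant_shard_split is what makes a crash at any point recoverable: child rows are persisted in the Splitting state before any pageserver is contacted, and only after every remote split succeeds is the split marked complete. A schematic of that ordering (not the real Persistence API), with stand-in functions for the database and pageserver steps:

    // Schematic only: stand-ins for the database and pageserver steps.
    fn begin_split(children: &[u64]) -> Result<(), String> {
        // Persist child shards with splitting=true; a unique-constraint violation
        // here is how a concurrent split attempt is detected and rejected.
        println!("persisted children {children:?} as splitting");
        Ok(())
    }

    fn remote_split(parent: u64) -> Result<(), String> {
        println!("asked pageserver to split parent shard {parent}");
        Ok(())
    }

    fn complete_split(children: &[u64]) -> Result<(), String> {
        println!("marked children {children:?} as split-complete");
        Ok(())
    }

    fn split(parents: &[u64], children: &[u64]) -> Result<(), String> {
        begin_split(children)?;        // 1. persist intent first
        for &parent in parents {
            remote_split(parent)?;     // 2. then do remote I/O; failures go to the abort path
        }
        complete_split(children)       // 3. finally make the split irrevocable
    }

    fn main() {
        split(&[0], &[0, 1]).expect("split sketch failed");
    }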
    6560              : 
    6561              :     /// A graceful migration: update the preferred node and let optimisation handle the migration
    6562              :     /// in the background (may take a long time as it will fully warm up a location before cutting over)
    6563              :     ///
    6564              :     /// Our external API calls this a 'prewarm=true' migration, but internally it isn't a special prewarm step: it's
    6565              :     /// just a migration that uses the same graceful procedure as our background scheduling optimisations would use.
    6566            0 :     fn tenant_shard_migrate_with_prewarm(
    6567            0 :         &self,
    6568            0 :         migrate_req: &TenantShardMigrateRequest,
    6569            0 :         shard: &mut TenantShard,
    6570            0 :         scheduler: &mut Scheduler,
    6571            0 :         schedule_context: ScheduleContext,
    6572            0 :     ) -> Result<Option<ScheduleOptimization>, ApiError> {
    6573            0 :         shard.set_preferred_node(Some(migrate_req.node_id));
    6574              : 
    6575              :         // Generate whatever the initial change to the intent is: this could be creation of a secondary, or
    6576              :         // cutting over to an existing secondary.  Caller is responsible for validating this before applying it,
    6577              :         // e.g. by checking that the secondary is warm enough.
    6578            0 :         Ok(shard.optimize_attachment(scheduler, &schedule_context))
    6579            0 :     }
    6580              : 
    6581              :     /// Immediate migration: directly update the intent state and kick off a reconciler
    6582            0 :     fn tenant_shard_migrate_immediate(
    6583            0 :         &self,
    6584            0 :         migrate_req: &TenantShardMigrateRequest,
    6585            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    6586            0 :         shard: &mut TenantShard,
    6587            0 :         scheduler: &mut Scheduler,
    6588            0 :     ) -> Result<Option<ReconcilerWaiter>, ApiError> {
    6589              :         // Non-graceful migration: update the intent state immediately
    6590            0 :         let old_attached = *shard.intent.get_attached();
    6591            0 :         match shard.policy {
    6592            0 :             PlacementPolicy::Attached(n) => {
    6593              :                 // If our new attached node was a secondary, it should no longer be one.
    6594            0 :                 shard
    6595            0 :                     .intent
    6596            0 :                     .remove_secondary(scheduler, migrate_req.node_id);
    6597              : 
    6598            0 :                 shard
    6599            0 :                     .intent
    6600            0 :                     .set_attached(scheduler, Some(migrate_req.node_id));
    6601              : 
    6602              :                 // If we were already attached to something, demote that to a secondary
    6603            0 :                 if let Some(old_attached) = old_attached {
    6604            0 :                     if n > 0 {
    6605              :                         // Remove other secondaries to make room for the location we'll demote
    6606            0 :                         while shard.intent.get_secondary().len() >= n {
    6607            0 :                             shard.intent.pop_secondary(scheduler);
    6608            0 :                         }
    6609              : 
    6610            0 :                         shard.intent.push_secondary(scheduler, old_attached);
    6611            0 :                     }
    6612            0 :                 }
    6613              :             }
    6614            0 :             PlacementPolicy::Secondary => {
    6615            0 :                 shard.intent.clear(scheduler);
    6616            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    6617            0 :             }
    6618              :             PlacementPolicy::Detached => {
    6619            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6620            0 :                     "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
    6621            0 :                 )));
    6622              :             }
    6623              :         }
    6624              : 
    6625            0 :         tracing::info!("Migrating: new intent {:?}", shard.intent);
    6626            0 :         shard.sequence = shard.sequence.next();
    6627            0 :         shard.set_preferred_node(None); // Abort any in-flight graceful migration
    6628            0 :         Ok(self.maybe_configured_reconcile_shard(
    6629            0 :             shard,
    6630            0 :             nodes,
    6631            0 :             (&migrate_req.migration_config).into(),
    6632            0 :         ))
    6633            0 :     }
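
tenant_shard_migrate_immediate rewrites the intent in place: the destination stops being a secondary, becomes the attached location, and the previously attached node is demoted to a secondary once there is room for it. A simplified model of that bookkeeping with plain node IDs:

    fn migrate_immediate(
        attached: &mut Option<u64>,
        secondaries: &mut Vec<u64>,
        new_node: u64,
        max_secondaries: usize,
    ) {
        // If the new attached node was a secondary, it should no longer be one.
        secondaries.retain(|n| *n != new_node);

        let old_attached = attached.replace(new_node);

        // Demote the old attached location to a secondary, evicting older
        // secondaries so at most max_secondaries remain.
        if let Some(old) = old_attached {
            if max_secondaries > 0 {
                while secondaries.len() >= max_secondaries {
                    secondaries.pop();
                }
                secondaries.push(old);
            }
        }
    }

    fn main() {
        let mut attached = Some(1);
        let mut secondaries = vec![2];
        migrate_immediate(&mut attached, &mut secondaries, 2, 1);
        assert_eq!(attached, Some(2));
        assert_eq!(secondaries, vec![1]); // the old attached node is now the secondary
    }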
    6634              : 
    6635            0 :     pub(crate) async fn tenant_shard_migrate(
    6636            0 :         &self,
    6637            0 :         tenant_shard_id: TenantShardId,
    6638            0 :         migrate_req: TenantShardMigrateRequest,
    6639            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    6640              :         // Depending on whether the migration actually changes the attachment, and whether it's graceful
    6641              :         // or immediate, we may have a different kind of outcome to handle.
    6642              :         enum MigrationOutcome {
    6643              :             Optimization(Option<ScheduleOptimization>),
    6644              :             Reconcile(Option<ReconcilerWaiter>),
    6645              :         }
    6646              : 
    6647            0 :         let outcome = {
    6648            0 :             let mut locked = self.inner.write().unwrap();
    6649            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6650              : 
    6651            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    6652            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6653            0 :                     "Node {} not found",
    6654            0 :                     migrate_req.node_id
    6655            0 :                 )));
    6656              :             };
    6657              : 
    6658              :             // Migration to an unavailable node requires the force flag
    6659            0 :             if !node.is_available() {
    6660            0 :                 if migrate_req.migration_config.override_scheduler {
    6661              :                     // Warn but proceed: the caller may intend to manually adjust the placement of
    6662              :                     // a shard even if the node is down, e.g. if intervening during an incident.
    6663            0 :                     tracing::warn!("Forcibly migrating to unavailable node {node}");
    6664              :                 } else {
    6665            0 :                     tracing::warn!("Node {node} is unavailable, refusing migration");
    6666            0 :                     return Err(ApiError::PreconditionFailed(
    6667            0 :                         format!("Node {node} is unavailable").into_boxed_str(),
    6668            0 :                     ));
    6669              :                 }
    6670            0 :             }
    6671              : 
    6672              :             // Calculate the ScheduleContext for this tenant
    6673            0 :             let mut schedule_context = ScheduleContext::default();
    6674            0 :             for (_shard_id, shard) in
    6675            0 :                 tenants.range(TenantShardId::tenant_range(tenant_shard_id.tenant_id))
    6676            0 :             {
    6677            0 :                 schedule_context.avoid(&shard.intent.all_pageservers());
    6678            0 :             }
    6679              : 
    6680              :             // Look up the specific shard we will migrate
    6681            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6682            0 :                 return Err(ApiError::NotFound(
    6683            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6684            0 :                 ));
    6685              :             };
    6686              : 
    6687              :             // Migration to a node with unfavorable scheduling score requires a force flag, because it might just
    6688              :             // be migrated back by the optimiser.
    6689            0 :             if let Some(better_node) = shard.find_better_location::<AttachedShardTag>(
    6690            0 :                 scheduler,
    6691            0 :                 &schedule_context,
    6692            0 :                 migrate_req.node_id,
    6693            0 :                 &[],
    6694            0 :             ) {
    6695            0 :                 if !migrate_req.migration_config.override_scheduler {
    6696            0 :                     return Err(ApiError::PreconditionFailed(
    6697            0 :                         "Migration to a worse-scoring node".into(),
    6698            0 :                     ));
    6699              :                 } else {
    6700            0 :                     tracing::info!(
    6701            0 :                         "Migrating to a worse-scoring node {} (optimiser would prefer {better_node})",
    6702              :                         migrate_req.node_id
    6703              :                     );
    6704              :                 }
    6705            0 :             }
    6706              : 
    6707            0 :             if let Some(origin_node_id) = migrate_req.origin_node_id {
    6708            0 :                 if shard.intent.get_attached() != &Some(origin_node_id) {
    6709            0 :                     return Err(ApiError::PreconditionFailed(
    6710            0 :                         format!(
    6711            0 :                             "Migration expected to originate from {} but shard is on {:?}",
    6712            0 :                             origin_node_id,
    6713            0 :                             shard.intent.get_attached()
    6714            0 :                         )
    6715            0 :                         .into(),
    6716            0 :                     ));
    6717            0 :                 }
    6718            0 :             }
    6719              : 
    6720            0 :             if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    6721              :                 // No-op case: we will still proceed to wait for reconciliation in case it is
    6722              :                 // incomplete from an earlier update to the intent.
    6723            0 :                 tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
    6724              : 
    6725              :                 // An instruction to migrate to the currently attached node should
    6726              :                 // cancel any pending graceful migration
    6727            0 :                 shard.set_preferred_node(None);
    6728              : 
    6729            0 :                 MigrationOutcome::Reconcile(self.maybe_configured_reconcile_shard(
    6730            0 :                     shard,
    6731            0 :                     nodes,
    6732            0 :                     (&migrate_req.migration_config).into(),
    6733            0 :                 ))
    6734            0 :             } else if migrate_req.migration_config.prewarm {
    6735            0 :                 MigrationOutcome::Optimization(self.tenant_shard_migrate_with_prewarm(
    6736            0 :                     &migrate_req,
    6737            0 :                     shard,
    6738            0 :                     scheduler,
    6739            0 :                     schedule_context,
    6740            0 :                 )?)
    6741              :             } else {
    6742            0 :                 MigrationOutcome::Reconcile(self.tenant_shard_migrate_immediate(
    6743            0 :                     &migrate_req,
    6744            0 :                     nodes,
    6745            0 :                     shard,
    6746            0 :                     scheduler,
    6747            0 :                 )?)
    6748              :             }
    6749              :         };
    6750              : 
    6751              :         // We may need to validate + apply an optimisation, or we may need to just retrieve a reconcile waiter
    6752            0 :         let waiter = match outcome {
    6753            0 :             MigrationOutcome::Optimization(Some(optimization)) => {
    6754              :                 // Validate and apply the optimization -- this would happen anyway in the background reconcile loop, but
    6755              :                 // we might as well do it more promptly as this is a direct external request.
    6756            0 :                 let mut validated = self
    6757            0 :                     .optimize_all_validate(vec![(tenant_shard_id, optimization)])
    6758            0 :                     .await;
    6759            0 :                 if let Some((_shard_id, optimization)) = validated.pop() {
    6760            0 :                     let mut locked = self.inner.write().unwrap();
    6761            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    6762            0 :                     let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6763              :                         // Rare but possible: tenant is removed between generating optimisation and validating it.
    6764            0 :                         return Err(ApiError::NotFound(
    6765            0 :                             anyhow::anyhow!("Tenant shard not found").into(),
    6766            0 :                         ));
    6767              :                     };
    6768              : 
    6769            0 :                     if !shard.apply_optimization(scheduler, optimization) {
    6770              :                         // This can happen but is unusual enough to warn on: something else changed in the shard that made the optimisation stale
    6771              :                         // and therefore not applied.
    6772            0 :                         tracing::warn!(
    6773            0 :                             "Schedule optimisation generated during graceful migration was not applied, shard changed?"
    6774              :                         );
    6775            0 :                     }
    6776            0 :                     self.maybe_configured_reconcile_shard(
    6777            0 :                         shard,
    6778            0 :                         nodes,
    6779            0 :                         (&migrate_req.migration_config).into(),
    6780              :                     )
    6781              :                 } else {
    6782            0 :                     None
    6783              :                 }
    6784              :             }
    6785            0 :             MigrationOutcome::Optimization(None) => None,
    6786            0 :             MigrationOutcome::Reconcile(waiter) => waiter,
    6787              :         };
    6788              : 
    6789              :         // Finally, wait for any reconcile we started to complete.  In the case of immediate-mode migrations to cold
    6790              :         // locations, this has a good chance of timing out.
    6791            0 :         if let Some(waiter) = waiter {
    6792            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    6793              :         } else {
    6794            0 :             tracing::info!("Migration is a no-op");
    6795              :         }
    6796              : 
    6797            0 :         Ok(TenantShardMigrateResponse {})
    6798            0 :     }
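
tenant_shard_migrate only refuses an unattractive destination when the caller has not asked to override the scheduler: an unavailable node and a worse-scoring node are both hard errors by default, and warn-and-proceed when override_scheduler is set. A condensed model of those preconditions, with a simplified stand-in for the request's migration config:

    struct MigrationConfig {
        override_scheduler: bool, // the "force" flag in the request's migration config
    }

    fn check_destination(
        dest_available: bool,
        scheduler_prefers_elsewhere: bool,
        config: &MigrationConfig,
    ) -> Result<(), String> {
        if !dest_available && !config.override_scheduler {
            return Err("destination node is unavailable".to_string());
        }
        if scheduler_prefers_elsewhere && !config.override_scheduler {
            return Err("migration to a worse-scoring node".to_string());
        }
        // Forced migrations fall through: the caller accepts that the optimiser
        // may later move the shard back, or that the node is currently down.
        Ok(())
    }

    fn main() {
        let forced = MigrationConfig { override_scheduler: true };
        let normal = MigrationConfig { override_scheduler: false };
        assert!(check_destination(false, false, &normal).is_err());
        assert!(check_destination(false, false, &forced).is_ok());
        assert!(check_destination(true, true, &normal).is_err());
    }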
    6799              : 
    6800            0 :     pub(crate) async fn tenant_shard_migrate_secondary(
    6801            0 :         &self,
    6802            0 :         tenant_shard_id: TenantShardId,
    6803            0 :         migrate_req: TenantShardMigrateRequest,
    6804            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    6805            0 :         let waiter = {
    6806            0 :             let mut locked = self.inner.write().unwrap();
    6807            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    6808              : 
    6809            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    6810            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    6811            0 :                     "Node {} not found",
    6812            0 :                     migrate_req.node_id
    6813            0 :                 )));
    6814              :             };
    6815              : 
    6816            0 :             if !node.is_available() {
    6817              :                 // Warn but proceed: the caller may intend to manually adjust the placement of
    6818              :                 // a shard even if the node is down, e.g. if intervening during an incident.
    6819            0 :                 tracing::warn!("Migrating to unavailable node {node}");
    6820            0 :             }
    6821              : 
    6822            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    6823            0 :                 return Err(ApiError::NotFound(
    6824            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6825            0 :                 ));
    6826              :             };
    6827              : 
    6828            0 :             if shard.intent.get_secondary().len() == 1
    6829            0 :                 && shard.intent.get_secondary()[0] == migrate_req.node_id
    6830              :             {
    6831            0 :                 tracing::info!(
    6832            0 :                     "Migrating secondary to {node}: intent is unchanged {:?}",
    6833              :                     shard.intent
    6834              :                 );
    6835            0 :             } else if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    6836            0 :                 tracing::info!(
    6837            0 :                     "Migrating secondary to {node}: already attached where we were asked to create a secondary"
    6838              :                 );
    6839              :             } else {
    6840            0 :                 let old_secondaries = shard.intent.get_secondary().clone();
    6841            0 :                 for secondary in old_secondaries {
    6842            0 :                     shard.intent.remove_secondary(scheduler, secondary);
    6843            0 :                 }
    6844              : 
    6845            0 :                 shard.intent.push_secondary(scheduler, migrate_req.node_id);
    6846            0 :                 shard.sequence = shard.sequence.next();
    6847            0 :                 tracing::info!(
    6848            0 :                     "Migrating secondary to {node}: new intent {:?}",
    6849              :                     shard.intent
    6850              :                 );
    6851              :             }
    6852              : 
    6853            0 :             self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::High)
    6854              :         };
    6855              : 
    6856            0 :         if let Some(waiter) = waiter {
    6857            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    6858              :         } else {
    6859            0 :             tracing::info!("Migration is a no-op");
    6860              :         }
    6861              : 
    6862            0 :         Ok(TenantShardMigrateResponse {})
    6863            0 :     }
    6864              : 
    6865              :     /// 'cancel' in this context means cancel any ongoing reconcile
    6866            0 :     pub(crate) async fn tenant_shard_cancel_reconcile(
    6867            0 :         &self,
    6868            0 :         tenant_shard_id: TenantShardId,
    6869            0 :     ) -> Result<(), ApiError> {
    6870              :         // Take the state lock and fire the cancellation token, after which we drop the lock and wait for any ongoing reconcile to complete
    6871            0 :         let waiter = {
    6872            0 :             let locked = self.inner.write().unwrap();
    6873            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    6874            0 :                 return Err(ApiError::NotFound(
    6875            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    6876            0 :                 ));
    6877              :             };
    6878              : 
    6879            0 :             let waiter = shard.get_waiter();
    6880            0 :             match waiter {
    6881              :                 None => {
    6882            0 :                     tracing::info!("Shard does not have an ongoing Reconciler");
    6883            0 :                     return Ok(());
    6884              :                 }
    6885            0 :                 Some(waiter) => {
    6886            0 :                     tracing::info!("Cancelling Reconciler");
    6887            0 :                     shard.cancel_reconciler();
    6888            0 :                     waiter
    6889              :                 }
    6890              :             }
    6891              :         };
    6892              : 
    6893              :         // Cancellation should be prompt.  If this fails we have still done our job of firing the
    6894              :         // cancellation token, but by returning an ApiError we will indicate to the caller that
    6895              :         // the Reconciler is misbehaving and not respecting the cancellation token
    6896            0 :         self.await_waiters(vec![waiter], SHORT_RECONCILE_TIMEOUT)
    6897            0 :             .await?;
    6898              : 
    6899            0 :         Ok(())
    6900            0 :     }
    6901              : 
    6902              :     /// This is for debug/support only: we simply drop all state for a tenant, without
    6903              :     /// detaching or deleting it on pageservers.
    6904            0 :     pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
    6905            0 :         self.persistence.delete_tenant(tenant_id).await?;
    6906              : 
    6907            0 :         let mut locked = self.inner.write().unwrap();
    6908            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    6909            0 :         let mut shards = Vec::new();
    6910            0 :         for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
    6911            0 :             shards.push(*tenant_shard_id);
    6912            0 :         }
    6913              : 
    6914            0 :         for shard_id in shards {
    6915            0 :             if let Some(mut shard) = tenants.remove(&shard_id) {
    6916            0 :                 shard.intent.clear(scheduler);
    6917            0 :             }
    6918              :         }
    6919              : 
    6920            0 :         Ok(())
    6921            0 :     }
    6922              : 
    6923              :     /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
    6924              :     /// tenant with a very high generation number so that it will see the existing data.
    6925              :     /// It does not create timelines on safekeepers, because they might already exist on some
    6926              :     /// safekeeper set. So, the timelines are not storcon-managed after the import.
    6927            0 :     pub(crate) async fn tenant_import(
    6928            0 :         &self,
    6929            0 :         tenant_id: TenantId,
    6930            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    6931              :         // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
    6932            0 :         let maybe_node = {
    6933            0 :             self.inner
    6934            0 :                 .read()
    6935            0 :                 .unwrap()
    6936            0 :                 .nodes
    6937            0 :                 .values()
    6938            0 :                 .find(|n| n.is_available())
    6939            0 :                 .cloned()
    6940              :         };
    6941            0 :         let Some(node) = maybe_node else {
    6942            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
    6943              :         };
    6944              : 
    6945            0 :         let client = PageserverClient::new(
    6946            0 :             node.get_id(),
    6947            0 :             self.http_client.clone(),
    6948            0 :             node.base_url(),
    6949            0 :             self.config.pageserver_jwt_token.as_deref(),
    6950              :         );
    6951              : 
    6952            0 :         let scan_result = client
    6953            0 :             .tenant_scan_remote_storage(tenant_id)
    6954            0 :             .await
    6955            0 :             .map_err(|e| passthrough_api_error(&node, e))?;
    6956              : 
    6957              :         // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
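                      :         // (Illustrative example: a tenant split from 2 to 4 shards may still have its old
                      :         //  2-shard data in remote storage alongside the 4-shard data; importing at the highest
                      :         //  count attaches the post-split layout rather than the stale pre-split one.)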
    6958            0 :         let Some(shard_count) = scan_result
    6959            0 :             .shards
    6960            0 :             .iter()
    6961            0 :             .map(|s| s.tenant_shard_id.shard_count)
    6962            0 :             .max()
    6963              :         else {
    6964            0 :             return Err(ApiError::NotFound(
    6965            0 :                 anyhow::anyhow!("No shards found").into(),
    6966            0 :             ));
    6967              :         };
    6968              : 
    6969              :         // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
    6970              :         // to use the highest generation observed across all shards.
    6971            0 :         let generation = scan_result
    6972            0 :             .shards
    6973            0 :             .iter()
    6974            0 :             .map(|s| s.generation)
    6975            0 :             .max()
    6976            0 :             .expect("We already validated >0 shards");
    6977              : 
    6978              :         // Find the tenant's stripe size. This wasn't always persisted in the tenant manifest, so
    6979              :         // fall back to the original default stripe size of 32768 (256 MB) if it's not specified.
    6980              :         const ORIGINAL_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(32768);
    6981            0 :         let stripe_size = scan_result
    6982            0 :             .shards
    6983            0 :             .iter()
    6984            0 :             .find(|s| s.tenant_shard_id.shard_count == shard_count && s.generation == generation)
    6985            0 :             .expect("we validated >0 shards above")
    6986              :             .stripe_size
    6987            0 :             .unwrap_or_else(|| {
    6988            0 :                 if shard_count.count() > 1 {
    6989            0 :                     warn!("unknown stripe size, assuming {ORIGINAL_STRIPE_SIZE}");
    6990            0 :                 }
    6991            0 :                 ORIGINAL_STRIPE_SIZE
    6992            0 :             });
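                      :         // (32768 is in units of 8 KiB pages, so the legacy default stripe is
                      :         //  32768 * 8 KiB = 256 MiB, matching the comment above.)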
    6993              : 
    6994            0 :         let (response, waiters) = self
    6995            0 :             .do_tenant_create(TenantCreateRequest {
    6996            0 :                 new_tenant_id: TenantShardId::unsharded(tenant_id),
    6997            0 :                 generation,
    6998            0 : 
    6999            0 :                 shard_parameters: ShardParameters {
    7000            0 :                     count: shard_count,
    7001            0 :                     stripe_size,
    7002            0 :                 },
    7003            0 :                 placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
    7004            0 :                 config: TenantConfig::default(),
    7005            0 :             })
    7006            0 :             .await?;
    7007              : 
    7008            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    7009              :             // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
    7010              :             // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
    7011              :             // reconcile, as reconciliation includes notifying compute.
    7012            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
    7013            0 :         }
    7014              : 
    7015            0 :         Ok(response)
    7016            0 :     }
    7017              : 
    7018              :     /// For debug/support: a full JSON dump of TenantShards.  Returns a response so that
    7019              :     /// we don't have to make TenantShard clonable in the return path.
    7020            0 :     pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    7021            0 :         let serialized = {
    7022            0 :             let locked = self.inner.read().unwrap();
    7023            0 :             let result = locked.tenants.values().collect::<Vec<_>>();
    7024            0 :             serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
    7025              :         };
    7026              : 
    7027            0 :         hyper::Response::builder()
    7028            0 :             .status(hyper::StatusCode::OK)
    7029            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    7030            0 :             .body(hyper::Body::from(serialized))
    7031            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    7032            0 :     }
    7033              : 
    7034              :     /// Check the consistency of in-memory state vs. persistent state, and check that the
    7035              :     /// scheduler's statistics are up to date.
    7036              :     ///
    7037              :     /// These consistency checks expect an **idle** system.  If changes are going on while
    7038              :     /// we run, then we can falsely indicate a consistency issue.  This is sufficient for end-of-test
    7039              :     /// checks, but not suitable for running continuously in the background in the field.
    7040            0 :     pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
    7041            0 :         let (mut expect_nodes, mut expect_shards) = {
    7042            0 :             let locked = self.inner.read().unwrap();
    7043              : 
    7044            0 :             locked
    7045            0 :                 .scheduler
    7046            0 :                 .consistency_check(locked.nodes.values(), locked.tenants.values())
    7047            0 :                 .context("Scheduler checks")
    7048            0 :                 .map_err(ApiError::InternalServerError)?;
    7049              : 
    7050            0 :             let expect_nodes = locked
    7051            0 :                 .nodes
    7052            0 :                 .values()
    7053            0 :                 .map(|n| n.to_persistent())
    7054            0 :                 .collect::<Vec<_>>();
    7055              : 
    7056            0 :             let expect_shards = locked
    7057            0 :                 .tenants
    7058            0 :                 .values()
    7059            0 :                 .map(|t| t.to_persistent())
    7060            0 :                 .collect::<Vec<_>>();
    7061              : 
    7062              :             // This method can only validate the state of an idle system: if a reconcile is in
    7063              :             // progress, fail out early to avoid giving false errors on state that won't match
    7064              :             // between database and memory until a ReconcileResult is processed.
    7065            0 :             for t in locked.tenants.values() {
    7066            0 :                 if t.reconciler.is_some() {
    7067            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7068            0 :                         "Shard {} reconciliation in progress",
    7069            0 :                         t.tenant_shard_id
    7070            0 :                     )));
    7071            0 :                 }
    7072              :             }
    7073              : 
    7074            0 :             (expect_nodes, expect_shards)
    7075              :         };
    7076              : 
    7077            0 :         let mut nodes = self.persistence.list_nodes().await?;
    7078            0 :         expect_nodes.sort_by_key(|n| n.node_id);
    7079            0 :         nodes.sort_by_key(|n| n.node_id);
    7080              : 
    7081              :         // Errors relating to nodes are deferred so that we don't skip the shard checks below if we have a node error
    7082            0 :         let node_result = if nodes != expect_nodes {
    7083            0 :             tracing::error!("Consistency check failed on nodes.");
    7084            0 :             tracing::error!(
    7085            0 :                 "Nodes in memory: {}",
    7086            0 :                 serde_json::to_string(&expect_nodes)
    7087            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7088              :             );
    7089            0 :             tracing::error!(
    7090            0 :                 "Nodes in database: {}",
    7091            0 :                 serde_json::to_string(&nodes)
    7092            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7093              :             );
    7094            0 :             Err(ApiError::InternalServerError(anyhow::anyhow!(
    7095            0 :                 "Node consistency failure"
    7096            0 :             )))
    7097              :         } else {
    7098            0 :             Ok(())
    7099              :         };
    7100              : 
    7101            0 :         let mut persistent_shards = self.persistence.load_active_tenant_shards().await?;
    7102            0 :         persistent_shards
    7103            0 :             .sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    7104              : 
    7105            0 :         expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    7106              : 
    7107              :         // Because JSON contents of persistent tenants might disagree with the fields in current `TenantConfig`
    7108              :         // definition, we will do an encode/decode cycle to ensure any legacy fields are dropped and any new
    7109              :         // fields are added, before doing a comparison.
    7110            0 :         for tsp in &mut persistent_shards {
    7111            0 :             let config: TenantConfig = serde_json::from_str(&tsp.config)
    7112            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?;
    7113            0 :             tsp.config = serde_json::to_string(&config).expect("Encoding config is infallible");
    7114              :         }
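                      :         // (For example, a legacy field still present in the stored JSON but no longer part of
                      :         //  `TenantConfig` is dropped by the decode/encode cycle above, so it cannot produce a
                      :         //  spurious mismatch against the freshly-encoded in-memory config.)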
    7115              : 
    7116            0 :         if persistent_shards != expect_shards {
    7117            0 :             tracing::error!("Consistency check failed on shards.");
    7118              : 
    7119            0 :             tracing::error!(
    7120            0 :                 "Shards in memory: {}",
    7121            0 :                 serde_json::to_string(&expect_shards)
    7122            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7123              :             );
    7124            0 :             tracing::error!(
    7125            0 :                 "Shards in database: {}",
    7126            0 :                 serde_json::to_string(&persistent_shards)
    7127            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    7128              :             );
    7129              : 
    7130              :             // The full dump log lines above are useful in testing, but in the field Grafana will
    7131              :             // usually just drop them because they're so large. So we also do some explicit logging
    7132              :             // of just the diffs.
    7133            0 :             let persistent_shards = persistent_shards
    7134            0 :                 .into_iter()
    7135            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    7136            0 :                 .collect::<HashMap<_, _>>();
    7137            0 :             let expect_shards = expect_shards
    7138            0 :                 .into_iter()
    7139            0 :                 .map(|tsp| (tsp.get_tenant_shard_id().unwrap(), tsp))
    7140            0 :                 .collect::<HashMap<_, _>>();
    7141            0 :             for (tenant_shard_id, persistent_tsp) in &persistent_shards {
    7142            0 :                 match expect_shards.get(tenant_shard_id) {
    7143              :                     None => {
    7144            0 :                         tracing::error!(
    7145            0 :                             "Shard {} found in database but not in memory",
    7146              :                             tenant_shard_id
    7147              :                         );
    7148              :                     }
    7149            0 :                     Some(expect_tsp) => {
    7150            0 :                         if expect_tsp != persistent_tsp {
    7151            0 :                             tracing::error!(
    7152            0 :                                 "Shard {} is inconsistent.  In memory: {}, database has: {}",
    7153              :                                 tenant_shard_id,
    7154            0 :                                 serde_json::to_string(expect_tsp).unwrap(),
    7155            0 :                                 serde_json::to_string(&persistent_tsp).unwrap()
    7156              :                             );
    7157            0 :                         }
    7158              :                     }
    7159              :                 }
    7160              :             }
    7161              : 
    7162              :             // Having already logged any differences, log any shards that simply aren't present in the database
    7163            0 :             for (tenant_shard_id, memory_tsp) in &expect_shards {
    7164            0 :                 if !persistent_shards.contains_key(tenant_shard_id) {
    7165            0 :                     tracing::error!(
    7166            0 :                         "Shard {} found in memory but not in database: {}",
    7167              :                         tenant_shard_id,
    7168            0 :                         serde_json::to_string(memory_tsp)
    7169            0 :                             .map_err(|e| ApiError::InternalServerError(e.into()))?
    7170              :                     );
    7171            0 :                 }
    7172              :             }
    7173              : 
    7174            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7175            0 :                 "Shard consistency failure"
    7176            0 :             )));
    7177            0 :         }
    7178              : 
    7179            0 :         node_result
    7180            0 :     }
    7181              : 
    7182              :     /// For debug/support: a JSON dump of the [`Scheduler`].  Returns a response so that
    7183              :     /// we don't have to make the scheduler clonable in the return path.
    7184            0 :     pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    7185            0 :         let serialized = {
    7186            0 :             let locked = self.inner.read().unwrap();
    7187            0 :             serde_json::to_string(&locked.scheduler)
    7188            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?
    7189              :         };
    7190              : 
    7191            0 :         hyper::Response::builder()
    7192            0 :             .status(hyper::StatusCode::OK)
    7193            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    7194            0 :             .body(hyper::Body::from(serialized))
    7195            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    7196            0 :     }
    7197              : 
    7198              :     /// This is for debug/support only: we simply drop all state for a node, without
    7199              :     /// detaching or deleting anything on pageservers.  We do not try to re-schedule any
    7200              :     /// tenants that were on this node.
    7201            0 :     pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
    7202            0 :         self.persistence.set_tombstone(node_id).await?;
    7203              : 
    7204            0 :         let mut locked = self.inner.write().unwrap();
    7205              : 
    7206            0 :         for shard in locked.tenants.values_mut() {
    7207            0 :             shard.deref_node(node_id);
    7208            0 :             shard.observed.locations.remove(&node_id);
    7209            0 :         }
    7210              : 
    7211            0 :         let mut nodes = (*locked.nodes).clone();
    7212            0 :         nodes.remove(&node_id);
    7213            0 :         locked.nodes = Arc::new(nodes);
    7214            0 :         metrics::METRICS_REGISTRY
    7215            0 :             .metrics_group
    7216            0 :             .storage_controller_pageserver_nodes
    7217            0 :             .set(locked.nodes.len() as i64);
    7218            0 :         metrics::METRICS_REGISTRY
    7219            0 :             .metrics_group
    7220            0 :             .storage_controller_https_pageserver_nodes
    7221            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7222              : 
    7223            0 :         locked.scheduler.node_remove(node_id);
    7224              : 
    7225            0 :         Ok(())
    7226            0 :     }
    7227              : 
    7228              :     /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense
    7229              :     /// that we don't leave any bad state behind in the storage controller, but unclean
    7230              :     /// in the sense that we are not carefully draining the node.
    7231            0 :     pub(crate) async fn node_delete_old(&self, node_id: NodeId) -> Result<(), ApiError> {
    7232            0 :         let _node_lock =
    7233            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
    7234              : 
    7235              :         // 1. Atomically update in-memory state:
    7236              :         //    - set the scheduling state to Pause to make subsequent scheduling ops skip it
    7237              :         //    - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
    7238              :         //    - drop the node from the main nodes map, so that when running reconciles complete they do not
    7239              :         //      re-insert references to this node into the ObservedState of shards
    7240              :         //    - drop the node from the scheduler
    7241              :         {
    7242            0 :             let mut locked = self.inner.write().unwrap();
    7243            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    7244              : 
    7245              :             {
    7246            0 :                 let mut nodes_mut = (*nodes).deref().clone();
    7247            0 :                 match nodes_mut.get_mut(&node_id) {
    7248            0 :                     Some(node) => {
    7249            0 :                         // We do not bother setting this in the database, because we're about to delete the row anyway, and
    7250            0 :                         // if we crash it would not be desirable to leave the node paused after a restart.
    7251            0 :                         node.set_scheduling(NodeSchedulingPolicy::Pause);
    7252            0 :                     }
    7253              :                     None => {
    7254            0 :                         tracing::info!(
    7255            0 :                             "Node not found: presuming this is a retry and returning success"
    7256              :                         );
    7257            0 :                         return Ok(());
    7258              :                     }
    7259              :                 }
    7260              : 
    7261            0 :                 *nodes = Arc::new(nodes_mut);
    7262              :             }
    7263              : 
    7264            0 :             for (_tenant_id, mut schedule_context, shards) in
    7265            0 :                 TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal)
    7266              :             {
    7267            0 :                 for shard in shards {
    7268            0 :                     if shard.deref_node(node_id) {
    7269            0 :                         if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
    7270              :                             // TODO: implement force flag to remove a node even if we can't reschedule
    7271              :                             // a tenant
    7272            0 :                             tracing::error!(
    7273            0 :                                 "Refusing to delete node, shard {} can't be rescheduled: {e}",
    7274              :                                 shard.tenant_shard_id
    7275              :                             );
    7276            0 :                             return Err(e.into());
    7277              :                         } else {
    7278            0 :                             tracing::info!(
    7279            0 :                                 "Rescheduled shard {} away from node during deletion",
    7280              :                                 shard.tenant_shard_id
    7281              :                             )
    7282              :                         }
    7283              : 
    7284            0 :                         self.maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal);
    7285            0 :                     }
    7286              : 
    7287              :                     // Here we remove an existing observed location for the node we're removing, and it will
    7288              :                     // not be re-added by a reconciler's completion because we filter out removed nodes in
    7289              :                     // process_result.
    7290              :                     //
    7291              :                     // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
    7292              :                     // means any reconciles we spawned will know about the node we're deleting, enabling them
    7293              :                     // to do live migrations if it's still online.
    7294            0 :                     shard.observed.locations.remove(&node_id);
    7295              :                 }
    7296              :             }
    7297              : 
    7298            0 :             scheduler.node_remove(node_id);
    7299              : 
    7300              :             {
    7301            0 :                 let mut nodes_mut = (**nodes).clone();
    7302            0 :                 if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    7303            0 :                     // Ensure that any reconciler holding an Arc<> to this node will
    7304            0 :                     // drop out when trying to RPC to it (setting Offline state sets the
    7305            0 :                     // cancellation token on the Node object).
    7306            0 :                     removed_node.set_availability(NodeAvailability::Offline);
    7307            0 :                 }
    7308            0 :                 *nodes = Arc::new(nodes_mut);
    7309            0 :                 metrics::METRICS_REGISTRY
    7310            0 :                     .metrics_group
    7311            0 :                     .storage_controller_pageserver_nodes
    7312            0 :                     .set(nodes.len() as i64);
    7313            0 :                 metrics::METRICS_REGISTRY
    7314            0 :                     .metrics_group
    7315            0 :                     .storage_controller_https_pageserver_nodes
    7316            0 :                     .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7317              :             }
    7318              :         }
    7319              : 
    7320              :         // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
    7321              :         // the removed node, as this column means "The pageserver to which this generation was issued", and
    7322              :         // their generations won't get updated until the reconcilers moving them away from this node complete.
    7323              :         // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
    7324              :         // that exists.
    7325              : 
    7326              :         // 2. Actually delete the node from in-memory state and set tombstone to the database
    7327              :         // for preventing the node to register again.
    7328            0 :         tracing::info!("Deleting node from database");
    7329            0 :         self.persistence.set_tombstone(node_id).await?;
    7330              : 
    7331            0 :         Ok(())
    7332            0 :     }
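                      :     // Note: unlike `delete_node` below, this legacy path reschedules every shard in a single
                      :     // pass under the inner write lock and has no cancellation handling; `delete_node` works
                      :     // incrementally (bounded by MAX_RECONCILES_PER_OPERATION per iteration), can be cancelled,
                      :     // and restores the node's original scheduling policy when it is.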
    7333              : 
    7334            0 :     pub(crate) async fn delete_node(
    7335            0 :         self: &Arc<Self>,
    7336            0 :         node_id: NodeId,
    7337            0 :         policy_on_start: NodeSchedulingPolicy,
    7338            0 :         cancel: CancellationToken,
    7339            0 :     ) -> Result<(), OperationError> {
    7340            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal).build();
    7341              : 
    7342            0 :         let mut waiters: Vec<ReconcilerWaiter> = Vec::new();
    7343            0 :         let mut tid_iter = create_shared_shard_iterator(self.clone());
    7344              : 
    7345            0 :         while !tid_iter.finished() {
    7346            0 :             if cancel.is_cancelled() {
    7347            0 :                 match self
    7348            0 :                     .node_configure(node_id, None, Some(policy_on_start))
    7349            0 :                     .await
    7350              :                 {
    7351            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    7352            0 :                     Err(err) => {
    7353            0 :                         return Err(OperationError::FinalizeError(
    7354            0 :                             format!(
    7355            0 :                                 "Failed to finalise delete cancel of {} by setting scheduling policy to {}: {}",
    7356            0 :                                 node_id, String::from(policy_on_start), err
    7357            0 :                             )
    7358            0 :                             .into(),
    7359            0 :                         ));
    7360              :                     }
    7361              :                 }
    7362            0 :             }
    7363              : 
    7364            0 :             operation_utils::validate_node_state(
    7365            0 :                 &node_id,
    7366            0 :                 self.inner.read().unwrap().nodes.clone(),
    7367            0 :                 NodeSchedulingPolicy::Deleting,
    7368            0 :             )?;
    7369              : 
    7370            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    7371            0 :                 let tid = match tid_iter.next() {
    7372            0 :                     Some(tid) => tid,
    7373              :                     None => {
    7374            0 :                         break;
    7375              :                     }
    7376              :                 };
    7377              : 
    7378            0 :                 let mut locked = self.inner.write().unwrap();
    7379            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    7380              : 
    7381              :                 // Calculate a schedule context here to avoid borrow checker issues.
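                      :                 // (The avoid-list must be built from an immutable pass over the tenant's shards
                      :                 //  before we take the mutable borrow of a single shard below; interleaving the two
                      :                 //  would require overlapping borrows of `tenants`.)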
    7382            0 :                 let mut schedule_context = ScheduleContext::default();
    7383            0 :                 for (_, shard) in tenants.range(TenantShardId::tenant_range(tid.tenant_id)) {
    7384            0 :                     schedule_context.avoid(&shard.intent.all_pageservers());
    7385            0 :                 }
    7386              : 
    7387            0 :                 let tenant_shard = match tenants.get_mut(&tid) {
    7388            0 :                     Some(tenant_shard) => tenant_shard,
    7389              :                     None => {
    7390              :                         // Tenant shard was deleted by another operation. Skip it.
    7391            0 :                         continue;
    7392              :                     }
    7393              :                 };
    7394              : 
    7395            0 :                 match tenant_shard.get_scheduling_policy() {
    7396            0 :                     ShardSchedulingPolicy::Active | ShardSchedulingPolicy::Essential => {
    7397            0 :                         // A migration during delete is classed as 'essential' because it is required to
    7398            0 :                         // uphold our availability goals for the tenant: this shard is eligible for migration.
    7399            0 :                     }
    7400              :                     ShardSchedulingPolicy::Pause | ShardSchedulingPolicy::Stop => {
    7401              :                         // If we have been asked to avoid rescheduling this shard, then do not migrate it during a deletion
    7402            0 :                         tracing::warn!(
    7403            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    7404            0 :                             "Skip migration during deletion because shard scheduling policy {:?} disallows it",
    7405            0 :                             tenant_shard.get_scheduling_policy(),
    7406              :                         );
    7407            0 :                         continue;
    7408              :                     }
    7409              :                 }
    7410              : 
    7411            0 :                 if tenant_shard.deref_node(node_id) {
    7412            0 :                     if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
    7413            0 :                         tracing::error!(
    7414            0 :                             "Refusing to delete node, shard {} can't be rescheduled: {e}",
    7415              :                             tenant_shard.tenant_shard_id
    7416              :                         );
    7417            0 :                         return Err(OperationError::ImpossibleConstraint(e.to_string().into()));
    7418              :                     } else {
    7419            0 :                         tracing::info!(
    7420            0 :                             "Rescheduled shard {} away from node during deletion",
    7421              :                             tenant_shard.tenant_shard_id
    7422              :                         )
    7423              :                     }
    7424              : 
    7425            0 :                     let waiter = self.maybe_configured_reconcile_shard(
    7426            0 :                         tenant_shard,
    7427            0 :                         nodes,
    7428            0 :                         reconciler_config,
    7429            0 :                     );
    7430            0 :                     if let Some(some) = waiter {
    7431            0 :                         waiters.push(some);
    7432            0 :                     }
    7433            0 :                 }
    7434              :             }
    7435              : 
    7436            0 :             waiters = self
    7437            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    7438            0 :                 .await;
    7439              : 
    7440            0 :             failpoint_support::sleep_millis_async!("sleepy-delete-loop", &cancel);
    7441              :         }
    7442              : 
    7443            0 :         while !waiters.is_empty() {
    7444            0 :             if cancel.is_cancelled() {
    7445            0 :                 match self
    7446            0 :                     .node_configure(node_id, None, Some(policy_on_start))
    7447            0 :                     .await
    7448              :                 {
    7449            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    7450            0 :                     Err(err) => {
    7451            0 :                         return Err(OperationError::FinalizeError(
    7452            0 :                             format!(
    7453            0 :                                 "Failed to finalise delete cancel of {} by setting scheduling policy to {}: {}",
    7454            0 :                                 node_id, String::from(policy_on_start), err
    7455            0 :                             )
    7456            0 :                             .into(),
    7457            0 :                         ));
    7458              :                     }
    7459              :                 }
    7460            0 :             }
    7461              : 
    7462            0 :             tracing::info!("Awaiting {} pending delete reconciliations", waiters.len());
    7463              : 
    7464            0 :             waiters = self
    7465            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    7466            0 :                 .await;
    7467              :         }
    7468              : 
    7469            0 :         self.persistence
    7470            0 :             .set_tombstone(node_id)
    7471            0 :             .await
    7472            0 :             .map_err(|e| OperationError::FinalizeError(e.to_string().into()))?;
    7473              : 
    7474              :         {
    7475            0 :             let mut locked = self.inner.write().unwrap();
    7476            0 :             let (nodes, _, scheduler) = locked.parts_mut();
    7477              : 
    7478            0 :             scheduler.node_remove(node_id);
    7479              : 
    7480            0 :             let mut nodes_mut = (**nodes).clone();
    7481            0 :             if let Some(mut removed_node) = nodes_mut.remove(&node_id) {
    7482            0 :                 // Ensure that any reconciler holding an Arc<> to this node will
    7483            0 :                 // drop out when trying to RPC to it (setting Offline state sets the
    7484            0 :                 // cancellation token on the Node object).
    7485            0 :                 removed_node.set_availability(NodeAvailability::Offline);
    7486            0 :             }
    7487            0 :             *nodes = Arc::new(nodes_mut);
    7488              : 
    7489            0 :             metrics::METRICS_REGISTRY
    7490            0 :                 .metrics_group
    7491            0 :                 .storage_controller_pageserver_nodes
    7492            0 :                 .set(nodes.len() as i64);
    7493            0 :             metrics::METRICS_REGISTRY
    7494            0 :                 .metrics_group
    7495            0 :                 .storage_controller_https_pageserver_nodes
    7496            0 :                 .set(nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7497              :         }
    7498              : 
    7499            0 :         Ok(())
    7500            0 :     }
    7501              : 
    7502            0 :     pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
    7503            0 :         let nodes = {
    7504            0 :             self.inner
    7505            0 :                 .read()
    7506            0 :                 .unwrap()
    7507            0 :                 .nodes
    7508            0 :                 .values()
    7509            0 :                 .cloned()
    7510            0 :                 .collect::<Vec<_>>()
    7511              :         };
    7512              : 
    7513            0 :         Ok(nodes)
    7514            0 :     }
    7515              : 
    7516            0 :     pub(crate) async fn tombstone_list(&self) -> Result<Vec<Node>, ApiError> {
    7517            0 :         self.persistence
    7518            0 :             .list_tombstones()
    7519            0 :             .await?
    7520            0 :             .into_iter()
    7521            0 :             .map(|np| Node::from_persistent(np, false))
    7522            0 :             .collect::<Result<Vec<_>, _>>()
    7523            0 :             .map_err(ApiError::InternalServerError)
    7524            0 :     }
    7525              : 
    7526            0 :     pub(crate) async fn tombstone_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
    7527            0 :         let _node_lock = trace_exclusive_lock(
    7528            0 :             &self.node_op_locks,
    7529            0 :             node_id,
    7530            0 :             NodeOperations::DeleteTombstone,
    7531            0 :         )
    7532            0 :         .await;
    7533              : 
    7534            0 :         if matches!(self.get_node(node_id).await, Err(ApiError::NotFound(_))) {
    7535            0 :             self.persistence.delete_node(node_id).await?;
    7536            0 :             Ok(())
    7537              :         } else {
    7538            0 :             Err(ApiError::Conflict(format!(
    7539            0 :                 "Node {node_id} is in use, consider using tombstone API first"
    7540            0 :             )))
    7541              :         }
    7542            0 :     }
    7543              : 
    7544            0 :     pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
    7545            0 :         self.inner
    7546            0 :             .read()
    7547            0 :             .unwrap()
    7548            0 :             .nodes
    7549            0 :             .get(&node_id)
    7550            0 :             .cloned()
    7551            0 :             .ok_or(ApiError::NotFound(
    7552            0 :                 format!("Node {node_id} not registered").into(),
    7553            0 :             ))
    7554            0 :     }
    7555              : 
    7556            0 :     pub(crate) async fn get_node_shards(
    7557            0 :         &self,
    7558            0 :         node_id: NodeId,
    7559            0 :     ) -> Result<NodeShardResponse, ApiError> {
    7560            0 :         let locked = self.inner.read().unwrap();
    7561            0 :         let mut shards = Vec::new();
    7562            0 :         for (tid, tenant) in locked.tenants.iter() {
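                      :             // Intent tri-state for this node: Some(false) = intended attached location,
                      :             // Some(true) = intended secondary, None = no intended location here.  The observed
                      :             // flag below is the analogous value derived from the pageserver-reported state.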
    7563            0 :             let is_intended_secondary = match (
    7564            0 :                 tenant.intent.get_attached() == &Some(node_id),
    7565            0 :                 tenant.intent.get_secondary().contains(&node_id),
    7566            0 :             ) {
    7567              :                 (true, true) => {
    7568            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    7569            0 :                         "{} attached as primary+secondary on the same node",
    7570            0 :                         tid
    7571            0 :                     )));
    7572              :                 }
    7573            0 :                 (true, false) => Some(false),
    7574            0 :                 (false, true) => Some(true),
    7575            0 :                 (false, false) => None,
    7576              :             };
    7577            0 :             let is_observed_secondary = if let Some(ObservedStateLocation { conf: Some(conf) }) =
    7578            0 :                 tenant.observed.locations.get(&node_id)
    7579              :             {
    7580            0 :                 Some(conf.secondary_conf.is_some())
    7581              :             } else {
    7582            0 :                 None
    7583              :             };
    7584            0 :             if is_intended_secondary.is_some() || is_observed_secondary.is_some() {
    7585            0 :                 shards.push(NodeShard {
    7586            0 :                     tenant_shard_id: *tid,
    7587            0 :                     is_intended_secondary,
    7588            0 :                     is_observed_secondary,
    7589            0 :                 });
    7590            0 :             }
    7591              :         }
    7592            0 :         Ok(NodeShardResponse { node_id, shards })
    7593            0 :     }
    7594              : 
    7595            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    7596            0 :         self.persistence.get_leader().await
    7597            0 :     }
    7598              : 
    7599            0 :     pub(crate) async fn node_register(
    7600            0 :         &self,
    7601            0 :         register_req: NodeRegisterRequest,
    7602            0 :     ) -> Result<(), ApiError> {
    7603            0 :         let _node_lock = trace_exclusive_lock(
    7604            0 :             &self.node_op_locks,
    7605            0 :             register_req.node_id,
    7606            0 :             NodeOperations::Register,
    7607            0 :         )
    7608            0 :         .await;
    7609              : 
    7610              :         #[derive(PartialEq)]
    7611              :         enum RegistrationStatus {
    7612              :             UpToDate,
    7613              :             NeedUpdate,
    7614              :             Mismatched,
    7615              :             New,
    7616              :         }
    7617              : 
    7618            0 :         let registration_status = {
    7619            0 :             let locked = self.inner.read().unwrap();
    7620            0 :             if let Some(node) = locked.nodes.get(&register_req.node_id) {
    7621            0 :                 if node.registration_match(&register_req) {
    7622            0 :                     if node.need_update(&register_req) {
    7623            0 :                         RegistrationStatus::NeedUpdate
    7624              :                     } else {
    7625            0 :                         RegistrationStatus::UpToDate
    7626              :                     }
    7627              :                 } else {
    7628            0 :                     RegistrationStatus::Mismatched
    7629              :                 }
    7630              :             } else {
    7631            0 :                 RegistrationStatus::New
    7632              :             }
    7633              :         };
    7634              : 
    7635            0 :         match registration_status {
    7636              :             RegistrationStatus::UpToDate => {
    7637            0 :                 tracing::info!(
    7638            0 :                     "Node {} re-registered with matching address and is up to date",
    7639              :                     register_req.node_id
    7640              :                 );
    7641              : 
    7642            0 :                 return Ok(());
    7643              :             }
    7644              :             RegistrationStatus::Mismatched => {
    7645              :                 // TODO: decide if we want to allow modifying node addresses without removing and re-adding
    7646              :                 // the node.  Safest/simplest thing is to refuse it, and usually we deploy with
    7647              :                 // a fixed address through the lifetime of a node.
    7648            0 :                 tracing::warn!(
    7649            0 :                     "Node {} tried to register with different address",
    7650              :                     register_req.node_id
    7651              :                 );
    7652            0 :                 return Err(ApiError::Conflict(
    7653            0 :                     "Node is already registered with different address".to_string(),
    7654            0 :                 ));
    7655              :             }
    7656            0 :             RegistrationStatus::New | RegistrationStatus::NeedUpdate => {
    7657            0 :                 // fallthrough
    7658            0 :             }
    7659              :         }
    7660              : 
    7661              :         // We do not require that a node is actually online when registered (it will start life
    7662              :         // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
    7663              :         // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
    7664              :         // that register themselves with a broken DNS config.  We check only the HTTP hostname, because
    7665              :         // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
    7666            0 :         if tokio::net::lookup_host(format!(
    7667            0 :             "{}:{}",
    7668              :             register_req.listen_http_addr, register_req.listen_http_port
    7669              :         ))
    7670            0 :         .await
    7671            0 :         .is_err()
    7672              :         {
    7673              :             // If we have a transient DNS issue, it's up to the caller to retry their registration.  Because
    7674              :             // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
    7675              :             // we return a soft 503 error, to encourage callers to retry past transient issues.
    7676            0 :             return Err(ApiError::ResourceUnavailable(
    7677            0 :                 format!(
    7678            0 :                     "Node {} tried to register with unknown DNS name '{}'",
    7679            0 :                     register_req.node_id, register_req.listen_http_addr
    7680            0 :                 )
    7681            0 :                 .into(),
    7682            0 :             ));
    7683            0 :         }
    7684              : 
    7685            0 :         if self.config.use_https_pageserver_api && register_req.listen_https_port.is_none() {
    7686            0 :             return Err(ApiError::PreconditionFailed(
    7687            0 :                 format!(
    7688            0 :                     "Node {} has no https port, but use_https is enabled",
    7689            0 :                     register_req.node_id
    7690            0 :                 )
    7691            0 :                 .into(),
    7692            0 :             ));
    7693            0 :         }
    7694              : 
    7695            0 :         if register_req.listen_grpc_addr.is_some() != register_req.listen_grpc_port.is_some() {
    7696            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    7697            0 :                 "must specify both gRPC address and port"
    7698            0 :             )));
    7699            0 :         }
    7700              : 
    7701              :         // Ordering: we must persist the new node _before_ adding it to in-memory state.
    7702              :         // This ensures that before we use it for anything or expose it via any external
    7703              :         // API, it is guaranteed to be available after a restart.
    7704            0 :         let new_node = Node::new(
    7705            0 :             register_req.node_id,
    7706            0 :             register_req.listen_http_addr,
    7707            0 :             register_req.listen_http_port,
    7708            0 :             register_req.listen_https_port,
    7709            0 :             register_req.listen_pg_addr,
    7710            0 :             register_req.listen_pg_port,
    7711            0 :             register_req.listen_grpc_addr,
    7712            0 :             register_req.listen_grpc_port,
    7713            0 :             register_req.availability_zone_id.clone(),
    7714            0 :             self.config.use_https_pageserver_api,
    7715              :         );
    7716            0 :         let new_node = match new_node {
    7717            0 :             Ok(new_node) => new_node,
    7718            0 :             Err(error) => return Err(ApiError::InternalServerError(error)),
    7719              :         };
    7720              : 
    7721            0 :         match registration_status {
    7722              :             RegistrationStatus::New => {
    7723            0 :                 self.persistence.insert_node(&new_node).await.map_err(|e| {
    7724            0 :                     if matches!(
    7725            0 :                         e,
    7726              :                         crate::persistence::DatabaseError::Query(
    7727              :                             diesel::result::Error::DatabaseError(
    7728              :                                 diesel::result::DatabaseErrorKind::UniqueViolation,
    7729              :                                 _,
    7730              :                             )
    7731              :                         )
    7732              :                     ) {
    7733              :                         // The node may have been deleted via the tombstone API, in which case it no longer
    7734              :                         // shows up in the list of nodes but its row still exists.  If you see this error, check tombstones first.
    7735            0 :                         ApiError::Conflict(format!("Node {} already exists", new_node.get_id()))
    7736              :                     } else {
    7737            0 :                         ApiError::from(e)
    7738              :                     }
    7739            0 :                 })?;
    7740              :             }
    7741              :             RegistrationStatus::NeedUpdate => {
    7742            0 :                 self.persistence
    7743            0 :                     .update_node_on_registration(
    7744            0 :                         register_req.node_id,
    7745            0 :                         register_req.listen_https_port,
    7746            0 :                     )
    7747            0 :                     .await?
    7748              :             }
    7749            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    7750              :         }
    7751              : 
    7752            0 :         let mut locked = self.inner.write().unwrap();
    7753            0 :         let mut new_nodes = (*locked.nodes).clone();
    7754              : 
    7755            0 :         locked.scheduler.node_upsert(&new_node);
    7756            0 :         new_nodes.insert(register_req.node_id, new_node);
    7757              : 
    7758            0 :         locked.nodes = Arc::new(new_nodes);
    7759              : 
    7760            0 :         metrics::METRICS_REGISTRY
    7761            0 :             .metrics_group
    7762            0 :             .storage_controller_pageserver_nodes
    7763            0 :             .set(locked.nodes.len() as i64);
    7764            0 :         metrics::METRICS_REGISTRY
    7765            0 :             .metrics_group
    7766            0 :             .storage_controller_https_pageserver_nodes
    7767            0 :             .set(locked.nodes.values().filter(|n| n.has_https_port()).count() as i64);
    7768              : 
    7769            0 :         match registration_status {
    7770              :             RegistrationStatus::New => {
    7771            0 :                 tracing::info!(
    7772            0 :                     "Registered pageserver {} ({}), now have {} pageservers",
    7773              :                     register_req.node_id,
    7774              :                     register_req.availability_zone_id,
    7775            0 :                     locked.nodes.len()
    7776              :                 );
    7777              :             }
    7778              :             RegistrationStatus::NeedUpdate => {
    7779            0 :                 tracing::info!(
    7780            0 :                     "Re-registered and updated node {} ({})",
    7781              :                     register_req.node_id,
    7782              :                     register_req.availability_zone_id,
    7783              :                 );
    7784              :             }
    7785            0 :             _ => unreachable!("Other statuses have been processed earlier"),
    7786              :         }
    7787            0 :         Ok(())
    7788            0 :     }
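// Sketch: a minimal, hypothetical reduction of the persist-before-publish ordering used by node
// registration above. `NodeRecord` and `persist_node` are illustrative stand-ins (not this
// crate's types) for `Node` and the persistence layer's insert; the point is only that the
// durable write happens before the in-memory map is swapped, so a restart can never observe a
// node that was exposed but never persisted.
use std::collections::HashMap;
use std::sync::{Arc, RwLock};

#[derive(Clone)]
struct NodeRecord {
    id: u64,
}

async fn persist_node(_node: &NodeRecord) -> anyhow::Result<()> {
    // Stand-in for the database insert; the real code maps unique-violation errors to a 409 Conflict.
    Ok(())
}

async fn register_sketch(
    state: &RwLock<Arc<HashMap<u64, NodeRecord>>>,
    node: NodeRecord,
) -> anyhow::Result<()> {
    // 1. Durable write first: after a restart the node must still be known.
    persist_node(&node).await?;
    // 2. Copy-on-write the shared map, then publish the new snapshot.
    let mut guard = state.write().unwrap();
    let mut nodes = (**guard).clone();
    nodes.insert(node.id, node);
    *guard = Arc::new(nodes);
    Ok(())
}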
    7789              : 
    7790              :     /// Configure in-memory and persistent state of a node as requested
    7791              :     ///
    7792              :     /// Note that this function does not trigger any immediate side effects in response
    7793              :     /// to the changes. That part is handled by [`Self::handle_node_availability_transition`].
    7794            0 :     async fn node_state_configure(
    7795            0 :         &self,
    7796            0 :         node_id: NodeId,
    7797            0 :         availability: Option<NodeAvailability>,
    7798            0 :         scheduling: Option<NodeSchedulingPolicy>,
    7799            0 :         node_lock: &TracingExclusiveGuard<NodeOperations>,
    7800            0 :     ) -> Result<AvailabilityTransition, ApiError> {
    7801            0 :         if let Some(scheduling) = scheduling {
    7802              :             // Scheduling is a persistent part of Node: we must write updates to the database before
    7803              :             // applying them in memory
    7804            0 :             self.persistence
    7805            0 :                 .update_node_scheduling_policy(node_id, scheduling)
    7806            0 :                 .await?;
    7807            0 :         }
    7808              : 
    7809              :         // If we're activating a node, then before setting it active we must reconcile any shard locations
    7810              :         // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
    7811              :         // by calling [`Self::node_activate_reconcile`]
    7812              :         //
    7813              :         // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
    7814              :         // nothing else can mutate its availability while we run.
    7815            0 :         let availability_transition = if let Some(input_availability) = availability.as_ref() {
    7816            0 :             let (activate_node, availability_transition) = {
    7817            0 :                 let locked = self.inner.read().unwrap();
    7818            0 :                 let Some(node) = locked.nodes.get(&node_id) else {
    7819            0 :                     return Err(ApiError::NotFound(
    7820            0 :                         anyhow::anyhow!("Node {} not registered", node_id).into(),
    7821            0 :                     ));
    7822              :                 };
    7823              : 
    7824            0 :                 (
    7825            0 :                     node.clone(),
    7826            0 :                     node.get_availability_transition(input_availability),
    7827            0 :                 )
    7828              :             };
    7829              : 
    7830            0 :             if matches!(availability_transition, AvailabilityTransition::ToActive) {
    7831            0 :                 self.node_activate_reconcile(activate_node, node_lock)
    7832            0 :                     .await?;
    7833            0 :             }
    7834            0 :             availability_transition
    7835              :         } else {
    7836            0 :             AvailabilityTransition::Unchanged
    7837              :         };
    7838              : 
    7839              :         // Apply changes from the request to our in-memory state for the Node
    7840            0 :         let mut locked = self.inner.write().unwrap();
    7841            0 :         let (nodes, _tenants, scheduler) = locked.parts_mut();
    7842              : 
    7843            0 :         let mut new_nodes = (**nodes).clone();
    7844              : 
    7845            0 :         let Some(node) = new_nodes.get_mut(&node_id) else {
    7846            0 :             return Err(ApiError::NotFound(
    7847            0 :                 anyhow::anyhow!("Node not registered").into(),
    7848            0 :             ));
    7849              :         };
    7850              : 
    7851            0 :         if let Some(availability) = availability {
    7852            0 :             node.set_availability(availability);
    7853            0 :         }
    7854              : 
    7855            0 :         if let Some(scheduling) = scheduling {
    7856            0 :             node.set_scheduling(scheduling);
    7857            0 :         }
    7858              : 
    7859              :         // Update the scheduler, in case the eligibility of the node for new shards has changed
    7860            0 :         scheduler.node_upsert(node);
    7861              : 
    7862            0 :         let new_nodes = Arc::new(new_nodes);
    7863            0 :         locked.nodes = new_nodes;
    7864              : 
    7865            0 :         Ok(availability_transition)
    7866            0 :     }
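// Sketch: a hypothetical reduction of how an availability transition can be derived from the
// old and new availability. The real logic lives in `Node::get_availability_transition`;
// `Availability` and `Transition` below are simplified stand-ins, not the crate's actual types,
// though the transition variant names mirror the ones handled later in this file.
#[derive(Clone, Copy, PartialEq)]
enum Availability {
    Active,
    WarmingUp,
    Offline,
}

#[derive(Debug, PartialEq)]
enum Transition {
    ToActive,
    ToOffline,
    ToWarmingUpFromActive,
    ToWarmingUpFromOffline,
    Unchanged,
}

fn availability_transition(old: Availability, new: Availability) -> Transition {
    use Availability::*;
    match (old, new) {
        // No change: nothing for the caller to react to.
        (a, b) if a == b => Transition::Unchanged,
        (_, Active) => Transition::ToActive,
        (_, Offline) => Transition::ToOffline,
        (Active, WarmingUp) => Transition::ToWarmingUpFromActive,
        (Offline, WarmingUp) => Transition::ToWarmingUpFromOffline,
        // Already covered by the guard arm above, but the compiler cannot see that.
        (WarmingUp, WarmingUp) => Transition::Unchanged,
    }
}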
    7867              : 
    7868              :     /// Handle availability transition of one node
    7869              :     ///
    7870              :     /// Note that you should first call [`Self::node_state_configure`] to update
    7871              :     /// the in-memory state referencing that node. If you need to handle more than one transition
    7872              :     /// consider using [`Self::handle_node_availability_transitions`].
    7873            0 :     async fn handle_node_availability_transition(
    7874            0 :         &self,
    7875            0 :         node_id: NodeId,
    7876            0 :         transition: AvailabilityTransition,
    7877            0 :         _node_lock: &TracingExclusiveGuard<NodeOperations>,
    7878            0 :     ) -> Result<(), ApiError> {
    7879              :         // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
    7880            0 :         match transition {
    7881              :             AvailabilityTransition::ToOffline => {
    7882            0 :                 tracing::info!("Node {} transition to offline", node_id);
    7883              : 
    7884            0 :                 let mut locked = self.inner.write().unwrap();
    7885            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    7886              : 
    7887            0 :                 let mut tenants_affected: usize = 0;
    7888              : 
    7889            0 :                 for (_tenant_id, mut schedule_context, shards) in
    7890            0 :                     TenantShardExclusiveIterator::new(tenants, ScheduleMode::Normal)
    7891              :                 {
    7892            0 :                     for tenant_shard in shards {
    7893            0 :                         let tenant_shard_id = tenant_shard.tenant_shard_id;
    7894            0 :                         if let Some(observed_loc) =
    7895            0 :                             tenant_shard.observed.locations.get_mut(&node_id)
    7896            0 :                         {
    7897            0 :                             // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
    7898            0 :                             // not assume our knowledge of the node's configuration is accurate until it comes back online
    7899            0 :                             observed_loc.conf = None;
    7900            0 :                         }
    7901              : 
    7902            0 :                         if nodes.len() == 1 {
    7903              :                             // Special case for a single-node cluster: there is no point trying to reschedule
    7904              :                             // any tenant shards; skip them so we avoid spewing warnings about
    7905              :                             // failures to schedule them.
    7906            0 :                             continue;
    7907            0 :                         }
    7908              : 
    7909            0 :                         if !nodes
    7910            0 :                             .values()
    7911            0 :                             .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    7912              :                         {
    7913              :                             // Special case for when all nodes are unavailable and/or unschedulable: there is no point
    7914              :                             // trying to reschedule since there's nowhere else to go. Without this
    7915              :                             // branch we would incorrectly detach tenants in response to node unavailability.
    7916            0 :                             continue;
    7917            0 :                         }
    7918              : 
    7919            0 :                         if tenant_shard.intent.demote_attached(scheduler, node_id) {
    7920            0 :                             tenant_shard.sequence = tenant_shard.sequence.next();
    7921              : 
    7922            0 :                             match tenant_shard.schedule(scheduler, &mut schedule_context) {
    7923            0 :                                 Err(e) => {
    7924              :                                     // It is possible that some tenants will become unschedulable when too many pageservers
    7925              :                                     // go offline: in this case there isn't much we can do other than make the issue observable.
    7926              :                                     // TODO: give TenantShard a scheduling error attribute to be queried later.
    7927            0 :                                     tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
    7928              :                                 }
    7929              :                                 Ok(()) => {
    7930            0 :                                     if self
    7931            0 :                                         .maybe_reconcile_shard(
    7932            0 :                                             tenant_shard,
    7933            0 :                                             nodes,
    7934            0 :                                             ReconcilerPriority::Normal,
    7935            0 :                                         )
    7936            0 :                                         .is_some()
    7937            0 :                                     {
    7938            0 :                                         tenants_affected += 1;
    7939            0 :                                     };
    7940              :                                 }
    7941              :                             }
    7942            0 :                         }
    7943              :                     }
    7944              :                 }
    7945            0 :                 tracing::info!(
    7946            0 :                     "Launched {} reconciler tasks for tenants affected by node {} going offline",
    7947              :                     tenants_affected,
    7948              :                     node_id
    7949              :                 )
    7950              :             }
    7951              :             AvailabilityTransition::ToActive => {
    7952            0 :                 tracing::info!("Node {} transition to active", node_id);
    7953              : 
    7954            0 :                 let mut locked = self.inner.write().unwrap();
    7955            0 :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    7956              : 
    7957              :                 // When a node comes back online, we must reconcile any tenant that has a None observed
    7958              :                 // location on the node.
    7959            0 :                 for tenant_shard in tenants.values_mut() {
    7960              :                     // If a reconciliation is already in progress, rely on the previous scheduling
    7961              :                     // decision and skip triggering a new reconciliation.
    7962            0 :                     if tenant_shard.reconciler.is_some() {
    7963            0 :                         continue;
    7964            0 :                     }
    7965              : 
    7966            0 :                     if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
    7967            0 :                         if observed_loc.conf.is_none() {
    7968            0 :                             self.maybe_reconcile_shard(
    7969            0 :                                 tenant_shard,
    7970            0 :                                 nodes,
    7971            0 :                                 ReconcilerPriority::Normal,
    7972            0 :                             );
    7973            0 :                         }
    7974            0 :                     }
    7975              :                 }
    7976              : 
    7977              :                 // TODO: in the background, we should balance work back onto this pageserver
    7978              :             }
    7979              :             // No action required for the intermediate unavailable state.
    7980              :             // When we transition into active or offline from the unavailable state,
    7981              :             // the correct handling above will kick in.
    7982              :             AvailabilityTransition::ToWarmingUpFromActive => {
    7983            0 :                 tracing::info!("Node {} transition to unavailable from active", node_id);
    7984              :             }
    7985              :             AvailabilityTransition::ToWarmingUpFromOffline => {
    7986            0 :                 tracing::info!("Node {} transition to unavailable from offline", node_id);
    7987              :             }
    7988              :             AvailabilityTransition::Unchanged => {
    7989            0 :                 tracing::debug!("Node {} no availability change during config", node_id);
    7990              :             }
    7991              :         }
    7992              : 
    7993            0 :         Ok(())
    7994            0 :     }
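// Sketch: the two guards applied above before moving work away from a node that went offline,
// pulled out into a standalone predicate. `NodeView::can_schedule` is a hypothetical stand-in
// for `Node::may_schedule` returning `MaySchedule::Yes(_)`; this is illustrative only, not the
// crate's API.
struct NodeView {
    schedulable: bool,
}

impl NodeView {
    fn can_schedule(&self) -> bool {
        self.schedulable
    }
}

/// Is it worth demoting and rescheduling shards away from an offline node?
fn should_reschedule_away(nodes: &[NodeView]) -> bool {
    // Single-node cluster: there is nowhere else to put the shards, so avoid pointless warnings.
    if nodes.len() == 1 {
        return false;
    }
    // If nothing can accept new work, rescheduling would only detach tenants for no benefit.
    nodes.iter().any(|n| n.can_schedule())
}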
    7995              : 
    7996              :     /// Handle availability transition for multiple nodes
    7997              :     ///
    7998              :     /// Note that you should first call [`Self::node_state_configure`] for
    7999              :     /// all nodes handled here, so that the handling uses fresh in-memory state.
    8000            0 :     async fn handle_node_availability_transitions(
    8001            0 :         &self,
    8002            0 :         transitions: Vec<(
    8003            0 :             NodeId,
    8004            0 :             TracingExclusiveGuard<NodeOperations>,
    8005            0 :             AvailabilityTransition,
    8006            0 :         )>,
    8007            0 :     ) -> Result<(), Vec<(NodeId, ApiError)>> {
    8008            0 :         let mut errors = Vec::default();
    8009            0 :         for (node_id, node_lock, transition) in transitions {
    8010            0 :             let res = self
    8011            0 :                 .handle_node_availability_transition(node_id, transition, &node_lock)
    8012            0 :                 .await;
    8013            0 :             if let Err(err) = res {
    8014            0 :                 errors.push((node_id, err));
    8015            0 :             }
    8016              :         }
    8017              : 
    8018            0 :         if errors.is_empty() {
    8019            0 :             Ok(())
    8020              :         } else {
    8021            0 :             Err(errors)
    8022              :         }
    8023            0 :     }
    8024              : 
    8025            0 :     pub(crate) async fn node_configure(
    8026            0 :         &self,
    8027            0 :         node_id: NodeId,
    8028            0 :         availability: Option<NodeAvailability>,
    8029            0 :         scheduling: Option<NodeSchedulingPolicy>,
    8030            0 :     ) -> Result<(), ApiError> {
    8031            0 :         let node_lock =
    8032            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
    8033              : 
    8034            0 :         let transition = self
    8035            0 :             .node_state_configure(node_id, availability, scheduling, &node_lock)
    8036            0 :             .await?;
    8037            0 :         self.handle_node_availability_transition(node_id, transition, &node_lock)
    8038            0 :             .await
    8039            0 :     }
    8040              : 
    8041              :     /// Wrapper around [`Self::node_configure`] for the HTTP API, which only allows changes while
    8042              :     /// there is no ongoing background operation.
    8043            0 :     pub(crate) async fn external_node_configure(
    8044            0 :         &self,
    8045            0 :         node_id: NodeId,
    8046            0 :         availability: Option<NodeAvailability>,
    8047            0 :         scheduling: Option<NodeSchedulingPolicy>,
    8048            0 :     ) -> Result<(), ApiError> {
    8049              :         {
    8050            0 :             let locked = self.inner.read().unwrap();
    8051            0 :             if let Some(op) = locked.ongoing_operation.as_ref().map(|op| op.operation) {
    8052            0 :                 return Err(ApiError::PreconditionFailed(
    8053            0 :                     format!("Ongoing background operation forbids configuring: {op}").into(),
    8054            0 :                 ));
    8055            0 :             }
    8056              :         }
    8057              : 
    8058            0 :         self.node_configure(node_id, availability, scheduling).await
    8059            0 :     }
    8060              : 
    8061            0 :     pub(crate) async fn start_node_delete(
    8062            0 :         self: &Arc<Self>,
    8063            0 :         node_id: NodeId,
    8064            0 :     ) -> Result<(), ApiError> {
    8065            0 :         let (ongoing_op, node_policy, schedulable_nodes_count) = {
    8066            0 :             let locked = self.inner.read().unwrap();
    8067            0 :             let nodes = &locked.nodes;
    8068            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8069            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8070            0 :             ))?;
    8071            0 :             let schedulable_nodes_count = nodes
    8072            0 :                 .iter()
    8073            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    8074            0 :                 .count();
    8075              : 
    8076              :             (
    8077            0 :                 locked
    8078            0 :                     .ongoing_operation
    8079            0 :                     .as_ref()
    8080            0 :                     .map(|ongoing| ongoing.operation),
    8081            0 :                 node.get_scheduling(),
    8082            0 :                 schedulable_nodes_count,
    8083              :             )
    8084              :         };
    8085              : 
    8086            0 :         if let Some(ongoing) = ongoing_op {
    8087            0 :             return Err(ApiError::PreconditionFailed(
    8088            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8089            0 :             ));
    8090            0 :         }
    8091              : 
    8092            0 :         if schedulable_nodes_count == 0 {
    8093            0 :             return Err(ApiError::PreconditionFailed(
    8094            0 :                 "No other schedulable nodes to move shards to".into(),
    8095            0 :             ));
    8096            0 :         }
    8097              : 
    8098            0 :         match node_policy {
    8099              :             NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => {
    8100            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Deleting))
    8101            0 :                     .await?;
    8102              : 
    8103            0 :                 let cancel = self.cancel.child_token();
    8104            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8105            0 :                 let policy_on_start = node_policy;
    8106              : 
    8107            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8108            0 :                     operation: Operation::Delete(Delete { node_id }),
    8109            0 :                     cancel: cancel.clone(),
    8110            0 :                 });
    8111              : 
    8112            0 :                 let span = tracing::info_span!(parent: None, "delete_node", %node_id);
    8113              : 
    8114            0 :                 tokio::task::spawn(
    8115              :                     {
    8116            0 :                         let service = self.clone();
    8117            0 :                         let cancel = cancel.clone();
    8118            0 :                         async move {
    8119            0 :                             let _gate_guard = gate_guard;
    8120              : 
    8121            0 :                             scopeguard::defer! {
    8122              :                                 let prev = service.inner.write().unwrap().ongoing_operation.take();
    8123              : 
    8124              :                                 if let Some(Operation::Delete(removed_delete)) = prev.map(|h| h.operation) {
    8125              :                                     assert_eq!(removed_delete.node_id, node_id, "We always take the same operation");
    8126              :                                 } else {
    8127              :                                     panic!("We always remove the same operation")
    8128              :                                 }
    8129              :                             }
    8130              : 
    8131            0 :                             tracing::info!("Delete background operation starting");
    8132            0 :                             let res = service
    8133            0 :                                 .delete_node(node_id, policy_on_start, cancel)
    8134            0 :                                 .await;
    8135            0 :                             match res {
    8136              :                                 Ok(()) => {
    8137            0 :                                     tracing::info!(
    8138            0 :                                         "Delete background operation completed successfully"
    8139              :                                     );
    8140              :                                 }
    8141              :                                 Err(OperationError::Cancelled) => {
    8142            0 :                                     tracing::info!("Delete background operation was cancelled");
    8143              :                                 }
    8144            0 :                                 Err(err) => {
    8145            0 :                                     tracing::error!(
    8146            0 :                                         "Delete background operation encountered: {err}"
    8147              :                                     )
    8148              :                                 }
    8149              :                             }
    8150            0 :                         }
    8151              :                     }
    8152            0 :                     .instrument(span),
    8153              :                 );
    8154              :             }
    8155              :             NodeSchedulingPolicy::Deleting => {
    8156            0 :                 return Err(ApiError::Conflict(format!(
    8157            0 :                     "Node {node_id} has delete in progress"
    8158            0 :                 )));
    8159              :             }
    8160            0 :             policy => {
    8161            0 :                 return Err(ApiError::PreconditionFailed(
    8162            0 :                     format!("Node {node_id} cannot be deleted due to {policy:?} policy").into(),
    8163            0 :                 ));
    8164              :             }
    8165              :         }
    8166              : 
    8167            0 :         Ok(())
    8168            0 :     }
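// Sketch: the spawn-with-cleanup pattern shared by the delete, drain and fill background
// operations above, reduced to its core under assumed types. `OngoingOp` stands in for the
// crate's `OperationHandler`; the real code also holds a `Gate` guard for shutdown and runs the
// actual operation (e.g. the node delete) instead of waiting on the token.
use std::sync::{Arc, Mutex};

use tokio_util::sync::CancellationToken;

struct OngoingOp {
    node_id: u64,
    cancel: CancellationToken,
}

fn start_background_op(slot: Arc<Mutex<Option<OngoingOp>>>, node_id: u64) {
    let cancel = CancellationToken::new();
    *slot.lock().unwrap() = Some(OngoingOp {
        node_id,
        cancel: cancel.clone(),
    });

    tokio::task::spawn(async move {
        // Whatever happens below (success, failure, early return), clear the slot on exit.
        scopeguard::defer! {
            let prev = slot.lock().unwrap().take();
            assert_eq!(prev.map(|op| op.node_id), Some(node_id), "we always remove our own operation");
        }

        // Placeholder for the long-running work.
        cancel.cancelled().await;
    });
}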
    8169              : 
    8170            0 :     pub(crate) async fn cancel_node_delete(
    8171            0 :         self: &Arc<Self>,
    8172            0 :         node_id: NodeId,
    8173            0 :     ) -> Result<(), ApiError> {
    8174              :         {
    8175            0 :             let locked = self.inner.read().unwrap();
    8176            0 :             let nodes = &locked.nodes;
    8177            0 :             nodes.get(&node_id).ok_or(ApiError::NotFound(
    8178            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8179            0 :             ))?;
    8180              :         }
    8181              : 
    8182            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8183            0 :             if let Operation::Delete(delete) = op_handler.operation {
    8184            0 :                 if delete.node_id == node_id {
    8185            0 :                     tracing::info!("Cancelling background delete operation for node {node_id}");
    8186            0 :                     op_handler.cancel.cancel();
    8187            0 :                     return Ok(());
    8188            0 :                 }
    8189            0 :             }
    8190            0 :         }
    8191              : 
    8192            0 :         Err(ApiError::PreconditionFailed(
    8193            0 :             format!("Node {node_id} has no delete in progress").into(),
    8194            0 :         ))
    8195            0 :     }
    8196              : 
    8197            0 :     pub(crate) async fn start_node_drain(
    8198            0 :         self: &Arc<Self>,
    8199            0 :         node_id: NodeId,
    8200            0 :     ) -> Result<(), ApiError> {
    8201            0 :         let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
    8202            0 :             let locked = self.inner.read().unwrap();
    8203            0 :             let nodes = &locked.nodes;
    8204            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8205            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8206            0 :             ))?;
    8207            0 :             let schedulable_nodes_count = nodes
    8208            0 :                 .iter()
    8209            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    8210            0 :                 .count();
    8211              : 
    8212              :             (
    8213            0 :                 locked
    8214            0 :                     .ongoing_operation
    8215            0 :                     .as_ref()
    8216            0 :                     .map(|ongoing| ongoing.operation),
    8217            0 :                 node.is_available(),
    8218            0 :                 node.get_scheduling(),
    8219            0 :                 schedulable_nodes_count,
    8220              :             )
    8221              :         };
    8222              : 
    8223            0 :         if let Some(ongoing) = ongoing_op {
    8224            0 :             return Err(ApiError::PreconditionFailed(
    8225            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8226            0 :             ));
    8227            0 :         }
    8228              : 
    8229            0 :         if !node_available {
    8230            0 :             return Err(ApiError::ResourceUnavailable(
    8231            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8232            0 :             ));
    8233            0 :         }
    8234              : 
    8235            0 :         if schedulable_nodes_count == 0 {
    8236            0 :             return Err(ApiError::PreconditionFailed(
    8237            0 :                 "No other schedulable nodes to drain to".into(),
    8238            0 :             ));
    8239            0 :         }
    8240              : 
    8241            0 :         match node_policy {
    8242              :             NodeSchedulingPolicy::Active => {
    8243            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
    8244            0 :                     .await?;
    8245              : 
    8246            0 :                 let cancel = self.cancel.child_token();
    8247            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8248              : 
    8249            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8250            0 :                     operation: Operation::Drain(Drain { node_id }),
    8251            0 :                     cancel: cancel.clone(),
    8252            0 :                 });
    8253              : 
    8254            0 :                 let span = tracing::info_span!(parent: None, "drain_node", %node_id);
    8255              : 
    8256            0 :                 tokio::task::spawn({
    8257            0 :                     let service = self.clone();
    8258            0 :                     let cancel = cancel.clone();
    8259            0 :                     async move {
    8260            0 :                         let _gate_guard = gate_guard;
    8261              : 
    8262            0 :                         scopeguard::defer! {
    8263              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    8264              : 
    8265              :                             if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
    8266              :                                 assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
    8267              :                             } else {
    8268              :                                 panic!("We always remove the same operation")
    8269              :                             }
    8270              :                         }
    8271              : 
    8272            0 :                         tracing::info!("Drain background operation starting");
    8273            0 :                         let res = service.drain_node(node_id, cancel).await;
    8274            0 :                         match res {
    8275              :                             Ok(()) => {
    8276            0 :                                 tracing::info!("Drain background operation completed successfully");
    8277              :                             }
    8278              :                             Err(OperationError::Cancelled) => {
    8279            0 :                                 tracing::info!("Drain background operation was cancelled");
    8280              :                             }
    8281            0 :                             Err(err) => {
    8282            0 :                                 tracing::error!("Drain background operation encountered: {err}")
    8283              :                             }
    8284              :                         }
    8285            0 :                     }
    8286            0 :                 }.instrument(span));
    8287              :             }
    8288              :             NodeSchedulingPolicy::Draining => {
    8289            0 :                 return Err(ApiError::Conflict(format!(
    8290            0 :                     "Node {node_id} has drain in progress"
    8291            0 :                 )));
    8292              :             }
    8293            0 :             policy => {
    8294            0 :                 return Err(ApiError::PreconditionFailed(
    8295            0 :                     format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
    8296            0 :                 ));
    8297              :             }
    8298              :         }
    8299              : 
    8300            0 :         Ok(())
    8301            0 :     }
    8302              : 
    8303            0 :     pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
    8304            0 :         let node_available = {
    8305            0 :             let locked = self.inner.read().unwrap();
    8306            0 :             let nodes = &locked.nodes;
    8307            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8308            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8309            0 :             ))?;
    8310              : 
    8311            0 :             node.is_available()
    8312              :         };
    8313              : 
    8314            0 :         if !node_available {
    8315            0 :             return Err(ApiError::ResourceUnavailable(
    8316            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8317            0 :             ));
    8318            0 :         }
    8319              : 
    8320            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8321            0 :             if let Operation::Drain(drain) = op_handler.operation {
    8322            0 :                 if drain.node_id == node_id {
    8323            0 :                     tracing::info!("Cancelling background drain operation for node {node_id}");
    8324            0 :                     op_handler.cancel.cancel();
    8325            0 :                     return Ok(());
    8326            0 :                 }
    8327            0 :             }
    8328            0 :         }
    8329              : 
    8330            0 :         Err(ApiError::PreconditionFailed(
    8331            0 :             format!("Node {node_id} has no drain in progress").into(),
    8332            0 :         ))
    8333            0 :     }
    8334              : 
    8335            0 :     pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
    8336            0 :         let (ongoing_op, node_available, node_policy, total_nodes_count) = {
    8337            0 :             let locked = self.inner.read().unwrap();
    8338            0 :             let nodes = &locked.nodes;
    8339            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8340            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8341            0 :             ))?;
    8342              : 
    8343              :             (
    8344            0 :                 locked
    8345            0 :                     .ongoing_operation
    8346            0 :                     .as_ref()
    8347            0 :                     .map(|ongoing| ongoing.operation),
    8348            0 :                 node.is_available(),
    8349            0 :                 node.get_scheduling(),
    8350            0 :                 nodes.len(),
    8351              :             )
    8352              :         };
    8353              : 
    8354            0 :         if let Some(ongoing) = ongoing_op {
    8355            0 :             return Err(ApiError::PreconditionFailed(
    8356            0 :                 format!("Background operation already ongoing for node: {ongoing}").into(),
    8357            0 :             ));
    8358            0 :         }
    8359              : 
    8360            0 :         if !node_available {
    8361            0 :             return Err(ApiError::ResourceUnavailable(
    8362            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8363            0 :             ));
    8364            0 :         }
    8365              : 
    8366            0 :         if total_nodes_count <= 1 {
    8367            0 :             return Err(ApiError::PreconditionFailed(
    8368            0 :                 "No other nodes to fill from".into(),
    8369            0 :             ));
    8370            0 :         }
    8371              : 
    8372            0 :         match node_policy {
    8373              :             NodeSchedulingPolicy::Active => {
    8374            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
    8375            0 :                     .await?;
    8376              : 
    8377            0 :                 let cancel = self.cancel.child_token();
    8378            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    8379              : 
    8380            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    8381            0 :                     operation: Operation::Fill(Fill { node_id }),
    8382            0 :                     cancel: cancel.clone(),
    8383            0 :                 });
    8384              : 
    8385            0 :                 let span = tracing::info_span!(parent: None, "fill_node", %node_id);
    8386              : 
    8387            0 :                 tokio::task::spawn({
    8388            0 :                     let service = self.clone();
    8389            0 :                     let cancel = cancel.clone();
    8390            0 :                     async move {
    8391            0 :                         let _gate_guard = gate_guard;
    8392              : 
    8393            0 :                         scopeguard::defer! {
    8394              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    8395              : 
    8396              :                             if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
    8397              :                                 assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
    8398              :                             } else {
    8399              :                                 panic!("We always remove the same operation")
    8400              :                             }
    8401              :                         }
    8402              : 
    8403            0 :                         tracing::info!("Fill background operation starting");
    8404            0 :                         let res = service.fill_node(node_id, cancel).await;
    8405            0 :                         match res {
    8406              :                             Ok(()) => {
    8407            0 :                                 tracing::info!("Fill background operation completed successfully");
    8408              :                             }
    8409              :                             Err(OperationError::Cancelled) => {
    8410            0 :                                 tracing::info!("Fill background operation was cancelled");
    8411              :                             }
    8412            0 :                             Err(err) => {
    8413            0 :                                 tracing::error!("Fill background operation encountered: {err}")
    8414              :                             }
    8415              :                         }
    8416            0 :                     }
    8417            0 :                 }.instrument(span));
    8418              :             }
    8419              :             NodeSchedulingPolicy::Filling => {
    8420            0 :                 return Err(ApiError::Conflict(format!(
    8421            0 :                     "Node {node_id} has fill in progress"
    8422            0 :                 )));
    8423              :             }
    8424            0 :             policy => {
    8425            0 :                 return Err(ApiError::PreconditionFailed(
    8426            0 :                     format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
    8427            0 :                 ));
    8428              :             }
    8429              :         }
    8430              : 
    8431            0 :         Ok(())
    8432            0 :     }
    8433              : 
    8434            0 :     pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
    8435            0 :         let node_available = {
    8436            0 :             let locked = self.inner.read().unwrap();
    8437            0 :             let nodes = &locked.nodes;
    8438            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    8439            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    8440            0 :             ))?;
    8441              : 
    8442            0 :             node.is_available()
    8443              :         };
    8444              : 
    8445            0 :         if !node_available {
    8446            0 :             return Err(ApiError::ResourceUnavailable(
    8447            0 :                 format!("Node {node_id} is currently unavailable").into(),
    8448            0 :             ));
    8449            0 :         }
    8450              : 
    8451            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    8452            0 :             if let Operation::Fill(fill) = op_handler.operation {
    8453            0 :                 if fill.node_id == node_id {
    8454            0 :                     tracing::info!("Cancelling background fill operation for node {node_id}");
    8455            0 :                     op_handler.cancel.cancel();
    8456            0 :                     return Ok(());
    8457            0 :                 }
    8458            0 :             }
    8459            0 :         }
    8460              : 
    8461            0 :         Err(ApiError::PreconditionFailed(
    8462            0 :             format!("Node {node_id} has no fill in progress").into(),
    8463            0 :         ))
    8464            0 :     }
    8465              : 
    8466              :     /// Like [`Self::maybe_configured_reconcile_shard`], but uses the default reconciler
    8467              :     /// configuration
    8468            0 :     fn maybe_reconcile_shard(
    8469            0 :         &self,
    8470            0 :         shard: &mut TenantShard,
    8471            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    8472            0 :         priority: ReconcilerPriority,
    8473            0 :     ) -> Option<ReconcilerWaiter> {
    8474            0 :         self.maybe_configured_reconcile_shard(shard, nodes, ReconcilerConfig::new(priority))
    8475            0 :     }
    8476              : 
    8477              :     /// Before constructing a Reconciler, acquire semaphore units from the appropriate concurrency limit (depends on priority)
    8478            0 :     fn get_reconciler_units(
    8479            0 :         &self,
    8480            0 :         priority: ReconcilerPriority,
    8481            0 :     ) -> Result<ReconcileUnits, TryAcquireError> {
    8482            0 :         let units = match priority {
    8483            0 :             ReconcilerPriority::Normal => self.reconciler_concurrency.clone().try_acquire_owned(),
    8484              :             ReconcilerPriority::High => {
    8485            0 :                 match self
    8486            0 :                     .priority_reconciler_concurrency
    8487            0 :                     .clone()
    8488            0 :                     .try_acquire_owned()
    8489              :                 {
    8490            0 :                     Ok(u) => Ok(u),
    8491              :                     Err(TryAcquireError::NoPermits) => {
    8492              :                         // If the high priority semaphore is exhausted, then high priority tasks may steal units from
    8493              :                         // the normal priority semaphore.
    8494            0 :                         self.reconciler_concurrency.clone().try_acquire_owned()
    8495              :                     }
    8496            0 :                     Err(e) => Err(e),
    8497              :                 }
    8498              :             }
    8499              :         };
    8500              : 
    8501            0 :         units.map(ReconcileUnits::new)
    8502            0 :     }
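// Sketch: the two-tier semaphore acquisition used above, where high-priority work may fall back
// to taking permits from the normal-priority pool when its own pool is exhausted. The semaphore
// handles stand in for `reconciler_concurrency` and `priority_reconciler_concurrency`; this is a
// minimal illustration, not the crate's implementation.
use std::sync::Arc;

use tokio::sync::{OwnedSemaphorePermit, Semaphore, TryAcquireError};

fn acquire_with_priority(
    normal: &Arc<Semaphore>,
    high: &Arc<Semaphore>,
    is_high_priority: bool,
) -> Result<OwnedSemaphorePermit, TryAcquireError> {
    if !is_high_priority {
        return normal.clone().try_acquire_owned();
    }
    match high.clone().try_acquire_owned() {
        Ok(permit) => Ok(permit),
        // High-priority pool exhausted: borrow from the normal pool rather than fail outright.
        Err(TryAcquireError::NoPermits) => normal.clone().try_acquire_owned(),
        Err(e) => Err(e),
    }
}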
    8503              : 
    8504              :     /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`].
    8505            0 :     fn maybe_configured_reconcile_shard(
    8506            0 :         &self,
    8507            0 :         shard: &mut TenantShard,
    8508            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    8509            0 :         reconciler_config: ReconcilerConfig,
    8510            0 :     ) -> Option<ReconcilerWaiter> {
    8511            0 :         let reconcile_needed = shard.get_reconcile_needed(nodes);
    8512              : 
    8513            0 :         let reconcile_reason = match reconcile_needed {
    8514            0 :             ReconcileNeeded::No => return None,
    8515            0 :             ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
    8516            0 :             ReconcileNeeded::Yes(reason) => {
    8517              :                 // Fall through to try and acquire units for spawning reconciler
    8518            0 :                 reason
    8519              :             }
    8520              :         };
    8521              : 
    8522            0 :         let units = match self.get_reconciler_units(reconciler_config.priority) {
    8523            0 :             Ok(u) => u,
    8524              :             Err(_) => {
    8525            0 :                 tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
    8526            0 :                     "Concurrency limited: enqueued for reconcile later");
    8527            0 :                 if !shard.delayed_reconcile {
    8528            0 :                     match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
    8529            0 :                         Err(TrySendError::Closed(_)) => {
    8530            0 :                             // Weird mid-shutdown case?
    8531            0 :                         }
    8532              :                         Err(TrySendError::Full(_)) => {
    8533              :                             // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
    8534            0 :                             tracing::warn!(
    8535            0 :                                 "Many shards are waiting to reconcile: delayed_reconcile queue is full"
    8536              :                             );
    8537              :                         }
    8538            0 :                         Ok(()) => {
    8539            0 :                             shard.delayed_reconcile = true;
    8540            0 :                         }
    8541              :                     }
    8542            0 :                 }
    8543              : 
    8544              :                 // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
    8545              :                 // number to advance.  When this function is eventually called again and succeeds in getting units,
    8546              :                 // it will spawn a reconciler that makes this waiter complete.
    8547            0 :                 return Some(shard.future_reconcile_waiter());
    8548              :             }
    8549              :         };
    8550              : 
    8551            0 :         let Ok(gate_guard) = self.reconcilers_gate.enter() else {
    8552              :             // Gate closed: we're shutting down, drop out.
    8553            0 :             return None;
    8554              :         };
    8555              : 
    8556            0 :         shard.spawn_reconciler(
    8557            0 :             reconcile_reason,
    8558            0 :             &self.result_tx,
    8559            0 :             nodes,
    8560            0 :             &self.compute_hook,
    8561            0 :             reconciler_config,
    8562            0 :             &self.config,
    8563            0 :             &self.persistence,
    8564            0 :             units,
    8565            0 :             gate_guard,
    8566            0 :             &self.reconcilers_cancel,
    8567            0 :             self.http_client.clone(),
    8568              :         )
    8569            0 :     }
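// Sketch: the backpressure handling used above when no reconciler units are available, expressed
// against a plain bounded channel. `ShardId` is an illustrative stand-in for `TenantShardId`,
// and the real code also sets `shard.delayed_reconcile` and hands back a waiter rather than a
// bool; this only shows the try_send decision.
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError;

type ShardId = u64;

/// Returns true if the shard is enqueued or will be retried later anyway.
fn enqueue_delayed_reconcile(tx: &mpsc::Sender<ShardId>, shard: ShardId) -> bool {
    match tx.try_send(shard) {
        Ok(()) => true,
        // Queue full: safe to drop, the background reconcile loop will pick the shard up again.
        Err(TrySendError::Full(_)) => {
            tracing::warn!("delayed_reconcile queue is full");
            true
        }
        // Receiver gone: only expected during shutdown.
        Err(TrySendError::Closed(_)) => false,
    }
}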
    8570              : 
    8571              :     /// Check all tenants for pending reconciliation work, and reconcile those in need.
    8572              :     /// Additionally, reschedule tenants that require it.
    8573              :     ///
    8574              :     /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
    8575              :     /// spawned but some _would_ have been spawned if `reconciler_concurrency` units were
    8576              :     /// available.  A return value of 0 indicates that everything is fully reconciled already.
    8577            0 :     fn reconcile_all(&self) -> ReconcileAllResult {
    8578            0 :         let mut locked = self.inner.write().unwrap();
    8579            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    8580            0 :         let pageservers = nodes.clone();
    8581              : 
    8582              :         // This function is an efficient place to update lazy statistics, since we are walking
    8583              :         // all tenants.
    8584            0 :         let mut pending_reconciles = 0;
    8585            0 :         let mut keep_failing_reconciles = 0;
    8586            0 :         let mut az_violations = 0;
    8587              : 
    8588              :         // If we find any tenants to drop from memory, stash them to offload after
    8589              :         // we're done traversing the map of tenants.
    8590            0 :         let mut drop_detached_tenants = Vec::new();
    8591              : 
    8592            0 :         let mut spawned_reconciles = 0;
    8593            0 :         let mut has_delayed_reconciles = false;
    8594              : 
    8595            0 :         for shard in tenants.values_mut() {
    8596              :             // Accumulate scheduling statistics
    8597            0 :             if let (Some(attached), Some(preferred)) =
    8598            0 :                 (shard.intent.get_attached(), shard.preferred_az())
    8599              :             {
    8600            0 :                 let node_az = nodes
    8601            0 :                     .get(attached)
    8602            0 :                     .expect("Nodes exist if referenced")
    8603            0 :                     .get_availability_zone_id();
    8604            0 :                 if node_az != preferred {
    8605            0 :                     az_violations += 1;
    8606            0 :                 }
    8607            0 :             }
    8608              : 
    8609              :             // Skip checking if this shard is already enqueued for reconciliation
    8610            0 :             if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
    8611              :                 // If there is something delayed, then return a nonzero count so that
    8612              :                 // callers like reconcile_all_now do not incorrectly get the impression
    8613              :                 // that the system is in a quiescent state.
    8614            0 :                 has_delayed_reconciles = true;
    8615            0 :                 pending_reconciles += 1;
    8616            0 :                 continue;
    8617            0 :             }
    8618              : 
    8619              :             // Eventual consistency: if an earlier reconcile job failed, and the shard is still
    8620              :             // dirty, spawn another one
    8621            0 :             let consecutive_errors_count = shard.consecutive_errors_count;
    8622            0 :             if self
    8623            0 :                 .maybe_reconcile_shard(shard, &pageservers, ReconcilerPriority::Normal)
    8624            0 :                 .is_some()
    8625              :             {
    8626            0 :                 spawned_reconciles += 1;
    8627              : 
    8628              :                 // Count shards that are keep-failing. We still want to reconcile them
    8629              :                 // to avoid a situation where a shard is stuck.
    8630              :                 // But we don't want to consider them when deciding to run optimizations.
    8631            0 :                 if consecutive_errors_count >= MAX_CONSECUTIVE_RECONCILIATION_ERRORS {
    8632            0 :                     tracing::warn!(
    8633              :                         tenant_id=%shard.tenant_shard_id.tenant_id,
    8634            0 :                         shard_id=%shard.tenant_shard_id.shard_slug(),
    8635            0 :                         "Shard reconciliation is keep-failing: {} errors",
    8636              :                         consecutive_errors_count
    8637              :                     );
    8638            0 :                     keep_failing_reconciles += 1;
    8639            0 :                 }
    8640            0 :             } else if shard.delayed_reconcile {
    8641            0 :                 // Shard wanted to reconcile but for some reason couldn't.
    8642            0 :                 pending_reconciles += 1;
    8643            0 :             }
    8644              : 
    8645              :             // If this tenant is detached, try dropping it from memory. This is usually done
    8646              :             // proactively in [`Self::process_results`], but we do it here to handle the edge
    8647              :             // case where a reconcile completes while someone else is holding an op lock for the tenant.
    8648            0 :             if shard.tenant_shard_id.shard_number == ShardNumber(0)
    8649            0 :                 && shard.policy == PlacementPolicy::Detached
    8650              :             {
    8651            0 :                 if let Some(guard) = self.tenant_op_locks.try_exclusive(
    8652            0 :                     shard.tenant_shard_id.tenant_id,
    8653            0 :                     TenantOperations::DropDetached,
    8654            0 :                 ) {
    8655            0 :                     drop_detached_tenants.push((shard.tenant_shard_id.tenant_id, guard));
    8656            0 :                 }
    8657            0 :             }
    8658              :         }
    8659              : 
    8660              :         // Some metrics are calculated from SchedulerNode state, update these periodically
    8661            0 :         scheduler.update_metrics();
    8662              : 
    8663              :         // Process any deferred tenant drops
    8664            0 :         for (tenant_id, guard) in drop_detached_tenants {
    8665            0 :             self.maybe_drop_tenant(tenant_id, &mut locked, &guard);
    8666            0 :         }
    8667              : 
    8668            0 :         metrics::METRICS_REGISTRY
    8669            0 :             .metrics_group
    8670            0 :             .storage_controller_schedule_az_violation
    8671            0 :             .set(az_violations as i64);
    8672              : 
    8673            0 :         metrics::METRICS_REGISTRY
    8674            0 :             .metrics_group
    8675            0 :             .storage_controller_pending_reconciles
    8676            0 :             .set(pending_reconciles as i64);
    8677              : 
    8678            0 :         metrics::METRICS_REGISTRY
    8679            0 :             .metrics_group
    8680            0 :             .storage_controller_keep_failing_reconciles
    8681            0 :             .set(keep_failing_reconciles as i64);
    8682              : 
    8683            0 :         ReconcileAllResult::new(
    8684            0 :             spawned_reconciles,
    8685            0 :             keep_failing_reconciles,
    8686            0 :             has_delayed_reconciles,
    8687              :         )
    8688            0 :     }
    8689              : 
    8690              :     /// `optimize` in this context means identifying shards which have valid scheduled locations, but
    8691              :     /// could be scheduled somewhere better:
    8692              :     /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
    8693              :     ///    * e.g. after a node fails then recovers, to move some work back to it
    8694              :     /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
    8695              :     ///    * e.g. after a shard split, the initial attached locations will all be on the node where
    8696              :     ///      we did the split, but are probably better placed elsewhere.
    8697              :     /// - Creating new secondary locations if it improves the spreading of a sharded tenant
    8698              :     ///    * e.g. after a shard split, some locations will be on the same node (where the split
    8699              :     ///      happened), and will probably be better placed elsewhere.
    8700              :     ///
    8701              :     /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
    8702              :     /// the time of scheduling, this function looks for cases where a better-scoring location is available
    8703              :     /// according to those same soft constraints.
    8704            0 :     async fn optimize_all(&self) -> usize {
    8705              :         // Limit on how many shards' optimizations each call to this function will execute.  Combined
    8706              :         // with the frequency of background calls, this acts as an implicit rate limit that runs a small
    8707              :         // trickle of optimizations in the background, rather than executing a large number in parallel
    8708              :         // when a change occurs.
    8709              :         const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 16;
    8710              : 
    8711              :         // Synchronous prepare: scan shards for possible scheduling optimizations
    8712            0 :         let candidate_work = self.optimize_all_plan();
    8713            0 :         let candidate_work_len = candidate_work.len();
    8714              : 
    8715              :         // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply the planned optimizations
    8716            0 :         let validated_work = self.optimize_all_validate(candidate_work).await;
    8717              : 
    8718            0 :         let was_work_filtered = validated_work.len() != candidate_work_len;
    8719              : 
    8720              :         // Synchronous apply: update the shards' intent states according to validated optimisations
    8721            0 :         let mut reconciles_spawned = 0;
    8722            0 :         let mut optimizations_applied = 0;
    8723            0 :         let mut locked = self.inner.write().unwrap();
    8724            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    8725            0 :         for (tenant_shard_id, optimization) in validated_work {
    8726            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    8727              :                 // Shard was dropped between planning and execution;
    8728            0 :                 continue;
    8729              :             };
    8730            0 :             tracing::info!(tenant_shard_id=%tenant_shard_id, "Applying optimization: {optimization:?}");
    8731            0 :             if shard.apply_optimization(scheduler, optimization) {
    8732            0 :                 optimizations_applied += 1;
    8733            0 :                 if self
    8734            0 :                     .maybe_reconcile_shard(shard, nodes, ReconcilerPriority::Normal)
    8735            0 :                     .is_some()
    8736            0 :                 {
    8737            0 :                     reconciles_spawned += 1;
    8738            0 :                 }
    8739            0 :             }
    8740              : 
    8741            0 :             if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
    8742            0 :                 break;
    8743            0 :             }
    8744              :         }
    8745              : 
    8746            0 :         if was_work_filtered {
    8747            0 :             // If we filtered any work out during validation, ensure we return a nonzero value to indicate
    8748            0 :             // to callers that the system is not in a truly quiet state; it will do some work as soon
    8749            0 :             // as these validations start passing.
    8750            0 :             reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
    8751            0 :         }
    8752              : 
    8753            0 :         reconciles_spawned
    8754            0 :     }
    8755              : 
    8756            0 :     fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
    8757              :         // How many candidate optimizations we will generate before evaluating them for readiness: setting
    8758              :         // this higher than the execution limit gives us a chance to execute some work even if the first
    8759              :         // few optimizations we find are not ready.
    8760              :         const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 64;
    8761              : 
    8762            0 :         let mut work = Vec::new();
    8763            0 :         let mut locked = self.inner.write().unwrap();
    8764            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    8765              : 
    8766              :         // We are going to plan a bunch of optimisations before applying any of them, so the
    8767              :         // utilisation stats on nodes will be effectively stale for all but the first optimisation we
    8768              :         // generate.  To avoid this causing unstable migrations/flapping, it's important that the
    8769              :         // code in TenantShard for finding optimisations uses [`NodeAttachmentSchedulingScore::disregard_utilization`]
    8770              :         // to ignore the utilisation component of the score.
    8771              : 
    8772            0 :         for (_tenant_id, schedule_context, shards) in
    8773            0 :             TenantShardExclusiveIterator::new(tenants, ScheduleMode::Speculative)
    8774              :         {
    8775            0 :             for shard in shards {
    8776            0 :                 if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
    8777            0 :                     break;
    8778            0 :                 }
    8779            0 :                 match shard.get_scheduling_policy() {
    8780            0 :                     ShardSchedulingPolicy::Active => {
    8781            0 :                         // Ok to do optimization
    8782            0 :                     }
    8783            0 :                     ShardSchedulingPolicy::Essential if shard.get_preferred_node().is_some() => {
    8784            0 :                         // Ok to do optimization: we are executing a graceful migration that
    8785            0 :                         // has set preferred_node
    8786            0 :                     }
    8787              :                     ShardSchedulingPolicy::Essential
    8788              :                     | ShardSchedulingPolicy::Pause
    8789              :                     | ShardSchedulingPolicy::Stop => {
    8790              :                         // Policy prevents optimizing this shard.
    8791            0 :                         continue;
    8792              :                     }
    8793              :                 }
    8794              : 
    8795            0 :                 if !matches!(shard.splitting, SplitState::Idle)
    8796            0 :                     || matches!(shard.policy, PlacementPolicy::Detached)
    8797            0 :                     || shard.reconciler.is_some()
    8798              :                 {
    8799              :                     // Do not start any optimizations while another change to the tenant is ongoing: this
    8800              :                     // is not necessary for correctness, but simplifies operations and implicitly throttles
    8801              :                     // optimization changes to happen in a "trickle" over time.
    8802            0 :                     continue;
    8803            0 :                 }
    8804              : 
    8805              :                 // Fast path: we may quickly identify shards that don't have any possible optimisations
    8806            0 :                 if !shard.maybe_optimizable(scheduler, &schedule_context) {
    8807            0 :                     if cfg!(feature = "testing") {
    8808              :                         // Check that maybe_optimizable doesn't disagree with the actual optimization functions.
    8809              :                         // Only do this in testing builds: it is not a correctness-critical check, so we shouldn't
    8810              :                         // panic in prod if we hit this, or spend cycles on it in prod.
    8811            0 :                         assert!(
    8812            0 :                             shard
    8813            0 :                                 .optimize_attachment(scheduler, &schedule_context)
    8814            0 :                                 .is_none()
    8815              :                         );
    8816            0 :                         assert!(
    8817            0 :                             shard
    8818            0 :                                 .optimize_secondary(scheduler, &schedule_context)
    8819            0 :                                 .is_none()
    8820              :                         );
    8821            0 :                     }
    8822            0 :                     continue;
    8823            0 :                 }
    8824              : 
    8825            0 :                 if let Some(optimization) =
    8826              :                     // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
    8827              :                     // its primary location based on soft constraints, cut it over.
    8828            0 :                     shard.optimize_attachment(scheduler, &schedule_context)
    8829              :                 {
    8830            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for attachment: {optimization:?}");
    8831            0 :                     work.push((shard.tenant_shard_id, optimization));
    8832            0 :                     break;
    8833            0 :                 } else if let Some(optimization) =
    8834              :                     // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
    8835              :                     // better placed on another node, based on ScheduleContext, then adjust it.  This
    8836              :                     // covers cases like after a shard split, where we might have too many shards
    8837              :                     // in the same tenant with secondary locations on the node where they originally split.
    8838            0 :                     shard.optimize_secondary(scheduler, &schedule_context)
    8839              :                 {
    8840            0 :                     tracing::info!(tenant_shard_id=%shard.tenant_shard_id, "Identified optimization for secondary: {optimization:?}");
    8841            0 :                     work.push((shard.tenant_shard_id, optimization));
    8842            0 :                     break;
    8843            0 :                 }
    8844              :             }
    8845              :         }
    8846              : 
    8847            0 :         work
    8848            0 :     }
    8849              : 
    8850            0 :     async fn optimize_all_validate(
    8851            0 :         &self,
    8852            0 :         candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
    8853            0 :     ) -> Vec<(TenantShardId, ScheduleOptimization)> {
    8854              :         // Take a clone of the node map to use outside the lock in async validation phase
    8855            0 :         let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
    8856              : 
    8857            0 :         let mut want_secondary_status = Vec::new();
    8858              : 
    8859              :         // Validate our plans: this is an async phase where we may do I/O to pageservers to
    8860              :         // check that the state of locations is acceptable to run the optimization, such as
    8861              :         // checking that a secondary location is sufficiently warmed-up to cleanly cut over
    8862              :         // in a live migration.
    8863            0 :         let mut validated_work = Vec::new();
    8864            0 :         for (tenant_shard_id, optimization) in candidate_work {
    8865            0 :             match optimization.action {
    8866              :                 ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    8867              :                     old_attached_node_id: _,
    8868            0 :                     new_attached_node_id,
    8869              :                 }) => {
    8870            0 :                     match validation_nodes.get(&new_attached_node_id) {
    8871            0 :                         None => {
    8872            0 :                             // Node was dropped between planning and validation
    8873            0 :                         }
    8874            0 :                         Some(node) => {
    8875            0 :                             if !node.is_available() {
    8876            0 :                                 tracing::info!(
    8877            0 :                                     "Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable"
    8878              :                                 );
    8879            0 :                             } else {
    8880            0 :                                 // Accumulate optimizations that require fetching secondary status, so that we can execute these
    8881            0 :                                 // remote API requests concurrently.
    8882            0 :                                 want_secondary_status.push((
    8883            0 :                                     tenant_shard_id,
    8884            0 :                                     node.clone(),
    8885            0 :                                     optimization,
    8886            0 :                                 ));
    8887            0 :                             }
    8888              :                         }
    8889              :                     }
    8890              :                 }
    8891              :                 ScheduleOptimizationAction::ReplaceSecondary(_)
    8892              :                 | ScheduleOptimizationAction::CreateSecondary(_)
    8893              :                 | ScheduleOptimizationAction::RemoveSecondary(_) => {
    8894              :                     // No extra checks needed to manage secondaries: this does not interrupt client access
    8895            0 :                     validated_work.push((tenant_shard_id, optimization))
    8896              :                 }
    8897              :             };
    8898              :         }
    8899              : 
    8900              :         // Call into the pageserver API to find out whether the destination secondary location is warm enough for a reasonably smooth migration.
    8901              :         // We do this to avoid spawning a Reconciler that would have to wait minutes/hours for the destination to warm up,
    8902              :         // holding a precious reconcile semaphore unit the whole time it waits.
    8903            0 :         let results = self
    8904            0 :             .tenant_for_shards_api(
    8905            0 :                 want_secondary_status
    8906            0 :                     .iter()
    8907            0 :                     .map(|i| (i.0, i.1.clone()))
    8908            0 :                     .collect(),
    8909            0 :                 |tenant_shard_id, client| async move {
    8910            0 :                     client.tenant_secondary_status(tenant_shard_id).await
    8911            0 :                 },
    8912              :                 1,
    8913              :                 1,
    8914              :                 SHORT_RECONCILE_TIMEOUT,
    8915            0 :                 &self.cancel,
    8916              :             )
    8917            0 :             .await;
    8918              : 
    8919            0 :         for ((tenant_shard_id, node, optimization), (_, secondary_status)) in
    8920            0 :             want_secondary_status.into_iter().zip(results.into_iter())
    8921              :         {
    8922            0 :             match secondary_status {
    8923            0 :                 Err(e) => {
    8924            0 :                     tracing::info!(
    8925            0 :                         "Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}"
    8926              :                     );
    8927              :                 }
    8928            0 :                 Ok(progress) => {
    8929              :                     // We require secondary locations to have less than 10GiB of downloads pending before we will use
    8930              :                     // them in an optimization
    8931              :                     const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
    8932              : 
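                      :                     // The check below skips the migration in three cases (`&&` binds tighter than
                      :                     // `||`): the secondary has no heatmap at all; the total heatmap size is below the
                      :                     // threshold but the download has not completed; or more than
                      :                     // DOWNLOAD_FRESHNESS_THRESHOLD bytes remain to be downloaded.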
    8933            0 :                     if progress.heatmap_mtime.is_none()
    8934            0 :                         || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
    8935            0 :                             && progress.bytes_downloaded != progress.bytes_total
    8936            0 :                         || progress.bytes_total - progress.bytes_downloaded
    8937            0 :                             > DOWNLOAD_FRESHNESS_THRESHOLD
    8938              :                     {
    8939            0 :                         tracing::info!(
    8940            0 :                             "Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}"
    8941              :                         );
    8942              : 
    8943            0 :                         if progress.heatmap_mtime.is_none() {
    8944              :                             // No heatmap might mean the attached location has never uploaded one, or that
    8945              :                             // the secondary download hasn't happened yet.  This is relatively unusual in the field,
    8946              :                             // but fairly common in tests.
    8947            0 :                             self.kick_secondary_download(tenant_shard_id).await;
    8948            0 :                         }
    8949              :                     } else {
    8950              :                         // Location looks ready: proceed
    8951            0 :                         tracing::info!(
    8952            0 :                             "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
    8953              :                         );
    8954            0 :                         validated_work.push((tenant_shard_id, optimization))
    8955              :                     }
    8956              :                 }
    8957              :             }
    8958              :         }
    8959              : 
    8960            0 :         validated_work
    8961            0 :     }
    8962              : 
    8963              :     /// Some aspects of scheduling optimisation wait for secondary locations to be warm.  This
    8964              :     /// happens on multi-minute timescales in the field, which is fine because optimisation is meant
    8965              :     /// to be a lazy background thing. However, when testing, it is not practical to wait around, so
    8966              :     /// we have this helper to move things along faster.
    8967            0 :     async fn kick_secondary_download(&self, tenant_shard_id: TenantShardId) {
    8968            0 :         if !self.config.kick_secondary_downloads {
    8969              :             // No-op if the kick_secondary_downloads functionality is not configured
    8970            0 :             return;
    8971            0 :         }
    8972              : 
    8973            0 :         let (attached_node, secondaries) = {
    8974            0 :             let locked = self.inner.read().unwrap();
    8975            0 :             let Some(shard) = locked.tenants.get(&tenant_shard_id) else {
    8976            0 :                 tracing::warn!(
    8977            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: not found"
    8978              :                 );
    8979            0 :                 return;
    8980              :             };
    8981              : 
    8982            0 :             let Some(attached) = shard.intent.get_attached() else {
    8983            0 :                 tracing::warn!(
    8984            0 :                     "Skipping kick of secondary download for {tenant_shard_id}: no attached"
    8985              :                 );
    8986            0 :                 return;
    8987              :             };
    8988              : 
    8989            0 :             let secondaries = shard
    8990            0 :                 .intent
    8991            0 :                 .get_secondary()
    8992            0 :                 .iter()
    8993            0 :                 .map(|n| locked.nodes.get(n).unwrap().clone())
    8994            0 :                 .collect::<Vec<_>>();
    8995              : 
    8996            0 :             (locked.nodes.get(attached).unwrap().clone(), secondaries)
    8997              :         };
    8998              : 
    8999              :         // Make remote API calls to upload + download heatmaps: we ignore errors because this is just
    9000              :         // a 'kick' to let scheduling optimisation run more promptly.
    9001            0 :         match attached_node
    9002            0 :             .with_client_retries(
    9003            0 :                 |client| async move { client.tenant_heatmap_upload(tenant_shard_id).await },
    9004            0 :                 &self.http_client,
    9005            0 :                 &self.config.pageserver_jwt_token,
    9006              :                 3,
    9007              :                 10,
    9008              :                 SHORT_RECONCILE_TIMEOUT,
    9009            0 :                 &self.cancel,
    9010              :             )
    9011            0 :             .await
    9012              :         {
    9013            0 :             Some(Err(e)) => {
    9014            0 :                 tracing::info!(
    9015            0 :                     "Failed to upload heatmap from {attached_node} for {tenant_shard_id}: {e}"
    9016              :                 );
    9017              :             }
    9018              :             None => {
    9019            0 :                 tracing::info!(
    9020            0 :                     "Cancelled while uploading heatmap from {attached_node} for {tenant_shard_id}"
    9021              :                 );
    9022              :             }
    9023              :             Some(Ok(_)) => {
    9024            0 :                 tracing::info!(
    9025            0 :                     "Successfully uploaded heatmap from {attached_node} for {tenant_shard_id}"
    9026              :                 );
    9027              :             }
    9028              :         }
    9029              : 
    9030            0 :         for secondary_node in secondaries {
    9031            0 :             match secondary_node
    9032            0 :                 .with_client_retries(
    9033            0 :                     |client| async move {
    9034            0 :                         client
    9035            0 :                             .tenant_secondary_download(
    9036            0 :                                 tenant_shard_id,
    9037            0 :                                 Some(Duration::from_secs(1)),
    9038            0 :                             )
    9039            0 :                             .await
    9040            0 :                     },
    9041            0 :                     &self.http_client,
    9042            0 :                     &self.config.pageserver_jwt_token,
    9043              :                     3,
    9044              :                     10,
    9045              :                     SHORT_RECONCILE_TIMEOUT,
    9046            0 :                     &self.cancel,
    9047              :                 )
    9048            0 :                 .await
    9049              :             {
    9050            0 :                 Some(Err(e)) => {
    9051            0 :                     tracing::info!(
    9052            0 :                         "Failed to download heatmap from {secondary_node} for {tenant_shard_id}: {e}"
    9053              :                     );
    9054              :                 }
    9055              :                 None => {
    9056            0 :                     tracing::info!(
    9057            0 :                         "Cancelled while downloading heatmap from {secondary_node} for {tenant_shard_id}"
    9058              :                     );
    9059              :                 }
    9060            0 :                 Some(Ok(progress)) => {
    9061            0 :                     tracing::info!(
    9062            0 :                         "Successfully downloaded heatmap from {secondary_node} for {tenant_shard_id}: {progress:?}"
    9063              :                     );
    9064              :                 }
    9065              :             }
    9066              :         }
    9067            0 :     }
    9068              : 
    9069              :     /// Asynchronously split a tenant that's eligible for automatic splits. At most one tenant will
    9070              :     /// be split per call.
    9071              :     ///
    9072              :     /// Two sets of criteria are used: initial splits and size-based splits (in that order).
    9073              :     /// Initial splits are used to eagerly split unsharded tenants that may be performing initial
    9074              :     /// ingestion, since sharded tenants have significantly better ingestion throughput. Size-based
    9075              :     /// splits are used to bound the maximum shard size and balance out load.
    9076              :     ///
    9077              :     /// Splits are based on max_logical_size, i.e. the logical size of the largest timeline in a
    9078              :     /// tenant. We use this instead of the total logical size because branches will duplicate
    9079              :     /// logical size without actually using more storage. We could also use visible physical size,
    9080              :     /// but this might overestimate tenants that frequently churn branches.
    9081              :     ///
    9082              :     /// Initial splits (initial_split_threshold):
    9083              :     /// * Applies to tenants with 1 shard.
    9084              :     /// * The largest timeline (max_logical_size) exceeds initial_split_threshold.
    9085              :     /// * Splits into initial_split_shards.
    9086              :     ///
    9087              :     /// Size-based splits (split_threshold):
    9088              :     /// * Applies to all tenants.
    9089              :     /// * The largest timeline (max_logical_size) divided by shard count exceeds split_threshold.
    9090              :     /// * Splits such that max_logical_size / shard_count <= split_threshold, in powers of 2.
    9091              :     ///
    9092              :     /// Tenant shards are ordered by descending max_logical_size, first initial split candidates
    9093              :     /// then size-based split candidates. The first matching candidate is split.
    9094              :     ///
    9095              :     /// The shard count is clamped to max_split_shards. If a candidate is eligible for both initial
    9096              :     /// and size-based splits, the largest shard count will be used.
    9097              :     ///
    9098              :     /// An unsharded tenant will get DEFAULT_STRIPE_SIZE, regardless of what its ShardIdentity says.
    9099              :     /// A sharded tenant will retain its stripe size, as splits do not allow changing it.
    9100              :     ///
    9101              :     /// TODO: consider spawning multiple splits in parallel: this is only called once every 20
    9102              :     /// seconds, so a large backlog can take a long time, and if a tenant fails to split it will
    9103              :     /// block all other splits.
    9104            0 :     async fn autosplit_tenants(self: &Arc<Self>) {
    9105              :         // If max_split_shards is set to 0 or 1, we can't split.
    9106            0 :         let max_split_shards = self.config.max_split_shards;
    9107            0 :         if max_split_shards <= 1 {
    9108            0 :             return;
    9109            0 :         }
    9110              : 
    9111              :         // If initial_split_shards is set to 0 or 1, disable initial splits.
    9112            0 :         let mut initial_split_threshold = self.config.initial_split_threshold.unwrap_or(0);
    9113            0 :         let initial_split_shards = self.config.initial_split_shards;
    9114            0 :         if initial_split_shards <= 1 {
    9115            0 :             initial_split_threshold = 0;
    9116            0 :         }
    9117              : 
    9118              :         // If no split_threshold nor initial_split_threshold, disable autosplits.
    9119            0 :         let split_threshold = self.config.split_threshold.unwrap_or(0);
    9120            0 :         if split_threshold == 0 && initial_split_threshold == 0 {
    9121            0 :             return;
    9122            0 :         }
    9123              : 
    9124              :         // Fetch split candidates in prioritized order.
    9125              :         //
    9126              :         // If initial splits are enabled, fetch eligible tenants first. We prioritize initial splits
    9127              :         // over size-based splits, since these are often performing initial ingestion and rely on
    9128              :         // splits to improve ingest throughput.
    9129            0 :         let mut candidates = Vec::new();
    9130              : 
    9131            0 :         if initial_split_threshold > 0 {
    9132              :             // Initial splits: fetch tenants with 1 shard where the logical size of the largest
    9133              :             // timeline exceeds the initial split threshold.
    9134            0 :             let initial_candidates = self
    9135            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    9136            0 :                     order_by: TenantSorting::MaxLogicalSize,
    9137            0 :                     limit: 10,
    9138            0 :                     where_shards_lt: Some(ShardCount(2)),
    9139            0 :                     where_gt: Some(initial_split_threshold),
    9140            0 :                 })
    9141            0 :                 .await;
    9142            0 :             candidates.extend(initial_candidates);
    9143            0 :         }
    9144              : 
    9145            0 :         if split_threshold > 0 {
    9146              :             // Size-based splits: fetch tenants where the logical size of the largest timeline
    9147              :             // divided by shard count exceeds the split threshold.
    9148              :             //
    9149              :             // max_logical_size is only tracked on shard 0, and contains the total logical size
    9150              :             // across all shards. We have to order and filter by MaxLogicalSizePerShard, i.e.
    9151              :             // max_logical_size / shard_count, such that we only receive tenants that are actually
    9152              :             // eligible for splits. But we still use max_logical_size for later split calculations.
    9153            0 :             let size_candidates = self
    9154            0 :                 .get_top_tenant_shards(&TopTenantShardsRequest {
    9155            0 :                     order_by: TenantSorting::MaxLogicalSizePerShard,
    9156            0 :                     limit: 10,
    9157            0 :                     where_shards_lt: Some(ShardCount(max_split_shards)),
    9158            0 :                     where_gt: Some(split_threshold),
    9159            0 :                 })
    9160            0 :                 .await;
    9161              :             #[cfg(feature = "testing")]
    9162            0 :             assert!(
    9163            0 :                 size_candidates.iter().all(|c| c.id.is_shard_zero()),
    9164            0 :                 "MaxLogicalSizePerShard returned non-zero shard: {size_candidates:?}",
    9165              :             );
    9166            0 :             candidates.extend(size_candidates);
    9167            0 :         }
    9168              : 
    9169              :         // Filter out tenants in a scheduling mode that prohibits splitting,
    9170              :         // and tenants with an ongoing import.
    9171              :         //
    9172              :         // Note that the import check here is opportunistic. An import might start
    9173              :         // after the check but before we actually update [`TenantShard::splitting`].
    9174              :         // [`Self::tenant_shard_split`] checks the database whilst holding the exclusive
    9175              :         // tenant lock. Imports might take a long time, so the check here allows us
    9176              :         // to split something else instead of trying the same shard over and over.
    9177              :         {
    9178            0 :             let state = self.inner.read().unwrap();
    9179            0 :             candidates.retain(|i| {
    9180            0 :                 let shard = state.tenants.get(&i.id);
    9181            0 :                 match shard {
    9182            0 :                     Some(t) => {
    9183            0 :                         t.get_scheduling_policy() == ShardSchedulingPolicy::Active
    9184            0 :                             && t.importing == TimelineImportState::Idle
    9185              :                     }
    9186            0 :                     None => false,
    9187              :                 }
    9188            0 :             });
    9189              :         }
    9190              : 
    9191              :         // Pick the first candidate to split. This will generally always be the first one in
    9192              :         // candidates, but we defensively skip candidates that end up not actually splitting.
    9193            0 :         let Some((candidate, new_shard_count)) = candidates
    9194            0 :             .into_iter()
    9195            0 :             .filter_map(|candidate| {
    9196            0 :                 let new_shard_count = Self::compute_split_shards(ShardSplitInputs {
    9197            0 :                     shard_count: candidate.id.shard_count,
    9198            0 :                     max_logical_size: candidate.max_logical_size,
    9199            0 :                     split_threshold,
    9200            0 :                     max_split_shards,
    9201            0 :                     initial_split_threshold,
    9202            0 :                     initial_split_shards,
    9203            0 :                 });
    9204            0 :                 new_shard_count.map(|shards| (candidate, shards.count()))
    9205            0 :             })
    9206            0 :             .next()
    9207              :         else {
    9208            0 :             debug!("no split-eligible tenants found");
    9209            0 :             return;
    9210              :         };
    9211              : 
    9212              :         // Retain the stripe size of sharded tenants, as splits don't allow changing it. Otherwise,
    9213              :         // use DEFAULT_STRIPE_SIZE for unsharded tenants -- their stripe size doesn't really matter,
    9214              :         // and if we change the default stripe size we want to use the new default rather than an
    9215              :         // old, persisted stripe size.
    9216            0 :         let new_stripe_size = match candidate.id.shard_count.count() {
    9217            0 :             0 => panic!("invalid shard count 0"),
    9218            0 :             1 => Some(DEFAULT_STRIPE_SIZE),
    9219            0 :             2.. => None,
    9220              :         };
    9221              : 
    9222              :         // We spawn a task to run this, so it's exactly like some external API client requesting
    9223              :         // it.  We don't want to block the background reconcile loop on this.
    9224            0 :         let old_shard_count = candidate.id.shard_count.count();
    9225            0 :         info!(
    9226            0 :             "auto-splitting tenant {old_shard_count} → {new_shard_count} shards, \
    9227            0 :                 current size {candidate:?} (split_threshold={split_threshold} \
    9228            0 :                 initial_split_threshold={initial_split_threshold})"
    9229              :         );
    9230              : 
    9231            0 :         let this = self.clone();
    9232            0 :         tokio::spawn(
    9233            0 :             async move {
    9234            0 :                 match this
    9235            0 :                     .tenant_shard_split(
    9236            0 :                         candidate.id.tenant_id,
    9237            0 :                         TenantShardSplitRequest {
    9238            0 :                             new_shard_count,
    9239            0 :                             new_stripe_size,
    9240            0 :                         },
    9241            0 :                     )
    9242            0 :                     .await
    9243              :                 {
    9244              :                     Ok(_) => {
    9245            0 :                         info!("successful auto-split {old_shard_count} → {new_shard_count} shards")
    9246              :                     }
    9247            0 :                     Err(err) => error!("auto-split failed: {err}"),
    9248              :                 }
    9249            0 :             }
    9250            0 :             .instrument(info_span!("auto_split", tenant_id=%candidate.id.tenant_id)),
    9251              :         );
    9252            0 :     }
    9253              : 
    9254              :     /// Returns the number of shards to split a tenant into, or None if the tenant shouldn't split,
    9255              :     /// based on the total logical size of the largest timeline summed across all shards. Uses the
    9256              :     /// larger of size-based and initial splits, clamped to max_split_shards.
    9257              :     ///
    9258              :     /// NB: the thresholds are exclusive, since TopTenantShardsRequest uses where_gt.
    9259           25 :     fn compute_split_shards(inputs: ShardSplitInputs) -> Option<ShardCount> {
    9260              :         let ShardSplitInputs {
    9261           25 :             shard_count,
    9262           25 :             max_logical_size,
    9263           25 :             split_threshold,
    9264           25 :             max_split_shards,
    9265           25 :             initial_split_threshold,
    9266           25 :             initial_split_shards,
    9267           25 :         } = inputs;
    9268              : 
    9269           25 :         let mut new_shard_count: u8 = shard_count.count();
    9270              : 
    9271              :         // Size-based splits. Ensures max_logical_size / new_shard_count <= split_threshold, using
    9272              :         // power-of-two shard counts.
    9273              :         //
    9274              :         // If the current shard count is not a power of two, and does not exceed split_threshold,
    9275              :         // then we leave it alone rather than forcing a power-of-two split.
    9276           25 :         if split_threshold > 0
    9277           18 :             && max_logical_size.div_ceil(split_threshold) > shard_count.count() as u64
    9278           12 :         {
    9279           12 :             new_shard_count = max_logical_size
    9280           12 :                 .div_ceil(split_threshold)
    9281           12 :                 .checked_next_power_of_two()
    9282           12 :                 .unwrap_or(u8::MAX as u64)
    9283           12 :                 .try_into()
    9284           12 :                 .unwrap_or(u8::MAX);
    9285           13 :         }
    9286              : 
    9287              :         // Initial splits. Use the larger of size-based and initial split shard counts. This only
    9288              :         // applies to unsharded tenants, i.e. changes to initial_split_threshold or
    9289              :         // initial_split_shards are not retroactive for sharded tenants.
    9290           25 :         if initial_split_threshold > 0
    9291           14 :             && shard_count.count() <= 1
    9292           11 :             && max_logical_size > initial_split_threshold
    9293            8 :         {
    9294            8 :             new_shard_count = new_shard_count.max(initial_split_shards);
    9295           17 :         }
    9296              : 
    9297              :         // Clamp to max shards.
    9298           25 :         new_shard_count = new_shard_count.min(max_split_shards);
    9299              : 
    9300              :         // Don't split if we're not increasing the shard count.
    9301           25 :         if new_shard_count <= shard_count.count() {
    9302           10 :             return None;
    9303           15 :         }
    9304              : 
    9305           15 :         Some(ShardCount(new_shard_count))
    9306           25 :     }
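                      : 
                      :     // A worked example of the arithmetic above, with illustrative values only (not taken from
                      :     // any real configuration): a 500 GiB largest timeline, a 64 GiB split threshold and two
                      :     // existing shards give ceil(500 / 64) = 8, which is already a power of two, so the tenant
                      :     // is split 2 -> 8 (assuming max_split_shards >= 8 and initial splits disabled):
                      :     //
                      :     //     let new_count = Self::compute_split_shards(ShardSplitInputs {
                      :     //         shard_count: ShardCount(2),
                      :     //         max_logical_size: 500 * 1024 * 1024 * 1024,
                      :     //         split_threshold: 64 * 1024 * 1024 * 1024,
                      :     //         max_split_shards: 16,
                      :     //         initial_split_threshold: 0,
                      :     //         initial_split_shards: 0,
                      :     //     });
                      :     //     assert_eq!(new_count, Some(ShardCount(8)));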
    9307              : 
    9308              :     /// Fetches the top tenant shards from every available node, in descending order of
    9309              :     /// max logical size. Offline nodes are skipped, and any errors from available nodes
    9310              :     /// will be logged and ignored.
    9311            0 :     async fn get_top_tenant_shards(
    9312            0 :         &self,
    9313            0 :         request: &TopTenantShardsRequest,
    9314            0 :     ) -> Vec<TopTenantShardItem> {
    9315            0 :         let nodes = self
    9316            0 :             .inner
    9317            0 :             .read()
    9318            0 :             .unwrap()
    9319            0 :             .nodes
    9320            0 :             .values()
    9321            0 :             .filter(|node| node.is_available())
    9322            0 :             .cloned()
    9323            0 :             .collect_vec();
    9324              : 
    9325            0 :         let mut futures = FuturesUnordered::new();
    9326            0 :         for node in nodes {
    9327            0 :             futures.push(async move {
    9328            0 :                 node.with_client_retries(
    9329            0 :                     |client| async move { client.top_tenant_shards(request.clone()).await },
    9330            0 :                     &self.http_client,
    9331            0 :                     &self.config.pageserver_jwt_token,
    9332              :                     3,
    9333              :                     3,
    9334            0 :                     Duration::from_secs(5),
    9335            0 :                     &self.cancel,
    9336              :                 )
    9337            0 :                 .await
    9338            0 :             });
    9339              :         }
    9340              : 
    9341            0 :         let mut top = Vec::new();
    9342            0 :         while let Some(output) = futures.next().await {
    9343            0 :             match output {
    9344            0 :                 Some(Ok(response)) => top.extend(response.shards),
    9345            0 :                 Some(Err(mgmt_api::Error::Cancelled)) => {}
    9346            0 :                 Some(Err(err)) => warn!("failed to fetch top tenants: {err}"),
    9347            0 :                 None => {} // node is shutting down
    9348              :             }
    9349              :         }
    9350              : 
    9351            0 :         top.sort_by_key(|i| i.max_logical_size);
    9352            0 :         top.reverse();
    9353            0 :         top
    9354            0 :     }
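                      : 
                      :     // Illustrative sketch only (hypothetical threshold): ask every available node for its ten
                      :     // largest tenant shards above 64 GiB, then take the global largest from the merged result,
                      :     // which is sorted in descending order of max_logical_size:
                      :     //
                      :     //     let top = self
                      :     //         .get_top_tenant_shards(&TopTenantShardsRequest {
                      :     //             order_by: TenantSorting::MaxLogicalSize,
                      :     //             limit: 10,
                      :     //             where_shards_lt: None,
                      :     //             where_gt: Some(64 * 1024 * 1024 * 1024),
                      :     //         })
                      :     //         .await;
                      :     //     let largest = top.first();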
    9355              : 
    9356              :     /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
    9357              :     /// also wait for any generated Reconcilers to complete.  Calling this until it returns zero should
    9358              :     /// put the system into a quiescent state where future background reconciliations won't do anything.
    9359            0 :     pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
    9360            0 :         let reconcile_all_result = self.reconcile_all();
    9361            0 :         let mut spawned_reconciles = reconcile_all_result.spawned_reconciles;
    9362            0 :         if reconcile_all_result.can_run_optimizations() {
    9363              :             // Only optimize when we are otherwise idle
    9364            0 :             let optimization_reconciles = self.optimize_all().await;
    9365            0 :             spawned_reconciles += optimization_reconciles;
    9366            0 :         }
    9367              : 
    9368            0 :         let waiters = {
    9369            0 :             let mut waiters = Vec::new();
    9370            0 :             let locked = self.inner.read().unwrap();
    9371            0 :             for (_tenant_shard_id, shard) in locked.tenants.iter() {
    9372            0 :                 if let Some(waiter) = shard.get_waiter() {
    9373            0 :                     waiters.push(waiter);
    9374            0 :                 }
    9375              :             }
    9376            0 :             waiters
    9377              :         };
    9378              : 
    9379            0 :         let waiter_count = waiters.len();
    9380            0 :         match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    9381            0 :             Ok(()) => {}
    9382            0 :             Err(e) => {
    9383            0 :                 if let ReconcileWaitError::Failed(_, reconcile_error) = &e {
    9384            0 :                     match **reconcile_error {
    9385              :                         ReconcileError::Cancel
    9386            0 :                         | ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    9387            0 :                             // Ignore reconciler cancel errors: this reconciler might have shut down
    9388            0 :                             // because some other change superseded it.  We will return a nonzero number,
    9389            0 :                             // so the caller knows they might have to call again to quiesce the system.
    9390            0 :                         }
    9391              :                         _ => {
    9392            0 :                             return Err(e);
    9393              :                         }
    9394              :                     }
    9395              :                 } else {
    9396            0 :                     return Err(e);
    9397              :                 }
    9398              :             }
    9399              :         };
    9400              : 
    9401            0 :         tracing::info!(
    9402            0 :             "{} reconciles in reconcile_all, {} waiters",
    9403              :             spawned_reconciles,
    9404              :             waiter_count
    9405              :         );
    9406              : 
    9407            0 :         Ok(std::cmp::max(waiter_count, spawned_reconciles))
    9408            0 :     }
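                      : 
                      :     // A minimal usage sketch for tests (hypothetical caller; `svc` is an `Arc<Service>` and the
                      :     // enclosing async fn returns a compatible `Result`): keep calling until no work is reported,
                      :     // which should leave the system quiescent, as described in the doc comment above:
                      :     //
                      :     //     while svc.reconcile_all_now().await? != 0 {
                      :     //         // keep draining background reconciles and optimizations
                      :     //     }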
    9409              : 
    9410            0 :     async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
    9411              :         // Cancel all on-going reconciles and wait for them to exit the gate.
    9412            0 :         tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
    9413            0 :         self.reconcilers_cancel.cancel();
    9414            0 :         self.reconcilers_gate.close().await;
    9415              : 
    9416              :         // Signal the background loop in [`Service::process_results`] to exit once
    9417              :         // it has processed the results from all the reconciles we cancelled earlier.
    9418            0 :         tracing::info!("{reason}: processing results from previously in-flight reconciles");
    9419            0 :         self.result_tx.send(ReconcileResultRequest::Stop).ok();
    9420            0 :         self.result_tx.closed().await;
    9421            0 :     }
    9422              : 
    9423            0 :     pub async fn shutdown(&self) {
    9424            0 :         self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
    9425            0 :             .await;
    9426              : 
    9427              :         // Background tasks hold gate guards: this notifies them of the cancellation and
    9428              :         // waits for them all to complete.
    9429            0 :         tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
    9430            0 :         self.cancel.cancel();
    9431            0 :         self.gate.close().await;
    9432            0 :     }
    9433              : 
    9434              :     /// Spot check the download lag for a secondary location of a shard.
    9435              :     /// Should be used as a heuristic, since it's not always precise: the
    9436              :     /// secondary might not have downloaded the new heat map yet and, hence,
    9437              :     /// is not aware of the lag.
    9438              :     ///
    9439              :     /// Returns:
    9440              :     /// * Ok(None) if the lag could not be determined from the status,
    9441              :     /// * Ok(Some(_)) if the lag could be determined,
    9442              :     /// * Err on failures to query the pageserver.
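                      :     ///
                      :     /// A rough sketch of how a caller might branch on the result (illustrative only;
                      :     /// `dest_node_id`, `tenant_shard_id` and `max_lag_bytes` are placeholders):
                      :     ///
                      :     /// ```ignore
                      :     /// match self.secondary_lag(&dest_node_id, tenant_shard_id).await {
                      :     ///     Ok(Some(lag)) if lag <= max_lag_bytes => { /* warm enough to migrate to */ }
                      :     ///     Ok(Some(_)) | Ok(None) => { /* lagging or lag unknown: skip */ }
                      :     ///     Err(_) => { /* failed to query the pageserver: skip */ }
                      :     /// }
                      :     /// ```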
    9443            0 :     async fn secondary_lag(
    9444            0 :         &self,
    9445            0 :         secondary: &NodeId,
    9446            0 :         tenant_shard_id: TenantShardId,
    9447            0 :     ) -> Result<Option<u64>, mgmt_api::Error> {
    9448            0 :         let nodes = self.inner.read().unwrap().nodes.clone();
    9449            0 :         let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
    9450            0 :             StatusCode::NOT_FOUND,
    9451            0 :             format!("Node with id {secondary} not found"),
    9452            0 :         ))?;
    9453              : 
    9454            0 :         match node
    9455            0 :             .with_client_retries(
    9456            0 :                 |client| async move { client.tenant_secondary_status(tenant_shard_id).await },
    9457            0 :                 &self.http_client,
    9458            0 :                 &self.config.pageserver_jwt_token,
    9459              :                 1,
    9460              :                 3,
    9461            0 :                 Duration::from_millis(250),
    9462            0 :                 &self.cancel,
    9463              :             )
    9464            0 :             .await
    9465              :         {
    9466            0 :             Some(Ok(status)) => match status.heatmap_mtime {
    9467            0 :                 Some(_) => Ok(Some(status.bytes_total - status.bytes_downloaded)),
    9468            0 :                 None => Ok(None),
    9469              :             },
    9470            0 :             Some(Err(e)) => Err(e),
    9471            0 :             None => Err(mgmt_api::Error::Cancelled),
    9472              :         }
    9473            0 :     }
    9474              : 
    9475              :     /// Drain a node by moving the shards attached to it as primaries.
    9476              :     /// This is a long running operation and it should run as a separate Tokio task.
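                      :     ///
                      :     /// A minimal sketch of running the drain as its own task (illustrative only; the
                      :     /// `Arc<Service>`, `node_id` and the cancellation token are assumed to be in scope):
                      :     ///
                      :     /// ```ignore
                      :     /// let service = service.clone();
                      :     /// tokio::spawn(async move {
                      :     ///     if let Err(err) = service.drain_node(node_id, cancel).await {
                      :     ///         tracing::warn!("drain of {node_id} did not complete: {err}");
                      :     ///     }
                      :     /// });
                      :     /// ```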
    9477            0 :     pub(crate) async fn drain_node(
    9478            0 :         self: &Arc<Self>,
    9479            0 :         node_id: NodeId,
    9480            0 :         cancel: CancellationToken,
    9481            0 :     ) -> Result<(), OperationError> {
    9482              :         const MAX_SECONDARY_LAG_BYTES_DEFAULT: u64 = 256 * 1024 * 1024;
    9483            0 :         let max_secondary_lag_bytes = self
    9484            0 :             .config
    9485            0 :             .max_secondary_lag_bytes
    9486            0 :             .unwrap_or(MAX_SECONDARY_LAG_BYTES_DEFAULT);
    9487              : 
    9488              :         // By default, live migrations are generous about the wait time for getting
    9489              :         // the secondary location up to speed. When draining, give up earlier in order
    9490              :         // to not stall the operation when a cold secondary is encountered.
    9491              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(30);
    9492              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    9493            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    9494            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    9495            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    9496            0 :             .build();
    9497              : 
    9498            0 :         let mut waiters = Vec::new();
    9499              : 
    9500            0 :         let mut tid_iter = create_shared_shard_iterator(self.clone());
    9501              : 
    9502            0 :         while !tid_iter.finished() {
    9503            0 :             if cancel.is_cancelled() {
    9504            0 :                 match self
    9505            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9506            0 :                     .await
    9507              :                 {
    9508            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9509            0 :                     Err(err) => {
    9510            0 :                         return Err(OperationError::FinalizeError(
    9511            0 :                             format!(
    9512            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9513            0 :                             )
    9514            0 :                             .into(),
    9515            0 :                         ));
    9516              :                     }
    9517              :                 }
    9518            0 :             }
    9519              : 
    9520            0 :             operation_utils::validate_node_state(
    9521            0 :                 &node_id,
    9522            0 :                 self.inner.read().unwrap().nodes.clone(),
    9523            0 :                 NodeSchedulingPolicy::Draining,
    9524            0 :             )?;
    9525              : 
    9526            0 :             while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    9527            0 :                 let tid = match tid_iter.next() {
    9528            0 :                     Some(tid) => tid,
    9529              :                     None => {
    9530            0 :                         break;
    9531              :                     }
    9532              :                 };
    9533              : 
    9534            0 :                 let tid_drain = TenantShardDrain {
    9535            0 :                     drained_node: node_id,
    9536            0 :                     tenant_shard_id: tid,
    9537            0 :                 };
    9538              : 
    9539            0 :                 let dest_node_id = {
    9540            0 :                     let locked = self.inner.read().unwrap();
    9541              : 
    9542            0 :                     match tid_drain
    9543            0 :                         .tenant_shard_eligible_for_drain(&locked.tenants, &locked.scheduler)
    9544              :                     {
    9545            0 :                         Some(node_id) => node_id,
    9546              :                         None => {
    9547            0 :                             continue;
    9548              :                         }
    9549              :                     }
    9550              :                 };
    9551              : 
    9552            0 :                 match self.secondary_lag(&dest_node_id, tid).await {
    9553            0 :                     Ok(Some(lag)) if lag <= max_secondary_lag_bytes => {
    9554            0 :                         // The secondary is reasonably up to date.
    9555            0 :                         // Migrate to it
    9556            0 :                     }
    9557            0 :                     Ok(Some(lag)) => {
    9558            0 :                         tracing::info!(
    9559            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9560            0 :                             "Secondary on node {dest_node_id} is lagging by {lag}. Skipping reconcile."
    9561              :                         );
    9562            0 :                         continue;
    9563              :                     }
    9564              :                     Ok(None) => {
    9565            0 :                         tracing::info!(
    9566            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9567            0 :                             "Could not determine lag for secondary on node {dest_node_id}. Skipping reconcile."
    9568              :                         );
    9569            0 :                         continue;
    9570              :                     }
    9571            0 :                     Err(err) => {
    9572            0 :                         tracing::warn!(
    9573            0 :                             tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9574            0 :                             "Failed to get secondary lag from node {dest_node_id}. Skipping reconcile: {err}"
    9575              :                         );
    9576            0 :                         continue;
    9577              :                     }
    9578              :                 }
    9579              : 
    9580              :                 {
    9581            0 :                     let mut locked = self.inner.write().unwrap();
    9582            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    9583            0 :                     let rescheduled = tid_drain.reschedule_to_secondary(
    9584            0 :                         dest_node_id,
    9585            0 :                         tenants,
    9586            0 :                         scheduler,
    9587            0 :                         nodes,
    9588            0 :                     )?;
    9589              : 
    9590            0 :                     if let Some(tenant_shard) = rescheduled {
    9591            0 :                         let waiter = self.maybe_configured_reconcile_shard(
    9592            0 :                             tenant_shard,
    9593            0 :                             nodes,
    9594            0 :                             reconciler_config,
    9595            0 :                         );
    9596            0 :                         if let Some(some) = waiter {
    9597            0 :                             waiters.push(some);
    9598            0 :                         }
    9599            0 :                     }
    9600              :                 }
    9601              :             }
    9602              : 
    9603            0 :             waiters = self
    9604            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    9605            0 :                 .await;
    9606              : 
    9607            0 :             failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel);
    9608              :         }
    9609              : 
    9610            0 :         while !waiters.is_empty() {
    9611            0 :             if cancel.is_cancelled() {
    9612            0 :                 match self
    9613            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9614            0 :                     .await
    9615              :                 {
    9616            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9617            0 :                     Err(err) => {
    9618            0 :                         return Err(OperationError::FinalizeError(
    9619            0 :                             format!(
    9620            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9621            0 :                             )
    9622            0 :                             .into(),
    9623            0 :                         ));
    9624              :                     }
    9625              :                 }
    9626            0 :             }
    9627              : 
    9628            0 :             tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
    9629              : 
    9630            0 :             waiters = self
    9631            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    9632            0 :                 .await;
    9633              :         }
    9634              : 
    9635              :         // At this point we have done the best we could to drain shards from this node.
    9636              :         // Set the node scheduling policy to `[NodeSchedulingPolicy::PauseForRestart]`
    9637              :         // Set the node scheduling policy to [`NodeSchedulingPolicy::PauseForRestart`]
    9638            0 :         if let Err(err) = self
    9639            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
    9640            0 :             .await
    9641              :         {
    9642              :             // This is not fatal. Anything that is polling the node scheduling policy to detect
    9643              :             // the end of the drain operation will hang, but all such places should enforce an
    9644              :             // overall timeout. The scheduling policy will be updated upon node re-attach and/or
    9645              :             // by the counterpart fill operation.
    9646            0 :             return Err(OperationError::FinalizeError(
    9647            0 :                 format!(
    9648            0 :                     "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
    9649            0 :                 )
    9650            0 :                 .into(),
    9651            0 :             ));
    9652            0 :         }
    9653              : 
    9654            0 :         Ok(())
    9655            0 :     }
    9656              : 
    9657              :     /// Create a node fill plan (pick secondaries to promote), based on:
    9658              :     /// 1. Shards which have a secondary on this node, and this node is in their home AZ, and are currently attached to a node
    9659              :     ///    outside their home AZ, should be migrated back here.
    9660              :     /// 2. If after step 1 we have not migrated enough shards for this node to have its fair share of
    9661              :     ///    attached shards, we will promote more shards from the nodes with the most attached shards, unless
    9662              :     ///    those shards have a home AZ that doesn't match the node we're filling.
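                      :     ///
                      :     /// Illustrative example (hypothetical numbers): if step 1 reclaims 3 shards whose
                      :     /// home AZ matches this node but which are currently attached elsewhere, and the node
                      :     /// still needs 2 more attachments to reach its fair share, step 2 promotes up to 2
                      :     /// AZ-agnostic shards taken from the nodes that currently hold the most attachments.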
    9663            0 :     fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
    9664            0 :         let mut locked = self.inner.write().unwrap();
    9665            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    9666              : 
    9667            0 :         let node_az = nodes
    9668            0 :             .get(&node_id)
    9669            0 :             .expect("Node must exist")
    9670            0 :             .get_availability_zone_id()
    9671            0 :             .clone();
    9672              : 
    9673              :         // The tenant shard IDs that we plan to promote from secondary to attached on this node
    9674            0 :         let mut plan = Vec::new();
    9675              : 
    9676              :         // Collect shards which do not have a preferred AZ & are eligible for moving in step 2
    9677            0 :         let mut free_tids_by_node: HashMap<NodeId, Vec<TenantShardId>> = HashMap::new();
    9678              : 
    9679              :         // Don't respect AZ preferences if there is only one AZ.  This comes up in tests, but it could
    9680              :         // conceivably come up in real life if deploying a single-AZ region intentionally.
    9681            0 :         let respect_azs = nodes
    9682            0 :             .values()
    9683            0 :             .map(|n| n.get_availability_zone_id())
    9684            0 :             .unique()
    9685            0 :             .count()
    9686              :             > 1;
    9687              : 
    9688              :         // Step 1: collect all shards that we are required to migrate back to this node because their AZ preference
    9689              :         // requires it.
    9690            0 :         for (tsid, tenant_shard) in tenants {
    9691            0 :             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    9692              :                 // Shard doesn't have a secondary on this node, ignore it.
    9693            0 :                 continue;
    9694            0 :             }
    9695              : 
    9696              :             // AZ check: when filling nodes after a restart, our intent is to move _back_ the
    9697              :             // shards which belong on this node, not to promote shards whose scheduling preference
    9698              :             // would be on their currently attached node.  So we will avoid promoting shards whose
    9699              :             // home AZ doesn't match the AZ of the node we're filling.
    9700            0 :             match tenant_shard.preferred_az() {
    9701            0 :                 _ if !respect_azs => {
    9702            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9703            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    9704            0 :                     }
    9705              :                 }
    9706              :                 None => {
    9707              :                     // Shard doesn't have an AZ preference: it is eligible to be moved, but we
    9708              :                     // will only do so if our target shard count requires it.
    9709            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9710            0 :                         free_tids_by_node.entry(*primary).or_default().push(*tsid);
    9711            0 :                     }
    9712              :                 }
    9713            0 :                 Some(az) if az == &node_az => {
    9714              :                     // This shard's home AZ is equal to the node we're filling: it should
    9715              :                     // be moved back to this node as part of filling, unless its currently
    9716              :                     // attached location is also in its home AZ.
    9717            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    9718            0 :                         if nodes
    9719            0 :                             .get(primary)
    9720            0 :                             .expect("referenced node must exist")
    9721            0 :                             .get_availability_zone_id()
    9722            0 :                             != tenant_shard
    9723            0 :                                 .preferred_az()
    9724            0 :                                 .expect("tenant must have an AZ preference")
    9725              :                         {
    9726            0 :                             plan.push(*tsid)
    9727            0 :                         }
    9728              :                     } else {
    9729            0 :                         plan.push(*tsid)
    9730              :                     }
    9731              :                 }
    9732            0 :                 Some(_) => {
    9733            0 :                     // This shard's home AZ is somewhere other than the node we're filling,
    9734            0 :                     // so it may not be moved back to this node as part of filling.  Ignore it.
    9735            0 :                 }
    9736              :             }
    9737              :         }
    9738              : 
    9739              :         // Step 2: also promote any AZ-agnostic shards as required to achieve the target number of attachments
    9740            0 :         let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
    9741              : 
    9742            0 :         let expected_attached = locked.scheduler.expected_attached_shard_count();
    9743            0 :         let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
    9744              : 
    9745            0 :         let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
    9746              : 
    9747            0 :         for (node_id, attached) in nodes_by_load {
    9748            0 :             let available = locked.nodes.get(&node_id).is_some_and(|n| n.is_available());
    9749            0 :             if !available {
    9750            0 :                 continue;
    9751            0 :             }
    9752              : 
    9753            0 :             if plan.len() >= fill_requirement
    9754            0 :                 || free_tids_by_node.is_empty()
    9755            0 :                 || attached <= expected_attached
    9756              :             {
    9757            0 :                 break;
    9758            0 :             }
    9759              : 
    9760            0 :             let can_take = attached - expected_attached;
    9761            0 :             let needed = fill_requirement - plan.len();
    9762            0 :             let mut take = std::cmp::min(can_take, needed);
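                      :             // Illustrative arithmetic (hypothetical numbers): if this node has 10 attached
                      :             // shards and the expected share is 6, can_take = 4; if the plan still needs 3
                      :             // more entries, take = min(4, 3) = 3.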
    9763              : 
    9764            0 :             let mut remove_node = false;
    9765            0 :             while take > 0 {
    9766            0 :                 match free_tids_by_node.get_mut(&node_id) {
    9767            0 :                     Some(tids) => match tids.pop() {
    9768            0 :                         Some(tid) => {
    9769            0 :                             let max_promote_for_tenant = std::cmp::max(
    9770            0 :                                 tid.shard_count.count() as usize / locked.nodes.len(),
    9771              :                                 1,
    9772              :                             );
    9773            0 :                             let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
    9774            0 :                             if *promoted < max_promote_for_tenant {
    9775            0 :                                 plan.push(tid);
    9776            0 :                                 *promoted += 1;
    9777            0 :                                 take -= 1;
    9778            0 :                             }
    9779              :                         }
    9780              :                         None => {
    9781            0 :                             remove_node = true;
    9782            0 :                             break;
    9783              :                         }
    9784              :                     },
    9785              :                     None => {
    9786            0 :                         break;
    9787              :                     }
    9788              :                 }
    9789              :             }
    9790              : 
    9791            0 :             if remove_node {
    9792            0 :                 free_tids_by_node.remove(&node_id);
    9793            0 :             }
    9794              :         }
    9795              : 
    9796            0 :         plan
    9797            0 :     }
    9798              : 
    9799              :     /// Fill a node by promoting its secondaries until the cluster is balanced
    9800              :     /// with regard to attached shard counts. Note that this operation only
    9801              :     /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
    9802              :     /// This is a long running operation and it should run as a separate Tokio task.
    9803            0 :     pub(crate) async fn fill_node(
    9804            0 :         &self,
    9805            0 :         node_id: NodeId,
    9806            0 :         cancel: CancellationToken,
    9807            0 :     ) -> Result<(), OperationError> {
    9808              :         const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(30);
    9809              :         const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
    9810            0 :         let reconciler_config = ReconcilerConfigBuilder::new(ReconcilerPriority::Normal)
    9811            0 :             .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
    9812            0 :             .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
    9813            0 :             .build();
    9814              : 
    9815            0 :         let mut tids_to_promote = self.fill_node_plan(node_id);
    9816            0 :         let mut waiters = Vec::new();
    9817              : 
    9818              :         // Execute the plan we've composed above. Before applying each move from the plan,
    9819              :         // we validate to ensure that it has not gone stale in the meantime.
    9820            0 :         while !tids_to_promote.is_empty() {
    9821            0 :             if cancel.is_cancelled() {
    9822            0 :                 match self
    9823            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9824            0 :                     .await
    9825              :                 {
    9826            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9827            0 :                     Err(err) => {
    9828            0 :                         return Err(OperationError::FinalizeError(
    9829            0 :                             format!(
    9830            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9831            0 :                             )
    9832            0 :                             .into(),
    9833            0 :                         ));
    9834              :                     }
    9835              :                 }
    9836            0 :             }
    9837              : 
    9838              :             {
    9839            0 :                 let mut locked = self.inner.write().unwrap();
    9840            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    9841              : 
    9842            0 :                 let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
    9843            0 :                     format!("node {node_id} was removed").into(),
    9844            0 :                 ))?;
    9845              : 
    9846            0 :                 let current_policy = node.get_scheduling();
    9847            0 :                 if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
    9848              :                     // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
    9849              :                     // about it
    9850            0 :                     return Err(OperationError::NodeStateChanged(
    9851            0 :                         format!("node {node_id} changed state to {current_policy:?}").into(),
    9852            0 :                     ));
    9853            0 :                 }
    9854              : 
    9855            0 :                 while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    9856            0 :                     if let Some(tid) = tids_to_promote.pop() {
    9857            0 :                         if let Some(tenant_shard) = tenants.get_mut(&tid) {
    9858              :                             // If the node being filled is not a secondary anymore,
    9859              :                             // skip the promotion.
    9860            0 :                             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    9861            0 :                                 continue;
    9862            0 :                             }
    9863              : 
    9864            0 :                             let previously_attached_to = *tenant_shard.intent.get_attached();
    9865            0 :                             match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
    9866            0 :                                 Err(e) => {
    9867            0 :                                     tracing::warn!(
    9868            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9869            0 :                                         "Scheduling error when filling pageserver {} : {e}", node_id
    9870              :                                     );
    9871              :                                 }
    9872              :                                 Ok(()) => {
    9873            0 :                                     tracing::info!(
    9874            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    9875            0 :                                         "Rescheduled shard while filling node {}: {:?} -> {}",
    9876              :                                         node_id,
    9877              :                                         previously_attached_to,
    9878              :                                         node_id
    9879              :                                     );
    9880              : 
    9881            0 :                                     if let Some(waiter) = self.maybe_configured_reconcile_shard(
    9882            0 :                                         tenant_shard,
    9883            0 :                                         nodes,
    9884            0 :                                         reconciler_config,
    9885            0 :                                     ) {
    9886            0 :                                         waiters.push(waiter);
    9887            0 :                                     }
    9888              :                                 }
    9889              :                             }
    9890            0 :                         }
    9891              :                     } else {
    9892            0 :                         break;
    9893              :                     }
    9894              :                 }
    9895              :             }
    9896              : 
    9897            0 :             waiters = self
    9898            0 :                 .await_waiters_remainder(waiters, WAITER_OPERATION_POLL_TIMEOUT)
    9899            0 :                 .await;
    9900              :         }
    9901              : 
    9902            0 :         while !waiters.is_empty() {
    9903            0 :             if cancel.is_cancelled() {
    9904            0 :                 match self
    9905            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9906            0 :                     .await
    9907              :                 {
    9908            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    9909            0 :                     Err(err) => {
    9910            0 :                         return Err(OperationError::FinalizeError(
    9911            0 :                             format!(
    9912            0 :                                 "Failed to finalise drain cancel of {node_id} by setting scheduling policy to Active: {err}"
    9913            0 :                             )
    9914            0 :                             .into(),
    9915            0 :                         ));
    9916              :                     }
    9917              :                 }
    9918            0 :             }
    9919              : 
    9920            0 :             tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
    9921              : 
    9922            0 :             waiters = self
    9923            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    9924            0 :                 .await;
    9925              :         }
    9926              : 
    9927            0 :         if let Err(err) = self
    9928            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    9929            0 :             .await
    9930              :         {
    9931              :             // This isn't a huge issue since the filling process starts upon request. However, it
    9932              :             // will prevent the next drain from starting. The only case in which this can fail
    9933              :             // is database unavailability. Such a case will require manual intervention.
    9934            0 :             return Err(OperationError::FinalizeError(
    9935            0 :                 format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
    9936            0 :                     .into(),
    9937            0 :             ));
    9938            0 :         }
    9939              : 
    9940            0 :         Ok(())
    9941            0 :     }
    9942              : 
    9943              :     /// Updates scrubber metadata health check results.
    9944            0 :     pub(crate) async fn metadata_health_update(
    9945            0 :         &self,
    9946            0 :         update_req: MetadataHealthUpdateRequest,
    9947            0 :     ) -> Result<(), ApiError> {
    9948            0 :         let now = chrono::offset::Utc::now();
    9949            0 :         let (healthy_records, unhealthy_records) = {
    9950            0 :             let locked = self.inner.read().unwrap();
    9951            0 :             let healthy_records = update_req
    9952            0 :                 .healthy_tenant_shards
    9953            0 :                 .into_iter()
    9954              :                 // Retain only health records associated with tenant shards managed by storage controller.
    9955            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    9956            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
    9957            0 :                 .collect();
    9958            0 :             let unhealthy_records = update_req
    9959            0 :                 .unhealthy_tenant_shards
    9960            0 :                 .into_iter()
    9961            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    9962            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
    9963            0 :                 .collect();
    9964              : 
    9965            0 :             (healthy_records, unhealthy_records)
    9966              :         };
    9967              : 
    9968            0 :         self.persistence
    9969            0 :             .update_metadata_health_records(healthy_records, unhealthy_records, now)
    9970            0 :             .await?;
    9971            0 :         Ok(())
    9972            0 :     }
    9973              : 
    9974              :     /// Lists the tenant shards that have an unhealthy metadata status.
    9975            0 :     pub(crate) async fn metadata_health_list_unhealthy(
    9976            0 :         &self,
    9977            0 :     ) -> Result<Vec<TenantShardId>, ApiError> {
    9978            0 :         let result = self
    9979            0 :             .persistence
    9980            0 :             .list_unhealthy_metadata_health_records()
    9981            0 :             .await?
    9982            0 :             .iter()
    9983            0 :             .map(|p| p.get_tenant_shard_id().unwrap())
    9984            0 :             .collect();
    9985              : 
    9986            0 :         Ok(result)
    9987            0 :     }
    9988              : 
    9989              :     /// Lists the tenant shards that have not been scrubbed for some duration.
    9990            0 :     pub(crate) async fn metadata_health_list_outdated(
    9991            0 :         &self,
    9992            0 :         not_scrubbed_for: Duration,
    9993            0 :     ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
    9994            0 :         let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
    9995            0 :         let result = self
    9996            0 :             .persistence
    9997            0 :             .list_outdated_metadata_health_records(earlier)
    9998            0 :             .await?
    9999            0 :             .into_iter()
   10000            0 :             .map(|record| record.into())
   10001            0 :             .collect();
   10002            0 :         Ok(result)
   10003            0 :     }
   10004              : 
   10005            0 :     pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
   10006            0 :         self.inner.read().unwrap().get_leadership_status()
   10007            0 :     }
   10008              : 
   10009              :     /// Handler for step down requests
   10010              :     ///
   10011              :     /// Step down runs in a separate task since, once it's called, it should
   10012              :     /// be driven to completion. Subsequent requests will wait on the same
   10013              :     /// step down task.
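                      :     ///
                      :     /// Illustrative sketch (not part of the original source): concurrent callers are
                      :     /// served by the same underlying task and observe the same final state.
                      :     ///
                      :     /// ```ignore
                      :     /// // Both calls await the same step-down task and get back the same observed state.
                      :     /// let (a, b) = tokio::join!(service.step_down(), service.step_down());
                      :     /// ```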
   10014            0 :     pub(crate) async fn step_down(self: &Arc<Self>) -> GlobalObservedState {
   10015            0 :         let handle = self.step_down_barrier.get_or_init(|| {
   10016            0 :             let step_down_self = self.clone();
   10017            0 :             let (tx, rx) = tokio::sync::watch::channel::<Option<GlobalObservedState>>(None);
   10018            0 :             tokio::spawn(async move {
   10019            0 :                 let state = step_down_self.step_down_task().await;
   10020            0 :                 tx.send(Some(state))
   10021            0 :                     .expect("Task Arc<Service> keeps receiver alive");
   10022            0 :             });
   10023              : 
   10024            0 :             rx
   10025            0 :         });
   10026              : 
   10027            0 :         handle
   10028            0 :             .clone()
   10029            0 :             .wait_for(|observed_state| observed_state.is_some())
   10030            0 :             .await
   10031            0 :             .expect("Task Arc<Service> keeps sender alive")
   10032            0 :             .deref()
   10033            0 :             .clone()
   10034            0 :             .expect("Checked above")
   10035            0 :     }
   10036              : 
   10037            0 :     async fn step_down_task(&self) -> GlobalObservedState {
   10038            0 :         tracing::info!("Received step down request from peer");
   10039            0 :         failpoint_support::sleep_millis_async!("sleep-on-step-down-handling");
   10040              : 
   10041            0 :         self.inner.write().unwrap().step_down();
   10042              : 
   10043            0 :         let stop_reconciliations =
   10044            0 :             self.stop_reconciliations(StopReconciliationsReason::SteppingDown);
   10045            0 :         let mut stop_reconciliations = std::pin::pin!(stop_reconciliations);
   10046              : 
   10047            0 :         let started_at = Instant::now();
   10048              : 
   10049              :         // Wait for reconciliations to stop and warn if that's taking a long time
   10050              :         loop {
   10051            0 :             tokio::select! {
   10052            0 :                 _ = &mut stop_reconciliations => {
   10053            0 :                     tracing::info!("Reconciliations stopped, proceeding with step down");
   10054            0 :                     break;
   10055              :                 }
   10056            0 :                 _ = tokio::time::sleep(Duration::from_secs(10)) => {
   10057            0 :                     tracing::warn!(
   10058            0 :                         elapsed_sec=%started_at.elapsed().as_secs(),
   10059            0 :                         "Stopping reconciliations during step down is taking too long"
   10060              :                     );
   10061              :                 }
   10062              :             }
   10063              :         }
   10064              : 
   10065            0 :         let mut global_observed = GlobalObservedState::default();
   10066            0 :         let locked = self.inner.read().unwrap();
   10067            0 :         for (tid, tenant_shard) in locked.tenants.iter() {
   10068            0 :             global_observed
   10069            0 :                 .0
   10070            0 :                 .insert(*tid, tenant_shard.observed.clone());
   10071            0 :         }
   10072              : 
   10073            0 :         global_observed
   10074            0 :     }
   10075              : 
   10076            0 :     pub(crate) async fn update_shards_preferred_azs(
   10077            0 :         &self,
   10078            0 :         req: ShardsPreferredAzsRequest,
   10079            0 :     ) -> Result<ShardsPreferredAzsResponse, ApiError> {
   10080            0 :         let preferred_azs = req.preferred_az_ids.into_iter().collect::<Vec<_>>();
   10081            0 :         let updated = self
   10082            0 :             .persistence
   10083            0 :             .set_tenant_shard_preferred_azs(preferred_azs)
   10084            0 :             .await
   10085            0 :             .map_err(|err| {
   10086            0 :                 ApiError::InternalServerError(anyhow::anyhow!(
   10087            0 :                     "Failed to persist preferred AZs: {err}"
   10088            0 :                 ))
   10089            0 :             })?;
   10090              : 
   10091            0 :         let mut updated_in_mem_and_db = Vec::default();
   10092              : 
   10093            0 :         let mut locked = self.inner.write().unwrap();
   10094            0 :         let state = locked.deref_mut();
   10095            0 :         for (tid, az_id) in updated {
   10096            0 :             let shard = state.tenants.get_mut(&tid);
   10097            0 :             if let Some(shard) = shard {
   10098            0 :                 shard.set_preferred_az(&mut state.scheduler, az_id);
   10099            0 :                 updated_in_mem_and_db.push(tid);
   10100            0 :             }
   10101              :         }
   10102              : 
   10103            0 :         Ok(ShardsPreferredAzsResponse {
   10104            0 :             updated: updated_in_mem_and_db,
   10105            0 :         })
   10106            0 :     }
   10107              : }
   10108              : 
   10109              : #[cfg(test)]
   10110              : mod tests {
   10111              :     use super::*;
   10112              : 
   10113              :     /// Tests Service::compute_split_shards. For readability, this specifies sizes in GBs rather
   10114              :     /// than bytes. Note that max_logical_size is the total logical size of the largest timeline
   10115              :     /// summed across all shards.
   10116              :     #[test]
   10117            1 :     fn compute_split_shards() {
   10118              :         // Size-based split: two shards have a 500 GB timeline, which needs to be split into
   10119              :         // 8 shards that are <= 64 GB each.
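                      :         // (Worked arithmetic: ceil(500 / 64) = 8 target shards; 8 is already a power of
                      :         // two and does not exceed max_split_shards = 16.)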
   10120            1 :         assert_eq!(
   10121            1 :             Service::compute_split_shards(ShardSplitInputs {
   10122            1 :                 shard_count: ShardCount(2),
   10123            1 :                 max_logical_size: 500,
   10124            1 :                 split_threshold: 64,
   10125            1 :                 max_split_shards: 16,
   10126            1 :                 initial_split_threshold: 0,
   10127            1 :                 initial_split_shards: 0,
   10128            1 :             }),
   10129              :             Some(ShardCount(8))
   10130              :         );
   10131              : 
   10132              :         // Size-based split: noop at or below threshold, fires above.
   10133            1 :         assert_eq!(
   10134            1 :             Service::compute_split_shards(ShardSplitInputs {
   10135            1 :                 shard_count: ShardCount(2),
   10136            1 :                 max_logical_size: 127,
   10137            1 :                 split_threshold: 64,
   10138            1 :                 max_split_shards: 16,
   10139            1 :                 initial_split_threshold: 0,
   10140            1 :                 initial_split_shards: 0,
   10141            1 :             }),
   10142              :             None,
   10143              :         );
   10144            1 :         assert_eq!(
   10145            1 :             Service::compute_split_shards(ShardSplitInputs {
   10146            1 :                 shard_count: ShardCount(2),
   10147            1 :                 max_logical_size: 128,
   10148            1 :                 split_threshold: 64,
   10149            1 :                 max_split_shards: 16,
   10150            1 :                 initial_split_threshold: 0,
   10151            1 :                 initial_split_shards: 0,
   10152            1 :             }),
   10153              :             None,
   10154              :         );
   10155            1 :         assert_eq!(
   10156            1 :             Service::compute_split_shards(ShardSplitInputs {
   10157            1 :                 shard_count: ShardCount(2),
   10158            1 :                 max_logical_size: 129,
   10159            1 :                 split_threshold: 64,
   10160            1 :                 max_split_shards: 16,
   10161            1 :                 initial_split_threshold: 0,
   10162            1 :                 initial_split_shards: 0,
   10163            1 :             }),
   10164              :             Some(ShardCount(4)),
   10165              :         );
   10166              : 
   10167              :         // Size-based split: clamped to max_split_shards.
   10168            1 :         assert_eq!(
   10169            1 :             Service::compute_split_shards(ShardSplitInputs {
   10170            1 :                 shard_count: ShardCount(2),
   10171            1 :                 max_logical_size: 10000,
   10172            1 :                 split_threshold: 64,
   10173            1 :                 max_split_shards: 16,
   10174            1 :                 initial_split_threshold: 0,
   10175            1 :                 initial_split_shards: 0,
   10176            1 :             }),
   10177              :             Some(ShardCount(16))
   10178              :         );
   10179              : 
   10180              :         // Size-based split: tenant already at or beyond max_split_shards is not split.
   10181            1 :         assert_eq!(
   10182            1 :             Service::compute_split_shards(ShardSplitInputs {
   10183            1 :                 shard_count: ShardCount(16),
   10184            1 :                 max_logical_size: 10000,
   10185            1 :                 split_threshold: 64,
   10186            1 :                 max_split_shards: 16,
   10187            1 :                 initial_split_threshold: 0,
   10188            1 :                 initial_split_shards: 0,
   10189            1 :             }),
   10190              :             None
   10191              :         );
   10192              : 
   10193            1 :         assert_eq!(
   10194            1 :             Service::compute_split_shards(ShardSplitInputs {
   10195            1 :                 shard_count: ShardCount(32),
   10196            1 :                 max_logical_size: 10000,
   10197            1 :                 split_threshold: 64,
   10198            1 :                 max_split_shards: 16,
   10199            1 :                 initial_split_threshold: 0,
   10200            1 :                 initial_split_shards: 0,
   10201            1 :             }),
   10202              :             None
   10203              :         );
   10204              : 
   10205              :         // Size-based split: a non-power-of-2 shard count is normalized to power-of-2 if it
   10206              :         // exceeds split_threshold (i.e. a 3-shard tenant splits into 8, not 6).
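                      :         // (Worked arithmetic: ceil(320 / 64) = 5 target shards, rounded up to the next
                      :         // power of two: 8.)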
   10207            1 :         assert_eq!(
   10208            1 :             Service::compute_split_shards(ShardSplitInputs {
   10209            1 :                 shard_count: ShardCount(3),
   10210            1 :                 max_logical_size: 320,
   10211            1 :                 split_threshold: 64,
   10212            1 :                 max_split_shards: 16,
   10213            1 :                 initial_split_threshold: 0,
   10214            1 :                 initial_split_shards: 0,
   10215            1 :             }),
   10216              :             Some(ShardCount(8))
   10217              :         );
   10218              : 
   10219              :         // Size-based split: a non-power-of-2 shard count is not normalized to power-of-2 if the
   10220              :         // existing shards are below or at split_threshold, but splits into 4 if it exceeds it.
   10221            1 :         assert_eq!(
   10222            1 :             Service::compute_split_shards(ShardSplitInputs {
   10223            1 :                 shard_count: ShardCount(3),
   10224            1 :                 max_logical_size: 191,
   10225            1 :                 split_threshold: 64,
   10226            1 :                 max_split_shards: 16,
   10227            1 :                 initial_split_threshold: 0,
   10228            1 :                 initial_split_shards: 0,
   10229            1 :             }),
   10230              :             None
   10231              :         );
   10232            1 :         assert_eq!(
   10233            1 :             Service::compute_split_shards(ShardSplitInputs {
   10234            1 :                 shard_count: ShardCount(3),
   10235            1 :                 max_logical_size: 192,
   10236            1 :                 split_threshold: 64,
   10237            1 :                 max_split_shards: 16,
   10238            1 :                 initial_split_threshold: 0,
   10239            1 :                 initial_split_shards: 0,
   10240            1 :             }),
   10241              :             None
   10242              :         );
   10243            1 :         assert_eq!(
   10244            1 :             Service::compute_split_shards(ShardSplitInputs {
   10245            1 :                 shard_count: ShardCount(3),
   10246            1 :                 max_logical_size: 193,
   10247            1 :                 split_threshold: 64,
   10248            1 :                 max_split_shards: 16,
   10249            1 :                 initial_split_threshold: 0,
   10250            1 :                 initial_split_shards: 0,
   10251            1 :             }),
   10252              :             Some(ShardCount(4))
   10253              :         );
   10254              : 
   10255              :         // Initial split: tenant has a 10 GB timeline, split into 4 shards.
   10256            1 :         assert_eq!(
   10257            1 :             Service::compute_split_shards(ShardSplitInputs {
   10258            1 :                 shard_count: ShardCount(1),
   10259            1 :                 max_logical_size: 10,
   10260            1 :                 split_threshold: 0,
   10261            1 :                 max_split_shards: 16,
   10262            1 :                 initial_split_threshold: 8,
   10263            1 :                 initial_split_shards: 4,
   10264            1 :             }),
   10265              :             Some(ShardCount(4))
   10266              :         );
   10267              : 
   10268              :         // Initial split: 0 ShardCount is equivalent to 1.
   10269            1 :         assert_eq!(
   10270            1 :             Service::compute_split_shards(ShardSplitInputs {
   10271            1 :                 shard_count: ShardCount(0),
   10272            1 :                 max_logical_size: 10,
   10273            1 :                 split_threshold: 0,
   10274            1 :                 max_split_shards: 16,
   10275            1 :                 initial_split_threshold: 8,
   10276            1 :                 initial_split_shards: 4,
   10277            1 :             }),
   10278              :             Some(ShardCount(4))
   10279              :         );
   10280              : 
   10281              :         // Initial split: at or below threshold is noop.
   10282            1 :         assert_eq!(
   10283            1 :             Service::compute_split_shards(ShardSplitInputs {
   10284            1 :                 shard_count: ShardCount(1),
   10285            1 :                 max_logical_size: 7,
   10286            1 :                 split_threshold: 0,
   10287            1 :                 max_split_shards: 16,
   10288            1 :                 initial_split_threshold: 8,
   10289            1 :                 initial_split_shards: 4,
   10290            1 :             }),
   10291              :             None,
   10292              :         );
   10293            1 :         assert_eq!(
   10294            1 :             Service::compute_split_shards(ShardSplitInputs {
   10295            1 :                 shard_count: ShardCount(1),
   10296            1 :                 max_logical_size: 8,
   10297            1 :                 split_threshold: 0,
   10298            1 :                 max_split_shards: 16,
   10299            1 :                 initial_split_threshold: 8,
   10300            1 :                 initial_split_shards: 4,
   10301            1 :             }),
   10302              :             None,
   10303              :         );
   10304            1 :         assert_eq!(
   10305            1 :             Service::compute_split_shards(ShardSplitInputs {
   10306            1 :                 shard_count: ShardCount(1),
   10307            1 :                 max_logical_size: 9,
   10308            1 :                 split_threshold: 0,
   10309            1 :                 max_split_shards: 16,
   10310            1 :                 initial_split_threshold: 8,
   10311            1 :                 initial_split_shards: 4,
   10312            1 :             }),
   10313              :             Some(ShardCount(4))
   10314              :         );
   10315              : 
   10316              :         // Initial split: already sharded tenant is not affected, even if above threshold and below
   10317              :         // shard count.
   10318            1 :         assert_eq!(
   10319            1 :             Service::compute_split_shards(ShardSplitInputs {
   10320            1 :                 shard_count: ShardCount(2),
   10321            1 :                 max_logical_size: 20,
   10322            1 :                 split_threshold: 0,
   10323            1 :                 max_split_shards: 16,
   10324            1 :                 initial_split_threshold: 8,
   10325            1 :                 initial_split_shards: 4,
   10326            1 :             }),
   10327              :             None,
   10328              :         );
   10329              : 
   10330              :         // Initial split: clamped to max_split_shards.
   10331            1 :         assert_eq!(
   10332            1 :             Service::compute_split_shards(ShardSplitInputs {
   10333            1 :                 shard_count: ShardCount(1),
   10334            1 :                 max_logical_size: 10,
   10335            1 :                 split_threshold: 0,
   10336            1 :                 max_split_shards: 3,
   10337            1 :                 initial_split_threshold: 8,
   10338            1 :                 initial_split_shards: 4,
   10339            1 :             }),
   10340              :             Some(ShardCount(3)),
   10341              :         );
   10342              : 
   10343              :         // Initial+size split: a tenant eligible for both uses the larger shard count.
   10344            1 :         assert_eq!(
   10345            1 :             Service::compute_split_shards(ShardSplitInputs {
   10346            1 :                 shard_count: ShardCount(1),
   10347            1 :                 max_logical_size: 10,
   10348            1 :                 split_threshold: 64,
   10349            1 :                 max_split_shards: 16,
   10350            1 :                 initial_split_threshold: 8,
   10351            1 :                 initial_split_shards: 4,
   10352            1 :             }),
   10353              :             Some(ShardCount(4)),
   10354              :         );
   10355            1 :         assert_eq!(
   10356            1 :             Service::compute_split_shards(ShardSplitInputs {
   10357            1 :                 shard_count: ShardCount(1),
   10358            1 :                 max_logical_size: 500,
   10359            1 :                 split_threshold: 64,
   10360            1 :                 max_split_shards: 16,
   10361            1 :                 initial_split_threshold: 8,
   10362            1 :                 initial_split_shards: 4,
   10363            1 :             }),
   10364              :             Some(ShardCount(8)),
   10365              :         );
   10366              : 
   10367              :         // Initial+size split: sharded tenant is only eligible for size-based split.
   10368            1 :         assert_eq!(
   10369            1 :             Service::compute_split_shards(ShardSplitInputs {
   10370            1 :                 shard_count: ShardCount(2),
   10371            1 :                 max_logical_size: 200,
   10372            1 :                 split_threshold: 64,
   10373            1 :                 max_split_shards: 16,
   10374            1 :                 initial_split_threshold: 8,
   10375            1 :                 initial_split_shards: 8,
   10376            1 :             }),
   10377              :             Some(ShardCount(4)),
   10378              :         );
   10379              : 
   10380              :         // Initial+size split: uses the larger shard count even with initial_split_threshold above
   10381              :         // split_threshold.
   10382            1 :         assert_eq!(
   10383            1 :             Service::compute_split_shards(ShardSplitInputs {
   10384            1 :                 shard_count: ShardCount(1),
   10385            1 :                 max_logical_size: 10,
   10386            1 :                 split_threshold: 4,
   10387            1 :                 max_split_shards: 16,
   10388            1 :                 initial_split_threshold: 8,
   10389            1 :                 initial_split_shards: 8,
   10390            1 :             }),
   10391              :             Some(ShardCount(8)),
   10392              :         );
   10393              : 
   10394              :         // Test backwards compatibility with production settings when initial/size-based splits were
   10395              :         // rolled out: a single split into 8 shards at 64 GB. Any already sharded tenants with <8
   10396              :         // shards will split according to split_threshold.
   10397            1 :         assert_eq!(
   10398            1 :             Service::compute_split_shards(ShardSplitInputs {
   10399            1 :                 shard_count: ShardCount(1),
   10400            1 :                 max_logical_size: 65,
   10401            1 :                 split_threshold: 64,
   10402            1 :                 max_split_shards: 8,
   10403            1 :                 initial_split_threshold: 64,
   10404            1 :                 initial_split_shards: 8,
   10405            1 :             }),
   10406              :             Some(ShardCount(8)),
   10407              :         );
   10408              : 
   10409            1 :         assert_eq!(
   10410            1 :             Service::compute_split_shards(ShardSplitInputs {
   10411            1 :                 shard_count: ShardCount(1),
   10412            1 :                 max_logical_size: 64,
   10413            1 :                 split_threshold: 64,
   10414            1 :                 max_split_shards: 8,
   10415            1 :                 initial_split_threshold: 64,
   10416            1 :                 initial_split_shards: 8,
   10417            1 :             }),
   10418              :             None,
   10419              :         );
   10420              : 
   10421            1 :         assert_eq!(
   10422            1 :             Service::compute_split_shards(ShardSplitInputs {
   10423            1 :                 shard_count: ShardCount(2),
   10424            1 :                 max_logical_size: 129,
   10425            1 :                 split_threshold: 64,
   10426            1 :                 max_split_shards: 8,
   10427            1 :                 initial_split_threshold: 64,
   10428            1 :                 initial_split_shards: 8,
   10429            1 :             }),
   10430              :             Some(ShardCount(4)),
   10431              :         );
   10432            1 :     }
   10433              : }
        

Generated by: LCOV version 2.1-beta
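
Note on the splitting policy exercised by the tests above: the assertions pin down Service::compute_split_shards only by its outcomes. The following is a minimal Rust sketch reconstructed from those outcomes; the struct and field names mirror ShardSplitInputs from the listing, but the body and the field types are assumptions, not the actual implementation.

    // Hypothetical reconstruction: not the actual Service::compute_split_shards.
    #[derive(Clone, Copy, Debug, PartialEq)]
    struct ShardCount(u8);

    struct ShardSplitInputs {
        shard_count: ShardCount,        // current shard count (0 = unsharded)
        max_logical_size: u64,          // largest shard logical size (GB assumed)
        split_threshold: u64,           // size-based split threshold; 0 disables
        max_split_shards: u8,           // upper bound on the resulting shard count
        initial_split_threshold: u64,   // initial-split threshold; 0 disables
        initial_split_shards: u8,       // target shard count for an initial split
    }

    fn compute_split_shards(inputs: ShardSplitInputs) -> Option<ShardCount> {
        let ShardSplitInputs {
            shard_count,
            max_logical_size,
            split_threshold,
            max_split_shards,
            initial_split_threshold,
            initial_split_shards,
        } = inputs;

        // Start from the current shard count, treating 0 (unsharded) the same as 1.
        let mut target: u8 = shard_count.0.max(1);

        // Size-based candidate: enough shards that max_logical_size / shards <= split_threshold.
        if split_threshold > 0 {
            let by_size = max_logical_size.div_ceil(split_threshold);
            target = target.max(u8::try_from(by_size).unwrap_or(u8::MAX));
        }

        // Initial-split candidate: only for tenants that are still unsharded and strictly
        // above the initial threshold; already sharded tenants are not affected by it.
        if initial_split_threshold > 0
            && shard_count.0 <= 1
            && max_logical_size > initial_split_threshold
        {
            target = target.max(initial_split_shards);
        }

        // Shard counts are powers of two: round up, then clamp to the configured maximum.
        target = target.checked_next_power_of_two().unwrap_or(u8::MAX);
        target = target.min(max_split_shards);

        // A split never reduces the shard count; an equal or lower target means no split.
        if target <= shard_count.0 {
            return None;
        }
        Some(ShardCount(target))
    }

Under this reading, the split target is the larger of the size-based candidate (max_logical_size divided by split_threshold, rounded up) and the initial-split candidate (initial_split_shards, applied only to unsharded tenants strictly above initial_split_threshold), rounded up to a power of two and capped at max_split_shards, with no split when the target does not exceed the current count. This is consistent with the assertions shown above, but the real function may differ in details the tests do not cover.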