LCOV - code coverage report
Current view: top level - storage_controller/src - service.rs (source / functions)
Test:      42f947419473a288706e86ecdf7c2863d760d5d7.info
Test Date: 2024-08-02 21:34:27

             Coverage   Total   Hit
Lines:       0.0 %      3757    0
Functions:   0.0 %      311     0

            Line data    Source code
       1              : use std::{
       2              :     borrow::Cow,
       3              :     cmp::Ordering,
       4              :     collections::{BTreeMap, HashMap, HashSet},
       5              :     ops::Deref,
       6              :     path::PathBuf,
       7              :     str::FromStr,
       8              :     sync::Arc,
       9              :     time::{Duration, Instant},
      10              : };
      11              : 
      12              : use crate::{
      13              :     background_node_operations::{
      14              :         Drain, Fill, Operation, OperationError, OperationHandler, MAX_RECONCILES_PER_OPERATION,
      15              :     },
      16              :     compute_hook::NotifyError,
      17              :     id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, TracingExclusiveGuard},
      18              :     metrics::LeadershipStatusGroup,
      19              :     persistence::{AbortShardSplitStatus, MetadataHealthPersistence, TenantFilter},
      20              :     reconciler::{ReconcileError, ReconcileUnits},
      21              :     scheduler::{MaySchedule, ScheduleContext, ScheduleMode},
      22              :     tenant_shard::{
      23              :         MigrateAttachment, ReconcileNeeded, ReconcilerStatus, ScheduleOptimization,
      24              :         ScheduleOptimizationAction,
      25              :     },
      26              : };
      27              : use anyhow::Context;
      28              : use control_plane::storage_controller::{
      29              :     AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
      30              : };
      31              : use diesel::result::DatabaseErrorKind;
      32              : use futures::{stream::FuturesUnordered, StreamExt};
      33              : use itertools::Itertools;
      34              : use pageserver_api::{
      35              :     controller_api::{
      36              :         MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability, NodeRegisterRequest,
      37              :         NodeSchedulingPolicy, PlacementPolicy, ShardSchedulingPolicy, TenantCreateRequest,
      38              :         TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse,
      39              :         TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
      40              :         TenantShardMigrateRequest, TenantShardMigrateResponse, UtilizationScore,
      41              :     },
      42              :     models::{SecondaryProgress, TenantConfigRequest, TopTenantShardsRequest},
      43              : };
      44              : use reqwest::StatusCode;
      45              : use tracing::{instrument, Instrument};
      46              : 
      47              : use crate::pageserver_client::PageserverClient;
      48              : use pageserver_api::{
      49              :     models::{
      50              :         self, LocationConfig, LocationConfigListResponse, LocationConfigMode,
      51              :         PageserverUtilization, ShardParameters, TenantConfig, TenantLocationConfigRequest,
      52              :         TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
      53              :         TenantShardSplitResponse, TenantTimeTravelRequest, TimelineCreateRequest, TimelineInfo,
      54              :     },
      55              :     shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId},
      56              :     upcall_api::{
      57              :         ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest,
      58              :         ValidateResponse, ValidateResponseTenant,
      59              :     },
      60              : };
      61              : use pageserver_client::mgmt_api;
      62              : use tokio::sync::mpsc::error::TrySendError;
      63              : use tokio_util::sync::CancellationToken;
      64              : use utils::{
      65              :     completion::Barrier,
      66              :     failpoint_support,
      67              :     generation::Generation,
      68              :     http::error::ApiError,
      69              :     id::{NodeId, TenantId, TimelineId},
      70              :     sync::gate::Gate,
      71              : };
      72              : 
      73              : use crate::{
      74              :     compute_hook::ComputeHook,
      75              :     heartbeater::{Heartbeater, PageserverState},
      76              :     node::{AvailabilityTransition, Node},
      77              :     persistence::{split_state::SplitState, DatabaseError, Persistence, TenantShardPersistence},
      78              :     reconciler::attached_location_conf,
      79              :     scheduler::Scheduler,
      80              :     tenant_shard::{
      81              :         IntentState, ObservedState, ObservedStateLocation, ReconcileResult, ReconcileWaitError,
      82              :         ReconcilerWaiter, TenantShard,
      83              :     },
      84              : };
      85              : use serde::{Deserialize, Serialize};
      86              : 
      87              : pub mod chaos_injector;
      88              : 
      89              : // For operations that should be quick, like attaching a new tenant
      90              : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
      91              : 
      92              : // For operations that might be slow, like migrating a tenant with
      93              : // some data in it.
      94              : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
      95              : 
      96              : // If we receive a call using Secondary mode initially, it will omit generation.  We will initialize
      97              : // tenant shards into this generation, and as long as it remains in this generation, we will accept
      98              : // input generation from future requests as authoritative.
      99              : const INITIAL_GENERATION: Generation = Generation::new(0);
     100              : 
     101              : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
     102              : /// up on unresponsive pageservers and proceed.
     103              : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
     104              : 
     105              : /// How long a node may be unresponsive to heartbeats before we declare it offline.
      106              : /// This must be long enough to cover node restarts as well as normal operations.
     107              : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
     108              : 
     109              : /// How long a node may be unresponsive to heartbeats during start up before we declare it
     110              : /// offline. This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
      111              : /// handling of the re-attach response may take a long time and block heartbeats from
     112              : /// being handled on the pageserver side.
     113              : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
     114              : 
     115            0 : #[derive(Clone, strum_macros::Display)]
     116              : enum TenantOperations {
     117              :     Create,
     118              :     LocationConfig,
     119              :     ConfigSet,
     120              :     TimeTravelRemoteStorage,
     121              :     Delete,
     122              :     UpdatePolicy,
     123              :     ShardSplit,
     124              :     SecondaryDownload,
     125              :     TimelineCreate,
     126              :     TimelineDelete,
     127              :     AttachHook,
     128              :     TimelineDetachAncestor,
     129              : }
     130              : 
     131            0 : #[derive(Clone, strum_macros::Display)]
     132              : enum NodeOperations {
     133              :     Register,
     134              :     Configure,
     135              :     Delete,
     136              : }
     137              : 
     138              : /// The leadership status for the storage controller process.
     139              : /// Allowed transitions are:
     140              : /// 1. Leader -> SteppedDown
     141              : /// 2. Candidate -> Leader
     142            0 : #[derive(Copy, Clone, strum_macros::Display, measured::FixedCardinalityLabel)]
     143              : #[strum(serialize_all = "snake_case")]
     144              : pub(crate) enum LeadershipStatus {
     145              :     /// This is the steady state where the storage controller can produce
     146              :     /// side effects in the cluster.
     147              :     Leader,
     148              :     /// We've been notified to step down by another candidate. No reconciliations
     149              :     /// take place in this state.
     150              :     SteppedDown,
     151              :     /// Initial state for a new storage controller instance. Will attempt to assume leadership.
     152              :     #[allow(unused)]
     153              :     Candidate,
     154              : }
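
The transitions documented above form a small state machine. As an illustrative sketch only (not this crate's code; the real status lives inside ServiceState and is updated by methods such as step_down), a transition guard could look like:

    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    enum LeadershipStatus {
        Leader,
        SteppedDown,
        Candidate,
    }

    // Only the documented transitions are permitted:
    // Leader -> SteppedDown and Candidate -> Leader.
    fn transition_allowed(from: LeadershipStatus, to: LeadershipStatus) -> bool {
        use LeadershipStatus::*;
        matches!((from, to), (Leader, SteppedDown) | (Candidate, Leader))
    }

    fn main() {
        use LeadershipStatus::*;
        assert!(transition_allowed(Candidate, Leader));
        assert!(transition_allowed(Leader, SteppedDown));
        // A stepped-down controller does not become Leader again in-process.
        assert!(!transition_allowed(SteppedDown, Leader));
    }
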
     155              : 
     156              : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
     157              : 
     158              : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
     159              : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
     160              : // than they're being pushed onto the queue.
     161              : const MAX_DELAYED_RECONCILES: usize = 10000;
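
For context on the finite-size queue above: a send into a full tokio bounded channel fails with TrySendError::Full instead of buffering without limit, and the shard is simply picked up again by a later reconcile pass. A minimal, self-contained sketch of that pattern (illustrative only; the queue size and names below are made up, not this crate's API):

    use tokio::sync::mpsc::{channel, error::TrySendError};

    #[tokio::main]
    async fn main() {
        // A deliberately tiny queue, standing in for MAX_DELAYED_RECONCILES.
        let (tx, mut rx) = channel::<u64>(4);

        for shard in 0..8u64 {
            match tx.try_send(shard) {
                Ok(()) => {}
                // Queue full: drop this enqueue attempt instead of growing memory;
                // a later periodic pass will requeue the shard.
                Err(TrySendError::Full(shard)) => eprintln!("queue full, deferring shard {shard}"),
                Err(TrySendError::Closed(_)) => break,
            }
        }

        while let Ok(shard) = rx.try_recv() {
            println!("dequeued shard {shard} for reconciliation");
        }
    }
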
     162              : 
     163              : // Top level state available to all HTTP handlers
     164              : struct ServiceState {
     165              :     leadership_status: LeadershipStatus,
     166              : 
     167              :     tenants: BTreeMap<TenantShardId, TenantShard>,
     168              : 
     169              :     nodes: Arc<HashMap<NodeId, Node>>,
     170              : 
     171              :     scheduler: Scheduler,
     172              : 
     173              :     /// Ongoing background operation on the cluster if any is running.
     174              :     /// Note that only one such operation may run at any given time,
     175              :     /// hence the type choice.
     176              :     ongoing_operation: Option<OperationHandler>,
     177              : 
     178              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     179              :     delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     180              : }
     181              : 
     182              : /// Transform an error from a pageserver into an error to return to callers of a storage
     183              : /// controller API.
     184            0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
     185            0 :     match e {
     186            0 :         mgmt_api::Error::SendRequest(e) => {
     187            0 :             // Presume errors sending requests are connectivity/availability issues
     188            0 :             ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
     189              :         }
     190            0 :         mgmt_api::Error::ReceiveErrorBody(str) => {
     191            0 :             // Presume errors receiving body are connectivity/availability issues
     192            0 :             ApiError::ResourceUnavailable(
     193            0 :                 format!("{node} error receiving error body: {str}").into(),
     194            0 :             )
     195              :         }
     196            0 :         mgmt_api::Error::ReceiveBody(str) => {
     197            0 :             // Presume errors receiving body are connectivity/availability issues
     198            0 :             ApiError::ResourceUnavailable(format!("{node} error receiving body: {str}").into())
     199              :         }
     200            0 :         mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
     201            0 :             ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
     202              :         }
     203            0 :         mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
     204            0 :             ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
     205              :         }
     206            0 :         mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
     207            0 :         | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
     208              :             // Auth errors talking to a pageserver are not auth errors for the caller: they are
     209              :             // internal server errors, showing that something is wrong with the pageserver or
     210              :             // storage controller's auth configuration.
     211            0 :             ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
     212              :         }
     213            0 :         mgmt_api::Error::ApiError(status, msg) => {
     214            0 :             // Presume general case of pageserver API errors is that we tried to do something
     215            0 :             // that can't be done right now.
      216            0 :                 ApiError::Conflict(format!("{node} {status}: {msg}"))
     217              :         }
     218            0 :         mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
     219              :     }
     220            0 : }
     221              : 
     222              : impl ServiceState {
     223            0 :     fn new(
     224            0 :         nodes: HashMap<NodeId, Node>,
     225            0 :         tenants: BTreeMap<TenantShardId, TenantShard>,
     226            0 :         scheduler: Scheduler,
     227            0 :         delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
     228            0 :     ) -> Self {
     229            0 :         let status = &crate::metrics::METRICS_REGISTRY
     230            0 :             .metrics_group
     231            0 :             .storage_controller_leadership_status;
     232            0 : 
     233            0 :         status.set(
     234            0 :             LeadershipStatusGroup {
     235            0 :                 status: LeadershipStatus::Leader,
     236            0 :             },
     237            0 :             1,
     238            0 :         );
     239            0 : 
     240            0 :         Self {
     241            0 :             // TODO: Starting up as Leader is a transient state. Once we enable rolling
     242            0 :             // upgrades on the k8s side, we should start up as Candidate.
     243            0 :             leadership_status: LeadershipStatus::Leader,
     244            0 :             tenants,
     245            0 :             nodes: Arc::new(nodes),
     246            0 :             scheduler,
     247            0 :             ongoing_operation: None,
     248            0 :             delayed_reconcile_rx,
     249            0 :         }
     250            0 :     }
     251              : 
     252            0 :     fn parts_mut(
     253            0 :         &mut self,
     254            0 :     ) -> (
     255            0 :         &mut Arc<HashMap<NodeId, Node>>,
     256            0 :         &mut BTreeMap<TenantShardId, TenantShard>,
     257            0 :         &mut Scheduler,
     258            0 :     ) {
     259            0 :         (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
     260            0 :     }
     261              : 
     262            0 :     fn get_leadership_status(&self) -> LeadershipStatus {
     263            0 :         self.leadership_status
     264            0 :     }
     265              : 
     266            0 :     fn step_down(&mut self) {
     267            0 :         self.leadership_status = LeadershipStatus::SteppedDown;
     268            0 : 
     269            0 :         let status = &crate::metrics::METRICS_REGISTRY
     270            0 :             .metrics_group
     271            0 :             .storage_controller_leadership_status;
     272            0 : 
     273            0 :         status.set(
     274            0 :             LeadershipStatusGroup {
     275            0 :                 status: LeadershipStatus::SteppedDown,
     276            0 :             },
     277            0 :             1,
     278            0 :         );
     279            0 :         status.set(
     280            0 :             LeadershipStatusGroup {
     281            0 :                 status: LeadershipStatus::Leader,
     282            0 :             },
     283            0 :             0,
     284            0 :         );
     285            0 :         status.set(
     286            0 :             LeadershipStatusGroup {
     287            0 :                 status: LeadershipStatus::Candidate,
     288            0 :             },
     289            0 :             0,
     290            0 :         );
     291            0 :     }
     292              : }
     293              : 
     294              : #[derive(Clone)]
     295              : pub struct Config {
     296              :     // All pageservers managed by one instance of this service must have
     297              :     // the same public key.  This JWT token will be used to authenticate
     298              :     // this service to the pageservers it manages.
     299              :     pub jwt_token: Option<String>,
     300              : 
     301              :     // This JWT token will be used to authenticate this service to the control plane.
     302              :     pub control_plane_jwt_token: Option<String>,
     303              : 
     304              :     /// Where the compute hook should send notifications of pageserver attachment locations
     305              :     /// (this URL points to the control plane in prod). If this is None, the compute hook will
     306              :     /// assume it is running in a test environment and try to update neon_local.
     307              :     pub compute_hook_url: Option<String>,
     308              : 
     309              :     /// Grace period within which a pageserver does not respond to heartbeats, but is still
     310              :     /// considered active. Once the grace period elapses, the next heartbeat failure will
      311              :     /// mark the pageserver offline.
     312              :     pub max_offline_interval: Duration,
     313              : 
     314              :     /// Extended grace period within which pageserver may not respond to heartbeats.
     315              :     /// This extended grace period kicks in after the node has been drained for restart
     316              :     /// and/or upon handling the re-attach request from a node.
     317              :     pub max_warming_up_interval: Duration,
     318              : 
     319              :     /// How many Reconcilers may be spawned concurrently
     320              :     pub reconciler_concurrency: usize,
     321              : 
     322              :     /// How large must a shard grow in bytes before we split it?
     323              :     /// None disables auto-splitting.
     324              :     pub split_threshold: Option<u64>,
     325              : 
      326              :     // TODO: make this cfg(feature = "testing")
     327              :     pub neon_local_repo_dir: Option<PathBuf>,
     328              : }
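
For orientation, a Config suitable for a local/testing run could be assembled from the defaults defined earlier in this file. This is an illustrative sketch only (the choice of values is an assumption, not a recommended production configuration):

    fn local_test_config() -> Config {
        Config {
            jwt_token: None,
            control_plane_jwt_token: None,
            // None => the compute hook assumes a test environment and updates neon_local.
            compute_hook_url: None,
            max_offline_interval: MAX_OFFLINE_INTERVAL_DEFAULT,
            max_warming_up_interval: MAX_WARMING_UP_INTERVAL_DEFAULT,
            reconciler_concurrency: RECONCILER_CONCURRENCY_DEFAULT,
            // None disables auto-splitting.
            split_threshold: None,
            neon_local_repo_dir: None,
        }
    }
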
     329              : 
     330              : impl From<DatabaseError> for ApiError {
     331            0 :     fn from(err: DatabaseError) -> ApiError {
     332            0 :         match err {
     333            0 :             DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
     334              :             // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
     335              :             DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
     336            0 :                 ApiError::ShuttingDown
     337              :             }
     338            0 :             DatabaseError::Logical(reason) => {
     339            0 :                 ApiError::InternalServerError(anyhow::anyhow!(reason))
     340              :             }
     341              :         }
     342            0 :     }
     343              : }
     344              : 
     345              : pub struct Service {
     346              :     inner: Arc<std::sync::RwLock<ServiceState>>,
     347              :     config: Config,
     348              :     persistence: Arc<Persistence>,
     349              :     compute_hook: Arc<ComputeHook>,
     350              :     result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
     351              : 
     352              :     heartbeater: Heartbeater,
     353              : 
     354              :     // Channel for background cleanup from failed operations that require cleanup, such as shard split
     355              :     abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
     356              : 
     357              :     // Locking on a tenant granularity (covers all shards in the tenant):
     358              :     // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
     359              :     // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
     360              :     tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
     361              : 
     362              :     // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
     363              :     // that transition it to/from Active.
     364              :     node_op_locks: IdLockMap<NodeId, NodeOperations>,
     365              : 
     366              :     // Limit how many Reconcilers we will spawn concurrently
     367              :     reconciler_concurrency: Arc<tokio::sync::Semaphore>,
     368              : 
     369              :     /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
     370              :     /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
     371              :     ///
      372              :     /// Note that this state logically lives inside ServiceState, but carrying Sender here makes the code simpler
      373              :     /// by avoiding needing a &mut ref to something inside the ServiceState.  This could be optimized to
     374              :     /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
     375              :     delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
     376              : 
     377              :     // Process shutdown will fire this token
     378              :     cancel: CancellationToken,
     379              : 
     380              :     // Child token of [`Service::cancel`] used by reconcilers
     381              :     reconcilers_cancel: CancellationToken,
     382              : 
     383              :     // Background tasks will hold this gate
     384              :     gate: Gate,
     385              : 
     386              :     // Reconcilers background tasks will hold this gate
     387              :     reconcilers_gate: Gate,
     388              : 
     389              :     /// This waits for initial reconciliation with pageservers to complete.  Until this barrier
     390              :     /// passes, it isn't safe to do any actions that mutate tenants.
     391              :     pub(crate) startup_complete: Barrier,
     392              : }
     393              : 
     394              : impl From<ReconcileWaitError> for ApiError {
     395            0 :     fn from(value: ReconcileWaitError) -> Self {
     396            0 :         match value {
     397            0 :             ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
     398            0 :             e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
     399            0 :             e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
     400              :         }
     401            0 :     }
     402              : }
     403              : 
     404              : impl From<OperationError> for ApiError {
     405            0 :     fn from(value: OperationError) -> Self {
     406            0 :         match value {
     407            0 :             OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => {
     408            0 :                 ApiError::InternalServerError(anyhow::anyhow!(err))
     409              :             }
     410            0 :             OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
     411              :         }
     412            0 :     }
     413              : }
     414              : 
     415              : #[allow(clippy::large_enum_variant)]
     416              : enum TenantCreateOrUpdate {
     417              :     Create(TenantCreateRequest),
     418              :     Update(Vec<ShardUpdate>),
     419              : }
     420              : 
     421              : struct ShardSplitParams {
     422              :     old_shard_count: ShardCount,
     423              :     new_shard_count: ShardCount,
     424              :     new_stripe_size: Option<ShardStripeSize>,
     425              :     targets: Vec<ShardSplitTarget>,
     426              :     policy: PlacementPolicy,
     427              :     config: TenantConfig,
     428              :     shard_ident: ShardIdentity,
     429              : }
     430              : 
     431              : // When preparing for a shard split, we may either choose to proceed with the split,
     432              : // or find that the work is already done and return NoOp.
     433              : enum ShardSplitAction {
     434              :     Split(ShardSplitParams),
     435              :     NoOp(TenantShardSplitResponse),
     436              : }
     437              : 
     438              : // A parent shard which will be split
     439              : struct ShardSplitTarget {
     440              :     parent_id: TenantShardId,
     441              :     node: Node,
     442              :     child_ids: Vec<TenantShardId>,
     443              : }
     444              : 
      445              : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
     446              : /// might not be available.  We therefore use a queue of abort operations processed in the background.
     447              : struct TenantShardSplitAbort {
     448              :     tenant_id: TenantId,
     449              :     /// The target values from the request that failed
     450              :     new_shard_count: ShardCount,
     451              :     new_stripe_size: Option<ShardStripeSize>,
     452              :     /// Until this abort op is complete, no other operations may be done on the tenant
     453              :     _tenant_lock: TracingExclusiveGuard<TenantOperations>,
     454              : }
     455              : 
     456            0 : #[derive(thiserror::Error, Debug)]
     457              : enum TenantShardSplitAbortError {
     458              :     #[error(transparent)]
     459              :     Database(#[from] DatabaseError),
     460              :     #[error(transparent)]
     461              :     Remote(#[from] mgmt_api::Error),
     462              :     #[error("Unavailable")]
     463              :     Unavailable,
     464              : }
     465              : 
     466              : struct ShardUpdate {
     467              :     tenant_shard_id: TenantShardId,
     468              :     placement_policy: PlacementPolicy,
     469              :     tenant_config: TenantConfig,
     470              : 
     471              :     /// If this is None, generation is not updated.
     472              :     generation: Option<Generation>,
     473              : }
     474              : 
     475              : enum StopReconciliationsReason {
     476              :     ShuttingDown,
     477              :     SteppingDown,
     478              : }
     479              : 
     480              : impl std::fmt::Display for StopReconciliationsReason {
     481            0 :     fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
     482            0 :         let s = match self {
     483            0 :             Self::ShuttingDown => "Shutting down",
     484            0 :             Self::SteppingDown => "Stepping down",
     485              :         };
     486            0 :         write!(writer, "{}", s)
     487            0 :     }
     488              : }
     489              : 
     490              : pub(crate) enum ReconcileResultRequest {
     491              :     ReconcileResult(ReconcileResult),
     492              :     Stop,
     493              : }
     494              : 
     495              : // TODO: move this into the storcon peer client when that gets added
     496            0 : #[derive(Serialize, Deserialize, Debug, Default)]
     497              : pub(crate) struct GlobalObservedState(HashMap<TenantShardId, ObservedState>);
     498              : 
     499              : impl Service {
     500            0 :     pub fn get_config(&self) -> &Config {
     501            0 :         &self.config
     502            0 :     }
     503              : 
     504              :     /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
     505              :     /// view of the world, and determine which pageservers are responsive.
     506            0 :     #[instrument(skip_all)]
     507              :     async fn startup_reconcile(
     508              :         self: &Arc<Service>,
     509              :         bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
     510              :             Result<(), (TenantShardId, NotifyError)>,
     511              :         >,
     512              :     ) {
     513              :         // For all tenant shards, a vector of observed states on nodes (where None means
     514              :         // indeterminate, same as in [`ObservedStateLocation`])
     515              :         let mut observed: HashMap<TenantShardId, Vec<(NodeId, Option<LocationConfig>)>> =
     516              :             HashMap::new();
     517              : 
     518              :         // Startup reconciliation does I/O to other services: whether they
     519              :         // are responsive or not, we should aim to finish within our deadline, because:
     520              :         // - If we don't, a k8s readiness hook watching /ready will kill us.
     521              :         // - While we're waiting for startup reconciliation, we are not fully
     522              :         //   available for end user operations like creating/deleting tenants and timelines.
     523              :         //
     524              :         // We set multiple deadlines to break up the time available between the phases of work: this is
     525              :         // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
     526              :         let start_at = Instant::now();
     527              :         let node_scan_deadline = start_at
     528              :             .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
     529              :             .expect("Reconcile timeout is a modest constant");
     530              : 
     531              :         // Accumulate a list of any tenant locations that ought to be detached
     532              :         let mut cleanup = Vec::new();
     533              : 
     534              :         let node_listings = self.scan_node_locations(node_scan_deadline).await;
     535              :         // Send initial heartbeat requests to nodes that replied to the location listing above.
     536              :         let nodes_online = self.initial_heartbeat_round(node_listings.keys()).await;
     537              : 
     538              :         for (node_id, list_response) in node_listings {
     539              :             let tenant_shards = list_response.tenant_shards;
     540              :             tracing::info!(
     541              :                 "Received {} shard statuses from pageserver {}, setting it to Active",
     542              :                 tenant_shards.len(),
     543              :                 node_id
     544              :             );
     545              : 
     546              :             for (tenant_shard_id, conf_opt) in tenant_shards {
     547              :                 let shard_observations = observed.entry(tenant_shard_id).or_default();
     548              :                 shard_observations.push((node_id, conf_opt));
     549              :             }
     550              :         }
     551              : 
     552              :         // List of tenants for which we will attempt to notify compute of their location at startup
     553              :         let mut compute_notifications = Vec::new();
     554              : 
     555              :         // Populate intent and observed states for all tenants, based on reported state on pageservers
     556              :         tracing::info!("Populating tenant shards' states from initial pageserver scan...");
     557              :         let shard_count = {
     558              :             let mut locked = self.inner.write().unwrap();
     559              :             let (nodes, tenants, scheduler) = locked.parts_mut();
     560              : 
     561              :             // Mark nodes online if they responded to us: nodes are offline by default after a restart.
     562              :             let mut new_nodes = (**nodes).clone();
     563              :             for (node_id, node) in new_nodes.iter_mut() {
     564              :                 if let Some(utilization) = nodes_online.get(node_id) {
     565              :                     node.set_availability(NodeAvailability::Active(UtilizationScore(
     566              :                         utilization.utilization_score,
     567              :                     )));
     568              :                     scheduler.node_upsert(node);
     569              :                 }
     570              :             }
     571              :             *nodes = Arc::new(new_nodes);
     572              : 
     573              :             for (tenant_shard_id, shard_observations) in observed {
     574              :                 for (node_id, observed_loc) in shard_observations {
     575              :                     let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
     576              :                         cleanup.push((tenant_shard_id, node_id));
     577              :                         continue;
     578              :                     };
     579              :                     tenant_shard
     580              :                         .observed
     581              :                         .locations
     582              :                         .insert(node_id, ObservedStateLocation { conf: observed_loc });
     583              :                 }
     584              :             }
     585              : 
     586              :             // Populate each tenant's intent state
     587              :             let mut schedule_context = ScheduleContext::default();
     588              :             for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
     589              :                 if tenant_shard_id.shard_number == ShardNumber(0) {
     590              :                     // Reset scheduling context each time we advance to the next Tenant
     591              :                     schedule_context = ScheduleContext::default();
     592              :                 }
     593              : 
     594              :                 tenant_shard.intent_from_observed(scheduler);
     595              :                 if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
     596              :                     // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
     597              :                     // not enough pageservers are available.  The tenant may well still be available
     598              :                     // to clients.
     599              :                     tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
     600              :                 } else {
     601              :                     // If we're both intending and observed to be attached at a particular node, we will
     602              :                     // emit a compute notification for this. In the case where our observed state does not
     603              :                     // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
     604              :                     if let Some(attached_at) = tenant_shard.stably_attached() {
     605              :                         compute_notifications.push((
     606              :                             *tenant_shard_id,
     607              :                             attached_at,
     608              :                             tenant_shard.shard.stripe_size,
     609              :                         ));
     610              :                     }
     611              :                 }
     612              :             }
     613              : 
     614              :             tenants.len()
     615              :         };
     616              : 
     617              :         // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
     618              :         // generation_pageserver in the database.
     619              : 
     620              :         // Emit compute hook notifications for all tenants which are already stably attached.  Other tenants
     621              :         // will emit compute hook notifications when they reconcile.
     622              :         //
     623              :         // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later
     624              :         // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
     625              :         // calls will be correctly ordered wrt these.
     626              :         //
     627              :         // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them
     628              :         // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore
     629              :         // unit and start doing I/O.
     630              :         tracing::info!(
     631              :             "Sending {} compute notifications",
     632              :             compute_notifications.len()
     633              :         );
     634              :         self.compute_hook.notify_background(
     635              :             compute_notifications,
     636              :             bg_compute_notify_result_tx.clone(),
     637              :             &self.cancel,
     638              :         );
     639              : 
     640              :         // Finally, now that the service is up and running, launch reconcile operations for any tenants
     641              :         // which require it: under normal circumstances this should only include tenants that were in some
     642              :         // transient state before we restarted, or any tenants whose compute hooks failed above.
     643              :         tracing::info!("Checking for shards in need of reconciliation...");
     644              :         let reconcile_tasks = self.reconcile_all();
     645              :         // We will not wait for these reconciliation tasks to run here: we're now done with startup and
     646              :         // normal operations may proceed.
     647              : 
     648              :         // Clean up any tenants that were found on pageservers but are not known to us.  Do this in the
     649              :         // background because it does not need to complete in order to proceed with other work.
     650              :         if !cleanup.is_empty() {
     651              :             tracing::info!("Cleaning up {} locations in the background", cleanup.len());
     652              :             tokio::task::spawn({
     653              :                 let cleanup_self = self.clone();
     654            0 :                 async move { cleanup_self.cleanup_locations(cleanup).await }
     655              :             });
     656              :         }
     657              : 
     658              :         tracing::info!("Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)");
     659              :     }
     660              : 
     661            0 :     async fn initial_heartbeat_round<'a>(
     662            0 :         &self,
     663            0 :         node_ids: impl Iterator<Item = &'a NodeId>,
     664            0 :     ) -> HashMap<NodeId, PageserverUtilization> {
     665            0 :         assert!(!self.startup_complete.is_ready());
     666              : 
     667            0 :         let all_nodes = {
     668            0 :             let locked = self.inner.read().unwrap();
     669            0 :             locked.nodes.clone()
     670            0 :         };
     671            0 : 
     672            0 :         let mut nodes_to_heartbeat = HashMap::new();
     673            0 :         for node_id in node_ids {
     674            0 :             match all_nodes.get(node_id) {
     675            0 :                 Some(node) => {
     676            0 :                     nodes_to_heartbeat.insert(*node_id, node.clone());
     677            0 :                 }
     678              :                 None => {
     679            0 :                     tracing::warn!("Node {node_id} was removed during start-up");
     680              :                 }
     681              :             }
     682              :         }
     683              : 
     684            0 :         tracing::info!("Sending initial heartbeats...");
     685            0 :         let res = self
     686            0 :             .heartbeater
     687            0 :             .heartbeat(Arc::new(nodes_to_heartbeat))
     688            0 :             .await;
     689              : 
     690            0 :         let mut online_nodes = HashMap::new();
     691            0 :         if let Ok(deltas) = res {
     692            0 :             for (node_id, status) in deltas.0 {
     693            0 :                 match status {
     694            0 :                     PageserverState::Available { utilization, .. } => {
     695            0 :                         online_nodes.insert(node_id, utilization);
     696            0 :                     }
     697            0 :                     PageserverState::Offline => {}
     698              :                     PageserverState::WarmingUp { .. } => {
     699            0 :                         unreachable!("Nodes are never marked warming-up during startup reconcile")
     700              :                     }
     701              :                 }
     702              :             }
     703            0 :         }
     704              : 
     705            0 :         online_nodes
     706            0 :     }
     707              : 
     708              :     /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
     709              :     ///
     710              :     /// The result includes only nodes which responded within the deadline
     711            0 :     async fn scan_node_locations(
     712            0 :         &self,
     713            0 :         deadline: Instant,
     714            0 :     ) -> HashMap<NodeId, LocationConfigListResponse> {
     715            0 :         let nodes = {
     716            0 :             let locked = self.inner.read().unwrap();
     717            0 :             locked.nodes.clone()
     718            0 :         };
     719            0 : 
     720            0 :         let mut node_results = HashMap::new();
     721            0 : 
     722            0 :         let mut node_list_futs = FuturesUnordered::new();
     723            0 : 
     724            0 :         tracing::info!("Scanning shards on {} nodes...", nodes.len());
     725            0 :         for node in nodes.values() {
     726            0 :             node_list_futs.push({
     727            0 :                 async move {
     728            0 :                     tracing::info!("Scanning shards on node {node}...");
     729            0 :                     let timeout = Duration::from_secs(1);
     730            0 :                     let response = node
     731            0 :                         .with_client_retries(
     732            0 :                             |client| async move { client.list_location_config().await },
     733            0 :                             &self.config.jwt_token,
     734            0 :                             1,
     735            0 :                             5,
     736            0 :                             timeout,
     737            0 :                             &self.cancel,
     738            0 :                         )
     739            0 :                         .await;
     740            0 :                     (node.get_id(), response)
     741            0 :                 }
     742            0 :             });
     743            0 :         }
     744              : 
     745              :         loop {
     746            0 :             let (node_id, result) = tokio::select! {
     747              :                 next = node_list_futs.next() => {
     748              :                     match next {
     749              :                         Some(result) => result,
     750              :                         None =>{
     751              :                             // We got results for all our nodes
     752              :                             break;
     753              :                         }
     754              : 
     755              :                     }
     756              :                 },
     757              :                 _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
     758              :                     // Give up waiting for anyone who hasn't responded: we will yield the results that we have
     759              :                     tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
     760              :                     break;
     761              :                 }
     762              :             };
     763              : 
     764            0 :             let Some(list_response) = result else {
     765            0 :                 tracing::info!("Shutdown during startup_reconcile");
     766            0 :                 break;
     767              :             };
     768              : 
     769            0 :             match list_response {
     770            0 :                 Err(e) => {
     771            0 :                     tracing::warn!("Could not scan node {} ({e})", node_id);
     772              :                 }
     773            0 :                 Ok(listing) => {
     774            0 :                     node_results.insert(node_id, listing);
     775            0 :                 }
     776              :             }
     777              :         }
     778              : 
     779            0 :         node_results
     780            0 :     }
     781              : 
     782              :     /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
     783              :     ///
     784              :     /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
      785              :     /// tenants, then it is probably something that was incompletely deleted earlier: we will not fight with any
     786              :     /// other task trying to attach it.
     787            0 :     #[instrument(skip_all)]
     788              :     async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
     789              :         let nodes = self.inner.read().unwrap().nodes.clone();
     790              : 
     791              :         for (tenant_shard_id, node_id) in cleanup {
     792              :             // A node reported a tenant_shard_id which is unknown to us: detach it.
     793              :             let Some(node) = nodes.get(&node_id) else {
     794              :                 // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
     795              :                 // a location to clean up on a node that has since been removed.
     796              :                 tracing::info!(
     797              :                     "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
     798              :                 );
     799              :                 continue;
     800              :             };
     801              : 
     802              :             if self.cancel.is_cancelled() {
     803              :                 break;
     804              :             }
     805              : 
     806              :             let client = PageserverClient::new(
     807              :                 node.get_id(),
     808              :                 node.base_url(),
     809              :                 self.config.jwt_token.as_deref(),
     810              :             );
     811              :             match client
     812              :                 .location_config(
     813              :                     tenant_shard_id,
     814              :                     LocationConfig {
     815              :                         mode: LocationConfigMode::Detached,
     816              :                         generation: None,
     817              :                         secondary_conf: None,
     818              :                         shard_number: tenant_shard_id.shard_number.0,
     819              :                         shard_count: tenant_shard_id.shard_count.literal(),
     820              :                         shard_stripe_size: 0,
     821              :                         tenant_conf: models::TenantConfig::default(),
     822              :                     },
     823              :                     None,
     824              :                     false,
     825              :                 )
     826              :                 .await
     827              :             {
     828              :                 Ok(()) => {
     829              :                     tracing::info!(
     830              :                         "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
     831              :                     );
     832              :                 }
     833              :                 Err(e) => {
     834              :                     // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
     835              :                     // break anything.
     836              :                     tracing::error!(
      837              :                         "Failed to detach unknown shard {tenant_shard_id} on pageserver {node_id}: {e}"
     838              :                     );
     839              :                 }
     840              :             }
     841              :         }
     842              :     }
     843              : 
     844              :     /// Long running background task that periodically wakes up and looks for shards that need
     845              :     /// reconciliation.  Reconciliation is fallible, so any reconciliation tasks that fail during
     846              :     /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
     847              :     /// for those retries.
     848            0 :     #[instrument(skip_all)]
     849              :     async fn background_reconcile(self: &Arc<Self>) {
     850              :         self.startup_complete.clone().wait().await;
     851              : 
     852              :         const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
     853              : 
     854              :         let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
     855              :         while !self.reconcilers_cancel.is_cancelled() {
     856              :             tokio::select! {
     857              :               _ = interval.tick() => {
     858              :                 let reconciles_spawned = self.reconcile_all();
     859              :                 if reconciles_spawned == 0 {
     860              :                     // Run optimizer only when we didn't find any other work to do
     861              :                     let optimizations = self.optimize_all().await;
     862              :                     if optimizations == 0 {
     863              :                         // Run new splits only when no optimizations are pending
     864              :                         self.autosplit_tenants().await;
     865              :                     }
     866              :                 }
     867              :             }
     868              :               _ = self.reconcilers_cancel.cancelled() => return
     869              :             }
     870              :         }
     871              :     }
     872            0 :     #[instrument(skip_all)]
     873              :     async fn spawn_heartbeat_driver(&self) {
     874              :         self.startup_complete.clone().wait().await;
     875              : 
     876              :         const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);
     877              : 
     878              :         let mut interval = tokio::time::interval(HEARTBEAT_INTERVAL);
     879              :         while !self.cancel.is_cancelled() {
     880              :             tokio::select! {
     881              :               _ = interval.tick() => { }
     882              :               _ = self.cancel.cancelled() => return
     883              :             };
     884              : 
     885              :             let nodes = {
     886              :                 let locked = self.inner.read().unwrap();
     887              :                 locked.nodes.clone()
     888              :             };
     889              : 
     890              :             let res = self.heartbeater.heartbeat(nodes).await;
     891              :             if let Ok(deltas) = res {
     892              :                 for (node_id, state) in deltas.0 {
     893              :                     let new_availability = match state {
     894              :                         PageserverState::Available { utilization, .. } => NodeAvailability::Active(
     895              :                             UtilizationScore(utilization.utilization_score),
     896              :                         ),
     897              :                         PageserverState::WarmingUp { started_at } => {
     898              :                             NodeAvailability::WarmingUp(started_at)
     899              :                         }
     900              :                         PageserverState::Offline => {
     901              :                             // The node might have been placed in the WarmingUp state
     902              :                             // while the heartbeat round was on-going. Hence, filter out
     903              :                             // offline transitions for WarmingUp nodes that are still within
     904              :                             // their grace period.
     905              :                             if let Ok(NodeAvailability::WarmingUp(started_at)) =
     906            0 :                                 self.get_node(node_id).await.map(|n| n.get_availability())
     907              :                             {
     908              :                                 let now = Instant::now();
     909              :                                 if now - started_at >= self.config.max_warming_up_interval {
     910              :                                     NodeAvailability::Offline
     911              :                                 } else {
     912              :                                     NodeAvailability::WarmingUp(started_at)
     913              :                                 }
     914              :                             } else {
     915              :                                 NodeAvailability::Offline
     916              :                             }
     917              :                         }
     918              :                     };
     919              : 
      920              :                     // This is the code path for genuine availability transitions (i.e. the node
     921              :                     // goes unavailable and/or comes back online).
     922              :                     let res = self
     923              :                         .node_configure(node_id, Some(new_availability), None)
     924              :                         .await;
     925              : 
     926              :                     match res {
     927              :                         Ok(()) => {}
     928              :                         Err(ApiError::NotFound(_)) => {
     929              :                             // This should be rare, but legitimate since the heartbeats are done
     930              :                             // on a snapshot of the nodes.
     931              :                             tracing::info!("Node {} was not found after heartbeat round", node_id);
     932              :                         }
     933              :                         Err(err) => {
     934              :                             // Transition to active involves reconciling: if a node responds to a heartbeat then
     935              :                             // becomes unavailable again, we may get an error here.
     936              :                             tracing::error!(
     937              :                                 "Failed to update node {} after heartbeat round: {}",
     938              :                                 node_id,
     939              :                                 err
     940              :                             );
     941              :                         }
     942              :                     }
     943              :                 }
     944              :             }
     945              :         }
     946              :     }
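
// Editorial sketch (not part of service.rs): the Offline arm above only demotes a
// WarmingUp node once its grace period has elapsed. That decision reduces to a single
// Instant comparison, shown here in isolation; the parameter names mirror the fields
// used above, but the function itself is hypothetical.

use std::time::{Duration, Instant};

/// Returns true when the warming-up grace period has elapsed and the node should be
/// treated as Offline; false while it is still within its window.
fn warm_up_grace_expired(started_at: Instant, max_warming_up_interval: Duration) -> bool {
    Instant::now().duration_since(started_at) >= max_warming_up_interval
}
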
     947              : 
     948              :     /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
     949              :     /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
     950              :     /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
     951              :     /// will indicate that reconciliation is not needed.
     952            0 :     #[instrument(skip_all, fields(
     953              :         tenant_id=%result.tenant_shard_id.tenant_id, shard_id=%result.tenant_shard_id.shard_slug(),
     954              :         sequence=%result.sequence
     955            0 :     ))]
     956              :     fn process_result(&self, mut result: ReconcileResult) {
     957              :         let mut locked = self.inner.write().unwrap();
     958              :         let (nodes, tenants, _scheduler) = locked.parts_mut();
     959              :         let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
     960              :             // A reconciliation result might race with removing a tenant: drop results for
     961              :             // tenants that aren't in our map.
     962              :             return;
     963              :         };
     964              : 
     965              :         // Usually generation should only be updated via this path, so the max() isn't
      966              :         // needed, but it is used to handle out-of-band updates via, e.g., a test hook.
     967              :         tenant.generation = std::cmp::max(tenant.generation, result.generation);
     968              : 
     969              :         // If the reconciler signals that it failed to notify compute, set this state on
     970              :         // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
     971              :         tenant.pending_compute_notification = result.pending_compute_notification;
     972              : 
     973              :         // Let the TenantShard know it is idle.
     974              :         tenant.reconcile_complete(result.sequence);
     975              : 
     976              :         // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
     977              :         // make to the tenant
     978              :         result
     979              :             .observed
     980              :             .locations
     981            0 :             .retain(|node_id, _loc| nodes.contains_key(node_id));
     982              : 
     983              :         match result.result {
     984              :             Ok(()) => {
     985              :                 for (node_id, loc) in &result.observed.locations {
     986              :                     if let Some(conf) = &loc.conf {
     987              :                         tracing::info!("Updating observed location {}: {:?}", node_id, conf);
     988              :                     } else {
     989              :                         tracing::info!("Setting observed location {} to None", node_id,)
     990              :                     }
     991              :                 }
     992              : 
     993              :                 tenant.observed = result.observed;
     994              :                 tenant.waiter.advance(result.sequence);
     995              :             }
     996              :             Err(e) => {
     997              :                 match e {
     998              :                     ReconcileError::Cancel => {
     999              :                         tracing::info!("Reconciler was cancelled");
    1000              :                     }
    1001              :                     ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
    1002              :                         // This might be due to the reconciler getting cancelled, or it might
    1003              :                         // be due to the `Node` being marked offline.
    1004              :                         tracing::info!("Reconciler cancelled during pageserver API call");
    1005              :                     }
    1006              :                     _ => {
    1007              :                         tracing::warn!("Reconcile error: {}", e);
    1008              :                     }
    1009              :                 }
    1010              : 
    1011              :                 // Ordering: populate last_error before advancing error_seq,
    1012              :                 // so that waiters will see the correct error after waiting.
    1013              :                 tenant.set_last_error(result.sequence, e);
    1014              : 
    1015              :                 for (node_id, o) in result.observed.locations {
    1016              :                     tenant.observed.locations.insert(node_id, o);
    1017              :                 }
    1018              :             }
    1019              :         }
    1020              : 
    1021              :         // Maybe some other work can proceed now that this job finished.
    1022              :         if self.reconciler_concurrency.available_permits() > 0 {
    1023              :             while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
    1024              :                 let (nodes, tenants, _scheduler) = locked.parts_mut();
    1025              :                 if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
    1026              :                     shard.delayed_reconcile = false;
    1027              :                     self.maybe_reconcile_shard(shard, nodes);
    1028              :                 }
    1029              : 
    1030              :                 if self.reconciler_concurrency.available_permits() == 0 {
    1031              :                     break;
    1032              :                 }
    1033              :             }
    1034              :         }
    1035              :     }
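
// Editorial sketch (not part of service.rs): one subtle step above is filtering the
// reconciler's observed locations down to nodes that still exist, so a stale
// ReconcileResult cannot re-introduce a deleted node into a tenant's observed state.
// The same idea in isolation, with plain u64 node ids standing in for the real NodeId:

use std::collections::HashMap;

fn retain_known_locations<L>(observed: &mut HashMap<u64, L>, nodes: &HashMap<u64, ()>) {
    // Keep only the locations whose node is still registered.
    observed.retain(|node_id, _loc| nodes.contains_key(node_id));
}
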
    1036              : 
    1037            0 :     async fn process_results(
    1038            0 :         &self,
    1039            0 :         mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
    1040            0 :         mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
    1041            0 :             Result<(), (TenantShardId, NotifyError)>,
    1042            0 :         >,
    1043            0 :     ) {
    1044            0 :         loop {
    1045            0 :             // Wait for the next result, or for cancellation
    1046            0 :             tokio::select! {
    1047              :                 r = result_rx.recv() => {
    1048              :                     match r {
    1049              :                         Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
    1050              :                         None | Some(ReconcileResultRequest::Stop) => {break;}
    1051              :                     }
    1052              :                 }
    1053            0 :                 _ = async{
    1054            0 :                     match bg_compute_hook_result_rx.recv().await {
    1055            0 :                         Some(result) => {
    1056            0 :                             if let Err((tenant_shard_id, notify_error)) = result {
    1057            0 :                                 tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
    1058            0 :                                 let mut locked = self.inner.write().unwrap();
    1059            0 :                                 if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
    1060            0 :                                     shard.pending_compute_notification = true;
    1061            0 :                                 }
    1062              : 
    1063            0 :                             }
    1064              :                         },
    1065              :                         None => {
     1066              :                             // This channel is dead, but we don't want to terminate the outer loop: just wait for shutdown
    1067            0 :                             self.cancel.cancelled().await;
    1068              :                         }
    1069              :                     }
    1070            0 :                 } => {},
    1071              :                 _ = self.cancel.cancelled() => {
    1072              :                     break;
    1073              :                 }
    1074            0 :             };
    1075            0 :         }
    1076            0 :     }
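
// Editorial sketch (not part of service.rs): process_results selects over a channel
// that can close before shutdown. A closed mpsc channel makes recv() resolve to None
// immediately, which would otherwise spin the loop; the trick used above is to park
// that branch on the cancellation token once the channel is dead. A stripped-down
// version, with a hypothetical u64 message type:

use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

async fn drain_until_cancelled(mut rx: mpsc::Receiver<u64>, cancel: CancellationToken) {
    loop {
        tokio::select! {
            _ = async {
                match rx.recv().await {
                    Some(_msg) => { /* handle the message */ }
                    None => {
                        // Channel closed: don't terminate the outer loop, just wait for shutdown.
                        cancel.cancelled().await;
                    }
                }
            } => {}
            _ = cancel.cancelled() => break,
        }
    }
}
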
    1077              : 
    1078            0 :     async fn process_aborts(
    1079            0 :         &self,
    1080            0 :         mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
    1081            0 :     ) {
    1082              :         loop {
    1083              :             // Wait for the next result, or for cancellation
    1084            0 :             let op = tokio::select! {
    1085              :                 r = abort_rx.recv() => {
    1086              :                     match r {
    1087              :                         Some(op) => {op},
    1088              :                         None => {break;}
    1089              :                     }
    1090              :                 }
    1091              :                 _ = self.cancel.cancelled() => {
    1092              :                     break;
    1093              :                 }
    1094              :             };
    1095              : 
    1096              :             // Retry until shutdown: we must keep this request object alive until it is properly
     1097              :             // processed, as it holds a lock guard that prevents other operations from trying to do things
    1098              :             // to the tenant while it is in a weird part-split state.
    1099            0 :             while !self.cancel.is_cancelled() {
    1100            0 :                 match self.abort_tenant_shard_split(&op).await {
    1101            0 :                     Ok(_) => break,
    1102            0 :                     Err(e) => {
    1103            0 :                         tracing::warn!(
    1104            0 :                             "Failed to abort shard split on {}, will retry: {e}",
    1105              :                             op.tenant_id
    1106              :                         );
    1107              : 
    1108              :                         // If a node is unavailable, we hope that it has been properly marked Offline
    1109              :                         // when we retry, so that the abort op will succeed.  If the abort op is failing
    1110              :                         // for some other reason, we will keep retrying forever, or until a human notices
    1111              :                         // and does something about it (either fixing a pageserver or restarting the controller).
    1112            0 :                         tokio::time::timeout(Duration::from_secs(5), self.cancel.cancelled())
    1113            0 :                             .await
    1114            0 :                             .ok();
    1115              :                     }
    1116              :                 }
    1117              :             }
    1118              :         }
    1119            0 :     }
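
// Editorial sketch (not part of service.rs): the abort loop above retries until success
// or shutdown, pausing roughly five seconds between attempts while still waking up
// immediately if cancellation fires. The same retry shape in isolation; `try_abort` is
// a hypothetical stand-in for abort_tenant_shard_split.

use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn retry_until_cancelled(cancel: &CancellationToken) {
    while !cancel.is_cancelled() {
        match try_abort().await {
            Ok(()) => break,
            Err(e) => {
                eprintln!("abort failed, will retry: {e}");
                // Sleep for up to 5 seconds, returning early on cancellation.
                tokio::time::timeout(Duration::from_secs(5), cancel.cancelled())
                    .await
                    .ok();
            }
        }
    }
}

async fn try_abort() -> Result<(), String> {
    Ok(()) // placeholder for the real, fallible abort operation
}
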
    1120              : 
    1121            0 :     pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
    1122            0 :         let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
    1123            0 :         let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
    1124            0 : 
    1125            0 :         tracing::info!("Loading nodes from database...");
    1126            0 :         let nodes = persistence
    1127            0 :             .list_nodes()
    1128            0 :             .await?
    1129            0 :             .into_iter()
    1130            0 :             .map(Node::from_persistent)
    1131            0 :             .collect::<Vec<_>>();
    1132            0 :         let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
    1133            0 :         tracing::info!("Loaded {} nodes from database.", nodes.len());
    1134              : 
    1135            0 :         tracing::info!("Loading shards from database...");
    1136            0 :         let mut tenant_shard_persistence = persistence.list_tenant_shards().await?;
    1137            0 :         tracing::info!(
    1138            0 :             "Loaded {} shards from database.",
    1139            0 :             tenant_shard_persistence.len()
    1140              :         );
    1141              : 
    1142              :         // If any shard splits were in progress, reset the database state to abort them
    1143            0 :         let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
    1144            0 :             HashMap::new();
    1145            0 :         for tsp in &mut tenant_shard_persistence {
    1146            0 :             let shard = tsp.get_shard_identity()?;
    1147            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1148            0 :             let entry = tenant_shard_count_min_max
    1149            0 :                 .entry(tenant_shard_id.tenant_id)
    1150            0 :                 .or_insert_with(|| (shard.count, shard.count));
    1151            0 :             entry.0 = std::cmp::min(entry.0, shard.count);
    1152            0 :             entry.1 = std::cmp::max(entry.1, shard.count);
    1153            0 :         }
    1154              : 
    1155            0 :         for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
    1156            0 :             if count_min != count_max {
    1157              :                 // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
    1158              :                 // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
    1159              :                 // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
    1160            0 :                 tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
    1161            0 :                 let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
    1162              : 
    1163              :                 // We may never see the Complete status here: if the split was complete, we wouldn't have
     1164              :                 // identified this tenant as having mismatching min/max counts.
    1165            0 :                 assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
    1166              : 
    1167              :                 // Clear the splitting status in-memory, to reflect that we just aborted in the database
    1168            0 :                 tenant_shard_persistence.iter_mut().for_each(|tsp| {
    1169            0 :                     // Set idle split state on those shards that we will retain.
    1170            0 :                     let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
    1171            0 :                     if tsp_tenant_id == tenant_id
    1172            0 :                         && tsp.get_shard_identity().unwrap().count == count_min
    1173            0 :                     {
    1174            0 :                         tsp.splitting = SplitState::Idle;
    1175            0 :                     } else if tsp_tenant_id == tenant_id {
    1176              :                         // Leave the splitting state on the child shards: this will be used next to
    1177              :                         // drop them.
    1178            0 :                         tracing::info!(
    1179            0 :                             "Shard {tsp_tenant_id} will be dropped after shard split abort",
    1180              :                         );
    1181            0 :                     }
    1182            0 :                 });
    1183            0 : 
    1184            0 :                 // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
    1185            0 :                 tenant_shard_persistence.retain(|tsp| {
    1186            0 :                     TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
    1187            0 :                         || tsp.splitting == SplitState::Idle
    1188            0 :                 });
    1189            0 :             }
    1190              :         }
    1191              : 
    1192            0 :         let mut tenants = BTreeMap::new();
    1193            0 : 
    1194            0 :         let mut scheduler = Scheduler::new(nodes.values());
    1195            0 : 
    1196            0 :         #[cfg(feature = "testing")]
    1197            0 :         {
    1198            0 :             // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
    1199            0 :             // tests only store the shards, not the nodes.  The nodes will be loaded shortly
     1200            0 :             // afterwards, when pageservers start up and register.
    1201            0 :             let mut node_ids = HashSet::new();
    1202            0 :             for tsp in &tenant_shard_persistence {
    1203            0 :                 if let Some(node_id) = tsp.generation_pageserver {
    1204            0 :                     node_ids.insert(node_id);
    1205            0 :                 }
    1206              :             }
    1207            0 :             for node_id in node_ids {
    1208            0 :                 tracing::info!("Creating node {} in scheduler for tests", node_id);
    1209            0 :                 let node = Node::new(
    1210            0 :                     NodeId(node_id as u64),
    1211            0 :                     "".to_string(),
    1212            0 :                     123,
    1213            0 :                     "".to_string(),
    1214            0 :                     123,
    1215            0 :                 );
    1216            0 : 
    1217            0 :                 scheduler.node_upsert(&node);
    1218              :             }
    1219              :         }
    1220            0 :         for tsp in tenant_shard_persistence {
    1221            0 :             let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1222              : 
     1223              :             // We will populate intent properly later in [`Self::startup_reconcile`]; initially, populate
    1224              :             // it with what we can infer: the node for which a generation was most recently issued.
    1225            0 :             let mut intent = IntentState::new();
    1226            0 :             if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
    1227              :             {
    1228            0 :                 if nodes.contains_key(&generation_pageserver) {
    1229            0 :                     intent.set_attached(&mut scheduler, Some(generation_pageserver));
    1230            0 :                 } else {
    1231              :                     // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
    1232              :                     // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
    1233              :                     // on different pageservers.
    1234            0 :                     tracing::warn!("Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled");
    1235              :                 }
    1236            0 :             }
    1237            0 :             let new_tenant = TenantShard::from_persistent(tsp, intent)?;
    1238              : 
    1239            0 :             tenants.insert(tenant_shard_id, new_tenant);
    1240              :         }
    1241              : 
    1242            0 :         let (startup_completion, startup_complete) = utils::completion::channel();
    1243            0 : 
     1244            0 :         // This channel is continuously consumed by process_results, so it doesn't need to be very large.
    1245            0 :         let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
    1246            0 :             tokio::sync::mpsc::channel(512);
    1247            0 : 
    1248            0 :         let (delayed_reconcile_tx, delayed_reconcile_rx) =
    1249            0 :             tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
    1250            0 : 
    1251            0 :         let cancel = CancellationToken::new();
    1252            0 :         let reconcilers_cancel = cancel.child_token();
    1253            0 : 
    1254            0 :         let heartbeater = Heartbeater::new(
    1255            0 :             config.jwt_token.clone(),
    1256            0 :             config.max_offline_interval,
    1257            0 :             config.max_warming_up_interval,
    1258            0 :             cancel.clone(),
    1259            0 :         );
    1260            0 :         let this = Arc::new(Self {
    1261            0 :             inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
    1262            0 :                 nodes,
    1263            0 :                 tenants,
    1264            0 :                 scheduler,
    1265            0 :                 delayed_reconcile_rx,
    1266            0 :             ))),
    1267            0 :             config: config.clone(),
    1268            0 :             persistence,
    1269            0 :             compute_hook: Arc::new(ComputeHook::new(config.clone())),
    1270            0 :             result_tx,
    1271            0 :             heartbeater,
    1272            0 :             reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
    1273            0 :                 config.reconciler_concurrency,
    1274            0 :             )),
    1275            0 :             delayed_reconcile_tx,
    1276            0 :             abort_tx,
    1277            0 :             startup_complete: startup_complete.clone(),
    1278            0 :             cancel,
    1279            0 :             reconcilers_cancel,
    1280            0 :             gate: Gate::default(),
    1281            0 :             reconcilers_gate: Gate::default(),
    1282            0 :             tenant_op_locks: Default::default(),
    1283            0 :             node_op_locks: Default::default(),
    1284            0 :         });
    1285            0 : 
    1286            0 :         let result_task_this = this.clone();
    1287            0 :         tokio::task::spawn(async move {
    1288              :             // Block shutdown until we're done (we must respect self.cancel)
    1289            0 :             if let Ok(_gate) = result_task_this.gate.enter() {
    1290            0 :                 result_task_this
    1291            0 :                     .process_results(result_rx, bg_compute_notify_result_rx)
    1292            0 :                     .await
    1293            0 :             }
    1294            0 :         });
    1295            0 : 
    1296            0 :         tokio::task::spawn({
    1297            0 :             let this = this.clone();
    1298            0 :             async move {
    1299              :                 // Block shutdown until we're done (we must respect self.cancel)
    1300            0 :                 if let Ok(_gate) = this.gate.enter() {
    1301            0 :                     this.process_aborts(abort_rx).await
    1302            0 :                 }
    1303            0 :             }
    1304            0 :         });
    1305            0 : 
    1306            0 :         tokio::task::spawn({
    1307            0 :             let this = this.clone();
    1308            0 :             async move {
    1309            0 :                 if let Ok(_gate) = this.gate.enter() {
    1310            0 :                     loop {
    1311            0 :                         tokio::select! {
    1312              :                             _ = this.cancel.cancelled() => {
    1313              :                                 break;
    1314              :                             },
    1315              :                             _ = tokio::time::sleep(Duration::from_secs(60)) => {}
    1316              :                         };
    1317            0 :                         this.tenant_op_locks.housekeeping();
    1318              :                     }
    1319            0 :                 }
    1320            0 :             }
    1321            0 :         });
    1322            0 : 
    1323            0 :         tokio::task::spawn({
    1324            0 :             let this = this.clone();
    1325            0 :             // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
    1326            0 :             // is done.
    1327            0 :             let startup_completion = startup_completion.clone();
    1328            0 :             async move {
    1329              :                 // Block shutdown until we're done (we must respect self.cancel)
    1330            0 :                 let Ok(_gate) = this.gate.enter() else {
    1331            0 :                     return;
    1332              :                 };
    1333              : 
    1334            0 :                 this.startup_reconcile(bg_compute_notify_result_tx).await;
    1335            0 :                 drop(startup_completion);
    1336            0 :             }
    1337            0 :         });
    1338            0 : 
    1339            0 :         tokio::task::spawn({
    1340            0 :             let this = this.clone();
    1341            0 :             let startup_complete = startup_complete.clone();
    1342            0 :             async move {
    1343            0 :                 startup_complete.wait().await;
    1344            0 :                 this.background_reconcile().await;
    1345            0 :             }
    1346            0 :         });
    1347            0 : 
    1348            0 :         tokio::task::spawn({
    1349            0 :             let this = this.clone();
    1350            0 :             let startup_complete = startup_complete.clone();
    1351            0 :             async move {
    1352            0 :                 startup_complete.wait().await;
    1353            0 :                 this.spawn_heartbeat_driver().await;
    1354            0 :             }
    1355            0 :         });
    1356            0 : 
    1357            0 :         Ok(this)
    1358            0 :     }
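
// Editorial sketch (not part of service.rs): every background task spawned above first
// waits on the startup barrier, and the barrier is only released once startup_reconcile
// has finished. The internal utils::completion pair is not shown here; a tokio watch
// channel is used as a stand-in to illustrate the same ordering.

use tokio::sync::watch;

async fn spawn_after_startup() {
    let (startup_tx, startup_rx) = watch::channel(false);

    let mut rx = startup_rx.clone();
    tokio::spawn(async move {
        // Block until the startup barrier is released.
        if rx.changed().await.is_ok() {
            // ... run background_reconcile / the heartbeat driver here ...
        }
    });

    // ... perform startup reconciliation, then release the barrier:
    let _ = startup_tx.send(true);
}
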
    1359              : 
    1360            0 :     pub(crate) async fn attach_hook(
    1361            0 :         &self,
    1362            0 :         attach_req: AttachHookRequest,
    1363            0 :     ) -> anyhow::Result<AttachHookResponse> {
    1364            0 :         let _tenant_lock = trace_exclusive_lock(
    1365            0 :             &self.tenant_op_locks,
    1366            0 :             attach_req.tenant_shard_id.tenant_id,
    1367            0 :             TenantOperations::AttachHook,
    1368            0 :         )
    1369            0 :         .await;
    1370              : 
    1371              :         // This is a test hook.  To enable using it on tenants that were created directly with
    1372              :         // the pageserver API (not via this service), we will auto-create any missing tenant
    1373              :         // shards with default state.
    1374            0 :         let insert = {
    1375            0 :             let locked = self.inner.write().unwrap();
    1376            0 :             !locked.tenants.contains_key(&attach_req.tenant_shard_id)
    1377            0 :         };
    1378            0 : 
    1379            0 :         if insert {
    1380            0 :             let tsp = TenantShardPersistence {
    1381            0 :                 tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
    1382            0 :                 shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
    1383            0 :                 shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
    1384            0 :                 shard_stripe_size: 0,
    1385            0 :                 generation: attach_req.generation_override.or(Some(0)),
    1386            0 :                 generation_pageserver: None,
    1387            0 :                 placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
    1388            0 :                 config: serde_json::to_string(&TenantConfig::default()).unwrap(),
    1389            0 :                 splitting: SplitState::default(),
    1390            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    1391            0 :                     .unwrap(),
    1392            0 :             };
    1393            0 : 
    1394            0 :             match self.persistence.insert_tenant_shards(vec![tsp]).await {
    1395            0 :                 Err(e) => match e {
    1396              :                     DatabaseError::Query(diesel::result::Error::DatabaseError(
    1397              :                         DatabaseErrorKind::UniqueViolation,
    1398              :                         _,
    1399              :                     )) => {
    1400            0 :                         tracing::info!(
    1401            0 :                             "Raced with another request to insert tenant {}",
    1402              :                             attach_req.tenant_shard_id
    1403              :                         )
    1404              :                     }
    1405            0 :                     _ => return Err(e.into()),
    1406              :                 },
    1407              :                 Ok(()) => {
    1408            0 :                     tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
    1409              : 
    1410            0 :                     let mut locked = self.inner.write().unwrap();
    1411            0 :                     locked.tenants.insert(
    1412            0 :                         attach_req.tenant_shard_id,
    1413            0 :                         TenantShard::new(
    1414            0 :                             attach_req.tenant_shard_id,
    1415            0 :                             ShardIdentity::unsharded(),
    1416            0 :                             PlacementPolicy::Attached(0),
    1417            0 :                         ),
    1418            0 :                     );
    1419            0 :                     tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
    1420              :                 }
    1421              :             }
    1422            0 :         }
    1423              : 
    1424            0 :         let new_generation = if let Some(req_node_id) = attach_req.node_id {
    1425            0 :             let maybe_tenant_conf = {
    1426            0 :                 let locked = self.inner.write().unwrap();
    1427            0 :                 locked
    1428            0 :                     .tenants
    1429            0 :                     .get(&attach_req.tenant_shard_id)
    1430            0 :                     .map(|t| t.config.clone())
    1431            0 :             };
    1432            0 : 
    1433            0 :             match maybe_tenant_conf {
    1434            0 :                 Some(conf) => {
    1435            0 :                     let new_generation = self
    1436            0 :                         .persistence
    1437            0 :                         .increment_generation(attach_req.tenant_shard_id, req_node_id)
    1438            0 :                         .await?;
    1439              : 
    1440              :                     // Persist the placement policy update. This is required
     1441              :                     // when we are reattaching a detached tenant.
    1442            0 :                     self.persistence
    1443            0 :                         .update_tenant_shard(
    1444            0 :                             TenantFilter::Shard(attach_req.tenant_shard_id),
    1445            0 :                             Some(PlacementPolicy::Attached(0)),
    1446            0 :                             Some(conf),
    1447            0 :                             None,
    1448            0 :                             None,
    1449            0 :                         )
    1450            0 :                         .await?;
    1451            0 :                     Some(new_generation)
    1452              :                 }
    1453              :                 None => {
    1454            0 :                     anyhow::bail!("Attach hook handling raced with tenant removal")
    1455              :                 }
    1456              :             }
    1457              :         } else {
    1458            0 :             self.persistence.detach(attach_req.tenant_shard_id).await?;
    1459            0 :             None
    1460              :         };
    1461              : 
    1462            0 :         let mut locked = self.inner.write().unwrap();
    1463            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    1464            0 : 
    1465            0 :         let tenant_shard = tenants
    1466            0 :             .get_mut(&attach_req.tenant_shard_id)
    1467            0 :             .expect("Checked for existence above");
    1468              : 
    1469            0 :         if let Some(new_generation) = new_generation {
    1470            0 :             tenant_shard.generation = Some(new_generation);
    1471            0 :             tenant_shard.policy = PlacementPolicy::Attached(0);
    1472            0 :         } else {
    1473              :             // This is a detach notification.  We must update placement policy to avoid re-attaching
    1474              :             // during background scheduling/reconciliation, or during storage controller restart.
    1475            0 :             assert!(attach_req.node_id.is_none());
    1476            0 :             tenant_shard.policy = PlacementPolicy::Detached;
    1477              :         }
    1478              : 
    1479            0 :         if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
    1480            0 :             tracing::info!(
    1481              :                 tenant_id = %attach_req.tenant_shard_id,
    1482              :                 ps_id = %attaching_pageserver,
    1483              :                 generation = ?tenant_shard.generation,
    1484            0 :                 "issuing",
    1485              :             );
    1486            0 :         } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
    1487            0 :             tracing::info!(
    1488              :                 tenant_id = %attach_req.tenant_shard_id,
    1489              :                 %ps_id,
    1490              :                 generation = ?tenant_shard.generation,
    1491            0 :                 "dropping",
    1492              :             );
    1493              :         } else {
    1494            0 :             tracing::info!(
    1495              :             tenant_id = %attach_req.tenant_shard_id,
    1496            0 :             "no-op: tenant already has no pageserver");
    1497              :         }
    1498            0 :         tenant_shard
    1499            0 :             .intent
    1500            0 :             .set_attached(scheduler, attach_req.node_id);
    1501            0 : 
    1502            0 :         tracing::info!(
    1503            0 :             "attach_hook: tenant {} set generation {:?}, pageserver {}",
    1504            0 :             attach_req.tenant_shard_id,
    1505            0 :             tenant_shard.generation,
    1506            0 :             // TODO: this is an odd number of 0xf's
    1507            0 :             attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff))
    1508              :         );
    1509              : 
    1510              :         // Trick the reconciler into not doing anything for this tenant: this helps
     1511              :         // tests that manually configure a tenant on the pageserver, and then call this
    1512              :         // attach hook: they don't want background reconciliation to modify what they
    1513              :         // did to the pageserver.
    1514              :         #[cfg(feature = "testing")]
    1515              :         {
    1516            0 :             if let Some(node_id) = attach_req.node_id {
    1517            0 :                 tenant_shard.observed.locations = HashMap::from([(
    1518            0 :                     node_id,
    1519            0 :                     ObservedStateLocation {
    1520            0 :                         conf: Some(attached_location_conf(
    1521            0 :                             tenant_shard.generation.unwrap(),
    1522            0 :                             &tenant_shard.shard,
    1523            0 :                             &tenant_shard.config,
    1524            0 :                             &PlacementPolicy::Attached(0),
    1525            0 :                         )),
    1526            0 :                     },
    1527            0 :                 )]);
    1528            0 :             } else {
    1529            0 :                 tenant_shard.observed.locations.clear();
    1530            0 :             }
    1531              :         }
    1532              : 
    1533            0 :         Ok(AttachHookResponse {
    1534            0 :             gen: attach_req
    1535            0 :                 .node_id
    1536            0 :                 .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
    1537            0 :         })
    1538            0 :     }
    1539              : 
    1540            0 :     pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
    1541            0 :         let locked = self.inner.read().unwrap();
    1542            0 : 
    1543            0 :         let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
    1544            0 : 
    1545            0 :         InspectResponse {
    1546            0 :             attachment: tenant_shard.and_then(|s| {
    1547            0 :                 s.intent
    1548            0 :                     .get_attached()
    1549            0 :                     .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
    1550            0 :             }),
    1551            0 :         }
    1552            0 :     }
    1553              : 
    1554              :     // When the availability state of a node transitions to active, we must do a full reconciliation
    1555              :     // of LocationConfigs on that node.  This is because while a node was offline:
    1556              :     // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
    1557              :     // - aborting a tenant shard split might have left rogue child shards behind on this node.
    1558              :     //
    1559              :     // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
    1560              :     // Reconcilers might communicate with the node, and these must not overlap with the work we do in
    1561              :     // this function.
    1562              :     //
    1563              :     // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
     1564              :     // is written for a single node rather than as a batch job for all nodes.
    1565            0 :     #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
    1566              :     async fn node_activate_reconcile(
    1567              :         &self,
    1568              :         mut node: Node,
    1569              :         _lock: &TracingExclusiveGuard<NodeOperations>,
    1570              :     ) -> Result<(), ApiError> {
    1571              :         // This Node is a mutable local copy: we will set it active so that we can use its
    1572              :         // API client to reconcile with the node.  The Node in [`Self::nodes`] will get updated
    1573              :         // later.
    1574              :         node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
    1575              : 
    1576              :         let configs = match node
    1577              :             .with_client_retries(
    1578            0 :                 |client| async move { client.list_location_config().await },
    1579              :                 &self.config.jwt_token,
    1580              :                 1,
    1581              :                 5,
    1582              :                 SHORT_RECONCILE_TIMEOUT,
    1583              :                 &self.cancel,
    1584              :             )
    1585              :             .await
    1586              :         {
    1587              :             None => {
    1588              :                 // We're shutting down (the Node's cancellation token can't have fired, because
    1589              :                 // we're the only scope that has a reference to it, and we didn't fire it).
    1590              :                 return Err(ApiError::ShuttingDown);
    1591              :             }
    1592              :             Some(Err(e)) => {
    1593              :                 // This node didn't succeed listing its locations: it may not proceed to active state
    1594              :                 // as it is apparently unavailable.
    1595              :                 return Err(ApiError::PreconditionFailed(
    1596              :                     format!("Failed to query node location configs, cannot activate ({e})").into(),
    1597              :                 ));
    1598              :             }
    1599              :             Some(Ok(configs)) => configs,
    1600              :         };
    1601              :         tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
    1602              : 
    1603              :         let mut cleanup = Vec::new();
    1604              :         {
    1605              :             let mut locked = self.inner.write().unwrap();
    1606              : 
    1607              :             for (tenant_shard_id, observed_loc) in configs.tenant_shards {
    1608              :                 let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
    1609              :                     cleanup.push(tenant_shard_id);
    1610              :                     continue;
    1611              :                 };
    1612              :                 tenant_shard
    1613              :                     .observed
    1614              :                     .locations
    1615              :                     .insert(node.get_id(), ObservedStateLocation { conf: observed_loc });
    1616              :             }
    1617              :         }
    1618              : 
    1619              :         for tenant_shard_id in cleanup {
    1620              :             tracing::info!("Detaching {tenant_shard_id}");
    1621              :             match node
    1622              :                 .with_client_retries(
    1623            0 :                     |client| async move {
    1624            0 :                         let config = LocationConfig {
    1625            0 :                             mode: LocationConfigMode::Detached,
    1626            0 :                             generation: None,
    1627            0 :                             secondary_conf: None,
    1628            0 :                             shard_number: tenant_shard_id.shard_number.0,
    1629            0 :                             shard_count: tenant_shard_id.shard_count.literal(),
    1630            0 :                             shard_stripe_size: 0,
    1631            0 :                             tenant_conf: models::TenantConfig::default(),
    1632            0 :                         };
    1633            0 :                         client
    1634            0 :                             .location_config(tenant_shard_id, config, None, false)
    1635            0 :                             .await
    1636            0 :                     },
    1637              :                     &self.config.jwt_token,
    1638              :                     1,
    1639              :                     5,
    1640              :                     SHORT_RECONCILE_TIMEOUT,
    1641              :                     &self.cancel,
    1642              :                 )
    1643              :                 .await
    1644              :             {
    1645              :                 None => {
    1646              :                     // We're shutting down (the Node's cancellation token can't have fired, because
    1647              :                     // we're the only scope that has a reference to it, and we didn't fire it).
    1648              :                     return Err(ApiError::ShuttingDown);
    1649              :                 }
    1650              :                 Some(Err(e)) => {
    1651              :                     // Do not let the node proceed to Active state if it is not responsive to requests
    1652              :                     // to detach.  This could happen if e.g. a shutdown bug in the pageserver is preventing
    1653              :                     // detach completing: we should not let this node back into the set of nodes considered
    1654              :                     // okay for scheduling.
    1655              :                     return Err(ApiError::Conflict(format!(
    1656              :                         "Node {node} failed to detach {tenant_shard_id}: {e}"
    1657              :                     )));
    1658              :                 }
    1659              :                 Some(Ok(_)) => {}
    1660              :             };
    1661              :         }
    1662              : 
    1663              :         Ok(())
    1664              :     }
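
// Editorial sketch (not part of service.rs): the cleanup pass above boils down to a set
// difference, namely detach every location the node reports that the controller does
// not manage, before allowing the node to become Active. The decision step in
// isolation, with string ids standing in for TenantShardId:

use std::collections::BTreeMap;

fn locations_to_detach(reported: &[String], managed: &BTreeMap<String, ()>) -> Vec<String> {
    reported
        .iter()
        .filter(|id| !managed.contains_key(*id))
        .cloned()
        .collect()
}
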
    1665              : 
    1666            0 :     pub(crate) async fn re_attach(
    1667            0 :         &self,
    1668            0 :         reattach_req: ReAttachRequest,
    1669            0 :     ) -> Result<ReAttachResponse, ApiError> {
    1670            0 :         if let Some(register_req) = reattach_req.register {
    1671            0 :             self.node_register(register_req).await?;
    1672            0 :         }
    1673              : 
    1674              :         // Ordering: we must persist generation number updates before making them visible in the in-memory state
    1675            0 :         let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
    1676              : 
    1677            0 :         tracing::info!(
    1678              :             node_id=%reattach_req.node_id,
    1679            0 :             "Incremented {} tenant shards' generations",
    1680            0 :             incremented_generations.len()
    1681              :         );
    1682              : 
    1683              :         // Apply the updated generation to our in-memory state, and
     1684              :         // discover secondary locations.
    1685            0 :         let mut locked = self.inner.write().unwrap();
    1686            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    1687            0 : 
    1688            0 :         let mut response = ReAttachResponse {
    1689            0 :             tenants: Vec::new(),
    1690            0 :         };
    1691              : 
    1692              :         // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
    1693              :         // to call location_conf API with an old generation.  Wait for cancellation to complete
    1694              :         // before responding to this request.  Requires well implemented CancellationToken logic
     1695              :         // before responding to this request.  Requires well-implemented CancellationToken logic
     1696              :         // all the way to where we call location_conf.  Even then, there can still be a location_conf
     1697              :         // request in flight over the network: TODO handle that by making the location_conf API refuse
    1698              : 
    1699              :         // Scan through all shards, applying updates for ones where we updated generation
    1700              :         // and identifying shards that intend to have a secondary location on this node.
    1701            0 :         for (tenant_shard_id, shard) in tenants {
    1702            0 :             if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
    1703            0 :                 let new_gen = *new_gen;
    1704            0 :                 response.tenants.push(ReAttachResponseTenant {
    1705            0 :                     id: *tenant_shard_id,
    1706            0 :                     gen: Some(new_gen.into().unwrap()),
    1707            0 :                     // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
    1708            0 :                     // execution.  If a pageserver is restarted during that process, then the reconcile pass will
    1709            0 :                     // fail, and start from scratch, so it doesn't make sense for us to try and preserve
    1710            0 :                     // the stale/multi states at this point.
    1711            0 :                     mode: LocationConfigMode::AttachedSingle,
    1712            0 :                 });
    1713            0 : 
    1714            0 :                 shard.generation = std::cmp::max(shard.generation, Some(new_gen));
    1715            0 :                 if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
    1716              :                     // Why can we update `observed` even though we're not sure our response will be received
    1717              :                     // by the pageserver?  Because the pageserver will not proceed with startup until
    1718              :                     // it has processed the response: if it loses it, we'll see another re-attach request and
    1719              :                     // increment the generation again, avoiding any uncertainty about the dirtiness of the tenant's state.
    1720            0 :                     if let Some(conf) = observed.conf.as_mut() {
    1721            0 :                         conf.generation = new_gen.into();
    1722            0 :                     }
    1723            0 :                 } else {
    1724            0 :                     // This node has no observed state for the shard: perhaps it was offline
    1725            0 :                     // when the pageserver restarted.  Insert a None, so that the Reconciler
    1726            0 :                     // will be prompted to learn the location's state before it makes changes.
    1727            0 :                     shard
    1728            0 :                         .observed
    1729            0 :                         .locations
    1730            0 :                         .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
    1731            0 :                 }
    1732            0 :             } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
    1733            0 :                 // Ordering: pageserver will not accept /location_config requests until it has
    1734            0 :                 // finished processing the response from re-attach.  So we can update our in-memory state
    1735            0 :                 // now, and be confident that we are not stamping on the result of some later location config.
    1736            0 :                 // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
    1737            0 :                 // so we might update observed state here, and then get over-written by some racing
    1738            0 :                 // ReconcileResult.  The impact is low, however, since the state we set on the pageserver
    1739            0 :                 // matches our intent, so the worst case if we race is a spurious reconcile.
    1740            0 : 
    1741            0 :                 response.tenants.push(ReAttachResponseTenant {
    1742            0 :                     id: *tenant_shard_id,
    1743            0 :                     gen: None,
    1744            0 :                     mode: LocationConfigMode::Secondary,
    1745            0 :                 });
    1746            0 : 
    1747            0 :                 // We must not update observed, because we have no guarantee that our
    1748            0 :                 // response will be received by the pageserver. This could leave it
    1749            0 :                 // falsely dirty, but the resulting reconcile should be idempotent.
    1750            0 :             }
    1751              :         }
    1752              : 
    1753              :         // We consider a node Active once we have composed a re-attach response, but we
    1754              :         // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
    1755              :         // implicitly synchronizes the LocationConfigs on the node.
    1756              :         //
    1757              :         // Setting a node active unblocks any Reconcilers that might write to the location config API,
    1758              :         // but those requests will not be accepted by the node until it has finished processing
    1759              :         // the re-attach response.
    1760              :         //
    1761              :         // Additionally, reset the node's scheduling policy to match the conditional update done
    1762              :         // in [`Persistence::re_attach`].
    1763            0 :         if let Some(node) = nodes.get(&reattach_req.node_id) {
    1764            0 :             let reset_scheduling = matches!(
    1765            0 :                 node.get_scheduling(),
    1766              :                 NodeSchedulingPolicy::PauseForRestart
    1767              :                     | NodeSchedulingPolicy::Draining
    1768              :                     | NodeSchedulingPolicy::Filling
    1769              :             );
    1770              : 
    1771            0 :             let mut new_nodes = (**nodes).clone();
    1772            0 :             if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
    1773            0 :                 if reset_scheduling {
    1774            0 :                     node.set_scheduling(NodeSchedulingPolicy::Active);
    1775            0 :                 }
    1776              : 
    1777            0 :                 tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
    1778            0 :                 node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
    1779            0 : 
    1780            0 :                 scheduler.node_upsert(node);
    1781            0 :                 let new_nodes = Arc::new(new_nodes);
    1782            0 :                 *nodes = new_nodes;
    1783              :             } else {
    1784            0 :                 tracing::error!(
    1785            0 :                     "Reattaching node {} was removed while processing the request",
    1786              :                     reattach_req.node_id
    1787              :                 );
    1788              :             }
    1789            0 :         }
    1790              : 
    1791            0 :         Ok(response)
    1792            0 :     }
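
// A minimal, std-only illustration (hypothetical helper, not part of service.rs) of the
// generation-update rule used in the re-attach loop above: the persisted, incremented
// generation is applied with `max`, so the in-memory value can never move backwards, and
// an unset (`None`) in-memory generation is always superseded.
fn apply_reattach_generation(in_memory: Option<u32>, incremented: u32) -> Option<u32> {
    // Mirrors `shard.generation = std::cmp::max(shard.generation, Some(new_gen))`:
    // `None < Some(_)` under Rust's `Option` ordering, so the new value wins over an
    // unset generation, and a larger existing value is never rolled back.
    std::cmp::max(in_memory, Some(incremented))
}

fn main() {
    assert_eq!(apply_reattach_generation(None, 5), Some(5));
    assert_eq!(apply_reattach_generation(Some(4), 5), Some(5));
    // A value that already advanced further is not rolled back.
    assert_eq!(apply_reattach_generation(Some(7), 5), Some(7));
}
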
    1793              : 
    1794            0 :     pub(crate) fn validate(&self, validate_req: ValidateRequest) -> ValidateResponse {
    1795            0 :         let locked = self.inner.read().unwrap();
    1796            0 : 
    1797            0 :         let mut response = ValidateResponse {
    1798            0 :             tenants: Vec::new(),
    1799            0 :         };
    1800              : 
    1801            0 :         for req_tenant in validate_req.tenants {
    1802            0 :             if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
    1803            0 :                 let valid = tenant_shard.generation == Some(Generation::new(req_tenant.gen));
    1804            0 :                 tracing::info!(
    1805            0 :                     "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
    1806              :                     req_tenant.id,
    1807              :                     req_tenant.gen,
    1808              :                     tenant_shard.generation
    1809              :                 );
    1810            0 :                 response.tenants.push(ValidateResponseTenant {
    1811            0 :                     id: req_tenant.id,
    1812            0 :                     valid,
    1813            0 :                 });
    1814            0 :             } else {
    1815            0 :                 // After tenant deletion, we may approve any validation.  This avoids
    1816            0 :                 // spurious warnings on the pageserver if it has pending LSN updates
    1817            0 :                 // at the point a deletion happens.
    1818            0 :                 response.tenants.push(ValidateResponseTenant {
    1819            0 :                     id: req_tenant.id,
    1820            0 :                     valid: true,
    1821            0 :                 });
    1822            0 :             }
    1823              :         }
    1824            0 :         response
    1825            0 :     }
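
// A minimal, std-only sketch (hypothetical helper, not part of service.rs) of the
// validation rule above: a request is valid only if it carries the latest known
// generation, and requests for tenants we no longer track validate as true so that
// deletions do not produce spurious warnings on the pageserver.
fn is_valid(known_generation: Option<u32>, requested_generation: u32) -> bool {
    match known_generation {
        Some(current) => current == requested_generation,
        // Tenant shard not found, e.g. after deletion: approve.
        None => true,
    }
}

fn main() {
    assert!(is_valid(Some(3), 3));
    assert!(!is_valid(Some(4), 3)); // stale generation is rejected
    assert!(is_valid(None, 3)); // deleted tenant: approved
}
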
    1826              : 
    1827            0 :     pub(crate) async fn tenant_create(
    1828            0 :         &self,
    1829            0 :         create_req: TenantCreateRequest,
    1830            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    1831            0 :         let tenant_id = create_req.new_tenant_id.tenant_id;
    1832              : 
    1833              :         // Exclude any concurrent attempts to create/access the same tenant ID
    1834            0 :         let _tenant_lock = trace_exclusive_lock(
    1835            0 :             &self.tenant_op_locks,
    1836            0 :             create_req.new_tenant_id.tenant_id,
    1837            0 :             TenantOperations::Create,
    1838            0 :         )
    1839            0 :         .await;
    1840            0 :         let (response, waiters) = self.do_tenant_create(create_req).await?;
    1841              : 
    1842            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    1843              :             // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
    1844              :             // accept compute notifications while it is in the process of creating the tenant.  Reconciliation will
    1845              :             // be retried in the background.
    1846            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
    1847            0 :         }
    1848            0 :         Ok(response)
    1849            0 :     }
    1850              : 
    1851            0 :     pub(crate) async fn do_tenant_create(
    1852            0 :         &self,
    1853            0 :         create_req: TenantCreateRequest,
    1854            0 :     ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
    1855            0 :         let placement_policy = create_req
    1856            0 :             .placement_policy
    1857            0 :             .clone()
    1858            0 :             // As a default, zero secondaries is convenient for tests that don't choose a policy.
    1859            0 :             .unwrap_or(PlacementPolicy::Attached(0));
    1860              : 
    1861              :         // This service expects to handle sharding itself: it is an error to try and directly create
    1862              :         // a particular shard here.
    1863            0 :         let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
    1864            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    1865            0 :                 "Attempted to create a specific shard, this API is for creating the whole tenant"
    1866            0 :             )));
    1867              :         } else {
    1868            0 :             create_req.new_tenant_id.tenant_id
    1869            0 :         };
    1870            0 : 
    1871            0 :         tracing::info!(
    1872            0 :             "Creating tenant {}, shard_count={:?}",
    1873              :             create_req.new_tenant_id,
    1874              :             create_req.shard_parameters.count,
    1875              :         );
    1876              : 
    1877            0 :         let create_ids = (0..create_req.shard_parameters.count.count())
    1878            0 :             .map(|i| TenantShardId {
    1879            0 :                 tenant_id,
    1880            0 :                 shard_number: ShardNumber(i),
    1881            0 :                 shard_count: create_req.shard_parameters.count,
    1882            0 :             })
    1883            0 :             .collect::<Vec<_>>();
    1884              : 
    1885              :         // If the caller specifies a None generation, it means "start from default".  This is different
    1886              :         // to [`Self::tenant_location_config`], where a None generation is used to represent
    1887              :         // an incompletely-onboarded tenant.
    1888            0 :         let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
    1889            0 :             tracing::info!(
    1890            0 :                 "tenant_create: secondary mode, generation is_some={}",
    1891            0 :                 create_req.generation.is_some()
    1892              :             );
    1893            0 :             create_req.generation.map(Generation::new)
    1894              :         } else {
    1895            0 :             tracing::info!(
    1896            0 :                 "tenant_create: not secondary mode, generation is_some={}",
    1897            0 :                 create_req.generation.is_some()
    1898              :             );
    1899            0 :             Some(
    1900            0 :                 create_req
    1901            0 :                     .generation
    1902            0 :                     .map(Generation::new)
    1903            0 :                     .unwrap_or(INITIAL_GENERATION),
    1904            0 :             )
    1905              :         };
    1906              : 
    1907              :         // Ordering: we persist tenant shards before creating them on the pageserver.  This enables a caller
    1908              :         // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
    1909              :         // during the creation, rather than risking leaving orphan objects in S3.
    1910            0 :         let persist_tenant_shards = create_ids
    1911            0 :             .iter()
    1912            0 :             .map(|tenant_shard_id| TenantShardPersistence {
    1913            0 :                 tenant_id: tenant_shard_id.tenant_id.to_string(),
    1914            0 :                 shard_number: tenant_shard_id.shard_number.0 as i32,
    1915            0 :                 shard_count: tenant_shard_id.shard_count.literal() as i32,
    1916            0 :                 shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
    1917            0 :                 generation: initial_generation.map(|g| g.into().unwrap() as i32),
    1918            0 :                 // The pageserver is not known until scheduling happens: we will set this column when
    1919            0 :                 // incrementing the generation the first time we attach to a pageserver.
    1920            0 :                 generation_pageserver: None,
    1921            0 :                 placement_policy: serde_json::to_string(&placement_policy).unwrap(),
    1922            0 :                 config: serde_json::to_string(&create_req.config).unwrap(),
    1923            0 :                 splitting: SplitState::default(),
    1924            0 :                 scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    1925            0 :                     .unwrap(),
    1926            0 :             })
    1927            0 :             .collect();
    1928            0 : 
    1929            0 :         match self
    1930            0 :             .persistence
    1931            0 :             .insert_tenant_shards(persist_tenant_shards)
    1932            0 :             .await
    1933              :         {
    1934            0 :             Ok(_) => {}
    1935              :             Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
    1936              :                 DatabaseErrorKind::UniqueViolation,
    1937              :                 _,
    1938              :             ))) => {
    1939              :                 // Unique key violation: this is probably a retry.  Because the shard count is part of the unique key,
    1940              :                 // if we see a unique key violation it means that the creation request's shard count matches the previous
    1941              :                 // creation's shard count.
    1942            0 :                 tracing::info!("Tenant shards already present in database, proceeding with idempotent creation...");
    1943              :             }
    1944              :             // Any other database error is unexpected and a bug.
    1945            0 :             Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
    1946              :         };
    1947              : 
    1948            0 :         let mut schedule_context = ScheduleContext::default();
    1949              : 
    1950            0 :         let (waiters, response_shards) = {
    1951            0 :             let mut locked = self.inner.write().unwrap();
    1952            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    1953            0 : 
    1954            0 :             let mut response_shards = Vec::new();
    1955            0 :             let mut schedule_error = None;
    1956              : 
    1957            0 :             for tenant_shard_id in create_ids {
    1958            0 :                 tracing::info!("Creating shard {tenant_shard_id}...");
    1959              : 
    1960              :                 use std::collections::btree_map::Entry;
    1961            0 :                 match tenants.entry(tenant_shard_id) {
    1962            0 :                     Entry::Occupied(mut entry) => {
    1963            0 :                         tracing::info!(
    1964            0 :                             "Tenant shard {tenant_shard_id} already exists while creating"
    1965              :                         );
    1966              : 
    1967              :                         // TODO: schedule() should take an anti-affinity expression that pushes
    1968              :                         // attached and secondary locations (independently) away from those
    1969              :                         // pageservers also holding a shard for this tenant.
    1970              : 
    1971            0 :                         entry
    1972            0 :                             .get_mut()
    1973            0 :                             .schedule(scheduler, &mut schedule_context)
    1974            0 :                             .map_err(|e| {
    1975            0 :                                 ApiError::Conflict(format!(
    1976            0 :                                     "Failed to schedule shard {tenant_shard_id}: {e}"
    1977            0 :                                 ))
    1978            0 :                             })?;
    1979              : 
    1980            0 :                         if let Some(node_id) = entry.get().intent.get_attached() {
    1981            0 :                             let generation = entry
    1982            0 :                                 .get()
    1983            0 :                                 .generation
    1984            0 :                                 .expect("Generation is set when in attached mode");
    1985            0 :                             response_shards.push(TenantCreateResponseShard {
    1986            0 :                                 shard_id: tenant_shard_id,
    1987            0 :                                 node_id: *node_id,
    1988            0 :                                 generation: generation.into().unwrap(),
    1989            0 :                             });
    1990            0 :                         }
    1991              : 
    1992            0 :                         continue;
    1993              :                     }
    1994            0 :                     Entry::Vacant(entry) => {
    1995            0 :                         let state = entry.insert(TenantShard::new(
    1996            0 :                             tenant_shard_id,
    1997            0 :                             ShardIdentity::from_params(
    1998            0 :                                 tenant_shard_id.shard_number,
    1999            0 :                                 &create_req.shard_parameters,
    2000            0 :                             ),
    2001            0 :                             placement_policy.clone(),
    2002            0 :                         ));
    2003            0 : 
    2004            0 :                         state.generation = initial_generation;
    2005            0 :                         state.config = create_req.config.clone();
    2006            0 :                         if let Err(e) = state.schedule(scheduler, &mut schedule_context) {
    2007            0 :                             schedule_error = Some(e);
    2008            0 :                         }
    2009              : 
    2010              :                         // Only include shards in result if we are attaching: the purpose
    2011              :                         // of the response is to tell the caller where the shards are attached.
    2012            0 :                         if let Some(node_id) = state.intent.get_attached() {
    2013            0 :                             let generation = state
    2014            0 :                                 .generation
    2015            0 :                                 .expect("Generation is set when in attached mode");
    2016            0 :                             response_shards.push(TenantCreateResponseShard {
    2017            0 :                                 shard_id: tenant_shard_id,
    2018            0 :                                 node_id: *node_id,
    2019            0 :                                 generation: generation.into().unwrap(),
    2020            0 :                             });
    2021            0 :                         }
    2022              :                     }
    2023              :                 };
    2024              :             }
    2025              : 
    2026              :             // If we failed to schedule shards, then they are still created in the controller,
    2027              :             // but we return an error to the requester to avoid a silent failure when someone
    2028              :             // tries to e.g. create a tenant whose placement policy requires more nodes than
    2029              :             // are present in the system.  We do this here rather than in the above loop, to
    2030              :             // avoid situations where we only create a subset of shards in the tenant.
    2031            0 :             if let Some(e) = schedule_error {
    2032            0 :                 return Err(ApiError::Conflict(format!(
    2033            0 :                     "Failed to schedule shard(s): {e}"
    2034            0 :                 )));
    2035            0 :             }
    2036            0 : 
    2037            0 :             let waiters = tenants
    2038            0 :                 .range_mut(TenantShardId::tenant_range(tenant_id))
    2039            0 :                 .filter_map(|(_shard_id, shard)| self.maybe_reconcile_shard(shard, nodes))
    2040            0 :                 .collect::<Vec<_>>();
    2041            0 :             (waiters, response_shards)
    2042            0 :         };
    2043            0 : 
    2044            0 :         Ok((
    2045            0 :             TenantCreateResponse {
    2046            0 :                 shards: response_shards,
    2047            0 :             },
    2048            0 :             waiters,
    2049            0 :         ))
    2050            0 :     }
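
// A minimal, std-only sketch (hypothetical types, not part of service.rs) of how the shard
// identities for a new tenant are enumerated above: one (shard_number, shard_count) pair per
// shard, with shard numbers 0..count.  The database's unique key includes the shard count,
// which is why a retried creation with the same count hits a unique-key violation and is
// treated as an idempotent repeat.
#[derive(Debug, PartialEq)]
struct ShardIdSketch {
    shard_number: u8,
    shard_count: u8,
}

fn enumerate_shards(shard_count: u8) -> Vec<ShardIdSketch> {
    (0..shard_count)
        .map(|i| ShardIdSketch { shard_number: i, shard_count })
        .collect()
}

fn main() {
    let shards = enumerate_shards(4);
    assert_eq!(shards.len(), 4);
    assert_eq!(shards[0], ShardIdSketch { shard_number: 0, shard_count: 4 });
    assert_eq!(shards[3], ShardIdSketch { shard_number: 3, shard_count: 4 });
}
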
    2051              : 
    2052              :     /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
    2053              :     /// wait for reconciliation to complete before responding.
    2054            0 :     async fn await_waiters(
    2055            0 :         &self,
    2056            0 :         waiters: Vec<ReconcilerWaiter>,
    2057            0 :         timeout: Duration,
    2058            0 :     ) -> Result<(), ReconcileWaitError> {
    2059            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2060            0 :         for waiter in waiters {
    2061            0 :             let timeout = deadline.duration_since(Instant::now());
    2062            0 :             waiter.wait_timeout(timeout).await?;
    2063              :         }
    2064              : 
    2065            0 :         Ok(())
    2066            0 :     }
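
// A minimal, std-only sketch of the shared-deadline pattern used by `await_waiters` above:
// one deadline is computed up front and every sequential wait is bounded by whatever budget
// remains, so the total wait never exceeds the caller's timeout.  On recent Rust,
// `Instant::duration_since` saturates to zero once the deadline has passed.
use std::time::{Duration, Instant};

fn remaining_budget(deadline: Instant) -> Duration {
    deadline.duration_since(Instant::now())
}

fn main() {
    let deadline = Instant::now().checked_add(Duration::from_millis(50)).unwrap();
    // Each simulated "waiter" consumes part of the shared budget.
    for _ in 0..3 {
        let budget = remaining_budget(deadline);
        std::thread::sleep(budget.min(Duration::from_millis(20)));
    }
    // After the loop, the remaining budget is at or near zero, never negative.
    assert!(remaining_budget(deadline) <= Duration::from_millis(50));
}
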
    2067              : 
    2068              :     /// Same as [`Service::await_waiters`], but returns the waiters which are still
    2069              :     /// in progress
    2070            0 :     async fn await_waiters_remainder(
    2071            0 :         &self,
    2072            0 :         waiters: Vec<ReconcilerWaiter>,
    2073            0 :         timeout: Duration,
    2074            0 :     ) -> Vec<ReconcilerWaiter> {
    2075            0 :         let deadline = Instant::now().checked_add(timeout).unwrap();
    2076            0 :         for waiter in waiters.iter() {
    2077            0 :             let timeout = deadline.duration_since(Instant::now());
    2078            0 :             let _ = waiter.wait_timeout(timeout).await;
    2079              :         }
    2080              : 
    2081            0 :         waiters
    2082            0 :             .into_iter()
    2083            0 :             .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
    2084            0 :             .collect::<Vec<_>>()
    2085            0 :     }
    2086              : 
    2087              :     /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
    2088              :     /// and transform it into either a tenant creation or a series of shard updates.
    2089              :     ///
    2090              :     /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
    2091              :     /// still be returned.
    2092            0 :     fn tenant_location_config_prepare(
    2093            0 :         &self,
    2094            0 :         tenant_id: TenantId,
    2095            0 :         req: TenantLocationConfigRequest,
    2096            0 :     ) -> TenantCreateOrUpdate {
    2097            0 :         let mut updates = Vec::new();
    2098            0 :         let mut locked = self.inner.write().unwrap();
    2099            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    2100            0 :         let tenant_shard_id = TenantShardId::unsharded(tenant_id);
    2101              : 
    2102              :         // Use location config mode as an indicator of policy.
    2103            0 :         let placement_policy = match req.config.mode {
    2104            0 :             LocationConfigMode::Detached => PlacementPolicy::Detached,
    2105            0 :             LocationConfigMode::Secondary => PlacementPolicy::Secondary,
    2106              :             LocationConfigMode::AttachedMulti
    2107              :             | LocationConfigMode::AttachedSingle
    2108              :             | LocationConfigMode::AttachedStale => {
    2109            0 :                 if nodes.len() > 1 {
    2110            0 :                     PlacementPolicy::Attached(1)
    2111              :                 } else {
    2112              :                     // Convenience for dev/test: if we just have one pageserver, import
    2113              :                     // tenants into non-HA mode so that scheduling will succeed.
    2114            0 :                     PlacementPolicy::Attached(0)
    2115              :                 }
    2116              :             }
    2117              :         };
    2118              : 
    2119            0 :         let mut create = true;
    2120            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2121              :             // Saw an existing shard: this is not a creation
    2122            0 :             create = false;
    2123              : 
    2124              :             // Shards may have initially been created by a Secondary request, where we
    2125              :             // would have left generation as None.
    2126              :             //
    2127              :             // We only update generation the first time we see an attached-mode request,
    2128              :             // and if there is no existing generation set. The caller is responsible for
    2129              :             // ensuring that no non-storage-controller pageserver ever uses a higher
    2130              :             // generation than they passed in here.
    2131              :             use LocationConfigMode::*;
    2132            0 :             let set_generation = match req.config.mode {
    2133            0 :                 AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
    2134            0 :                     req.config.generation.map(Generation::new)
    2135              :                 }
    2136            0 :                 _ => None,
    2137              :             };
    2138              : 
    2139            0 :             updates.push(ShardUpdate {
    2140            0 :                 tenant_shard_id: *shard_id,
    2141            0 :                 placement_policy: placement_policy.clone(),
    2142            0 :                 tenant_config: req.config.tenant_conf.clone(),
    2143            0 :                 generation: set_generation,
    2144            0 :             });
    2145              :         }
    2146              : 
    2147            0 :         if create {
    2148              :             use LocationConfigMode::*;
    2149            0 :             let generation = match req.config.mode {
    2150            0 :                 AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
    2151              :                 // If a caller provided a generation in a non-attached request, ignore it
    2152              :                 // and leave our generation as None: this enables a subsequent update to set
    2153              :                 // the generation when setting an attached mode for the first time.
    2154            0 :                 _ => None,
    2155              :             };
    2156              : 
    2157            0 :             TenantCreateOrUpdate::Create(
    2158            0 :                 // Synthesize a creation request
    2159            0 :                 TenantCreateRequest {
    2160            0 :                     new_tenant_id: tenant_shard_id,
    2161            0 :                     generation,
    2162            0 :                     shard_parameters: ShardParameters {
    2163            0 :                         count: tenant_shard_id.shard_count,
    2164            0 :                         // We only import un-sharded or single-sharded tenants, so stripe
    2165            0 :                         // size can be made up arbitrarily here.
    2166            0 :                         stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE,
    2167            0 :                     },
    2168            0 :                     placement_policy: Some(placement_policy),
    2169            0 :                     config: req.config.tenant_conf,
    2170            0 :                 },
    2171            0 :             )
    2172              :         } else {
    2173            0 :             assert!(!updates.is_empty());
    2174            0 :             TenantCreateOrUpdate::Update(updates)
    2175              :         }
    2176            0 :     }
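
// A minimal, std-only sketch (hypothetical enums, not the real pageserver_api types) of the
// mode-to-policy mapping used in `tenant_location_config_prepare` above: Detached and
// Secondary map directly, while any attached mode becomes Attached(1) when more than one
// pageserver is registered, or Attached(0) in single-node dev/test setups so that
// scheduling can still succeed.
#[derive(Debug, PartialEq)]
enum ModeSketch {
    Detached,
    Secondary,
    Attached, // stands in for AttachedSingle / AttachedMulti / AttachedStale
}

#[derive(Debug, PartialEq)]
enum PolicySketch {
    Detached,
    Secondary,
    Attached(usize), // number of secondary locations to keep
}

fn placement_policy_for(mode: ModeSketch, node_count: usize) -> PolicySketch {
    match mode {
        ModeSketch::Detached => PolicySketch::Detached,
        ModeSketch::Secondary => PolicySketch::Secondary,
        ModeSketch::Attached => {
            if node_count > 1 {
                PolicySketch::Attached(1)
            } else {
                PolicySketch::Attached(0)
            }
        }
    }
}

fn main() {
    assert_eq!(placement_policy_for(ModeSketch::Attached, 3), PolicySketch::Attached(1));
    assert_eq!(placement_policy_for(ModeSketch::Attached, 1), PolicySketch::Attached(0));
    assert_eq!(placement_policy_for(ModeSketch::Secondary, 3), PolicySketch::Secondary);
}
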
    2177              : 
    2178              :     /// This API is used by the cloud control plane to migrate unsharded tenants that it created
    2179              :     /// directly with pageservers into this service.
    2180              :     ///
    2181              :     /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
    2182              :     /// has attempted to call this API. Failure to comply with this rule may lead to S3 corruption.
    2183              :     /// Think of the first attempt to call this API as a transfer of absolute authority over the
    2184              :     /// tenant's source of generation numbers.
    2185              :     ///
    2186              :     /// The mode in this request provides coarse-grained control of tenants:
    2187              :     /// - Call with mode Attached* to upsert the tenant.
    2188              :     /// - Call with mode Secondary to either onboard a tenant without attaching it, or
    2189              :     ///   to set an existing tenant to PolicyMode::Secondary
    2190              :     /// - Call with mode Detached to switch to PolicyMode::Detached
    2191            0 :     pub(crate) async fn tenant_location_config(
    2192            0 :         &self,
    2193            0 :         tenant_shard_id: TenantShardId,
    2194            0 :         req: TenantLocationConfigRequest,
    2195            0 :     ) -> Result<TenantLocationConfigResponse, ApiError> {
    2196              :         // We require an exclusive lock, because we are updating both persistent and in-memory state
    2197            0 :         let _tenant_lock = trace_exclusive_lock(
    2198            0 :             &self.tenant_op_locks,
    2199            0 :             tenant_shard_id.tenant_id,
    2200            0 :             TenantOperations::LocationConfig,
    2201            0 :         )
    2202            0 :         .await;
    2203              : 
    2204            0 :         if !tenant_shard_id.is_unsharded() {
    2205            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!(
    2206            0 :                 "This API is for importing single-sharded or unsharded tenants"
    2207            0 :             )));
    2208            0 :         }
    2209            0 : 
    2210            0 :         // First check if this is a creation or an update
    2211            0 :         let create_or_update = self.tenant_location_config_prepare(tenant_shard_id.tenant_id, req);
    2212            0 : 
    2213            0 :         let mut result = TenantLocationConfigResponse {
    2214            0 :             shards: Vec::new(),
    2215            0 :             stripe_size: None,
    2216            0 :         };
    2217            0 :         let waiters = match create_or_update {
    2218            0 :             TenantCreateOrUpdate::Create(create_req) => {
    2219            0 :                 let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
    2220            0 :                 result.shards = create_resp
    2221            0 :                     .shards
    2222            0 :                     .into_iter()
    2223            0 :                     .map(|s| TenantShardLocation {
    2224            0 :                         node_id: s.node_id,
    2225            0 :                         shard_id: s.shard_id,
    2226            0 :                     })
    2227            0 :                     .collect();
    2228            0 :                 waiters
    2229              :             }
    2230            0 :             TenantCreateOrUpdate::Update(updates) => {
    2231            0 :                 // Persist updates
    2232            0 :                 // Ordering: write to the database before applying changes in-memory, so that
    2233            0 :                 // we will not appear to time-travel backwards on a restart.
    2234            0 :                 let mut schedule_context = ScheduleContext::default();
    2235              :                 for ShardUpdate {
    2236            0 :                     tenant_shard_id,
    2237            0 :                     placement_policy,
    2238            0 :                     tenant_config,
    2239            0 :                     generation,
    2240            0 :                 } in &updates
    2241              :                 {
    2242            0 :                     self.persistence
    2243            0 :                         .update_tenant_shard(
    2244            0 :                             TenantFilter::Shard(*tenant_shard_id),
    2245            0 :                             Some(placement_policy.clone()),
    2246            0 :                             Some(tenant_config.clone()),
    2247            0 :                             *generation,
    2248            0 :                             None,
    2249            0 :                         )
    2250            0 :                         .await?;
    2251              :                 }
    2252              : 
    2253              :                 // Apply updates in-memory
    2254            0 :                 let mut waiters = Vec::new();
    2255            0 :                 {
    2256            0 :                     let mut locked = self.inner.write().unwrap();
    2257            0 :                     let (nodes, tenants, scheduler) = locked.parts_mut();
    2258              : 
    2259              :                     for ShardUpdate {
    2260            0 :                         tenant_shard_id,
    2261            0 :                         placement_policy,
    2262            0 :                         tenant_config,
    2263            0 :                         generation: update_generation,
    2264            0 :                     } in updates
    2265              :                     {
    2266            0 :                         let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    2267            0 :                             tracing::warn!("Shard {tenant_shard_id} removed while updating");
    2268            0 :                             continue;
    2269              :                         };
    2270              : 
    2271              :                         // Update stripe size
    2272            0 :                         if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
    2273            0 :                             result.stripe_size = Some(shard.shard.stripe_size);
    2274            0 :                         }
    2275              : 
    2276            0 :                         shard.policy = placement_policy;
    2277            0 :                         shard.config = tenant_config;
    2278            0 :                         if let Some(generation) = update_generation {
    2279            0 :                             shard.generation = Some(generation);
    2280            0 :                         }
    2281              : 
    2282            0 :                         shard.schedule(scheduler, &mut schedule_context)?;
    2283              : 
    2284            0 :                         let maybe_waiter = self.maybe_reconcile_shard(shard, nodes);
    2285            0 :                         if let Some(waiter) = maybe_waiter {
    2286            0 :                             waiters.push(waiter);
    2287            0 :                         }
    2288              : 
    2289            0 :                         if let Some(node_id) = shard.intent.get_attached() {
    2290            0 :                             result.shards.push(TenantShardLocation {
    2291            0 :                                 shard_id: tenant_shard_id,
    2292            0 :                                 node_id: *node_id,
    2293            0 :                             })
    2294            0 :                         }
    2295              :                     }
    2296              :                 }
    2297            0 :                 waiters
    2298              :             }
    2299              :         };
    2300              : 
    2301            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    2302              :             // Do not treat a reconcile error as fatal: we have already applied any requested
    2303              :             // Intent changes, and the reconcile can fail for external reasons like unavailable
    2304              :             // compute notification API.  In these cases, it is important that we do not
    2305              :             // cause the cloud control plane to retry forever on this API.
    2306            0 :             tracing::warn!(
    2307            0 :                 "Failed to reconcile after /location_config: {e}, returning success anyway"
    2308              :             );
    2309            0 :         }
    2310              : 
    2311              :         // Logging the full result is useful because it lets us cross-check what the cloud control
    2312              :         // plane's tenant_shards table should contain.
    2313            0 :         tracing::info!("Complete, returning {result:?}");
    2314              : 
    2315            0 :         Ok(result)
    2316            0 :     }
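
// A minimal, std-only sketch (hypothetical helper, not part of service.rs) of the
// "set the generation only on the first attach" rule applied by the update path above:
// an incoming generation is used only when the request is for an attached mode and the
// shard does not already have a generation; otherwise the stored value is left untouched.
fn generation_update(
    request_is_attached: bool,
    existing: Option<u32>,
    requested: Option<u32>,
) -> Option<u32> {
    if request_is_attached && existing.is_none() {
        requested // first attached-mode request may seed the generation
    } else {
        None // leave the stored generation unchanged
    }
}

fn main() {
    // First attached-mode request after a Secondary onboarding seeds the generation.
    assert_eq!(generation_update(true, None, Some(42)), Some(42));
    // Later attached-mode requests never take the generation from the request.
    assert_eq!(generation_update(true, Some(43), Some(42)), None);
    // Non-attached requests ignore any generation supplied by the caller.
    assert_eq!(generation_update(false, None, Some(42)), None);
}
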
    2317              : 
    2318            0 :     pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
    2319              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    2320            0 :         let _tenant_lock = trace_exclusive_lock(
    2321            0 :             &self.tenant_op_locks,
    2322            0 :             req.tenant_id,
    2323            0 :             TenantOperations::ConfigSet,
    2324            0 :         )
    2325            0 :         .await;
    2326              : 
    2327            0 :         let tenant_id = req.tenant_id;
    2328            0 :         let config = req.config;
    2329            0 : 
    2330            0 :         self.persistence
    2331            0 :             .update_tenant_shard(
    2332            0 :                 TenantFilter::Tenant(req.tenant_id),
    2333            0 :                 None,
    2334            0 :                 Some(config.clone()),
    2335            0 :                 None,
    2336            0 :                 None,
    2337            0 :             )
    2338            0 :             .await?;
    2339              : 
    2340            0 :         let waiters = {
    2341            0 :             let mut waiters = Vec::new();
    2342            0 :             let mut locked = self.inner.write().unwrap();
    2343            0 :             let (nodes, tenants, _scheduler) = locked.parts_mut();
    2344            0 :             for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2345            0 :                 shard.config = config.clone();
    2346            0 :                 if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
    2347            0 :                     waiters.push(waiter);
    2348            0 :                 }
    2349              :             }
    2350            0 :             waiters
    2351              :         };
    2352              : 
    2353            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    2354              :             // Treat this as success because we have stored the configuration.  If e.g.
    2355              :             // a node was unavailable at this time, it should not stop us accepting a
    2356              :             // configuration change.
    2357            0 :             tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
    2358            0 :         }
    2359              : 
    2360            0 :         Ok(())
    2361            0 :     }
    2362              : 
    2363            0 :     pub(crate) fn tenant_config_get(
    2364            0 :         &self,
    2365            0 :         tenant_id: TenantId,
    2366            0 :     ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
    2367            0 :         let config = {
    2368            0 :             let locked = self.inner.read().unwrap();
    2369            0 : 
    2370            0 :             match locked
    2371            0 :                 .tenants
    2372            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    2373            0 :                 .next()
    2374              :             {
    2375            0 :                 Some((_tenant_shard_id, shard)) => shard.config.clone(),
    2376              :                 None => {
    2377            0 :                     return Err(ApiError::NotFound(
    2378            0 :                         anyhow::anyhow!("Tenant not found").into(),
    2379            0 :                     ))
    2380              :                 }
    2381              :             }
    2382              :         };
    2383              : 
    2384              :         // Unlike the pageserver, we do not have a set of global defaults: the config is
    2385              :         // entirely per-tenant.  Therefore the distinction between `tenant_specific_overrides`
    2386              :         // and `effective_config` in the response is meaningless, but we retain that syntax
    2387              :         // in order to remain compatible with the pageserver API.
    2388              : 
    2389            0 :         let response = HashMap::from([
    2390              :             (
    2391              :                 "tenant_specific_overrides",
    2392            0 :                 serde_json::to_value(&config)
    2393            0 :                     .context("serializing tenant specific overrides")
    2394            0 :                     .map_err(ApiError::InternalServerError)?,
    2395              :             ),
    2396              :             (
    2397            0 :                 "effective_config",
    2398            0 :                 serde_json::to_value(&config)
    2399            0 :                     .context("serializing effective config")
    2400            0 :                     .map_err(ApiError::InternalServerError)?,
    2401              :             ),
    2402              :         ]);
    2403              : 
    2404            0 :         Ok(response)
    2405            0 :     }
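
// A minimal, std-only sketch of the response shape assembled above: because the storage
// controller keeps a single per-tenant config with no global defaults, both keys carry the
// same value, and the split exists purely for compatibility with the pageserver's API.
// `ConfigSketch` is a hypothetical stand-in for the real tenant config type.
use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
struct ConfigSketch {
    compaction_threshold: Option<u32>,
}

fn config_response(config: &ConfigSketch) -> HashMap<&'static str, ConfigSketch> {
    HashMap::from([
        ("tenant_specific_overrides", config.clone()),
        ("effective_config", config.clone()),
    ])
}

fn main() {
    let cfg = ConfigSketch { compaction_threshold: Some(10) };
    let response = config_response(&cfg);
    assert_eq!(response["tenant_specific_overrides"], response["effective_config"]);
}
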
    2406              : 
    2407            0 :     pub(crate) async fn tenant_time_travel_remote_storage(
    2408            0 :         &self,
    2409            0 :         time_travel_req: &TenantTimeTravelRequest,
    2410            0 :         tenant_id: TenantId,
    2411            0 :         timestamp: Cow<'_, str>,
    2412            0 :         done_if_after: Cow<'_, str>,
    2413            0 :     ) -> Result<(), ApiError> {
    2414            0 :         let _tenant_lock = trace_exclusive_lock(
    2415            0 :             &self.tenant_op_locks,
    2416            0 :             tenant_id,
    2417            0 :             TenantOperations::TimeTravelRemoteStorage,
    2418            0 :         )
    2419            0 :         .await;
    2420              : 
    2421            0 :         let node = {
    2422            0 :             let locked = self.inner.read().unwrap();
    2423              :             // Just a sanity check to prevent misuse: the API expects that the tenant is fully
    2424              :             // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
    2425              :             // but only at the start of the process, so it's really just to prevent operator
    2426              :             // mistakes.
    2427            0 :             for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
    2428            0 :                 if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
    2429              :                 {
    2430            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    2431            0 :                         "We want tenant to be detached in shard with tenant_shard_id={shard_id}"
    2432            0 :                     )));
    2433            0 :                 }
    2434            0 :                 let maybe_attached = shard
    2435            0 :                     .observed
    2436            0 :                     .locations
    2437            0 :                     .iter()
    2438            0 :                     .filter_map(|(node_id, observed_location)| {
    2439            0 :                         observed_location
    2440            0 :                             .conf
    2441            0 :                             .as_ref()
    2442            0 :                             .map(|loc| (node_id, observed_location, loc.mode))
    2443            0 :                     })
    2444            0 :                     .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
    2445            0 :                 if let Some((node_id, _observed_location, mode)) = maybe_attached {
    2446            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!("We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}")));
    2447            0 :                 }
    2448              :             }
    2449            0 :             let scheduler = &locked.scheduler;
    2450              :             // Right now we only perform the operation on a single node without parallelization
    2451              :             // TODO fan out the operation to multiple nodes for better performance
    2452            0 :             let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
    2453            0 :             let node = locked
    2454            0 :                 .nodes
    2455            0 :                 .get(&node_id)
    2456            0 :                 .expect("Pageservers may not be deleted while lock is active");
    2457            0 :             node.clone()
    2458            0 :         };
    2459            0 : 
    2460            0 :         // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
    2461            0 :         let mut counts = time_travel_req
    2462            0 :             .shard_counts
    2463            0 :             .iter()
    2464            0 :             .copied()
    2465            0 :             .collect::<HashSet<_>>()
    2466            0 :             .into_iter()
    2467            0 :             .collect::<Vec<_>>();
    2468            0 :         counts.sort_unstable();
    2469              : 
    2470            0 :         for count in counts {
    2471            0 :             let shard_ids = (0..count.count())
    2472            0 :                 .map(|i| TenantShardId {
    2473            0 :                     tenant_id,
    2474            0 :                     shard_number: ShardNumber(i),
    2475            0 :                     shard_count: count,
    2476            0 :                 })
    2477            0 :                 .collect::<Vec<_>>();
    2478            0 :             for tenant_shard_id in shard_ids {
    2479            0 :                 let client = PageserverClient::new(
    2480            0 :                     node.get_id(),
    2481            0 :                     node.base_url(),
    2482            0 :                     self.config.jwt_token.as_deref(),
    2483            0 :                 );
    2484            0 : 
    2485            0 :                 tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
    2486              : 
    2487            0 :                 client
    2488            0 :                     .tenant_time_travel_remote_storage(
    2489            0 :                         tenant_shard_id,
    2490            0 :                         &timestamp,
    2491            0 :                         &done_if_after,
    2492            0 :                     )
    2493            0 :                     .await
    2494            0 :                     .map_err(|e| {
    2495            0 :                         ApiError::InternalServerError(anyhow::anyhow!(
    2496            0 :                             "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
    2497            0 :                             node
    2498            0 :                         ))
    2499            0 :                     })?;
    2500              :             }
    2501              :         }
    2502            0 :         Ok(())
    2503            0 :     }
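
// A minimal, std-only sketch (hypothetical helper, not part of service.rs) of the
// shard-count handling above: historically used shard counts are deduplicated and sorted,
// and for every count the full set of shard numbers is enumerated, since each
// (shard number, shard count) pair addresses a distinct location in remote storage.
use std::collections::HashSet;

fn shards_to_visit(shard_counts: &[u8]) -> Vec<(u8, u8)> {
    let mut counts: Vec<u8> = shard_counts
        .iter()
        .copied()
        .collect::<HashSet<_>>()
        .into_iter()
        .collect();
    counts.sort_unstable();

    counts
        .into_iter()
        .flat_map(|count| (0..count).map(move |shard_number| (shard_number, count)))
        .collect()
}

fn main() {
    // A tenant that was split from 2 shards to 4, with a duplicate entry in the request.
    let visits = shards_to_visit(&[4, 2, 4]);
    assert_eq!(visits, vec![(0, 2), (1, 2), (0, 4), (1, 4), (2, 4), (3, 4)]);
}
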
    2504              : 
    2505            0 :     pub(crate) async fn tenant_secondary_download(
    2506            0 :         &self,
    2507            0 :         tenant_id: TenantId,
    2508            0 :         wait: Option<Duration>,
    2509            0 :     ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
    2510            0 :         let _tenant_lock = trace_shared_lock(
    2511            0 :             &self.tenant_op_locks,
    2512            0 :             tenant_id,
    2513            0 :             TenantOperations::SecondaryDownload,
    2514            0 :         )
    2515            0 :         .await;
    2516              : 
    2517              :         // Acquire lock and yield the collection of shard-node tuples which we will send requests onward to
    2518            0 :         let targets = {
    2519            0 :             let locked = self.inner.read().unwrap();
    2520            0 :             let mut targets = Vec::new();
    2521              : 
    2522            0 :             for (tenant_shard_id, shard) in
    2523            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    2524              :             {
    2525            0 :                 for node_id in shard.intent.get_secondary() {
    2526            0 :                     let node = locked
    2527            0 :                         .nodes
    2528            0 :                         .get(node_id)
    2529            0 :                         .expect("Pageservers may not be deleted while referenced");
    2530            0 : 
    2531            0 :                     targets.push((*tenant_shard_id, node.clone()));
    2532            0 :                 }
    2533              :             }
    2534            0 :             targets
    2535            0 :         };
    2536            0 : 
    2537            0 :         // Issue concurrent requests to all shards' locations
    2538            0 :         let mut futs = FuturesUnordered::new();
    2539            0 :         for (tenant_shard_id, node) in targets {
    2540            0 :             let client = PageserverClient::new(
    2541            0 :                 node.get_id(),
    2542            0 :                 node.base_url(),
    2543            0 :                 self.config.jwt_token.as_deref(),
    2544            0 :             );
    2545            0 :             futs.push(async move {
    2546            0 :                 let result = client
    2547            0 :                     .tenant_secondary_download(tenant_shard_id, wait)
    2548            0 :                     .await;
    2549            0 :                 (result, node, tenant_shard_id)
    2550            0 :             })
    2551              :         }
    2552              : 
    2553              :         // Handle any errors returned by pageservers.  This includes cases like this request racing with
    2554              :         // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
    2555              :         // well as more general cases like 503s, 500s, or timeouts.
    2556            0 :         let mut aggregate_progress = SecondaryProgress::default();
    2557            0 :         let mut aggregate_status: Option<StatusCode> = None;
    2558            0 :         let mut error: Option<mgmt_api::Error> = None;
    2559            0 :         while let Some((result, node, tenant_shard_id)) = futs.next().await {
    2560            0 :             match result {
    2561            0 :                 Err(e) => {
    2562            0 :                     // Secondary downloads are advisory: if a shard fails, we only log a warning and, provided
    2563            0 :                     // at least one shard succeeded, still report success, so that whoever is calling us will proceed
    2564            0 :                     // with whatever migration they're doing, albeit with a slightly less warm cache than they had hoped for.
    2565            0 :                     tracing::warn!("Secondary download error from pageserver {node}: {e}",);
    2566            0 :                     error = Some(e)
    2567              :                 }
    2568            0 :                 Ok((status_code, progress)) => {
    2569            0 :                     tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
    2570            0 :                     aggregate_progress.layers_downloaded += progress.layers_downloaded;
    2571            0 :                     aggregate_progress.layers_total += progress.layers_total;
    2572            0 :                     aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
    2573            0 :                     aggregate_progress.bytes_total += progress.bytes_total;
    2574            0 :                     aggregate_progress.heatmap_mtime =
    2575            0 :                         std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
    2576            0 :                     aggregate_status = match aggregate_status {
    2577            0 :                         None => Some(status_code),
    2578            0 :                         Some(StatusCode::OK) => Some(status_code),
    2579            0 :                         Some(cur) => {
    2580            0 :                             // Other status codes (e.g. 202) -- do not overwrite.
    2581            0 :                             Some(cur)
    2582              :                         }
    2583              :                     };
    2584              :                 }
    2585              :             }
    2586              :         }
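                      :         // The aggregation rule applied in the loop above, restated as a standalone sketch
                      :         // (hypothetical helper, not part of this file):
                      :         //
                      :         //     fn fold_status(cur: Option<StatusCode>, new: StatusCode) -> Option<StatusCode> {
                      :         //         match cur {
                      :         //             // Nothing yet, or only 200s so far: adopt the new status
                      :         //             None | Some(StatusCode::OK) => Some(new),
                      :         //             // A non-200 status (e.g. 202 Accepted) is sticky and is never downgraded back to 200
                      :         //             Some(other) => Some(other),
                      :         //         }
                      :         //     }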
    2587              : 
    2588              :         // If any of the shards return 202, indicate our result as 202.
    2589            0 :         match aggregate_status {
    2590              :             None => {
    2591            0 :                 match error {
    2592            0 :                     Some(e) => {
    2593            0 :                         // No successes, and an error: surface it
    2594            0 :                         Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
    2595              :                     }
    2596              :                     None => {
    2597              :                         // No shards found
    2598            0 :                         Err(ApiError::NotFound(
    2599            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    2600            0 :                         ))
    2601              :                     }
    2602              :                 }
    2603              :             }
    2604            0 :             Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
    2605              :         }
    2606            0 :     }
    2607              : 
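                      :     /// Delete a tenant: detach all of its shards, issue the remote deletion through one arbitrarily
                      :     /// chosen pageserver, then drop the controller's persistent and in-memory state for the tenant.
                      :     /// Success is reported as 404 NOT_FOUND, imitating the existing pageserver deletion API.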
    2608            0 :     pub(crate) async fn tenant_delete(&self, tenant_id: TenantId) -> Result<StatusCode, ApiError> {
    2609            0 :         let _tenant_lock =
    2610            0 :             trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
    2611              : 
    2612              :         // Detach all shards
    2613            0 :         let (detach_waiters, shard_ids, node) = {
    2614            0 :             let mut shard_ids = Vec::new();
    2615            0 :             let mut detach_waiters = Vec::new();
    2616            0 :             let mut locked = self.inner.write().unwrap();
    2617            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    2618            0 :             for (tenant_shard_id, shard) in
    2619            0 :                 tenants.range_mut(TenantShardId::tenant_range(tenant_id))
    2620              :             {
    2621            0 :                 shard_ids.push(*tenant_shard_id);
    2622            0 : 
    2623            0 :                 // Update the tenant's intent to remove all attachments
    2624            0 :                 shard.policy = PlacementPolicy::Detached;
    2625            0 :                 shard
    2626            0 :                     .schedule(scheduler, &mut ScheduleContext::default())
    2627            0 :                     .expect("De-scheduling is infallible");
    2628            0 :                 debug_assert!(shard.intent.get_attached().is_none());
    2629            0 :                 debug_assert!(shard.intent.get_secondary().is_empty());
    2630              : 
    2631            0 :                 if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
    2632            0 :                     detach_waiters.push(waiter);
    2633            0 :                 }
    2634              :             }
    2635              : 
    2636              :             // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
    2637              :             // was attached, just has to be able to see the S3 content)
    2638            0 :             let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
    2639            0 :             let node = nodes
    2640            0 :                 .get(&node_id)
    2641            0 :                 .expect("Pageservers may not be deleted while lock is active");
    2642            0 :             (detach_waiters, shard_ids, node.clone())
    2643            0 :         };
    2644            0 : 
    2645            0 :         // This reconcile wait can fail in a few ways:
    2646            0 :         //  A. there is a very long queue for the reconciler semaphore
    2647            0 :         //  B. some pageserver is failing to handle a detach promptly
    2648            0 :         //  C. some pageserver goes offline right at the moment we send it a request.
    2649            0 :         //
    2650            0 :         // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
    2651            0 :         // the next attempt to reconcile will silently skip detaches for an offline node and succeed.  If B happens,
    2652            0 :         // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
    2653            0 :         // deleting the underlying data).
    2654            0 :         self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
    2655            0 :             .await?;
    2656              : 
    2657            0 :         let locations = shard_ids
    2658            0 :             .into_iter()
    2659            0 :             .map(|s| (s, node.clone()))
    2660            0 :             .collect::<Vec<_>>();
    2661            0 :         let results = self.tenant_for_shards_api(
    2662            0 :             locations,
    2663            0 :             |tenant_shard_id, client| async move { client.tenant_delete(tenant_shard_id).await },
    2664            0 :             1,
    2665            0 :             3,
    2666            0 :             RECONCILE_TIMEOUT,
    2667            0 :             &self.cancel,
    2668            0 :         )
    2669            0 :         .await;
    2670            0 :         for result in results {
    2671            0 :             match result {
    2672              :                 Ok(StatusCode::ACCEPTED) => {
    2673              :                     // This should never happen: we waited for detaches to finish above
    2674            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    2675            0 :                         "Unexpectedly still attached on {}",
    2676            0 :                         node
    2677            0 :                     )));
    2678              :                 }
    2679            0 :                 Ok(_) => {}
    2680              :                 Err(mgmt_api::Error::Cancelled) => {
    2681            0 :                     return Err(ApiError::ShuttingDown);
    2682              :                 }
    2683            0 :                 Err(e) => {
    2684            0 :                     // This is unexpected: remote deletion should be infallible, unless the object store
    2685            0 :                     // at large is unavailable.
    2686            0 :                     tracing::error!("Error deleting via node {}: {e}", node);
    2687            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
    2688              :                 }
    2689              :             }
    2690              :         }
    2691              : 
    2692              :         // Fall through: deletion of the tenant on pageservers is complete; we may proceed to drop
    2693              :         // our in-memory state and database state.
    2694              : 
    2695              :         // Ordering: we delete persistent state first, so that if we crash before dropping the
    2696              :         // in-memory state, the tenant is simply absent when state is reloaded from the database.
    2697              : 
    2698              :         // Drop persistent state.
    2699            0 :         self.persistence.delete_tenant(tenant_id).await?;
    2700              : 
    2701              :         // Drop in-memory state
    2702              :         {
    2703            0 :             let mut locked = self.inner.write().unwrap();
    2704            0 :             let (_nodes, tenants, scheduler) = locked.parts_mut();
    2705              : 
    2706              :             // Dereference Scheduler from shards before dropping them
    2707            0 :             for (_tenant_shard_id, shard) in
    2708            0 :                 tenants.range_mut(TenantShardId::tenant_range(tenant_id))
    2709            0 :             {
    2710            0 :                 shard.intent.clear(scheduler);
    2711            0 :             }
    2712              : 
    2713            0 :             tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
    2714            0 :             tracing::info!(
    2715            0 :                 "Deleted tenant {tenant_id}, now have {} tenants",
    2716            0 :                 locked.tenants.len()
    2717              :             );
    2718              :         };
    2719              : 
    2720              :         // Success is represented as 404, to imitate the existing pageserver deletion API
    2721            0 :         Ok(StatusCode::NOT_FOUND)
    2722            0 :     }
    2723              : 
    2724              :     /// Naming: this configures the storage controller's own policies for a tenant, whereas [`Self::tenant_config_set`]
    2725              :     /// sets the TenantConfig for a tenant.  The TenantConfig is passed through to pageservers, whereas this function
    2726              :     /// modifies the tenant's policies (placement and scheduling) within the storage controller.
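                      :     ///
                      :     /// A minimal request sketch (illustrative only; assumes the `PlacementPolicy` variants from
                      :     /// `pageserver_api::controller_api`, where `Attached(1)` requests one attached location plus one secondary):
                      :     /// ```ignore
                      :     /// let req = TenantPolicyRequest {
                      :     ///     placement: Some(PlacementPolicy::Attached(1)),
                      :     ///     scheduling: None,
                      :     /// };
                      :     /// service.tenant_update_policy(tenant_id, req).await?;
                      :     /// ```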
    2727            0 :     pub(crate) async fn tenant_update_policy(
    2728            0 :         &self,
    2729            0 :         tenant_id: TenantId,
    2730            0 :         req: TenantPolicyRequest,
    2731            0 :     ) -> Result<(), ApiError> {
    2732              :         // We require an exclusive lock, because we are updating persistent and in-memory state
    2733            0 :         let _tenant_lock = trace_exclusive_lock(
    2734            0 :             &self.tenant_op_locks,
    2735            0 :             tenant_id,
    2736            0 :             TenantOperations::UpdatePolicy,
    2737            0 :         )
    2738            0 :         .await;
    2739              : 
    2740            0 :         failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
    2741              : 
    2742              :         let TenantPolicyRequest {
    2743            0 :             placement,
    2744            0 :             scheduling,
    2745            0 :         } = req;
    2746            0 : 
    2747            0 :         self.persistence
    2748            0 :             .update_tenant_shard(
    2749            0 :                 TenantFilter::Tenant(tenant_id),
    2750            0 :                 placement.clone(),
    2751            0 :                 None,
    2752            0 :                 None,
    2753            0 :                 scheduling,
    2754            0 :             )
    2755            0 :             .await?;
    2756              : 
    2757            0 :         let mut schedule_context = ScheduleContext::default();
    2758            0 :         let mut locked = self.inner.write().unwrap();
    2759            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    2760            0 :         for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    2761            0 :             if let Some(placement) = &placement {
    2762            0 :                 shard.policy = placement.clone();
    2763            0 : 
    2764            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    2765            0 :                                "Updated placement policy to {placement:?}");
    2766            0 :             }
    2767              : 
    2768            0 :             if let Some(scheduling) = &scheduling {
    2769            0 :                 shard.set_scheduling_policy(*scheduling);
    2770            0 : 
    2771            0 :                 tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
    2772            0 :                                "Updated scheduling policy to {scheduling:?}");
    2773            0 :             }
    2774              : 
    2775              :             // In case scheduling is being switched back on, try it now.
    2776            0 :             shard.schedule(scheduler, &mut schedule_context).ok();
    2777            0 :             self.maybe_reconcile_shard(shard, nodes);
    2778              :         }
    2779              : 
    2780            0 :         Ok(())
    2781            0 :     }
    2782              : 
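                      :     /// Create a timeline on all shards of a tenant.  Shard zero is created first: when branching
                      :     /// from an ancestor without an explicit `ancestor_start_lsn`, the LSN picked by shard zero is
                      :     /// propagated to the remaining shards so that every shard branches at the same LSN.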
    2783            0 :     pub(crate) async fn tenant_timeline_create(
    2784            0 :         &self,
    2785            0 :         tenant_id: TenantId,
    2786            0 :         mut create_req: TimelineCreateRequest,
    2787            0 :     ) -> Result<TimelineInfo, ApiError> {
    2788            0 :         tracing::info!(
    2789            0 :             "Creating timeline {}/{}",
    2790              :             tenant_id,
    2791              :             create_req.new_timeline_id,
    2792              :         );
    2793              : 
    2794            0 :         let _tenant_lock = trace_shared_lock(
    2795            0 :             &self.tenant_op_locks,
    2796            0 :             tenant_id,
    2797            0 :             TenantOperations::TimelineCreate,
    2798            0 :         )
    2799            0 :         .await;
    2800            0 :         failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
    2801              : 
    2802            0 :         self.ensure_attached_wait(tenant_id).await?;
    2803              : 
    2804            0 :         let mut targets = {
    2805            0 :             let locked = self.inner.read().unwrap();
    2806            0 :             let mut targets = Vec::new();
    2807              : 
    2808            0 :             for (tenant_shard_id, shard) in
    2809            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    2810            0 :             {
    2811            0 :                 let node_id = shard.intent.get_attached().ok_or_else(|| {
    2812            0 :                     ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
    2813            0 :                 })?;
    2814            0 :                 let node = locked
    2815            0 :                     .nodes
    2816            0 :                     .get(&node_id)
    2817            0 :                     .expect("Pageservers may not be deleted while referenced");
    2818            0 : 
    2819            0 :                 targets.push((*tenant_shard_id, node.clone()));
    2820              :             }
    2821            0 :             targets
    2822            0 :         };
    2823            0 : 
    2824            0 :         if targets.is_empty() {
    2825            0 :             return Err(ApiError::NotFound(
    2826            0 :                 anyhow::anyhow!("Tenant not found").into(),
    2827            0 :             ));
    2828            0 :         };
    2829            0 :         let shard_zero = targets.remove(0);
    2830              : 
    2831            0 :         async fn create_one(
    2832            0 :             tenant_shard_id: TenantShardId,
    2833            0 :             node: Node,
    2834            0 :             jwt: Option<String>,
    2835            0 :             create_req: TimelineCreateRequest,
    2836            0 :         ) -> Result<TimelineInfo, ApiError> {
    2837            0 :             tracing::info!(
    2838            0 :                 "Creating timeline on shard {}/{}, attached to node {node}",
    2839              :                 tenant_shard_id,
    2840              :                 create_req.new_timeline_id,
    2841              :             );
    2842            0 :             let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
    2843            0 : 
    2844            0 :             client
    2845            0 :                 .timeline_create(tenant_shard_id, &create_req)
    2846            0 :                 .await
    2847            0 :                 .map_err(|e| passthrough_api_error(&node, e))
    2848            0 :         }
    2849              : 
    2850              :         // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
    2851              :         // use whatever LSN that shard picked when creating on subsequent shards.  We arbitrarily use shard zero as the shard
    2852              :         // that will get the first creation request, and propagate the LSN to all the >0 shards.
    2853            0 :         let timeline_info = create_one(
    2854            0 :             shard_zero.0,
    2855            0 :             shard_zero.1,
    2856            0 :             self.config.jwt_token.clone(),
    2857            0 :             create_req.clone(),
    2858            0 :         )
    2859            0 :         .await?;
    2860              : 
    2861              :         // Propagate the LSN that shard zero picked, if caller didn't provide one
    2862            0 :         if create_req.ancestor_timeline_id.is_some() && create_req.ancestor_start_lsn.is_none() {
    2863            0 :             create_req.ancestor_start_lsn = timeline_info.ancestor_lsn;
    2864            0 :         }
    2865              : 
    2866              :         // Create timeline on remaining shards with number >0
    2867            0 :         if !targets.is_empty() {
    2868              :             // If we had multiple shards, issue requests for the remainder now.
    2869            0 :             let jwt = &self.config.jwt_token;
    2870            0 :             self.tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
    2871            0 :                 let create_req = create_req.clone();
    2872            0 :                 Box::pin(create_one(tenant_shard_id, node, jwt.clone(), create_req))
    2873            0 :             })
    2874            0 :             .await?;
    2875            0 :         }
    2876              : 
    2877            0 :         Ok(timeline_info)
    2878            0 :     }
    2879              : 
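                      :     /// Detach a timeline from its ancestor, on every attached shard of the tenant.  All shards are
                      :     /// expected to return the same result; one response is returned to the caller, and any mismatch
                      :     /// between shards is logged as an error.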
    2880            0 :     pub(crate) async fn tenant_timeline_detach_ancestor(
    2881            0 :         &self,
    2882            0 :         tenant_id: TenantId,
    2883            0 :         timeline_id: TimelineId,
    2884            0 :     ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
    2885            0 :         tracing::info!("Detaching timeline {tenant_id}/{timeline_id}",);
    2886              : 
    2887            0 :         let _tenant_lock = trace_shared_lock(
    2888            0 :             &self.tenant_op_locks,
    2889            0 :             tenant_id,
    2890            0 :             TenantOperations::TimelineDetachAncestor,
    2891            0 :         )
    2892            0 :         .await;
    2893              : 
    2894            0 :         self.ensure_attached_wait(tenant_id).await?;
    2895              : 
    2896            0 :         let targets = {
    2897            0 :             let locked = self.inner.read().unwrap();
    2898            0 :             let mut targets = Vec::new();
    2899              : 
    2900            0 :             for (tenant_shard_id, shard) in
    2901            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    2902            0 :             {
    2903            0 :                 let node_id = shard.intent.get_attached().ok_or_else(|| {
    2904            0 :                     ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
    2905            0 :                 })?;
    2906            0 :                 let node = locked
    2907            0 :                     .nodes
    2908            0 :                     .get(&node_id)
    2909            0 :                     .expect("Pageservers may not be deleted while referenced");
    2910            0 : 
    2911            0 :                 targets.push((*tenant_shard_id, node.clone()));
    2912              :             }
    2913            0 :             targets
    2914            0 :         };
    2915            0 : 
    2916            0 :         if targets.is_empty() {
    2917            0 :             return Err(ApiError::NotFound(
    2918            0 :                 anyhow::anyhow!("Tenant not found").into(),
    2919            0 :             ));
    2920            0 :         }
    2921              : 
    2922            0 :         async fn detach_one(
    2923            0 :             tenant_shard_id: TenantShardId,
    2924            0 :             timeline_id: TimelineId,
    2925            0 :             node: Node,
    2926            0 :             jwt: Option<String>,
    2927            0 :         ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
    2928            0 :             tracing::info!(
    2929            0 :                 "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    2930              :             );
    2931              : 
    2932            0 :             let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
    2933            0 :             client
    2934            0 :                 .timeline_detach_ancestor(tenant_shard_id, timeline_id)
    2935            0 :                 .await
    2936            0 :                 .map_err(|e| {
    2937            0 :                     use mgmt_api::Error;
    2938            0 : 
    2939            0 :                     match e {
    2940              :                         // no ancestor (ever)
    2941            0 :                         Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
    2942            0 :                             "{node}: {}",
    2943            0 :                             msg.strip_prefix("Conflict: ").unwrap_or(&msg)
    2944            0 :                         )),
    2945              :                         // too many ancestors
    2946            0 :                         Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
    2947            0 :                             ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
    2948              :                         }
    2949              :                         // rest can be mapped
    2950            0 :                         other => passthrough_api_error(&node, other),
    2951              :                     }
    2952            0 :                 })
    2953            0 :                 .map(|res| (tenant_shard_id.shard_number, res))
    2954            0 :         }
    2955              : 
    2956              :         // no shard needs to go first/last; the operation should be idempotent
    2957              :         // TODO: it would be great to ensure that all shards return the same error
    2958            0 :         let mut results = self
    2959            0 :             .tenant_for_shards(targets, |tenant_shard_id, node| {
    2960            0 :                 futures::FutureExt::boxed(detach_one(
    2961            0 :                     tenant_shard_id,
    2962            0 :                     timeline_id,
    2963            0 :                     node,
    2964            0 :                     self.config.jwt_token.clone(),
    2965            0 :                 ))
    2966            0 :             })
    2967            0 :             .await?;
    2968              : 
    2969            0 :         let any = results.pop().expect("we must have at least one response");
    2970            0 : 
    2971            0 :         let mismatching = results
    2972            0 :             .iter()
    2973            0 :             .filter(|(_, res)| res != &any.1)
    2974            0 :             .collect::<Vec<_>>();
    2975            0 :         if !mismatching.is_empty() {
    2976            0 :             let matching = results.len() - mismatching.len();
    2977            0 :             tracing::error!(
    2978              :                 matching,
    2979              :                 compared_against=?any,
    2980              :                 ?mismatching,
    2981            0 :                 "shards returned different results"
    2982              :             );
    2983            0 :         }
    2984              : 
    2985            0 :         Ok(any.1)
    2986            0 :     }
    2987              : 
    2988              :     /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
    2989              :     ///
    2990              :     /// On success, the returned vector contains exactly the same number of elements as the input `locations`.
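                      :     ///
                      :     /// A usage sketch, mirroring [`Self::tenant_timeline_create`] (`do_one` is a hypothetical
                      :     /// async helper returning `Result<_, ApiError>`):
                      :     /// ```ignore
                      :     /// let results = self
                      :     ///     .tenant_for_shards(targets, |tenant_shard_id, node| {
                      :     ///         Box::pin(do_one(tenant_shard_id, node))
                      :     ///     })
                      :     ///     .await?;
                      :     /// ```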
    2991            0 :     async fn tenant_for_shards<F, R>(
    2992            0 :         &self,
    2993            0 :         locations: Vec<(TenantShardId, Node)>,
    2994            0 :         mut req_fn: F,
    2995            0 :     ) -> Result<Vec<R>, ApiError>
    2996            0 :     where
    2997            0 :         F: FnMut(
    2998            0 :             TenantShardId,
    2999            0 :             Node,
    3000            0 :         )
    3001            0 :             -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
    3002            0 :     {
    3003            0 :         let mut futs = FuturesUnordered::new();
    3004            0 :         let mut results = Vec::with_capacity(locations.len());
    3005              : 
    3006            0 :         for (tenant_shard_id, node) in locations {
    3007            0 :             futs.push(req_fn(tenant_shard_id, node));
    3008            0 :         }
    3009              : 
    3010            0 :         while let Some(r) = futs.next().await {
    3011            0 :             results.push(r?);
    3012              :         }
    3013              : 
    3014            0 :         Ok(results)
    3015            0 :     }
    3016              : 
    3017              :     /// Concurrently invoke a pageserver API call on many shards at once
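                      :     ///
                      :     /// Unlike [`Self::tenant_for_shards`], this retries each call via `Node::with_client_retries` and
                      :     /// returns per-shard `mgmt_api::Result`s rather than failing on the first error.
                      :     ///
                      :     /// A usage sketch, mirroring the call in [`Self::tenant_delete`] (warn threshold, retry count and
                      :     /// timeout are illustrative):
                      :     /// ```ignore
                      :     /// let results = self
                      :     ///     .tenant_for_shards_api(
                      :     ///         locations,
                      :     ///         |tenant_shard_id, client| async move { client.tenant_delete(tenant_shard_id).await },
                      :     ///         1,
                      :     ///         3,
                      :     ///         RECONCILE_TIMEOUT,
                      :     ///         &self.cancel,
                      :     ///     )
                      :     ///     .await;
                      :     /// ```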
    3018            0 :     pub(crate) async fn tenant_for_shards_api<T, O, F>(
    3019            0 :         &self,
    3020            0 :         locations: Vec<(TenantShardId, Node)>,
    3021            0 :         op: O,
    3022            0 :         warn_threshold: u32,
    3023            0 :         max_retries: u32,
    3024            0 :         timeout: Duration,
    3025            0 :         cancel: &CancellationToken,
    3026            0 :     ) -> Vec<mgmt_api::Result<T>>
    3027            0 :     where
    3028            0 :         O: Fn(TenantShardId, PageserverClient) -> F + Copy,
    3029            0 :         F: std::future::Future<Output = mgmt_api::Result<T>>,
    3030            0 :     {
    3031            0 :         let mut futs = FuturesUnordered::new();
    3032            0 :         let mut results = Vec::with_capacity(locations.len());
    3033              : 
    3034            0 :         for (tenant_shard_id, node) in locations {
    3035            0 :             futs.push(async move {
    3036            0 :                 node.with_client_retries(
    3037            0 :                     |client| op(tenant_shard_id, client),
    3038            0 :                     &self.config.jwt_token,
    3039            0 :                     warn_threshold,
    3040            0 :                     max_retries,
    3041            0 :                     timeout,
    3042            0 :                     cancel,
    3043            0 :                 )
    3044            0 :                 .await
    3045            0 :             });
    3046            0 :         }
    3047              : 
    3048            0 :         while let Some(r) = futs.next().await {
    3049            0 :             let r = r.unwrap_or(Err(mgmt_api::Error::Cancelled));
    3050            0 :             results.push(r);
    3051            0 :         }
    3052              : 
    3053            0 :         results
    3054            0 :     }
    3055              : 
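                      :     /// Delete a timeline across all shards of a tenant.  Shards with number >0 are deleted first, and
                      :     /// shard zero only once they all report 404, so that a caller's GET (which is routed to shard zero)
                      :     /// does not return 404 until the deletion has completed everywhere.  Returns 202 while shards >0
                      :     /// are still deleting.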
    3056            0 :     pub(crate) async fn tenant_timeline_delete(
    3057            0 :         &self,
    3058            0 :         tenant_id: TenantId,
    3059            0 :         timeline_id: TimelineId,
    3060            0 :     ) -> Result<StatusCode, ApiError> {
    3061            0 :         tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
    3062            0 :         let _tenant_lock = trace_shared_lock(
    3063            0 :             &self.tenant_op_locks,
    3064            0 :             tenant_id,
    3065            0 :             TenantOperations::TimelineDelete,
    3066            0 :         )
    3067            0 :         .await;
    3068              : 
    3069            0 :         self.ensure_attached_wait(tenant_id).await?;
    3070              : 
    3071            0 :         let mut targets = {
    3072            0 :             let locked = self.inner.read().unwrap();
    3073            0 :             let mut targets = Vec::new();
    3074              : 
    3075            0 :             for (tenant_shard_id, shard) in
    3076            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3077            0 :             {
    3078            0 :                 let node_id = shard.intent.get_attached().ok_or_else(|| {
    3079            0 :                     ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
    3080            0 :                 })?;
    3081            0 :                 let node = locked
    3082            0 :                     .nodes
    3083            0 :                     .get(&node_id)
    3084            0 :                     .expect("Pageservers may not be deleted while referenced");
    3085            0 : 
    3086            0 :                 targets.push((*tenant_shard_id, node.clone()));
    3087              :             }
    3088            0 :             targets
    3089            0 :         };
    3090            0 : 
    3091            0 :         if targets.is_empty() {
    3092            0 :             return Err(ApiError::NotFound(
    3093            0 :                 anyhow::anyhow!("Tenant not found").into(),
    3094            0 :             ));
    3095            0 :         }
    3096            0 :         let shard_zero = targets.remove(0);
    3097              : 
    3098            0 :         async fn delete_one(
    3099            0 :             tenant_shard_id: TenantShardId,
    3100            0 :             timeline_id: TimelineId,
    3101            0 :             node: Node,
    3102            0 :             jwt: Option<String>,
    3103            0 :         ) -> Result<StatusCode, ApiError> {
    3104            0 :             tracing::info!(
    3105            0 :                 "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
    3106              :             );
    3107              : 
    3108            0 :             let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
    3109            0 :             client
    3110            0 :                 .timeline_delete(tenant_shard_id, timeline_id)
    3111            0 :                 .await
    3112            0 :                 .map_err(|e| {
    3113            0 :                     ApiError::InternalServerError(anyhow::anyhow!(
    3114            0 :                         "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
    3115            0 :                     ))
    3116            0 :                 })
    3117            0 :         }
    3118              : 
    3119            0 :         let statuses = self
    3120            0 :             .tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
    3121            0 :                 Box::pin(delete_one(
    3122            0 :                     tenant_shard_id,
    3123            0 :                     timeline_id,
    3124            0 :                     node,
    3125            0 :                     self.config.jwt_token.clone(),
    3126            0 :                 ))
    3127            0 :             })
    3128            0 :             .await?;
    3129              : 
    3130              :         // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero
    3131            0 :         if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
    3132            0 :             return Ok(StatusCode::ACCEPTED);
    3133            0 :         }
    3134              : 
    3135              :         // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
    3136              :         // to shard zero, it gives the more intuitive behavior that a GET only returns 404 once the deletion is fully done.
    3137            0 :         let shard_zero_status = delete_one(
    3138            0 :             shard_zero.0,
    3139            0 :             timeline_id,
    3140            0 :             shard_zero.1,
    3141            0 :             self.config.jwt_token.clone(),
    3142            0 :         )
    3143            0 :         .await?;
    3144              : 
    3145            0 :         Ok(shard_zero_status)
    3146            0 :     }
    3147              : 
    3148              :     /// When you need to send an HTTP request to the pageserver that holds shard zero of a tenant, this
    3149              :     /// function looks up and returns the node.  If the tenant isn't found, it returns `Err(ApiError::NotFound)`.
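                      :     ///
                      :     /// A usage sketch:
                      :     /// ```ignore
                      :     /// let (node, shard_zero_id) = self.tenant_shard0_node(tenant_id)?;
                      :     /// // ...proxy the caller's request to `node`, addressed to `shard_zero_id`...
                      :     /// ```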
    3150            0 :     pub(crate) fn tenant_shard0_node(
    3151            0 :         &self,
    3152            0 :         tenant_id: TenantId,
    3153            0 :     ) -> Result<(Node, TenantShardId), ApiError> {
    3154            0 :         let locked = self.inner.read().unwrap();
    3155            0 :         let Some((tenant_shard_id, shard)) = locked
    3156            0 :             .tenants
    3157            0 :             .range(TenantShardId::tenant_range(tenant_id))
    3158            0 :             .next()
    3159              :         else {
    3160            0 :             return Err(ApiError::NotFound(
    3161            0 :                 anyhow::anyhow!("Tenant {tenant_id} not found").into(),
    3162            0 :             ));
    3163              :         };
    3164              : 
    3165              :         // TODO: should use the ID last published to compute_hook, rather than the intent: the intent might
    3166              :         // point to somewhere we haven't attached yet.
    3167            0 :         let Some(node_id) = shard.intent.get_attached() else {
    3168            0 :             tracing::warn!(
    3169            0 :                 tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
    3170            0 :                 "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
    3171              :                 shard.policy
    3172              :             );
    3173            0 :             return Err(ApiError::Conflict(
    3174            0 :                 "Cannot call timeline API on non-attached tenant".to_string(),
    3175            0 :             ));
    3176              :         };
    3177              : 
    3178            0 :         let Some(node) = locked.nodes.get(node_id) else {
    3179              :             // This should never happen
    3180            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3181            0 :                 "Shard refers to nonexistent node"
    3182            0 :             )));
    3183              :         };
    3184              : 
    3185            0 :         Ok((node.clone(), *tenant_shard_id))
    3186            0 :     }
    3187              : 
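                      :     /// Return the attached pageserver location of every shard in the tenant, together with the
                      :     /// tenant's shard parameters (count and stripe size).  Fails with 400 if any shard is not
                      :     /// attached, and with 404 if the tenant has no shards.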
    3188            0 :     pub(crate) fn tenant_locate(
    3189            0 :         &self,
    3190            0 :         tenant_id: TenantId,
    3191            0 :     ) -> Result<TenantLocateResponse, ApiError> {
    3192            0 :         let locked = self.inner.read().unwrap();
    3193            0 :         tracing::info!("Locating shards for tenant {tenant_id}");
    3194              : 
    3195            0 :         let mut result = Vec::new();
    3196            0 :         let mut shard_params: Option<ShardParameters> = None;
    3197              : 
    3198            0 :         for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3199              :         {
    3200            0 :             let node_id =
    3201            0 :                 shard
    3202            0 :                     .intent
    3203            0 :                     .get_attached()
    3204            0 :                     .ok_or(ApiError::BadRequest(anyhow::anyhow!(
    3205            0 :                         "Cannot locate a tenant that is not attached"
    3206            0 :                     )))?;
    3207              : 
    3208            0 :             let node = locked
    3209            0 :                 .nodes
    3210            0 :                 .get(&node_id)
    3211            0 :                 .expect("Pageservers may not be deleted while referenced");
    3212            0 : 
    3213            0 :             result.push(node.shard_location(*tenant_shard_id));
    3214            0 : 
    3215            0 :             match &shard_params {
    3216            0 :                 None => {
    3217            0 :                     shard_params = Some(ShardParameters {
    3218            0 :                         stripe_size: shard.shard.stripe_size,
    3219            0 :                         count: shard.shard.count,
    3220            0 :                     });
    3221            0 :                 }
    3222            0 :                 Some(params) => {
    3223            0 :                     if params.stripe_size != shard.shard.stripe_size {
    3224              :                         // This should never happen.  We enforce at runtime because it's simpler than
    3225              :                         // adding an extra per-tenant data structure to store the things that should be the same
    3226            0 :                         return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3227            0 :                             "Inconsistent shard stripe size parameters!"
    3228            0 :                         )));
    3229            0 :                     }
    3230              :                 }
    3231              :             }
    3232              :         }
    3233              : 
    3234            0 :         if result.is_empty() {
    3235            0 :             return Err(ApiError::NotFound(
    3236            0 :                 anyhow::anyhow!("No shards for this tenant ID found").into(),
    3237            0 :             ));
    3238            0 :         }
    3239            0 :         let shard_params = shard_params.expect("result is non-empty, therefore this is set");
    3240            0 :         tracing::info!(
    3241            0 :             "Located tenant {} with params {:?} on shards {}",
    3242            0 :             tenant_id,
    3243            0 :             shard_params,
    3244            0 :             result
    3245            0 :                 .iter()
    3246            0 :                 .map(|s| format!("{:?}", s))
    3247            0 :                 .collect::<Vec<_>>()
    3248            0 :                 .join(",")
    3249              :         );
    3250              : 
    3251            0 :         Ok(TenantLocateResponse {
    3252            0 :             shards: result,
    3253            0 :             shard_params,
    3254            0 :         })
    3255            0 :     }
    3256              : 
    3257              :     /// Returns None if the input iterator of shards does not include a shard with number=0
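                      :     ///
                      :     /// Callers typically pass the shards of a single tenant, e.g.
                      :     /// `tenants.range(TenantShardId::tenant_range(tenant_id)).map(|(_k, v)| v)`, as in [`Self::tenant_describe`].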
    3258            0 :     fn tenant_describe_impl<'a>(
    3259            0 :         &self,
    3260            0 :         shards: impl Iterator<Item = &'a TenantShard>,
    3261            0 :     ) -> Option<TenantDescribeResponse> {
    3262            0 :         let mut shard_zero = None;
    3263            0 :         let mut describe_shards = Vec::new();
    3264              : 
    3265            0 :         for shard in shards {
    3266            0 :             if shard.tenant_shard_id.is_shard_zero() {
    3267            0 :                 shard_zero = Some(shard);
    3268            0 :             }
    3269              : 
    3270            0 :             describe_shards.push(TenantDescribeResponseShard {
    3271            0 :                 tenant_shard_id: shard.tenant_shard_id,
    3272            0 :                 node_attached: *shard.intent.get_attached(),
    3273            0 :                 node_secondary: shard.intent.get_secondary().to_vec(),
    3274            0 :                 last_error: shard
    3275            0 :                     .last_error
    3276            0 :                     .lock()
    3277            0 :                     .unwrap()
    3278            0 :                     .as_ref()
    3279            0 :                     .map(|e| format!("{e}"))
    3280            0 :                     .unwrap_or("".to_string())
    3281            0 :                     .clone(),
    3282            0 :                 is_reconciling: shard.reconciler.is_some(),
    3283            0 :                 is_pending_compute_notification: shard.pending_compute_notification,
    3284            0 :                 is_splitting: matches!(shard.splitting, SplitState::Splitting),
    3285            0 :                 scheduling_policy: *shard.get_scheduling_policy(),
    3286              :             })
    3287              :         }
    3288              : 
    3289            0 :         let shard_zero = shard_zero?;
    3290              : 
    3291            0 :         Some(TenantDescribeResponse {
    3292            0 :             tenant_id: shard_zero.tenant_shard_id.tenant_id,
    3293            0 :             shards: describe_shards,
    3294            0 :             stripe_size: shard_zero.shard.stripe_size,
    3295            0 :             policy: shard_zero.policy.clone(),
    3296            0 :             config: shard_zero.config.clone(),
    3297            0 :         })
    3298            0 :     }
    3299              : 
    3300            0 :     pub(crate) fn tenant_describe(
    3301            0 :         &self,
    3302            0 :         tenant_id: TenantId,
    3303            0 :     ) -> Result<TenantDescribeResponse, ApiError> {
    3304            0 :         let locked = self.inner.read().unwrap();
    3305            0 : 
    3306            0 :         self.tenant_describe_impl(
    3307            0 :             locked
    3308            0 :                 .tenants
    3309            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3310            0 :                 .map(|(_k, v)| v),
    3311            0 :         )
    3312            0 :         .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
    3313            0 :     }
    3314              : 
    3315            0 :     pub(crate) fn tenant_list(&self) -> Vec<TenantDescribeResponse> {
    3316            0 :         let locked = self.inner.read().unwrap();
    3317            0 : 
    3318            0 :         let mut result = Vec::new();
    3319            0 :         for (_tenant_id, tenant_shards) in
    3320            0 :             &locked.tenants.iter().group_by(|(id, _shard)| id.tenant_id)
    3321            0 :         {
    3322            0 :             result.push(
    3323            0 :                 self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
    3324            0 :                     .expect("Groups are always non-empty"),
    3325            0 :             );
    3326            0 :         }
    3327              : 
    3328            0 :         result
    3329            0 :     }
    3330              : 
    3331            0 :     #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
    3332              :     async fn abort_tenant_shard_split(
    3333              :         &self,
    3334              :         op: &TenantShardSplitAbort,
    3335              :     ) -> Result<(), TenantShardSplitAbortError> {
    3336              :         // Cleaning up a split:
    3337              :         // - Parent shards are not destroyed during a split, just detached.
    3338              :         // - Failed pageserver split API calls can leave the remote node with just the parent attached,
    3339              :         //   just the children attached, or both.
    3340              :         //
    3341              :         // Therefore our work to do is to:
    3342              :         // Therefore our work is to:
    3343              :         // 2. Call out to pageservers to ensure that children are detached
    3344              :         // 3. Call out to pageservers to ensure that parents are attached.
    3345              :         //
    3346              :         // Crash safety:
    3347              :         // - If the storage controller stops running during this cleanup *after* clearing the splitting state
    3348              :         //   from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
    3349              :         //   and detach them.
    3350              :         // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
    3351              :         //   from our database, then we will re-enter this cleanup routine on startup.
    3352              : 
    3353              :         let TenantShardSplitAbort {
    3354              :             tenant_id,
    3355              :             new_shard_count,
    3356              :             new_stripe_size,
    3357              :             ..
    3358              :         } = op;
    3359              : 
    3360              :         // First abort persistent state, if any exists.
    3361              :         match self
    3362              :             .persistence
    3363              :             .abort_shard_split(*tenant_id, *new_shard_count)
    3364              :             .await?
    3365              :         {
    3366              :             AbortShardSplitStatus::Aborted => {
    3367              :                 // Proceed to roll back any child shards created on pageservers
    3368              :             }
    3369              :             AbortShardSplitStatus::Complete => {
    3370              :                 // The split completed (we might hit that path if e.g. our database transaction
    3371              :                 // to write the completion landed in the database, but we dropped connection
    3372              :                 // before seeing the result).
    3373              :                 //
    3374              :                 // We must update in-memory state to reflect the successful split.
    3375              :                 self.tenant_shard_split_commit_inmem(
    3376              :                     *tenant_id,
    3377              :                     *new_shard_count,
    3378              :                     *new_stripe_size,
    3379              :                 );
    3380              :                 return Ok(());
    3381              :             }
    3382              :         }
    3383              : 
    3384              :         // Clean up in-memory state, and accumulate the list of child locations that need detaching
    3385              :         let detach_locations: Vec<(Node, TenantShardId)> = {
    3386              :             let mut detach_locations = Vec::new();
    3387              :             let mut locked = self.inner.write().unwrap();
    3388              :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3389              : 
    3390              :             for (tenant_shard_id, shard) in
    3391              :                 tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
    3392              :             {
    3393              :                 if shard.shard.count == op.new_shard_count {
    3394              :                     // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
    3395              :                     // is infallible, so if we got an error we shouldn't have got that far.
    3396              :                     tracing::warn!(
    3397              :                         "During split abort, child shard {tenant_shard_id} found in-memory"
    3398              :                     );
    3399              :                     continue;
    3400              :                 }
    3401              : 
    3402              :                 // Add the children of this shard to this list of things to detach
    3403              :                 if let Some(node_id) = shard.intent.get_attached() {
    3404              :                     for child_id in tenant_shard_id.split(*new_shard_count) {
    3405              :                         detach_locations.push((
    3406              :                             nodes
    3407              :                                 .get(node_id)
    3408              :                                 .expect("Intent references nonexistent node")
    3409              :                                 .clone(),
    3410              :                             child_id,
    3411              :                         ));
    3412              :                     }
    3413              :                 } else {
    3414              :                     tracing::warn!(
    3415              :                         "During split abort, shard {tenant_shard_id} has no attached location"
    3416              :                     );
    3417              :                 }
    3418              : 
    3419              :                 tracing::info!("Restoring parent shard {tenant_shard_id}");
    3420              :                 shard.splitting = SplitState::Idle;
    3421              :                 if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
    3422              :                     // If this shard can't be scheduled now (perhaps due to offline nodes or
    3423              :                     // capacity issues), that must not prevent us rolling back a split.  In this
    3424              :                     // case it should be eventually scheduled in the background.
    3425              :                     tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
    3426              :                 }
    3427              : 
    3428              :                 self.maybe_reconcile_shard(shard, nodes);
    3429              :             }
    3430              : 
    3431              :             // We don't expect any new_shard_count shards to exist here, but drop them just in case
    3432            0 :             tenants.retain(|_id, s| s.shard.count != *new_shard_count);
    3433              : 
    3434              :             detach_locations
    3435              :         };
    3436              : 
    3437              :         for (node, child_id) in detach_locations {
    3438              :             if !node.is_available() {
    3439              :                 // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
    3440              :                 // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
    3441              :                 // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
    3442              :                 // them from the node.
    3443              :                 tracing::warn!("Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated.");
    3444              :                 continue;
    3445              :             }
    3446              : 
    3447              :             // Detach the remote child.  If the pageserver split API call is still in progress, this call will get
    3448              :             // a 503 and retry, up to our limit.
    3449              :             tracing::info!("Detaching {child_id} on {node}...");
    3450              :             match node
    3451              :                 .with_client_retries(
    3452            0 :                     |client| async move {
    3453            0 :                         let config = LocationConfig {
    3454            0 :                             mode: LocationConfigMode::Detached,
    3455            0 :                             generation: None,
    3456            0 :                             secondary_conf: None,
    3457            0 :                             shard_number: child_id.shard_number.0,
    3458            0 :                             shard_count: child_id.shard_count.literal(),
    3459            0 :                             // Stripe size and tenant config don't matter when detaching
    3460            0 :                             shard_stripe_size: 0,
    3461            0 :                             tenant_conf: TenantConfig::default(),
    3462            0 :                         };
    3463            0 : 
    3464            0 :                         client.location_config(child_id, config, None, false).await
    3465            0 :                     },
    3466              :                     &self.config.jwt_token,
    3467              :                     1,
    3468              :                     10,
    3469              :                     Duration::from_secs(5),
    3470              :                     &self.cancel,
    3471              :                 )
    3472              :                 .await
    3473              :             {
    3474              :                 Some(Ok(_)) => {}
    3475              :                 Some(Err(e)) => {
    3476              :                     // We failed to communicate with the remote node.  This is problematic: we may be
    3477              :                     // leaving it with a rogue child shard.
    3478              :                     tracing::warn!(
    3479              :                         "Failed to detach child {child_id} from node {node} during abort"
    3480              :                     );
    3481              :                     return Err(e.into());
    3482              :                 }
    3483              :                 None => {
    3484              :                     // Cancellation: we were shut down or the node went offline. Shutdown is fine, we'll
    3485              :                     // clean up on restart. The node going offline requires a retry.
    3486              :                     return Err(TenantShardSplitAbortError::Unavailable);
    3487              :                 }
    3488              :             };
    3489              :         }
    3490              : 
    3491              :         tracing::info!("Successfully aborted split");
    3492              :         Ok(())
    3493              :     }
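// Illustrative sketch (not part of the original source): the abort path above fans out from
// each parent shard to the children it may have produced, via
// `tenant_shard_id.split(new_shard_count)`.  The sketch models that fan-out with a
// hypothetical `ShardIndex` type and assumes children of a parent with number `n` and count
// `c` are `n, n + c, n + 2c, ...` under the new count; the real enumeration lives in
// `TenantShardId::split`.
#[cfg(test)]
mod split_fanout_sketch {
    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    struct ShardIndex {
        number: u8,
        count: u8,
    }

    // Hypothetical stand-in for `TenantShardId::split`.
    fn split_children(parent: ShardIndex, new_count: u8) -> Vec<ShardIndex> {
        // Treat an unsharded tenant (count 0) as a single shard of count 1.
        let old_count = parent.count.max(1);
        assert_eq!(new_count % old_count, 0, "new count must be a multiple of the old count");
        (0..new_count / old_count)
            .map(|i| ShardIndex {
                number: parent.number + i * old_count,
                count: new_count,
            })
            .collect()
    }

    #[test]
    fn fan_out_example() {
        // Splitting shard 1 of a 2-shard tenant into 4 shards yields children 1 and 3, so the
        // abort loop above would issue a detach for each of those child locations.
        assert_eq!(
            split_children(ShardIndex { number: 1, count: 2 }, 4),
            vec![
                ShardIndex { number: 1, count: 4 },
                ShardIndex { number: 3, count: 4 },
            ]
        );
    }
}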
    3494              : 
    3495              :     /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
    3496              :     /// of the tenant map to reflect the child shards that exist after the split.
    3497            0 :     fn tenant_shard_split_commit_inmem(
    3498            0 :         &self,
    3499            0 :         tenant_id: TenantId,
    3500            0 :         new_shard_count: ShardCount,
    3501            0 :         new_stripe_size: Option<ShardStripeSize>,
    3502            0 :     ) -> (
    3503            0 :         TenantShardSplitResponse,
    3504            0 :         Vec<(TenantShardId, NodeId, ShardStripeSize)>,
    3505            0 :         Vec<ReconcilerWaiter>,
    3506            0 :     ) {
    3507            0 :         let mut response = TenantShardSplitResponse {
    3508            0 :             new_shards: Vec::new(),
    3509            0 :         };
    3510            0 :         let mut child_locations = Vec::new();
    3511            0 :         let mut waiters = Vec::new();
    3512            0 : 
    3513            0 :         {
    3514            0 :             let mut locked = self.inner.write().unwrap();
    3515            0 : 
    3516            0 :             let parent_ids = locked
    3517            0 :                 .tenants
    3518            0 :                 .range(TenantShardId::tenant_range(tenant_id))
    3519            0 :                 .map(|(shard_id, _)| *shard_id)
    3520            0 :                 .collect::<Vec<_>>();
    3521            0 : 
    3522            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3523            0 :             for parent_id in parent_ids {
    3524            0 :                 let child_ids = parent_id.split(new_shard_count);
    3525              : 
    3526            0 :                 let (pageserver, generation, policy, parent_ident, config) = {
    3527            0 :                     let mut old_state = tenants
    3528            0 :                         .remove(&parent_id)
    3529            0 :                         .expect("It was present, we just split it");
    3530            0 : 
    3531            0 :                     // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
    3532            0 :                     // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
    3533            0 :                     // nothing else can clear this.
    3534            0 :                     assert!(matches!(old_state.splitting, SplitState::Splitting));
    3535              : 
    3536            0 :                     let old_attached = old_state.intent.get_attached().unwrap();
    3537            0 :                     old_state.intent.clear(scheduler);
    3538            0 :                     let generation = old_state.generation.expect("Shard must have been attached");
    3539            0 :                     (
    3540            0 :                         old_attached,
    3541            0 :                         generation,
    3542            0 :                         old_state.policy,
    3543            0 :                         old_state.shard,
    3544            0 :                         old_state.config,
    3545            0 :                     )
    3546            0 :                 };
    3547            0 : 
    3548            0 :                 let mut schedule_context = ScheduleContext::default();
    3549            0 :                 for child in child_ids {
    3550            0 :                     let mut child_shard = parent_ident;
    3551            0 :                     child_shard.number = child.shard_number;
    3552            0 :                     child_shard.count = child.shard_count;
    3553            0 :                     if let Some(stripe_size) = new_stripe_size {
    3554            0 :                         child_shard.stripe_size = stripe_size;
    3555            0 :                     }
    3556              : 
    3557            0 :                     let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
    3558            0 :                     child_observed.insert(
    3559            0 :                         pageserver,
    3560            0 :                         ObservedStateLocation {
    3561            0 :                             conf: Some(attached_location_conf(
    3562            0 :                                 generation,
    3563            0 :                                 &child_shard,
    3564            0 :                                 &config,
    3565            0 :                                 &policy,
    3566            0 :                             )),
    3567            0 :                         },
    3568            0 :                     );
    3569            0 : 
    3570            0 :                     let mut child_state = TenantShard::new(child, child_shard, policy.clone());
    3571            0 :                     child_state.intent = IntentState::single(scheduler, Some(pageserver));
    3572            0 :                     child_state.observed = ObservedState {
    3573            0 :                         locations: child_observed,
    3574            0 :                     };
    3575            0 :                     child_state.generation = Some(generation);
    3576            0 :                     child_state.config = config.clone();
    3577            0 : 
    3578            0 :                     // The child's TenantShard::splitting is intentionally left at the default value of Idle,
    3579            0 :                     // as at this point in the split process we have succeeded and this part is infallible:
    3580            0 :                     // we will never need to do any special recovery from this state.
    3581            0 : 
    3582            0 :                     child_locations.push((child, pageserver, child_shard.stripe_size));
    3583              : 
    3584            0 :                     if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
    3585              :                         // This is not fatal, because we implicitly already have an attached
    3586              :                         // location for the child shard.  Failure here just means we couldn't
    3587              :                         // find a secondary (e.g. because the cluster is overloaded).
    3588            0 :                         tracing::warn!("Failed to schedule child shard {child}: {e}");
    3589            0 :                     }
    3590              :                     // In the background, attach secondary locations for the new shards
    3591            0 :                     if let Some(waiter) = self.maybe_reconcile_shard(&mut child_state, nodes) {
    3592            0 :                         waiters.push(waiter);
    3593            0 :                     }
    3594              : 
    3595            0 :                     tenants.insert(child, child_state);
    3596            0 :                     response.new_shards.push(child);
    3597              :                 }
    3598              :             }
    3599            0 :             (response, child_locations, waiters)
    3600            0 :         }
    3601            0 :     }
    3602              : 
    3603            0 :     async fn tenant_shard_split_start_secondaries(
    3604            0 :         &self,
    3605            0 :         tenant_id: TenantId,
    3606            0 :         waiters: Vec<ReconcilerWaiter>,
    3607            0 :     ) {
    3608              :         // Wait for initial reconcile of child shards, this creates the secondary locations
    3609            0 :         if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    3610              :             // This is not a failure to split: it's some issue reconciling the new child shards; perhaps
    3611              :             // their secondaries couldn't be attached.
    3612            0 :             tracing::warn!("Failed to reconcile after split: {e}");
    3613            0 :             return;
    3614            0 :         }
    3615              : 
    3616              :         // Take the state lock to discover the attached & secondary intents for all shards
    3617            0 :         let (attached, secondary) = {
    3618            0 :             let locked = self.inner.read().unwrap();
    3619            0 :             let mut attached = Vec::new();
    3620            0 :             let mut secondary = Vec::new();
    3621              : 
    3622            0 :             for (tenant_shard_id, shard) in
    3623            0 :                 locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3624              :             {
    3625            0 :                 let Some(node_id) = shard.intent.get_attached() else {
    3626              :                     // Unexpected.  Race with a PlacementPolicy change?
    3627            0 :                     tracing::warn!(
    3628            0 :                         "No attached node on {tenant_shard_id} immediately after shard split!"
    3629              :                     );
    3630            0 :                     continue;
    3631              :                 };
    3632              : 
    3633            0 :                 let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
    3634              :                     // No secondary location.  Nothing for us to do.
    3635            0 :                     continue;
    3636              :                 };
    3637              : 
    3638            0 :                 let attached_node = locked
    3639            0 :                     .nodes
    3640            0 :                     .get(node_id)
    3641            0 :                     .expect("Pageservers may not be deleted while referenced");
    3642            0 : 
    3643            0 :                 let secondary_node = locked
    3644            0 :                     .nodes
    3645            0 :                     .get(secondary_node_id)
    3646            0 :                     .expect("Pageservers may not be deleted while referenced");
    3647            0 : 
    3648            0 :                 attached.push((*tenant_shard_id, attached_node.clone()));
    3649            0 :                 secondary.push((*tenant_shard_id, secondary_node.clone()));
    3650              :             }
    3651            0 :             (attached, secondary)
    3652            0 :         };
    3653            0 : 
    3654            0 :         if secondary.is_empty() {
    3655              :             // No secondary locations; nothing for us to do
    3656            0 :             return;
    3657            0 :         }
    3658              : 
    3659            0 :         for result in self
    3660            0 :             .tenant_for_shards_api(
    3661            0 :                 attached,
    3662            0 :                 |tenant_shard_id, client| async move {
    3663            0 :                     client.tenant_heatmap_upload(tenant_shard_id).await
    3664            0 :                 },
    3665            0 :                 1,
    3666            0 :                 1,
    3667            0 :                 SHORT_RECONCILE_TIMEOUT,
    3668            0 :                 &self.cancel,
    3669            0 :             )
    3670            0 :             .await
    3671              :         {
    3672            0 :             if let Err(e) = result {
    3673            0 :                 tracing::warn!("Error calling heatmap upload after shard split: {e}");
    3674            0 :                 return;
    3675            0 :             }
    3676              :         }
    3677              : 
    3678            0 :         for result in self
    3679            0 :             .tenant_for_shards_api(
    3680            0 :                 secondary,
    3681            0 :                 |tenant_shard_id, client| async move {
    3682            0 :                     client
    3683            0 :                         .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
    3684            0 :                         .await
    3685            0 :                 },
    3686            0 :                 1,
    3687            0 :                 1,
    3688            0 :                 SHORT_RECONCILE_TIMEOUT,
    3689            0 :                 &self.cancel,
    3690            0 :             )
    3691            0 :             .await
    3692              :         {
    3693            0 :             if let Err(e) = result {
    3694            0 :                 tracing::warn!("Error calling secondary download after shard split: {e}");
    3695            0 :                 return;
    3696            0 :             }
    3697              :         }
    3698            0 :     }
    3699              : 
    3700            0 :     pub(crate) async fn tenant_shard_split(
    3701            0 :         &self,
    3702            0 :         tenant_id: TenantId,
    3703            0 :         split_req: TenantShardSplitRequest,
    3704            0 :     ) -> Result<TenantShardSplitResponse, ApiError> {
    3705              :         // TODO: return 503 if we get stuck waiting for this lock
    3706              :         // (issue https://github.com/neondatabase/neon/issues/7108)
    3707            0 :         let _tenant_lock = trace_exclusive_lock(
    3708            0 :             &self.tenant_op_locks,
    3709            0 :             tenant_id,
    3710            0 :             TenantOperations::ShardSplit,
    3711            0 :         )
    3712            0 :         .await;
    3713              : 
    3714            0 :         let new_shard_count = ShardCount::new(split_req.new_shard_count);
    3715            0 :         let new_stripe_size = split_req.new_stripe_size;
    3716              : 
    3717              :         // Validate the request and construct parameters.  This phase is fallible, but does not require
    3718              :         // rollback on errors, as it does no I/O and mutates no state.
    3719            0 :         let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
    3720            0 :             ShardSplitAction::NoOp(resp) => return Ok(resp),
    3721            0 :             ShardSplitAction::Split(params) => params,
    3722              :         };
    3723              : 
    3724              :         // Execute this split: this phase mutates state and does remote I/O on pageservers.  If it fails,
    3725              :         // we must roll back.
    3726            0 :         let r = self
    3727            0 :             .do_tenant_shard_split(tenant_id, shard_split_params)
    3728            0 :             .await;
    3729              : 
    3730            0 :         let (response, waiters) = match r {
    3731            0 :             Ok(r) => r,
    3732            0 :             Err(e) => {
    3733            0 :                 // Split might be part-done, we must do work to abort it.
    3734            0 :                 tracing::warn!("Enqueuing background abort of split on {tenant_id}");
    3735            0 :                 self.abort_tx
    3736            0 :                     .send(TenantShardSplitAbort {
    3737            0 :                         tenant_id,
    3738            0 :                         new_shard_count,
    3739            0 :                         new_stripe_size,
    3740            0 :                         _tenant_lock,
    3741            0 :                     })
    3742            0 :                     // Ignore send errors: they just mean we're shutting down; aborts are ephemeral, so it's fine to drop them.
    3743            0 :                     .ok();
    3744            0 :                 return Err(e);
    3745              :             }
    3746              :         };
    3747              : 
    3748              :         // The split is now complete.  As an optimization, we will trigger all the child shards to upload
    3749              :         // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
    3750              :         // for the background heatmap/download interval before secondaries get warm enough to migrate shards
    3751              :         // in [`Self::optimize_all`]
    3752            0 :         self.tenant_shard_split_start_secondaries(tenant_id, waiters)
    3753            0 :             .await;
    3754            0 :         Ok(response)
    3755            0 :     }
    3756              : 
    3757            0 :     fn prepare_tenant_shard_split(
    3758            0 :         &self,
    3759            0 :         tenant_id: TenantId,
    3760            0 :         split_req: TenantShardSplitRequest,
    3761            0 :     ) -> Result<ShardSplitAction, ApiError> {
    3762            0 :         fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
    3763            0 :             anyhow::anyhow!("failpoint")
    3764            0 :         )));
    3765              : 
    3766            0 :         let mut policy = None;
    3767            0 :         let mut config = None;
    3768            0 :         let mut shard_ident = None;
    3769              :         // Validate input, and calculate which shards we will create
    3770            0 :         let (old_shard_count, targets) =
    3771              :             {
    3772            0 :                 let locked = self.inner.read().unwrap();
    3773            0 : 
    3774            0 :                 let pageservers = locked.nodes.clone();
    3775            0 : 
    3776            0 :                 let mut targets = Vec::new();
    3777            0 : 
    3778            0 :                 // In case this is a retry, count how many already-split shards we found
    3779            0 :                 let mut children_found = Vec::new();
    3780            0 :                 let mut old_shard_count = None;
    3781              : 
    3782            0 :                 for (tenant_shard_id, shard) in
    3783            0 :                     locked.tenants.range(TenantShardId::tenant_range(tenant_id))
    3784              :                 {
    3785            0 :                     match shard.shard.count.count().cmp(&split_req.new_shard_count) {
    3786              :                         Ordering::Equal => {
    3787              :                             //  Already split this
    3788            0 :                             children_found.push(*tenant_shard_id);
    3789            0 :                             continue;
    3790              :                         }
    3791              :                         Ordering::Greater => {
    3792            0 :                             return Err(ApiError::BadRequest(anyhow::anyhow!(
    3793            0 :                                 "Requested count {} but already have shards at count {}",
    3794            0 :                                 split_req.new_shard_count,
    3795            0 :                                 shard.shard.count.count()
    3796            0 :                             )));
    3797              :                         }
    3798            0 :                         Ordering::Less => {
    3799            0 :                             // Fall through: this shard has lower count than requested,
    3800            0 :                             // is a candidate for splitting.
    3801            0 :                         }
    3802            0 :                     }
    3803            0 : 
    3804            0 :                     match old_shard_count {
    3805            0 :                         None => old_shard_count = Some(shard.shard.count),
    3806            0 :                         Some(old_shard_count) => {
    3807            0 :                             if old_shard_count != shard.shard.count {
    3808              :                                 // We may hit this case if a caller asked for two splits to
    3809              :                                 // different sizes, before the first one is complete.
    3810              :                                 // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
    3811              :                                 // of shard_count=1 and shard_count=2 shards in the map.
    3812            0 :                                 return Err(ApiError::Conflict(
    3813            0 :                                     "Cannot split, currently mid-split".to_string(),
    3814            0 :                                 ));
    3815            0 :                             }
    3816              :                         }
    3817              :                     }
    3818            0 :                     if policy.is_none() {
    3819            0 :                         policy = Some(shard.policy.clone());
    3820            0 :                     }
    3821            0 :                     if shard_ident.is_none() {
    3822            0 :                         shard_ident = Some(shard.shard);
    3823            0 :                     }
    3824            0 :                     if config.is_none() {
    3825            0 :                         config = Some(shard.config.clone());
    3826            0 :                     }
    3827              : 
    3828            0 :                     if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
    3829            0 :                         tracing::info!(
    3830            0 :                             "Tenant shard {} already has shard count {}",
    3831              :                             tenant_shard_id,
    3832              :                             split_req.new_shard_count
    3833              :                         );
    3834            0 :                         continue;
    3835            0 :                     }
    3836              : 
    3837            0 :                     let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
    3838            0 :                         anyhow::anyhow!("Cannot split a tenant that is not attached"),
    3839            0 :                     ))?;
    3840              : 
    3841            0 :                     let node = pageservers
    3842            0 :                         .get(&node_id)
    3843            0 :                         .expect("Pageservers may not be deleted while referenced");
    3844            0 : 
    3845            0 :                     targets.push(ShardSplitTarget {
    3846            0 :                         parent_id: *tenant_shard_id,
    3847            0 :                         node: node.clone(),
    3848            0 :                         child_ids: tenant_shard_id
    3849            0 :                             .split(ShardCount::new(split_req.new_shard_count)),
    3850            0 :                     });
    3851              :                 }
    3852              : 
    3853            0 :                 if targets.is_empty() {
    3854            0 :                     if children_found.len() == split_req.new_shard_count as usize {
    3855            0 :                         return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
    3856            0 :                             new_shards: children_found,
    3857            0 :                         }));
    3858              :                     } else {
    3859              :                         // No shards found to split, and no existing children found: the
    3860              :                         // tenant doesn't exist at all.
    3861            0 :                         return Err(ApiError::NotFound(
    3862            0 :                             anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
    3863            0 :                         ));
    3864              :                     }
    3865            0 :                 }
    3866            0 : 
    3867            0 :                 (old_shard_count, targets)
    3868            0 :             };
    3869            0 : 
    3870            0 :         // unwrap safety: we would have returned above if we didn't find at least one shard to split
    3871            0 :         let old_shard_count = old_shard_count.unwrap();
    3872            0 :         let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
    3873              :             // This ShardIdentity will be used as the template for all children, so this implicitly
    3874              :             // applies the new stripe size to the children.
    3875            0 :             let mut shard_ident = shard_ident.unwrap();
    3876            0 :             if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
    3877            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!("Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards", shard_ident.stripe_size)));
    3878            0 :             }
    3879            0 : 
    3880            0 :             shard_ident.stripe_size = new_stripe_size;
    3881            0 :             tracing::info!("applied stripe size {}", shard_ident.stripe_size.0);
    3882            0 :             shard_ident
    3883              :         } else {
    3884            0 :             shard_ident.unwrap()
    3885              :         };
    3886            0 :         let policy = policy.unwrap();
    3887            0 :         let config = config.unwrap();
    3888            0 : 
    3889            0 :         Ok(ShardSplitAction::Split(ShardSplitParams {
    3890            0 :             old_shard_count,
    3891            0 :             new_shard_count: ShardCount::new(split_req.new_shard_count),
    3892            0 :             new_stripe_size: split_req.new_stripe_size,
    3893            0 :             targets,
    3894            0 :             policy,
    3895            0 :             config,
    3896            0 :             shard_ident,
    3897            0 :         }))
    3898            0 :     }
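// Illustrative sketch (hypothetical helper, not the production code path) of the stripe-size
// rule enforced in `prepare_tenant_shard_split` above: a new stripe size may only be applied
// while the tenant still has a single shard, because data already written under the old
// striping on a multi-shard tenant cannot be re-striped by a split.
#[cfg(test)]
mod stripe_size_rule_sketch {
    fn validate_new_stripe_size(
        current_shard_count: u8,
        current_stripe_size: u32,
        new_stripe_size: u32,
    ) -> Result<(), String> {
        if current_shard_count > 1 && current_stripe_size != new_stripe_size {
            return Err(format!(
                "cannot change stripe size {current_stripe_size} -> {new_stripe_size} on a tenant with multiple shards"
            ));
        }
        Ok(())
    }

    #[test]
    fn examples() {
        // A single-shard tenant may adopt a new stripe size as part of a split.
        assert!(validate_new_stripe_size(1, 32768, 65536).is_ok());
        // A multi-shard tenant may only restate the stripe size it already has.
        assert!(validate_new_stripe_size(4, 32768, 32768).is_ok());
        assert!(validate_new_stripe_size(4, 32768, 65536).is_err());
    }
}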
    3899              : 
    3900            0 :     async fn do_tenant_shard_split(
    3901            0 :         &self,
    3902            0 :         tenant_id: TenantId,
    3903            0 :         params: ShardSplitParams,
    3904            0 :     ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
    3905            0 :         // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
    3906            0 :         // request could occur here, deleting or mutating the tenant.  begin_shard_split checks that the
    3907            0 :         // parent shards exist as expected, but it would be neater to do the above pre-checks within the
    3908            0 :         // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
    3909            0 :         // (https://github.com/neondatabase/neon/issues/6676)
    3910            0 : 
    3911            0 :         let ShardSplitParams {
    3912            0 :             old_shard_count,
    3913            0 :             new_shard_count,
    3914            0 :             new_stripe_size,
    3915            0 :             mut targets,
    3916            0 :             policy,
    3917            0 :             config,
    3918            0 :             shard_ident,
    3919            0 :         } = params;
    3920              : 
    3921              :         // Drop any secondary locations: pageservers do not support splitting these, and in any case the
    3922              :         // end-state for a split tenant will usually be to have secondary locations on different nodes.
    3923              :         // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
    3924              :         // at the time of split.
    3925            0 :         let waiters = {
    3926            0 :             let mut locked = self.inner.write().unwrap();
    3927            0 :             let mut waiters = Vec::new();
    3928            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    3929            0 :             for target in &mut targets {
    3930            0 :                 let Some(shard) = tenants.get_mut(&target.parent_id) else {
    3931              :                     // Paranoia check: this shouldn't happen, because we hold the op lock for this tenant ID.
    3932            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    3933            0 :                         "Shard {} not found",
    3934            0 :                         target.parent_id
    3935            0 :                     )));
    3936              :                 };
    3937              : 
    3938            0 :                 if shard.intent.get_attached() != &Some(target.node.get_id()) {
    3939              :                     // Paranoia check: this shouldn't happen, because we hold the op lock for this tenant ID.
    3940            0 :                     return Err(ApiError::Conflict(format!(
    3941            0 :                         "Shard {} unexpectedly rescheduled during split",
    3942            0 :                         target.parent_id
    3943            0 :                     )));
    3944            0 :                 }
    3945            0 : 
    3946            0 :                 // Irrespective of PlacementPolicy, clear secondary locations from intent
    3947            0 :                 shard.intent.clear_secondary(scheduler);
    3948              : 
    3949              :                 // Run the reconciler to execute the detach of secondary locations.
    3950            0 :                 if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
    3951            0 :                     waiters.push(waiter);
    3952            0 :                 }
    3953              :             }
    3954            0 :             waiters
    3955            0 :         };
    3956            0 :         self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
    3957              : 
    3958              :         // Before creating any new child shards in memory or on the pageservers, persist them: this
    3959              :         // enables us to ensure that we will always be able to clean up if something goes wrong.  This also
    3960              :         // acts as the protection against two concurrent attempts to split: one of them will get a database
    3961              :         // error trying to insert the child shards.
    3962            0 :         let mut child_tsps = Vec::new();
    3963            0 :         for target in &targets {
    3964            0 :             let mut this_child_tsps = Vec::new();
    3965            0 :             for child in &target.child_ids {
    3966            0 :                 let mut child_shard = shard_ident;
    3967            0 :                 child_shard.number = child.shard_number;
    3968            0 :                 child_shard.count = child.shard_count;
    3969            0 : 
    3970            0 :                 tracing::info!(
    3971            0 :                     "Create child shard persistence with stripe size {}",
    3972              :                     shard_ident.stripe_size.0
    3973              :                 );
    3974              : 
    3975            0 :                 this_child_tsps.push(TenantShardPersistence {
    3976            0 :                     tenant_id: child.tenant_id.to_string(),
    3977            0 :                     shard_number: child.shard_number.0 as i32,
    3978            0 :                     shard_count: child.shard_count.literal() as i32,
    3979            0 :                     shard_stripe_size: shard_ident.stripe_size.0 as i32,
    3980            0 :                     // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
    3981            0 :                     // populate the correct generation as part of its transaction, to protect us
    3982            0 :                     // against racing with changes in the state of the parent.
    3983            0 :                     generation: None,
    3984            0 :                     generation_pageserver: Some(target.node.get_id().0 as i64),
    3985            0 :                     placement_policy: serde_json::to_string(&policy).unwrap(),
    3986            0 :                     config: serde_json::to_string(&config).unwrap(),
    3987            0 :                     splitting: SplitState::Splitting,
    3988            0 : 
    3989            0 :                     // Scheduling policies do not carry through to children
    3990            0 :                     scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
    3991            0 :                         .unwrap(),
    3992            0 :                 });
    3993              :             }
    3994              : 
    3995            0 :             child_tsps.push((target.parent_id, this_child_tsps));
    3996              :         }
    3997              : 
    3998            0 :         if let Err(e) = self
    3999            0 :             .persistence
    4000            0 :             .begin_shard_split(old_shard_count, tenant_id, child_tsps)
    4001            0 :             .await
    4002              :         {
    4003            0 :             match e {
    4004              :                 DatabaseError::Query(diesel::result::Error::DatabaseError(
    4005              :                     DatabaseErrorKind::UniqueViolation,
    4006              :                     _,
    4007              :                 )) => {
    4008              :                     // Inserting a child shard violated a unique constraint: we raced with another call to
    4009              :                     // this function
    4010            0 :                     tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
    4011            0 :                     return Err(ApiError::Conflict("Tenant is already splitting".into()));
    4012              :                 }
    4013            0 :                 _ => return Err(ApiError::InternalServerError(e.into())),
    4014              :             }
    4015            0 :         }
    4016            0 :         fail::fail_point!("shard-split-post-begin", |_| Err(
    4017            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    4018            0 :         ));
    4019              : 
    4020              :         // Now that we have persisted the splitting state, apply it in memory.  This is infallible, so
    4021              :         // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
    4022              :         // is not set in memory, then it was not persisted.
    4023              :         {
    4024            0 :             let mut locked = self.inner.write().unwrap();
    4025            0 :             for target in &targets {
    4026            0 :                 if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
    4027            0 :                     parent_shard.splitting = SplitState::Splitting;
    4028            0 :                     // Put the observed state to None, to reflect that it is indeterminate once we start the
    4029            0 :                     // split operation.
    4030            0 :                     parent_shard
    4031            0 :                         .observed
    4032            0 :                         .locations
    4033            0 :                         .insert(target.node.get_id(), ObservedStateLocation { conf: None });
    4034            0 :                 }
    4035              :             }
    4036              :         }
    4037              : 
    4038              :         // TODO: issue split calls concurrently (this only matters once we're splitting
    4039              :         // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
    4040              : 
    4041            0 :         for target in &targets {
    4042              :             let ShardSplitTarget {
    4043            0 :                 parent_id,
    4044            0 :                 node,
    4045            0 :                 child_ids,
    4046            0 :             } = target;
    4047            0 :             let client = PageserverClient::new(
    4048            0 :                 node.get_id(),
    4049            0 :                 node.base_url(),
    4050            0 :                 self.config.jwt_token.as_deref(),
    4051            0 :             );
    4052            0 :             let response = client
    4053            0 :                 .tenant_shard_split(
    4054            0 :                     *parent_id,
    4055            0 :                     TenantShardSplitRequest {
    4056            0 :                         new_shard_count: new_shard_count.literal(),
    4057            0 :                         new_stripe_size,
    4058            0 :                     },
    4059            0 :                 )
    4060            0 :                 .await
    4061            0 :                 .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
    4062              : 
    4063            0 :             fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
    4064            0 :                 "failpoint".to_string()
    4065            0 :             )));
    4066              : 
    4067            0 :             failpoint_support::sleep_millis_async!("shard-split-post-remote-sleep", &self.cancel);
    4068              : 
    4069            0 :             tracing::info!(
    4070            0 :                 "Split {} into {}",
    4071            0 :                 parent_id,
    4072            0 :                 response
    4073            0 :                     .new_shards
    4074            0 :                     .iter()
    4075            0 :                     .map(|s| format!("{:?}", s))
    4076            0 :                     .collect::<Vec<_>>()
    4077            0 :                     .join(",")
    4078              :             );
    4079              : 
    4080            0 :             if &response.new_shards != child_ids {
    4081              :                 // This should never happen: the pageserver should agree with us on how shard splits work.
    4082            0 :                 return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4083            0 :                     "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
    4084            0 :                     parent_id,
    4085            0 :                     response.new_shards,
    4086            0 :                     child_ids
    4087            0 :                 )));
    4088            0 :             }
    4089              :         }
    4090              : 
    4091              :         // TODO: if the pageserver restarted concurrently with our split API call,
    4092              :         // the actual generation of the child shard might differ from the generation
    4093              :         // we expect it to have.  In order for our in-database generation to end up
    4094              :         // correct, we should carry the child generation back in the response and apply it here
    4095              :         // in complete_shard_split (and apply the correct generation in memory)
    4096              :         // (or, we can carry generation in the request and reject the request if
    4097              :         //  it doesn't match, but that requires more retry logic on this side)
    4098              : 
    4099            0 :         self.persistence
    4100            0 :             .complete_shard_split(tenant_id, old_shard_count)
    4101            0 :             .await?;
    4102              : 
    4103            0 :         fail::fail_point!("shard-split-post-complete", |_| Err(
    4104            0 :             ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
    4105            0 :         ));
    4106              : 
    4107              :         // Replace all the shards we just split with their children: this phase is infallible.
    4108            0 :         let (response, child_locations, waiters) =
    4109            0 :             self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
    4110            0 : 
    4111            0 :         // Send compute notifications for all the new shards
    4112            0 :         let mut failed_notifications = Vec::new();
    4113            0 :         for (child_id, child_ps, stripe_size) in child_locations {
    4114            0 :             if let Err(e) = self
    4115            0 :                 .compute_hook
    4116            0 :                 .notify(child_id, child_ps, stripe_size, &self.cancel)
    4117            0 :                 .await
    4118              :             {
    4119            0 :                 tracing::warn!("Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
    4120              :                         child_id, child_ps);
    4121            0 :                 failed_notifications.push(child_id);
    4122            0 :             }
    4123              :         }
    4124              : 
    4125              :         // If we failed any compute notifications, make a note to retry later.
    4126            0 :         if !failed_notifications.is_empty() {
    4127            0 :             let mut locked = self.inner.write().unwrap();
    4128            0 :             for failed in failed_notifications {
    4129            0 :                 if let Some(shard) = locked.tenants.get_mut(&failed) {
    4130            0 :                     shard.pending_compute_notification = true;
    4131            0 :                 }
    4132              :             }
    4133            0 :         }
    4134              : 
    4135            0 :         Ok((response, waiters))
    4136            0 :     }
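// Illustrative summary (hypothetical enum, not part of the source) of the ordering that
// `do_tenant_shard_split` above relies on: each phase only starts once the previous one has
// succeeded, so the abort path can always tell how far a failed split progressed.
#[allow(dead_code)]
enum SplitPhase {
    DropParentSecondaries, // reconcile away the parents' secondary locations first
    PersistChildren,       // insert child rows; the unique constraint doubles as a split mutex
    MarkSplittingInMemory, // parents flagged SplitState::Splitting, observed conf set to None
    CallPageserverSplit,   // remote split API call per parent shard
    CompleteInDatabase,    // parents durably replaced by their children
    CommitInMemory,        // infallible swap of the in-memory shard map to the children
    NotifyComputes,        // best effort; failures set pending_compute_notification for retry
}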
    4137              : 
    4138            0 :     pub(crate) async fn tenant_shard_migrate(
    4139            0 :         &self,
    4140            0 :         tenant_shard_id: TenantShardId,
    4141            0 :         migrate_req: TenantShardMigrateRequest,
    4142            0 :     ) -> Result<TenantShardMigrateResponse, ApiError> {
    4143            0 :         let waiter = {
    4144            0 :             let mut locked = self.inner.write().unwrap();
    4145            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    4146              : 
    4147            0 :             let Some(node) = nodes.get(&migrate_req.node_id) else {
    4148            0 :                 return Err(ApiError::BadRequest(anyhow::anyhow!(
    4149            0 :                     "Node {} not found",
    4150            0 :                     migrate_req.node_id
    4151            0 :                 )));
    4152              :             };
    4153              : 
    4154            0 :             if !node.is_available() {
    4155              :                 // Warn but proceed: the caller may intend to manually adjust the placement of
    4156              :                 // a shard even if the node is down, e.g. if intervening during an incident.
    4157            0 :                 tracing::warn!("Migrating to unavailable node {node}");
    4158            0 :             }
    4159              : 
    4160            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    4161            0 :                 return Err(ApiError::NotFound(
    4162            0 :                     anyhow::anyhow!("Tenant shard not found").into(),
    4163            0 :                 ));
    4164              :             };
    4165              : 
    4166            0 :             if shard.intent.get_attached() == &Some(migrate_req.node_id) {
    4167              :                 // No-op case: we will still proceed to wait for reconciliation in case it is
    4168              :                 // incomplete from an earlier update to the intent.
    4169            0 :                 tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
    4170              :             } else {
    4171            0 :                 let old_attached = *shard.intent.get_attached();
    4172            0 : 
    4173            0 :                 match shard.policy {
    4174            0 :                     PlacementPolicy::Attached(n) => {
    4175            0 :                         // If our new attached node was a secondary, it no longer should be.
    4176            0 :                         shard.intent.remove_secondary(scheduler, migrate_req.node_id);
    4177              : 
    4178              :                         // If we were already attached to something, demote that to a secondary
    4179            0 :                         if let Some(old_attached) = old_attached {
    4180            0 :                             if n > 0 {
    4181              :                                 // Remove other secondaries to make room for the location we'll demote
    4182            0 :                                 while shard.intent.get_secondary().len() >= n {
    4183            0 :                                     shard.intent.pop_secondary(scheduler);
    4184            0 :                                 }
    4185              : 
    4186            0 :                                 shard.intent.push_secondary(scheduler, old_attached);
    4187            0 :                             }
    4188            0 :                         }
    4189              : 
    4190            0 :                         shard.intent.set_attached(scheduler, Some(migrate_req.node_id));
    4191              :                     }
    4192            0 :                     PlacementPolicy::Secondary => {
    4193            0 :                         shard.intent.clear(scheduler);
    4194            0 :                         shard.intent.push_secondary(scheduler, migrate_req.node_id);
    4195            0 :                     }
    4196              :                     PlacementPolicy::Detached => {
    4197            0 :                         return Err(ApiError::BadRequest(anyhow::anyhow!(
    4198            0 :                             "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
    4199            0 :                         )))
    4200              :                     }
    4201              :                 }
    4202              : 
    4203            0 :                 tracing::info!("Migrating: new intent {:?}", shard.intent);
    4204            0 :                 shard.sequence = shard.sequence.next();
    4205              :             }
    4206              : 
    4207            0 :             self.maybe_reconcile_shard(shard, nodes)
    4208              :         };
    4209              : 
    4210            0 :         if let Some(waiter) = waiter {
    4211            0 :             waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
    4212              :         } else {
    4213            0 :             tracing::info!("Migration is a no-op");
    4214              :         }
    4215              : 
    4216            0 :         Ok(TenantShardMigrateResponse {})
    4217            0 :     }
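// Illustrative sketch (hypothetical types; the real logic is in the
// `PlacementPolicy::Attached(n)` arm above) of the intent transition a migration performs:
// the destination stops being a secondary, the previous attached location is demoted to a
// secondary (evicting older secondaries if needed to stay within `n`), and the destination
// becomes the attached location.
#[cfg(test)]
mod migrate_intent_sketch {
    #[derive(Debug, PartialEq, Eq)]
    struct Intent {
        attached: Option<u64>,
        secondary: Vec<u64>,
    }

    fn migrate_attached(intent: &mut Intent, dest: u64, secondary_count: usize) {
        // The destination should no longer be tracked as a secondary.
        intent.secondary.retain(|n| *n != dest);
        if let Some(old_attached) = intent.attached {
            if secondary_count > 0 {
                // Make room, then demote the old attached location to a secondary.
                while intent.secondary.len() >= secondary_count {
                    intent.secondary.pop();
                }
                intent.secondary.push(old_attached);
            }
        }
        intent.attached = Some(dest);
    }

    #[test]
    fn example() {
        // Before: attached on node 1 with a warm secondary on node 2.
        let mut intent = Intent { attached: Some(1), secondary: vec![2] };
        // Migrate the attachment to node 2 under PlacementPolicy::Attached(1).
        migrate_attached(&mut intent, 2, 1);
        // After: node 2 is attached and node 1 has been demoted into the secondary slot.
        assert_eq!(intent, Intent { attached: Some(2), secondary: vec![1] });
    }
}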
    4218              : 
    4219              :     /// This is for debug/support only: we simply drop all state for a tenant, without
    4220              :     /// detaching or deleting it on pageservers.
    4221            0 :     pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
    4222            0 :         self.persistence.delete_tenant(tenant_id).await?;
    4223              : 
    4224            0 :         let mut locked = self.inner.write().unwrap();
    4225            0 :         let (_nodes, tenants, scheduler) = locked.parts_mut();
    4226            0 :         let mut shards = Vec::new();
    4227            0 :         for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
    4228            0 :             shards.push(*tenant_shard_id);
    4229            0 :         }
    4230              : 
    4231            0 :         for shard_id in shards {
    4232            0 :             if let Some(mut shard) = tenants.remove(&shard_id) {
    4233            0 :                 shard.intent.clear(scheduler);
    4234            0 :             }
    4235              :         }
    4236              : 
    4237            0 :         Ok(())
    4238            0 :     }
    4239              : 
    4240              :     /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
    4241              :     /// tenant with a very high generation number so that it will see the existing data.
    4242            0 :     pub(crate) async fn tenant_import(
    4243            0 :         &self,
    4244            0 :         tenant_id: TenantId,
    4245            0 :     ) -> Result<TenantCreateResponse, ApiError> {
    4246            0 :         // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
    4247            0 :         let maybe_node = {
    4248            0 :             self.inner
    4249            0 :                 .read()
    4250            0 :                 .unwrap()
    4251            0 :                 .nodes
    4252            0 :                 .values()
    4253            0 :                 .find(|n| n.is_available())
    4254            0 :                 .cloned()
    4255              :         };
    4256            0 :         let Some(node) = maybe_node else {
    4257            0 :             return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
    4258              :         };
    4259              : 
    4260            0 :         let client = PageserverClient::new(
    4261            0 :             node.get_id(),
    4262            0 :             node.base_url(),
    4263            0 :             self.config.jwt_token.as_deref(),
    4264            0 :         );
    4265              : 
    4266            0 :         let scan_result = client
    4267            0 :             .tenant_scan_remote_storage(tenant_id)
    4268            0 :             .await
    4269            0 :             .map_err(|e| passthrough_api_error(&node, e))?;
    4270              : 
    4271              :         // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
    4272            0 :         let Some(shard_count) = scan_result
    4273            0 :             .shards
    4274            0 :             .iter()
    4275            0 :             .map(|s| s.tenant_shard_id.shard_count)
    4276            0 :             .max()
    4277              :         else {
    4278            0 :             return Err(ApiError::NotFound(
    4279            0 :                 anyhow::anyhow!("No shards found").into(),
    4280            0 :             ));
    4281              :         };
    4282              : 
    4283              :         // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
    4284              :         // to use the highest generation observed across the shards.
    4285            0 :         let generation = scan_result
    4286            0 :             .shards
    4287            0 :             .iter()
    4288            0 :             .map(|s| s.generation)
    4289            0 :             .max()
    4290            0 :             .expect("We already validated >0 shards");
    4291            0 : 
    4292            0 :         // FIXME: we have no way to recover the shard stripe size from contents of remote storage: this will
    4293            0 :         // only work if they were using the default stripe size.
    4294            0 :         let stripe_size = ShardParameters::DEFAULT_STRIPE_SIZE;
    4295              : 
    4296            0 :         let (response, waiters) = self
    4297            0 :             .do_tenant_create(TenantCreateRequest {
    4298            0 :                 new_tenant_id: TenantShardId::unsharded(tenant_id),
    4299            0 :                 generation,
    4300            0 : 
    4301            0 :                 shard_parameters: ShardParameters {
    4302            0 :                     count: shard_count,
    4303            0 :                     stripe_size,
    4304            0 :                 },
    4305            0 :                 placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
    4306            0 : 
    4307            0 :                 // There is no way to know what the tenant's config was: revert to defaults
    4308            0 :                 //
    4309            0 :                 // TODO: remove `switch_aux_file_policy` once we finish auxv2 migration
    4310            0 :                 //
    4311            0 :                 // we write to both v1+v2 storage, so that the test case can use either storage format for testing
    4312            0 :                 config: TenantConfig {
    4313            0 :                     switch_aux_file_policy: Some(models::AuxFilePolicy::CrossValidation),
    4314            0 :                     ..TenantConfig::default()
    4315            0 :                 },
    4316            0 :             })
    4317            0 :             .await?;
    4318              : 
    4319            0 :         if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
    4320              :             // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
    4321              :             // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
    4322              :             // reconcile, as reconciliation includes notifying compute.
    4323            0 :             tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
    4324            0 :         }
    4325              : 
    4326            0 :         Ok(response)
    4327            0 :     }
    4328              : 
    4329              :     /// For debug/support: a full JSON dump of TenantShards.  Returns a response so that
    4330              :     /// we don't have to make TenantShard clonable in the return path.
    4331            0 :     pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    4332            0 :         let serialized = {
    4333            0 :             let locked = self.inner.read().unwrap();
    4334            0 :             let result = locked.tenants.values().collect::<Vec<_>>();
    4335            0 :             serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
    4336              :         };
    4337              : 
    4338            0 :         hyper::Response::builder()
    4339            0 :             .status(hyper::StatusCode::OK)
    4340            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    4341            0 :             .body(hyper::Body::from(serialized))
    4342            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    4343            0 :     }
    4344              : 
    4345              :     /// Check the consistency of in-memory state vs. persistent state, and check that the
    4346              :     /// scheduler's statistics are up to date.
    4347              :     ///
    4348              :     /// These consistency checks expect an **idle** system.  If changes are going on while
    4349              :     /// we run, then we can falsely indicate a consistency issue.  This is sufficient for end-of-test
    4350              :     /// checks, but not suitable for running continuously in the background in the field.
    4351            0 :     pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
    4352            0 :         let (mut expect_nodes, mut expect_shards) = {
    4353            0 :             let locked = self.inner.read().unwrap();
    4354            0 : 
    4355            0 :             locked
    4356            0 :                 .scheduler
    4357            0 :                 .consistency_check(locked.nodes.values(), locked.tenants.values())
    4358            0 :                 .context("Scheduler checks")
    4359            0 :                 .map_err(ApiError::InternalServerError)?;
    4360              : 
    4361            0 :             let expect_nodes = locked
    4362            0 :                 .nodes
    4363            0 :                 .values()
    4364            0 :                 .map(|n| n.to_persistent())
    4365            0 :                 .collect::<Vec<_>>();
    4366            0 : 
    4367            0 :             let expect_shards = locked
    4368            0 :                 .tenants
    4369            0 :                 .values()
    4370            0 :                 .map(|t| t.to_persistent())
    4371            0 :                 .collect::<Vec<_>>();
    4372              : 
    4373              :             // This method can only validate the state of an idle system: if a reconcile is in
    4374              :             // progress, fail out early to avoid giving false errors on state that won't match
    4375              :             // between database and memory until a ReconcileResult is processed.
    4376            0 :             for t in locked.tenants.values() {
    4377            0 :                 if t.reconciler.is_some() {
    4378            0 :                     return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4379            0 :                         "Shard {} reconciliation in progress",
    4380            0 :                         t.tenant_shard_id
    4381            0 :                     )));
    4382            0 :                 }
    4383              :             }
    4384              : 
    4385            0 :             (expect_nodes, expect_shards)
    4386              :         };
    4387              : 
    4388            0 :         let mut nodes = self.persistence.list_nodes().await?;
    4389            0 :         expect_nodes.sort_by_key(|n| n.node_id);
    4390            0 :         nodes.sort_by_key(|n| n.node_id);
    4391            0 : 
    4392            0 :         if nodes != expect_nodes {
    4393            0 :             tracing::error!("Consistency check failed on nodes.");
    4394            0 :             tracing::error!(
    4395            0 :                 "Nodes in memory: {}",
    4396            0 :                 serde_json::to_string(&expect_nodes)
    4397            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    4398              :             );
    4399            0 :             tracing::error!(
    4400            0 :                 "Nodes in database: {}",
    4401            0 :                 serde_json::to_string(&nodes)
    4402            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    4403              :             );
    4404            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4405            0 :                 "Node consistency failure"
    4406            0 :             )));
    4407            0 :         }
    4408              : 
    4409            0 :         let mut shards = self.persistence.list_tenant_shards().await?;
    4410            0 :         shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    4411            0 :         expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
    4412            0 : 
    4413            0 :         if shards != expect_shards {
    4414            0 :             tracing::error!("Consistency check failed on shards.");
    4415            0 :             tracing::error!(
    4416            0 :                 "Shards in memory: {}",
    4417            0 :                 serde_json::to_string(&expect_shards)
    4418            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    4419              :             );
    4420            0 :             tracing::error!(
    4421            0 :                 "Shards in database: {}",
    4422            0 :                 serde_json::to_string(&shards)
    4423            0 :                     .map_err(|e| ApiError::InternalServerError(e.into()))?
    4424              :             );
    4425            0 :             return Err(ApiError::InternalServerError(anyhow::anyhow!(
    4426            0 :                 "Shard consistency failure"
    4427            0 :             )));
    4428            0 :         }
    4429            0 : 
    4430            0 :         Ok(())
    4431            0 :     }
    4432              : 
    4433              :     /// For debug/support: a JSON dump of the [`Scheduler`].  Returns a response so that
    4434              :     /// we don't have to make the scheduler state clonable in the return path.
    4435            0 :     pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
    4436            0 :         let serialized = {
    4437            0 :             let locked = self.inner.read().unwrap();
    4438            0 :             serde_json::to_string(&locked.scheduler)
    4439            0 :                 .map_err(|e| ApiError::InternalServerError(e.into()))?
    4440              :         };
    4441              : 
    4442            0 :         hyper::Response::builder()
    4443            0 :             .status(hyper::StatusCode::OK)
    4444            0 :             .header(hyper::header::CONTENT_TYPE, "application/json")
    4445            0 :             .body(hyper::Body::from(serialized))
    4446            0 :             .map_err(|e| ApiError::InternalServerError(e.into()))
    4447            0 :     }
    4448              : 
    4449              :     /// This is for debug/support only: we simply drop all in-memory and persistent state for a node,
    4450              :     /// without detaching or deleting anything on pageservers.  We do not try to re-schedule any
    4451              :     /// tenants that were on this node.
    4452            0 :     pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
    4453            0 :         self.persistence.delete_node(node_id).await?;
    4454              : 
    4455            0 :         let mut locked = self.inner.write().unwrap();
    4456              : 
    4457            0 :         for shard in locked.tenants.values_mut() {
    4458            0 :             shard.deref_node(node_id);
    4459            0 :             shard.observed.locations.remove(&node_id);
    4460            0 :         }
    4461              : 
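                      :         // The node map is kept behind an Arc and updated copy-on-write: clone the map, remove the
                      :         // node, then swap the new map into place.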
    4462            0 :         let mut nodes = (*locked.nodes).clone();
    4463            0 :         nodes.remove(&node_id);
    4464            0 :         locked.nodes = Arc::new(nodes);
    4465            0 : 
    4466            0 :         locked.scheduler.node_remove(node_id);
    4467            0 : 
    4468            0 :         Ok(())
    4469            0 :     }
    4470              : 
    4471              :     /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense
    4472              :     /// that we don't leave any bad state behind in the storage controller, but unclean
    4473              :     /// in the sense that we are not carefully draining the node.
    4474            0 :     pub(crate) async fn node_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
    4475            0 :         let _node_lock =
    4476            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
    4477              : 
    4478              :         // 1. Atomically update in-memory state:
    4479              :         //    - set the scheduling state to Pause to make subsequent scheduling ops skip it
    4480              :         //    - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
    4481              :         //    - drop the node from the main nodes map, so that when running reconciles complete they do not
    4482              :         //      re-insert references to this node into the ObservedState of shards
    4483              :         //    - drop the node from the scheduler
    4484              :         {
    4485            0 :             let mut locked = self.inner.write().unwrap();
    4486            0 :             let (nodes, tenants, scheduler) = locked.parts_mut();
    4487            0 : 
    4488            0 :             {
    4489            0 :                 let mut nodes_mut = (*nodes).deref().clone();
    4490            0 :                 match nodes_mut.get_mut(&node_id) {
    4491            0 :                     Some(node) => {
    4492            0 :                         // We do not bother setting this in the database, because we're about to delete the row anyway, and
    4493            0 :                         // if we crash it would not be desirable to leave the node paused after a restart.
    4494            0 :                         node.set_scheduling(NodeSchedulingPolicy::Pause);
    4495            0 :                     }
    4496              :                     None => {
    4497            0 :                         tracing::info!(
    4498            0 :                             "Node not found: presuming this is a retry and returning success"
    4499              :                         );
    4500            0 :                         return Ok(());
    4501              :                     }
    4502              :                 }
    4503              : 
    4504            0 :                 *nodes = Arc::new(nodes_mut);
    4505              :             }
    4506              : 
    4507            0 :             for (tenant_shard_id, shard) in tenants {
    4508            0 :                 if shard.deref_node(node_id) {
    4509              :                     // FIXME: we need to build a ScheduleContext that reflects this shard's peers, otherwise
    4510              :                     // it won't properly do anti-affinity.
    4511            0 :                     let mut schedule_context = ScheduleContext::default();
    4512              : 
    4513            0 :                     if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
    4514              :                         // TODO: implement force flag to remove a node even if we can't reschedule
    4515              :                         // a tenant
    4516            0 :                         tracing::error!("Refusing to delete node, shard {tenant_shard_id} can't be rescheduled: {e}");
    4517            0 :                         return Err(e.into());
    4518              :                     } else {
    4519            0 :                         tracing::info!(
    4520            0 :                             "Rescheduled shard {tenant_shard_id} away from node during deletion"
    4521              :                         )
    4522              :                     }
    4523              : 
    4524            0 :                     self.maybe_reconcile_shard(shard, nodes);
    4525            0 :                 }
    4526              : 
    4527              :                 // Here we remove an existing observed location for the node we're removing, and it will
    4528              :                 // not be re-added by a reconciler's completion because we filter out removed nodes in
    4529              :                 // process_result.
    4530              :                 //
    4531              :                 // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
    4532              :                 // means any reconciles we spawned will know about the node we're deleting, enabling them
    4533              :                 // to do live migrations if it's still online.
    4534            0 :                 shard.observed.locations.remove(&node_id);
    4535              :             }
    4536              : 
    4537            0 :             scheduler.node_remove(node_id);
    4538            0 : 
    4539            0 :             {
    4540            0 :                 let mut nodes_mut = (**nodes).clone();
    4541            0 :                 nodes_mut.remove(&node_id);
    4542            0 :                 *nodes = Arc::new(nodes_mut);
    4543            0 :             }
    4544            0 :         }
    4545            0 : 
    4546            0 :         // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
    4547            0 :         // the removed node, as this column means "The pageserver to which this generation was issued", and
    4548            0 :         // their generations won't get updated until the reconcilers moving them away from this node complete.
    4549            0 :         // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
    4550            0 :         // that exists.
    4551            0 : 
    4552            0 :         // 2. Actually delete the node from the database (in-memory state was already handled in step 1)
    4553            0 :         tracing::info!("Deleting node from database");
    4554            0 :         self.persistence.delete_node(node_id).await?;
    4555              : 
    4556            0 :         Ok(())
    4557            0 :     }
    4558              : 
    4559            0 :     pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
    4560            0 :         let nodes = {
    4561            0 :             self.inner
    4562            0 :                 .read()
    4563            0 :                 .unwrap()
    4564            0 :                 .nodes
    4565            0 :                 .values()
    4566            0 :                 .cloned()
    4567            0 :                 .collect::<Vec<_>>()
    4568            0 :         };
    4569            0 : 
    4570            0 :         Ok(nodes)
    4571            0 :     }
    4572              : 
    4573            0 :     pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
    4574            0 :         self.inner
    4575            0 :             .read()
    4576            0 :             .unwrap()
    4577            0 :             .nodes
    4578            0 :             .get(&node_id)
    4579            0 :             .cloned()
    4580            0 :             .ok_or(ApiError::NotFound(
    4581            0 :                 format!("Node {node_id} not registered").into(),
    4582            0 :             ))
    4583            0 :     }
    4584              : 
    4585            0 :     pub(crate) async fn node_register(
    4586            0 :         &self,
    4587            0 :         register_req: NodeRegisterRequest,
    4588            0 :     ) -> Result<(), ApiError> {
    4589            0 :         let _node_lock = trace_exclusive_lock(
    4590            0 :             &self.node_op_locks,
    4591            0 :             register_req.node_id,
    4592            0 :             NodeOperations::Register,
    4593            0 :         )
    4594            0 :         .await;
    4595              : 
    4596              :         {
    4597            0 :             let locked = self.inner.read().unwrap();
    4598            0 :             if let Some(node) = locked.nodes.get(&register_req.node_id) {
    4599              :                 // Note that we do not compare the structs for total equality, because we don't require
    4600              :                 // the availability/scheduling states to agree for a POST to be idempotent.
    4601            0 :                 if node.registration_match(&register_req) {
    4602            0 :                     tracing::info!(
    4603            0 :                         "Node {} re-registered with matching address",
    4604              :                         register_req.node_id
    4605              :                     );
    4606            0 :                     return Ok(());
    4607              :                 } else {
    4608              :                     // TODO: decide if we want to allow modifying node addresses without removing and re-adding
    4609              :                     // the node.  Safest/simplest thing is to refuse it, and usually we deploy with
    4610              :                     // a fixed address through the lifetime of a node.
    4611            0 :                     tracing::warn!(
    4612            0 :                         "Node {} tried to register with different address",
    4613              :                         register_req.node_id
    4614              :                     );
    4615            0 :                     return Err(ApiError::Conflict(
    4616            0 :                         "Node is already registered with different address".to_string(),
    4617            0 :                     ));
    4618              :                 }
    4619            0 :             }
    4620            0 :         }
    4621            0 : 
    4622            0 :         // We do not require that a node is actually online when registered (it will start life
    4623            0 :         // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
    4624            0 :         // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
    4625            0 :         // that register themselves with a broken DNS config.  We check only the HTTP hostname, because
    4626            0 :         // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
    4627            0 :         if tokio::net::lookup_host(format!(
    4628            0 :             "{}:{}",
    4629            0 :             register_req.listen_http_addr, register_req.listen_http_port
    4630            0 :         ))
    4631            0 :         .await
    4632            0 :         .is_err()
    4633              :         {
    4634              :             // If we have a transient DNS issue, it's up to the caller to retry their registration.  Because
    4635              :             // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
    4636              :             // we return a soft 503 error, to encourage callers to retry past transient issues.
    4637            0 :             return Err(ApiError::ResourceUnavailable(
    4638            0 :                 format!(
    4639            0 :                     "Node {} tried to register with unknown DNS name '{}'",
    4640            0 :                     register_req.node_id, register_req.listen_http_addr
    4641            0 :                 )
    4642            0 :                 .into(),
    4643            0 :             ));
    4644            0 :         }
    4645            0 : 
    4646            0 :         // Ordering: we must persist the new node _before_ adding it to in-memory state.
    4647            0 :         // This ensures that before we use it for anything or expose it via any external
    4648            0 :         // API, it is guaranteed to be available after a restart.
    4649            0 :         let new_node = Node::new(
    4650            0 :             register_req.node_id,
    4651            0 :             register_req.listen_http_addr,
    4652            0 :             register_req.listen_http_port,
    4653            0 :             register_req.listen_pg_addr,
    4654            0 :             register_req.listen_pg_port,
    4655            0 :         );
    4656            0 : 
    4657            0 :         // TODO: idempotency if the node already exists in the database
    4658            0 :         self.persistence.insert_node(&new_node).await?;
    4659              : 
    4660            0 :         let mut locked = self.inner.write().unwrap();
    4661            0 :         let mut new_nodes = (*locked.nodes).clone();
    4662            0 : 
    4663            0 :         locked.scheduler.node_upsert(&new_node);
    4664            0 :         new_nodes.insert(register_req.node_id, new_node);
    4665            0 : 
    4666            0 :         locked.nodes = Arc::new(new_nodes);
    4667            0 : 
    4668            0 :         tracing::info!(
    4669            0 :             "Registered pageserver {}, now have {} pageservers",
    4670            0 :             register_req.node_id,
    4671            0 :             locked.nodes.len()
    4672              :         );
    4673            0 :         Ok(())
    4674            0 :     }
    4675              : 
    4676            0 :     pub(crate) async fn node_configure(
    4677            0 :         &self,
    4678            0 :         node_id: NodeId,
    4679            0 :         availability: Option<NodeAvailability>,
    4680            0 :         scheduling: Option<NodeSchedulingPolicy>,
    4681            0 :     ) -> Result<(), ApiError> {
    4682            0 :         let _node_lock =
    4683            0 :             trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
    4684              : 
    4685            0 :         if let Some(scheduling) = scheduling {
    4686              :             // Scheduling is a persistent part of Node: we must write updates to the database before
    4687              :             // applying them in memory
    4688            0 :             self.persistence.update_node(node_id, scheduling).await?;
    4689            0 :         }
    4690              : 
    4691              :         // If we're activating a node, then before setting it active we must reconcile any shard locations
    4692              :         // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
    4693              :         // by calling [`Self::node_activate_reconcile`]
    4694              :         //
    4695              :         // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
    4696              :         // nothing else can mutate its availability while we run.
    4697            0 :         let availability_transition = if let Some(input_availability) = availability {
    4698            0 :             let (activate_node, availability_transition) = {
    4699            0 :                 let locked = self.inner.read().unwrap();
    4700            0 :                 let Some(node) = locked.nodes.get(&node_id) else {
    4701            0 :                     return Err(ApiError::NotFound(
    4702            0 :                         anyhow::anyhow!("Node {} not registered", node_id).into(),
    4703            0 :                     ));
    4704              :                 };
    4705              : 
    4706            0 :                 (
    4707            0 :                     node.clone(),
    4708            0 :                     node.get_availability_transition(input_availability),
    4709            0 :                 )
    4710              :             };
    4711              : 
    4712            0 :             if matches!(availability_transition, AvailabilityTransition::ToActive) {
    4713            0 :                 self.node_activate_reconcile(activate_node, &_node_lock)
    4714            0 :                     .await?;
    4715            0 :             }
    4716            0 :             availability_transition
    4717              :         } else {
    4718            0 :             AvailabilityTransition::Unchanged
    4719              :         };
    4720              : 
    4721              :         // Apply changes from the request to our in-memory state for the Node
    4722            0 :         let mut locked = self.inner.write().unwrap();
    4723            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    4724            0 : 
    4725            0 :         let mut new_nodes = (**nodes).clone();
    4726              : 
    4727            0 :         let Some(node) = new_nodes.get_mut(&node_id) else {
    4728            0 :             return Err(ApiError::NotFound(
    4729            0 :                 anyhow::anyhow!("Node not registered").into(),
    4730            0 :             ));
    4731              :         };
    4732              : 
    4733            0 :         if let Some(availability) = &availability {
    4734            0 :             node.set_availability(*availability);
    4735            0 :         }
    4736              : 
    4737            0 :         if let Some(scheduling) = scheduling {
    4738            0 :             node.set_scheduling(scheduling);
    4739            0 :         }
    4740              : 
    4741              :         // Update the scheduler, in case the eligibility of the node for new shards has changed
    4742            0 :         scheduler.node_upsert(node);
    4743            0 : 
    4744            0 :         let new_nodes = Arc::new(new_nodes);
    4745            0 : 
    4746            0 :         // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
    4747            0 :         match availability_transition {
    4748              :             AvailabilityTransition::ToOffline => {
    4749            0 :                 tracing::info!("Node {} transition to offline", node_id);
    4750            0 :                 let mut tenants_affected: usize = 0;
    4751              : 
    4752            0 :                 for (tenant_shard_id, tenant_shard) in tenants {
    4753            0 :                     if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
    4754            0 :                         // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
    4755            0 :                         // not assume our knowledge of the node's configuration is accurate until it comes back online
    4756            0 :                         observed_loc.conf = None;
    4757            0 :                     }
    4758              : 
    4759            0 :                     if new_nodes.len() == 1 {
    4760              :                         // Special case for single-node cluster: there is no point trying to reschedule
    4761              :                         // any tenant shards: avoid doing so, in order to avoid spewing warnings about
    4762              :                         // failures to schedule them.
    4763            0 :                         continue;
    4764            0 :                     }
    4765            0 : 
    4766            0 :                     if !new_nodes
    4767            0 :                         .values()
    4768            0 :                         .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    4769              :                     {
    4770              :                         // Special case for when all nodes are unavailable and/or unschedulable: there is no point
    4771              :                         // trying to reschedule since there's nowhere else to go. Without this
    4772              :                         // branch we incorrectly detach tenants in response to node unavailability.
    4773            0 :                         continue;
    4774            0 :                     }
    4775            0 : 
    4776            0 :                     if tenant_shard.intent.demote_attached(scheduler, node_id) {
    4777            0 :                         tenant_shard.sequence = tenant_shard.sequence.next();
    4778            0 : 
    4779            0 :                         // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters
    4780            0 :                         // for tenants without secondary locations: if they have a secondary location, then this
    4781            0 :                         // schedule() call is just promoting an existing secondary)
    4782            0 :                         let mut schedule_context = ScheduleContext::default();
    4783            0 : 
    4784            0 :                         match tenant_shard.schedule(scheduler, &mut schedule_context) {
    4785            0 :                             Err(e) => {
    4786            0 :                                 // It is possible that some tenants will become unschedulable when too many pageservers
    4787            0 :                                 // go offline: in this case there isn't much we can do other than make the issue observable.
    4788            0 :                                 // TODO: give TenantShard a scheduling error attribute to be queried later.
    4789            0 :                                 tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
    4790              :                             }
    4791              :                             Ok(()) => {
    4792            0 :                                 if self
    4793            0 :                                     .maybe_reconcile_shard(tenant_shard, &new_nodes)
    4794            0 :                                     .is_some()
    4795            0 :                                 {
    4796            0 :                                     tenants_affected += 1;
    4797            0 :                                 };
    4798              :                             }
    4799              :                         }
    4800            0 :                     }
    4801              :                 }
    4802            0 :                 tracing::info!(
    4803            0 :                     "Launched {} reconciler tasks for tenants affected by node {} going offline",
    4804              :                     tenants_affected,
    4805              :                     node_id
    4806              :                 )
    4807              :             }
    4808              :             AvailabilityTransition::ToActive => {
    4809            0 :                 tracing::info!("Node {} transition to active", node_id);
    4810              :                 // When a node comes back online, we must reconcile any tenant that has a None observed
    4811              :                 // location on the node.
    4812            0 :                 for tenant_shard in locked.tenants.values_mut() {
    4813              :                     // If a reconciliation is already in progress, rely on the previous scheduling
    4814              :                     // decision and skip triggering a new reconciliation.
    4815            0 :                     if tenant_shard.reconciler.is_some() {
    4816            0 :                         continue;
    4817            0 :                     }
    4818              : 
    4819            0 :                     if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
    4820            0 :                         if observed_loc.conf.is_none() {
    4821            0 :                             self.maybe_reconcile_shard(tenant_shard, &new_nodes);
    4822            0 :                         }
    4823            0 :                     }
    4824              :                 }
    4825              : 
    4826              :                 // TODO: in the background, we should balance work back onto this pageserver
    4827              :             }
    4828              :             // No action required for the intermediate unavailable state.
    4829              :             // When we transition into active or offline from the unavailable state,
    4830              :             // the correct handling above will kick in.
    4831              :             AvailabilityTransition::ToWarmingUpFromActive => {
    4832            0 :                 tracing::info!("Node {} transition to unavailable from active", node_id);
    4833              :             }
    4834              :             AvailabilityTransition::ToWarmingUpFromOffline => {
    4835            0 :                 tracing::info!("Node {} transition to unavailable from offline", node_id);
    4836              :             }
    4837              :             AvailabilityTransition::Unchanged => {
    4838            0 :                 tracing::debug!("Node {} no availability change during config", node_id);
    4839              :             }
    4840              :         }
    4841              : 
    4842            0 :         locked.nodes = new_nodes;
    4843            0 : 
    4844            0 :         Ok(())
    4845            0 :     }
    4846              : 
    4847            0 :     pub(crate) async fn start_node_drain(
    4848            0 :         self: &Arc<Self>,
    4849            0 :         node_id: NodeId,
    4850            0 :     ) -> Result<(), ApiError> {
    4851            0 :         let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
    4852            0 :             let locked = self.inner.read().unwrap();
    4853            0 :             let nodes = &locked.nodes;
    4854            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    4855            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    4856            0 :             ))?;
    4857            0 :             let schedulable_nodes_count = nodes
    4858            0 :                 .iter()
    4859            0 :                 .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
    4860            0 :                 .count();
    4861            0 : 
    4862            0 :             (
    4863            0 :                 locked
    4864            0 :                     .ongoing_operation
    4865            0 :                     .as_ref()
    4866            0 :                     .map(|ongoing| ongoing.operation),
    4867            0 :                 node.is_available(),
    4868            0 :                 node.get_scheduling(),
    4869            0 :                 schedulable_nodes_count,
    4870            0 :             )
    4871            0 :         };
    4872              : 
    4873            0 :         if let Some(ongoing) = ongoing_op {
    4874            0 :             return Err(ApiError::PreconditionFailed(
    4875            0 :                 format!("Background operation already ongoing for node: {}", ongoing).into(),
    4876            0 :             ));
    4877            0 :         }
    4878            0 : 
    4879            0 :         if !node_available {
    4880            0 :             return Err(ApiError::ResourceUnavailable(
    4881            0 :                 format!("Node {node_id} is currently unavailable").into(),
    4882            0 :             ));
    4883            0 :         }
    4884            0 : 
    4885            0 :         if schedulable_nodes_count == 0 {
    4886            0 :             return Err(ApiError::PreconditionFailed(
    4887            0 :                 "No other schedulable nodes to drain to".into(),
    4888            0 :             ));
    4889            0 :         }
    4890            0 : 
    4891            0 :         match node_policy {
    4892              :             NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => {
    4893            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
    4894            0 :                     .await?;
    4895              : 
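                      :                 // Set up a cancellation token (stored in the OperationHandler below, so that
                      :                 // cancel_node_drain can stop the drain) and enter the gate; the gate guard is held by
                      :                 // the background task for its whole lifetime.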
    4896            0 :                 let cancel = self.cancel.child_token();
    4897            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    4898              : 
    4899            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    4900            0 :                     operation: Operation::Drain(Drain { node_id }),
    4901            0 :                     cancel: cancel.clone(),
    4902            0 :                 });
    4903            0 : 
    4904            0 :                 tokio::task::spawn({
    4905            0 :                     let service = self.clone();
    4906            0 :                     let cancel = cancel.clone();
    4907            0 :                     async move {
    4908            0 :                         let _gate_guard = gate_guard;
    4909              : 
    4910              :                         scopeguard::defer! {
    4911              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    4912              : 
    4913            0 :                             if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
    4914              :                                 assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
    4915              :                             } else {
    4916              :                                 panic!("We always remove the same operation")
    4917              :                             }
    4918              :                         }
    4919              : 
    4920            0 :                         tracing::info!(%node_id, "Drain background operation starting");
    4921            0 :                         let res = service.drain_node(node_id, cancel).await;
    4922            0 :                         match res {
    4923              :                             Ok(()) => {
    4924            0 :                                 tracing::info!(%node_id, "Drain background operation completed successfully");
    4925              :                             }
    4926              :                             Err(OperationError::Cancelled) => {
    4927            0 :                                 tracing::info!(%node_id, "Drain background operation was cancelled");
    4928              :                             }
    4929            0 :                             Err(err) => {
    4930            0 :                                 tracing::error!(%node_id, "Drain background operation encountered: {err}")
    4931              :                             }
    4932              :                         }
    4933            0 :                     }
    4934            0 :                 });
    4935            0 :             }
    4936              :             NodeSchedulingPolicy::Draining => {
    4937            0 :                 return Err(ApiError::Conflict(format!(
    4938            0 :                     "Node {node_id} has drain in progress"
    4939            0 :                 )));
    4940              :             }
    4941            0 :             policy => {
    4942            0 :                 return Err(ApiError::PreconditionFailed(
    4943            0 :                     format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
    4944            0 :                 ));
    4945              :             }
    4946              :         }
    4947              : 
    4948            0 :         Ok(())
    4949            0 :     }
    4950              : 
    4951            0 :     pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
    4952            0 :         let (node_available, node_policy) = {
    4953            0 :             let locked = self.inner.read().unwrap();
    4954            0 :             let nodes = &locked.nodes;
    4955            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    4956            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    4957            0 :             ))?;
    4958              : 
    4959            0 :             (node.is_available(), node.get_scheduling())
    4960            0 :         };
    4961            0 : 
    4962            0 :         if !node_available {
    4963            0 :             return Err(ApiError::ResourceUnavailable(
    4964            0 :                 format!("Node {node_id} is currently unavailable").into(),
    4965            0 :             ));
    4966            0 :         }
    4967              : 
    4968            0 :         if !matches!(node_policy, NodeSchedulingPolicy::Draining) {
    4969            0 :             return Err(ApiError::PreconditionFailed(
    4970            0 :                 format!("Node {node_id} has no drain in progress").into(),
    4971            0 :             ));
    4972            0 :         }
    4973              : 
    4974            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    4975            0 :             if let Operation::Drain(drain) = op_handler.operation {
    4976            0 :                 if drain.node_id == node_id {
    4977            0 :                     tracing::info!("Cancelling background drain operation for node {node_id}");
    4978            0 :                     op_handler.cancel.cancel();
    4979            0 :                     return Ok(());
    4980            0 :                 }
    4981            0 :             }
    4982            0 :         }
    4983              : 
    4984            0 :         Err(ApiError::PreconditionFailed(
    4985            0 :             format!("Node {node_id} has no drain in progress").into(),
    4986            0 :         ))
    4987            0 :     }
    4988              : 
    4989            0 :     pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
    4990            0 :         let (ongoing_op, node_available, node_policy, total_nodes_count) = {
    4991            0 :             let locked = self.inner.read().unwrap();
    4992            0 :             let nodes = &locked.nodes;
    4993            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    4994            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    4995            0 :             ))?;
    4996              : 
    4997            0 :             (
    4998            0 :                 locked
    4999            0 :                     .ongoing_operation
    5000            0 :                     .as_ref()
    5001            0 :                     .map(|ongoing| ongoing.operation),
    5002            0 :                 node.is_available(),
    5003            0 :                 node.get_scheduling(),
    5004            0 :                 nodes.len(),
    5005            0 :             )
    5006            0 :         };
    5007              : 
    5008            0 :         if let Some(ongoing) = ongoing_op {
    5009            0 :             return Err(ApiError::PreconditionFailed(
    5010            0 :                 format!("Background operation already ongoing for node: {}", ongoing).into(),
    5011            0 :             ));
    5012            0 :         }
    5013            0 : 
    5014            0 :         if !node_available {
    5015            0 :             return Err(ApiError::ResourceUnavailable(
    5016            0 :                 format!("Node {node_id} is currently unavailable").into(),
    5017            0 :             ));
    5018            0 :         }
    5019            0 : 
    5020            0 :         if total_nodes_count <= 1 {
    5021            0 :             return Err(ApiError::PreconditionFailed(
    5022            0 :                 "No other nodes to fill from".into(),
    5023            0 :             ));
    5024            0 :         }
    5025            0 : 
    5026            0 :         match node_policy {
    5027              :             NodeSchedulingPolicy::Active => {
    5028            0 :                 self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
    5029            0 :                     .await?;
    5030              : 
    5031            0 :                 let cancel = self.cancel.child_token();
    5032            0 :                 let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
    5033              : 
    5034            0 :                 self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
    5035            0 :                     operation: Operation::Fill(Fill { node_id }),
    5036            0 :                     cancel: cancel.clone(),
    5037            0 :                 });
    5038            0 : 
    5039            0 :                 tokio::task::spawn({
    5040            0 :                     let service = self.clone();
    5041            0 :                     let cancel = cancel.clone();
    5042            0 :                     async move {
    5043            0 :                         let _gate_guard = gate_guard;
    5044              : 
    5045              :                         scopeguard::defer! {
    5046              :                             let prev = service.inner.write().unwrap().ongoing_operation.take();
    5047              : 
    5048            0 :                             if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
    5049              :                                 assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
    5050              :                             } else {
    5051              :                                 panic!("We always remove the same operation")
    5052              :                             }
    5053              :                         }
    5054              : 
    5055            0 :                         tracing::info!(%node_id, "Fill background operation starting");
    5056            0 :                         let res = service.fill_node(node_id, cancel).await;
    5057            0 :                         match res {
    5058              :                             Ok(()) => {
    5059            0 :                                 tracing::info!(%node_id, "Fill background operation completed successfully");
    5060              :                             }
    5061              :                             Err(OperationError::Cancelled) => {
    5062            0 :                                 tracing::info!(%node_id, "Fill background operation was cancelled");
    5063              :                             }
    5064            0 :                             Err(err) => {
    5065            0 :                                 tracing::error!(%node_id, "Fill background operation encountered: {err}")
    5066              :                             }
    5067              :                         }
    5068            0 :                     }
    5069            0 :                 });
    5070            0 :             }
    5071              :             NodeSchedulingPolicy::Filling => {
    5072            0 :                 return Err(ApiError::Conflict(format!(
    5073            0 :                     "Node {node_id} has fill in progress"
    5074            0 :                 )));
    5075              :             }
    5076            0 :             policy => {
    5077            0 :                 return Err(ApiError::PreconditionFailed(
    5078            0 :                     format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
    5079            0 :                 ));
    5080              :             }
    5081              :         }
    5082              : 
    5083            0 :         Ok(())
    5084            0 :     }
    5085              : 
    5086            0 :     pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
    5087            0 :         let (node_available, node_policy) = {
    5088            0 :             let locked = self.inner.read().unwrap();
    5089            0 :             let nodes = &locked.nodes;
    5090            0 :             let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
    5091            0 :                 anyhow::anyhow!("Node {} not registered", node_id).into(),
    5092            0 :             ))?;
    5093              : 
    5094            0 :             (node.is_available(), node.get_scheduling())
    5095            0 :         };
    5096            0 : 
    5097            0 :         if !node_available {
    5098            0 :             return Err(ApiError::ResourceUnavailable(
    5099            0 :                 format!("Node {node_id} is currently unavailable").into(),
    5100            0 :             ));
    5101            0 :         }
    5102              : 
    5103            0 :         if !matches!(node_policy, NodeSchedulingPolicy::Filling) {
    5104            0 :             return Err(ApiError::PreconditionFailed(
    5105            0 :                 format!("Node {node_id} has no fill in progress").into(),
    5106            0 :             ));
    5107            0 :         }
    5108              : 
    5109            0 :         if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
    5110            0 :             if let Operation::Fill(fill) = op_handler.operation {
    5111            0 :                 if fill.node_id == node_id {
    5112            0 :                     tracing::info!("Cancelling background fill operation for node {node_id}");
    5113            0 :                     op_handler.cancel.cancel();
    5114            0 :                     return Ok(());
    5115            0 :                 }
    5116            0 :             }
    5117            0 :         }
    5118              : 
    5119            0 :         Err(ApiError::PreconditionFailed(
    5120            0 :             format!("Node {node_id} has no fill in progress").into(),
    5121            0 :         ))
    5122            0 :     }
    5123              : 
    5124              :     /// Helper for methods that will try and call pageserver APIs for
    5125              :     /// a tenant, such as timeline CRUD: they cannot proceed unless the tenant
    5126              :     /// is attached somewhere.
    5127            0 :     fn ensure_attached_schedule(
    5128            0 :         &self,
    5129            0 :         mut locked: std::sync::RwLockWriteGuard<'_, ServiceState>,
    5130            0 :         tenant_id: TenantId,
    5131            0 :     ) -> Result<Vec<ReconcilerWaiter>, anyhow::Error> {
    5132            0 :         let mut waiters = Vec::new();
    5133            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    5134            0 : 
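                      :         // A single ScheduleContext is shared across all shards of the tenant, so each schedule()
                      :         // call can take the placements already chosen for sibling shards into account.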
    5135            0 :         let mut schedule_context = ScheduleContext::default();
    5136            0 :         for (tenant_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
    5137            0 :             shard.schedule(scheduler, &mut schedule_context)?;
    5138              : 
    5139              :             // The shard's policies may not result in an attached location being scheduled: this
    5140              :             // is an error because our caller needs it attached somewhere.
    5141            0 :             if shard.intent.get_attached().is_none() {
    5142            0 :                 return Err(anyhow::anyhow!(
    5143            0 :                     "Tenant {tenant_id} not scheduled to be attached"
    5144            0 :                 ));
    5145            0 :             };
    5146            0 : 
    5147            0 :             if shard.stably_attached().is_some() {
    5148              :                 // We do not require the shard to be totally up to date on reconciliation: we just require
    5149              :                 // that it has been attached on the intended node.  Other dirty state such as unattached secondary
    5150              :                 // locations, or compute hook notifications can be ignored.
    5151            0 :                 continue;
    5152            0 :             }
    5153              : 
    5154            0 :             if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
    5155            0 :                 tracing::info!("Waiting for shard {tenant_shard_id} to reconcile, in order to ensure it is attached");
    5156            0 :                 waiters.push(waiter);
    5157            0 :             }
    5158              :         }
    5159            0 :         Ok(waiters)
    5160            0 :     }
    5161              : 
    5162            0 :     async fn ensure_attached_wait(&self, tenant_id: TenantId) -> Result<(), ApiError> {
    5163            0 :         let ensure_waiters = {
    5164            0 :             let locked = self.inner.write().unwrap();
    5165              : 
    5166              :             // Check if the tenant is splitting: in this case, even if it is attached,
    5167              :             // we must act as if it is not: this blocks e.g. timeline creation/deletion
    5168              :             // operations during the split.
    5169            0 :             for (_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
    5170            0 :                 if !matches!(shard.splitting, SplitState::Idle) {
    5171            0 :                     return Err(ApiError::ResourceUnavailable(
    5172            0 :                         "Tenant shards are currently splitting".into(),
    5173            0 :                     ));
    5174            0 :                 }
    5175              :             }
    5176              : 
    5177            0 :             self.ensure_attached_schedule(locked, tenant_id)
    5178            0 :                 .map_err(ApiError::InternalServerError)?
    5179              :         };
    5180              : 
    5181            0 :         let deadline = Instant::now().checked_add(Duration::from_secs(5)).unwrap();
    5182            0 :         for waiter in ensure_waiters {
    5183            0 :             let timeout = deadline.duration_since(Instant::now());
    5184            0 :             waiter.wait_timeout(timeout).await?;
    5185              :         }
    5186              : 
    5187            0 :         Ok(())
    5188            0 :     }
    5189              : 
    5190              :     /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`].
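                      :     ///
                      :     /// Returns `None` if the shard needs no reconcile or we are shutting down; otherwise a waiter for
                      :     /// an already-running reconcile, a newly spawned one, or a delayed one enqueued because the
                      :     /// concurrency limit was reached.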
    5191            0 :     fn maybe_reconcile_shard(
    5192            0 :         &self,
    5193            0 :         shard: &mut TenantShard,
    5194            0 :         nodes: &Arc<HashMap<NodeId, Node>>,
    5195            0 :     ) -> Option<ReconcilerWaiter> {
    5196            0 :         let reconcile_needed = shard.get_reconcile_needed(nodes);
    5197            0 : 
    5198            0 :         match reconcile_needed {
    5199            0 :             ReconcileNeeded::No => return None,
    5200            0 :             ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
    5201            0 :             ReconcileNeeded::Yes => {
    5202            0 :                 // Fall through to try and acquire units for spawning reconciler
    5203            0 :             }
    5204              :         };
    5205              : 
    5206            0 :         let units = match self.reconciler_concurrency.clone().try_acquire_owned() {
    5207            0 :             Ok(u) => ReconcileUnits::new(u),
    5208              :             Err(_) => {
    5209            0 :                 tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
    5210            0 :                     "Concurrency limited: enqueued for reconcile later");
    5211            0 :                 if !shard.delayed_reconcile {
    5212            0 :                     match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
    5213            0 :                         Err(TrySendError::Closed(_)) => {
    5214            0 :                             // Weird mid-shutdown case?
    5215            0 :                         }
    5216              :                         Err(TrySendError::Full(_)) => {
    5217              :                             // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
    5218            0 :                             tracing::warn!(
    5219            0 :                                 "Many shards are waiting to reconcile: delayed_reconcile queue is full"
    5220              :                             );
    5221              :                         }
    5222            0 :                         Ok(()) => {
    5223            0 :                             shard.delayed_reconcile = true;
    5224            0 :                         }
    5225              :                     }
    5226            0 :                 }
    5227              : 
    5228              :                 // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
    5229              :                 // number to advance.  When this function is eventually called again and succeeds in getting units,
    5230              :                 // it will spawn a reconciler that makes this waiter complete.
    5231            0 :                 return Some(shard.future_reconcile_waiter());
    5232              :             }
    5233              :         };
    5234              : 
    5235            0 :         let Ok(gate_guard) = self.reconcilers_gate.enter() else {
    5236              :             // Gate closed: we're shutting down, drop out.
    5237            0 :             return None;
    5238              :         };
    5239              : 
    5240            0 :         shard.spawn_reconciler(
    5241            0 :             &self.result_tx,
    5242            0 :             nodes,
    5243            0 :             &self.compute_hook,
    5244            0 :             &self.config,
    5245            0 :             &self.persistence,
    5246            0 :             units,
    5247            0 :             gate_guard,
    5248            0 :             &self.reconcilers_cancel,
    5249            0 :         )
    5250            0 :     }
    5251              : 
    5252              :     /// Check all tenants for pending reconciliation work, and reconcile those in need.
    5253              :     /// Additionally, reschedule tenants that require it.
    5254              :     ///
    5255              :     /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
    5256              :     /// spawned but some _would_ have been spawned if `reconciler_concurrency` units were
    5257              :     /// available.  A return value of 0 indicates that everything is fully reconciled already.
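                      :     /// Shards already queued for a delayed reconcile while the concurrency limit is exhausted also
                      :     /// produce a nonzero return value, so callers keep polling until the queue drains.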
    5258            0 :     fn reconcile_all(&self) -> usize {
    5259            0 :         let mut locked = self.inner.write().unwrap();
    5260            0 :         let (nodes, tenants, _scheduler) = locked.parts_mut();
    5261            0 :         let pageservers = nodes.clone();
    5262            0 : 
    5263            0 :         let mut schedule_context = ScheduleContext::default();
    5264            0 : 
    5265            0 :         let mut reconciles_spawned = 0;
    5266            0 :         for (tenant_shard_id, shard) in tenants.iter_mut() {
    5267            0 :             if tenant_shard_id.is_shard_zero() {
    5268            0 :                 schedule_context = ScheduleContext::default();
    5269            0 :             }
    5270              : 
    5271              :             // Skip checking if this shard is already enqueued for reconciliation
    5272            0 :             if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
    5273              :                 // If there is something delayed, then return a nonzero count so that
    5274              :                 // callers like reconcile_all_now do not incorrectly get the impression
    5275              :                 // that the system is in a quiescent state.
    5276            0 :                 reconciles_spawned = std::cmp::max(1, reconciles_spawned);
    5277            0 :                 continue;
    5278            0 :             }
    5279            0 : 
    5280            0 :             // Eventual consistency: if an earlier reconcile job failed, and the shard is still
    5281            0 :             // dirty, spawn another one
    5282            0 :             if self.maybe_reconcile_shard(shard, &pageservers).is_some() {
    5283            0 :                 reconciles_spawned += 1;
    5284            0 :             }
    5285              : 
    5286            0 :             schedule_context.avoid(&shard.intent.all_pageservers());
    5287              :         }
    5288              : 
    5289            0 :         reconciles_spawned
    5290            0 :     }
    5291              : 
    5292              :     /// `optimize` in this context means identifying shards which have valid scheduled locations, but
    5293              :     /// could be scheduled somewhere better:
    5294              :     /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
    5295              :     ///    * e.g. after a node fails then recovers, to move some work back to it
    5296              :     /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
    5297              :     ///    * e.g. after a shard split, the initial attached locations will all be on the node where
    5298              :     ///      we did the split, but are probably better placed elsewhere.
    5299              :     /// - Creating new secondary locations if it improves the spreading of a sharded tenant
    5300              :     ///    * e.g. after a shard split, some locations will be on the same node (where the split
    5301              :     ///      happened), and will probably be better placed elsewhere.
    5302              :     ///
    5303              :     /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
    5304              :     /// the time of scheduling, this function looks for cases where a better-scoring location is available
    5305              :     /// according to those same soft constraints.
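                      :     ///
                      :     /// This runs in three phases: a synchronous planning pass ([`Self::optimize_all_plan`]), an
                      :     /// asynchronous validation pass that queries pageservers ([`Self::optimize_all_validate`]), and a
                      :     /// synchronous apply pass capped at `MAX_OPTIMIZATIONS_EXEC_PER_PASS` per call.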
    5306            0 :     async fn optimize_all(&self) -> usize {
    5307            0 :         // Limit on how many shards' optimizations each call to this function will execute.  Combined
    5308            0 :         // with the frequency of background calls, this acts as an implicit rate limit that runs a small
    5309            0 :         // trickle of optimizations in the background, rather than executing a large number in parallel
    5310            0 :         // when a change occurs.
    5311            0 :         const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 2;
    5312            0 : 
    5313            0 :         // Synchronous prepare: scan shards for possible scheduling optimizations
    5314            0 :         let candidate_work = self.optimize_all_plan();
    5315            0 :         let candidate_work_len = candidate_work.len();
    5316              : 
    5317              :         // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply the optimization
    5318            0 :         let validated_work = self.optimize_all_validate(candidate_work).await;
    5319              : 
    5320            0 :         let was_work_filtered = validated_work.len() != candidate_work_len;
    5321            0 : 
    5322            0 :         // Synchronous apply: update the shards' intent states according to validated optimisations
    5323            0 :         let mut reconciles_spawned = 0;
    5324            0 :         let mut optimizations_applied = 0;
    5325            0 :         let mut locked = self.inner.write().unwrap();
    5326            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    5327            0 :         for (tenant_shard_id, optimization) in validated_work {
    5328            0 :             let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
    5329              :                 // Shard was dropped between planning and execution;
    5330            0 :                 continue;
    5331              :             };
    5332            0 :             if shard.apply_optimization(scheduler, optimization) {
    5333            0 :                 optimizations_applied += 1;
    5334            0 :                 if self.maybe_reconcile_shard(shard, nodes).is_some() {
    5335            0 :                     reconciles_spawned += 1;
    5336            0 :                 }
    5337            0 :             }
    5338              : 
    5339            0 :             if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
    5340            0 :                 break;
    5341            0 :             }
    5342              :         }
    5343              : 
    5344            0 :         if was_work_filtered {
    5345            0 :             // If we filtered any work out during validation, ensure we return a nonzero value to indicate
    5346            0 :             // to callers that the system is not in a truly quiet state: it's going to do some work as soon
    5347            0 :             // as these validations start passing.
    5348            0 :             reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
    5349            0 :         }
    5350              : 
    5351            0 :         reconciles_spawned
    5352            0 :     }
    5353              : 
    5354            0 :     fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
    5355            0 :         let mut schedule_context = ScheduleContext::default();
    5356            0 : 
    5357            0 :         let mut tenant_shards: Vec<&TenantShard> = Vec::new();
    5358            0 : 
    5359            0 :         // How many candidate optimizations we will generate before evaluating them for readiness: setting
    5360            0 :         // this higher than the execution limit gives us a chance to execute some work even if the first
    5361            0 :         // few optimizations we find are not ready.
    5362            0 :         const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 8;
    5363            0 : 
    5364            0 :         let mut work = Vec::new();
    5365            0 : 
    5366            0 :         let mut locked = self.inner.write().unwrap();
    5367            0 :         let (nodes, tenants, scheduler) = locked.parts_mut();
    5368            0 :         for (tenant_shard_id, shard) in tenants.iter() {
    5369            0 :             if tenant_shard_id.is_shard_zero() {
    5370            0 :                 // Reset accumulators on the first shard in a tenant
    5371            0 :                 schedule_context = ScheduleContext::default();
    5372            0 :                 schedule_context.mode = ScheduleMode::Speculative;
    5373            0 :                 tenant_shards.clear();
    5374            0 :             }
    5375              : 
    5376            0 :             if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
    5377            0 :                 break;
    5378            0 :             }
    5379            0 : 
    5380            0 :             match shard.get_scheduling_policy() {
    5381            0 :                 ShardSchedulingPolicy::Active => {
    5382            0 :                     // Ok to do optimization
    5383            0 :                 }
    5384              :                 ShardSchedulingPolicy::Essential
    5385              :                 | ShardSchedulingPolicy::Pause
    5386              :                 | ShardSchedulingPolicy::Stop => {
    5387              :                     // Policy prevents optimizing this shard.
    5388            0 :                     continue;
    5389              :                 }
    5390              :             }
    5391              : 
    5392              :             // Accumulate the schedule context for all the shards in a tenant: we must have
    5393              :             // the total view of all shards before we can try to optimize any of them.
    5394            0 :             schedule_context.avoid(&shard.intent.all_pageservers());
    5395            0 :             if let Some(attached) = shard.intent.get_attached() {
    5396            0 :                 schedule_context.push_attached(*attached);
    5397            0 :             }
    5398            0 :             tenant_shards.push(shard);
    5399            0 : 
    5400            0 :             // Once we have seen the last shard in the tenant, proceed to search across all shards
    5401            0 :             // in the tenant for optimizations
    5402            0 :             if shard.shard.number.0 == shard.shard.count.count() - 1 {
    5403            0 :                 if tenant_shards.iter().any(|s| s.reconciler.is_some()) {
    5404              :                     // Do not start any optimizations while another change to the tenant is ongoing: this
    5405              :                     // is not necessary for correctness, but simplifies operations and implicitly throttles
    5406              :                     // optimization changes to happen in a "trickle" over time.
    5407            0 :                     continue;
    5408            0 :                 }
    5409            0 : 
    5410            0 :                 if tenant_shards.iter().any(|s| {
    5411            0 :                     !matches!(s.splitting, SplitState::Idle)
    5412            0 :                         || matches!(s.policy, PlacementPolicy::Detached)
    5413            0 :                 }) {
    5414              :                     // Never attempt to optimize a tenant that is currently being split, or
    5415              :                     // a tenant that is meant to be detached
    5416            0 :                     continue;
    5417            0 :                 }
    5418              : 
    5419              :                 // TODO: optimization calculations are relatively expensive: create some fast-path for
    5420              :                 // the common idle case (avoiding the search on tenants that we have recently checked)
    5421              : 
    5422            0 :                 for shard in &tenant_shards {
    5423            0 :                     if let Some(optimization) =
    5424              :                         // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
    5425              :                         // its primary location based on soft constraints, cut it over.
    5426            0 :                         shard.optimize_attachment(nodes, &schedule_context)
    5427              :                     {
    5428            0 :                         work.push((shard.tenant_shard_id, optimization));
    5429            0 :                         break;
    5430            0 :                     } else if let Some(optimization) =
    5431              :                         // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
    5432              :                         // better placed on another node, based on ScheduleContext, then adjust it.  This
    5433              :                         // covers cases like after a shard split, where we might have too many shards
    5434              :                         // in the same tenant with secondary locations on the node where they originally split.
    5435            0 :                         shard.optimize_secondary(scheduler, &schedule_context)
    5436              :                     {
    5437            0 :                         work.push((shard.tenant_shard_id, optimization));
    5438            0 :                         break;
    5439            0 :                     }
    5440              : 
    5441              :                     // TODO: extend this mechanism to prefer attaching on nodes with fewer attached
    5442              :                     // tenants (i.e. extend schedule state to distinguish attached from secondary counts),
    5443              :                     // for the total number of attachments on a node (not just within a tenant.)
    5444              :                 }
    5445            0 :             }
    5446              :         }
    5447              : 
    5448            0 :         work
    5449            0 :     }
    5450              : 
    5451            0 :     async fn optimize_all_validate(
    5452            0 :         &self,
    5453            0 :         candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
    5454            0 :     ) -> Vec<(TenantShardId, ScheduleOptimization)> {
    5455            0 :         // Take a clone of the node map to use outside the lock in async validation phase
    5456            0 :         let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
    5457            0 : 
    5458            0 :         let mut want_secondary_status = Vec::new();
    5459            0 : 
    5460            0 :         // Validate our plans: this is an async phase where we may do I/O to pageservers to
    5461            0 :         // check that the state of locations is acceptable to run the optimization, such as
    5462            0 :         // checking that a secondary location is sufficiently warmed-up to cleanly cut over
    5463            0 :         // in a live migration.
    5464            0 :         let mut validated_work = Vec::new();
    5465            0 :         for (tenant_shard_id, optimization) in candidate_work {
    5466            0 :             match optimization.action {
    5467              :                 ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    5468              :                     old_attached_node_id: _,
    5469            0 :                     new_attached_node_id,
    5470            0 :                 }) => {
    5471            0 :                     match validation_nodes.get(&new_attached_node_id) {
    5472            0 :                         None => {
    5473            0 :                             // Node was dropped between planning and validation
    5474            0 :                         }
    5475            0 :                         Some(node) => {
    5476            0 :                             if !node.is_available() {
    5477            0 :                                 tracing::info!("Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable");
    5478            0 :                             } else {
    5479            0 :                                 // Accumulate optimizations that require fetching secondary status, so that we can execute these
    5480            0 :                                 // remote API requests concurrently.
    5481            0 :                                 want_secondary_status.push((
    5482            0 :                                     tenant_shard_id,
    5483            0 :                                     node.clone(),
    5484            0 :                                     optimization,
    5485            0 :                                 ));
    5486            0 :                             }
    5487              :                         }
    5488              :                     }
    5489              :                 }
    5490              :                 ScheduleOptimizationAction::ReplaceSecondary(_) => {
    5491              :                     // No extra checks needed to replace a secondary: this does not interrupt client access
    5492            0 :                     validated_work.push((tenant_shard_id, optimization))
    5493              :                 }
    5494              :             };
    5495              :         }
    5496              : 
    5497              :         // Call into pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth migration: we
    5498              :         // do this so that we avoid spawning a Reconciler that would have to wait minutes/hours for a destination to warm up: that reconciler
    5499              :         // would hold a precious reconcile semaphore unit the whole time it was waiting for the destination to warm up.
    5500            0 :         let results = self
    5501            0 :             .tenant_for_shards_api(
    5502            0 :                 want_secondary_status
    5503            0 :                     .iter()
    5504            0 :                     .map(|i| (i.0, i.1.clone()))
    5505            0 :                     .collect(),
    5506            0 :                 |tenant_shard_id, client| async move {
    5507            0 :                     client.tenant_secondary_status(tenant_shard_id).await
    5508            0 :                 },
    5509            0 :                 1,
    5510            0 :                 1,
    5511            0 :                 SHORT_RECONCILE_TIMEOUT,
    5512            0 :                 &self.cancel,
    5513            0 :             )
    5514            0 :             .await;
    5515              : 
    5516            0 :         for ((tenant_shard_id, node, optimization), secondary_status) in
    5517            0 :             want_secondary_status.into_iter().zip(results.into_iter())
    5518              :         {
    5519            0 :             match secondary_status {
    5520            0 :                 Err(e) => {
    5521            0 :                     tracing::info!("Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}");
    5522              :                 }
    5523            0 :                 Ok(progress) => {
    5524            0 :                     // We require secondary locations to have less than 10GiB of downloads pending before we will use
    5525            0 :                     // them in an optimization
    5526            0 :                     const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
    5527            0 : 
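                      :                     // Note: `&&` binds tighter than `||` below, so we skip when there is no heatmap yet,
                      :                     // when a small (< threshold) secondary has not finished downloading, or when more than
                      :                     // the threshold remains to be downloaded.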
    5528            0 :                     if progress.heatmap_mtime.is_none()
    5529            0 :                         || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
    5530            0 :                             && progress.bytes_downloaded != progress.bytes_total
    5531            0 :                         || progress.bytes_total - progress.bytes_downloaded
    5532            0 :                             > DOWNLOAD_FRESHNESS_THRESHOLD
    5533              :                     {
    5534            0 :                         tracing::info!("Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}");
    5535              :                     } else {
    5536              :                         // Location looks ready: proceed
    5537            0 :                         tracing::info!(
    5538            0 :                             "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
    5539              :                         );
    5540            0 :                         validated_work.push((tenant_shard_id, optimization))
    5541              :                     }
    5542              :                 }
    5543              :             }
    5544              :         }
    5545              : 
    5546            0 :         validated_work
    5547            0 :     }
    5548              : 
    5549              :     /// Look for shards which are oversized and in need of splitting
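                      :     ///
                      :     /// The trigger is the `split_threshold` config, compared against each tenant's max logical size,
                      :     /// and qualifying tenants are split straight to `SPLIT_TO_MAX` shards.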
    5550            0 :     async fn autosplit_tenants(self: &Arc<Self>) {
    5551            0 :         let Some(split_threshold) = self.config.split_threshold else {
    5552              :             // Auto-splitting is disabled
    5553            0 :             return;
    5554              :         };
    5555              : 
    5556            0 :         let nodes = self.inner.read().unwrap().nodes.clone();
    5557            0 : 
    5558            0 :         const SPLIT_TO_MAX: ShardCount = ShardCount::new(8);
    5559            0 : 
    5560            0 :         let mut top_n = Vec::new();
    5561            0 : 
    5562            0 :         // Call into each node to look for big tenants
    5563            0 :         let top_n_request = TopTenantShardsRequest {
    5564            0 :             // We currently split based on logical size, for simplicity: logical size is a signal of
    5565            0 :             // the user's intent to run a large database, whereas physical/resident size can be symptoms
    5566            0 :             // of compaction issues.  Eventually we should switch to using resident size to bound the
    5567            0 :             // disk space impact of one shard.
    5568            0 :             order_by: models::TenantSorting::MaxLogicalSize,
    5569            0 :             limit: 10,
    5570            0 :             where_shards_lt: Some(SPLIT_TO_MAX),
    5571            0 :             where_gt: Some(split_threshold),
    5572            0 :         };
    5573            0 :         for node in nodes.values() {
    5574            0 :             let request_ref = &top_n_request;
    5575            0 :             match node
    5576            0 :                 .with_client_retries(
    5577            0 :                     |client| async move {
    5578            0 :                         let request = request_ref.clone();
    5579            0 :                         client.top_tenant_shards(request.clone()).await
    5580            0 :                     },
    5581            0 :                     &self.config.jwt_token,
    5582            0 :                     3,
    5583            0 :                     3,
    5584            0 :                     Duration::from_secs(5),
    5585            0 :                     &self.cancel,
    5586            0 :                 )
    5587            0 :                 .await
    5588              :             {
    5589            0 :                 Some(Ok(node_top_n)) => {
    5590            0 :                     top_n.extend(node_top_n.shards.into_iter());
    5591            0 :                 }
    5592              :                 Some(Err(mgmt_api::Error::Cancelled)) => {
    5593            0 :                     continue;
    5594              :                 }
    5595            0 :                 Some(Err(e)) => {
    5596            0 :                     tracing::warn!("Failed to fetch top N tenants from {node}: {e}");
    5597            0 :                     continue;
    5598              :                 }
    5599              :                 None => {
    5600              :                     // Node is shutting down
    5601            0 :                     continue;
    5602              :                 }
    5603              :             };
    5604              :         }
    5605              : 
    5606              :         // Pick the biggest tenant to split first
    5607            0 :         top_n.sort_by_key(|i| std::cmp::Reverse(i.resident_size));
    5608            0 :         let Some(split_candidate) = top_n.into_iter().next() else {
    5609            0 :             tracing::debug!("No split-eligible shards found");
    5610            0 :             return;
    5611              :         };
    5612              : 
    5613              :         // We spawn a task to run this, so it's exactly like some external API client requesting it.  We don't
    5614              :         // want to block the background reconcile loop on this.
    5615            0 :         tracing::info!("Auto-splitting tenant for size threshold {split_threshold}: current size {split_candidate:?}");
    5616              : 
    5617            0 :         let this = self.clone();
    5618            0 :         tokio::spawn(
    5619            0 :             async move {
    5620            0 :                 match this
    5621            0 :                     .tenant_shard_split(
    5622            0 :                         split_candidate.id.tenant_id,
    5623            0 :                         TenantShardSplitRequest {
    5624            0 :                             // Always split to the max number of shards: this avoids stepping through
    5625            0 :                             // intervening shard counts and encountering the overhead of a split+cleanup
    5626            0 :                             // each time as a tenant grows, and is not too expensive because our max shard
    5627            0 :                             // count is relatively low anyway.
    5628            0 :                             // This policy will be adjusted in future once we support higher shard count.
    5629            0 :                             new_shard_count: SPLIT_TO_MAX.literal(),
    5630            0 :                             new_stripe_size: Some(ShardParameters::DEFAULT_STRIPE_SIZE),
    5631            0 :                         },
    5632            0 :                     )
    5633            0 :                     .await
    5634              :                 {
    5635              :                     Ok(_) => {
    5636            0 :                         tracing::info!("Successful auto-split");
    5637              :                     }
    5638            0 :                     Err(e) => {
    5639            0 :                         tracing::error!("Auto-split failed: {e}");
    5640              :                     }
    5641              :                 }
    5642            0 :             }
    5643            0 :             .instrument(tracing::info_span!("auto_split", tenant_id=%split_candidate.id.tenant_id)),
    5644              :         );
    5645            0 :     }
    5646              : 
    5647              :     /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
    5648              :     /// also wait for any generated Reconcilers to complete.  Calling this until it returns zero should
    5649              :     /// put the system into a quiescent state where future background reconciliations won't do anything.
    5650            0 :     pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
    5651            0 :         let reconciles_spawned = self.reconcile_all();
    5652            0 :         let reconciles_spawned = if reconciles_spawned == 0 {
    5653              :             // Only optimize when we are otherwise idle
    5654            0 :             self.optimize_all().await
    5655              :         } else {
    5656            0 :             reconciles_spawned
    5657              :         };
    5658              : 
    5659            0 :         let waiters = {
    5660            0 :             let mut waiters = Vec::new();
    5661            0 :             let locked = self.inner.read().unwrap();
    5662            0 :             for (_tenant_shard_id, shard) in locked.tenants.iter() {
    5663            0 :                 if let Some(waiter) = shard.get_waiter() {
    5664            0 :                     waiters.push(waiter);
    5665            0 :                 }
    5666              :             }
    5667            0 :             waiters
    5668            0 :         };
    5669            0 : 
    5670            0 :         let waiter_count = waiters.len();
    5671            0 :         match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
    5672            0 :             Ok(()) => {}
    5673            0 :             Err(ReconcileWaitError::Failed(_, reconcile_error))
    5674            0 :                 if matches!(*reconcile_error, ReconcileError::Cancel) =>
    5675            0 :             {
    5676            0 :                 // Ignore reconciler cancel errors: this reconciler might have shut down
    5677            0 :                 // because some other change superceded it.  We will return a nonzero number,
    5678            0 :                 // because some other change superseded it.  We will return a nonzero number,
    5679            0 :             }
    5680            0 :             Err(e) => {
    5681            0 :                 return Err(e);
    5682              :             }
    5683              :         };
    5684              : 
    5685            0 :         tracing::info!(
    5686            0 :             "{} reconciles in reconcile_all, {} waiters",
    5687              :             reconciles_spawned,
    5688              :             waiter_count
    5689              :         );
    5690              : 
    5691            0 :         Ok(std::cmp::max(waiter_count, reconciles_spawned))
    5692            0 :     }
    5693              : 
    5694            0 :     async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
    5695            0 :         // Cancel all on-going reconciles and wait for them to exit the gate.
    5696            0 :         tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
    5697            0 :         self.reconcilers_cancel.cancel();
    5698            0 :         self.reconcilers_gate.close().await;
    5699              : 
    5700              :         // Signal the background loop in [`Service::process_results`] to exit once
    5701              :         // it has processed the results from all the reconciles we cancelled earlier.
    5702            0 :         tracing::info!("{reason}: processing results from previously in-flight reconciles");
    5703            0 :         self.result_tx.send(ReconcileResultRequest::Stop).ok();
    5704            0 :         self.result_tx.closed().await;
    5705            0 :     }
    5706              : 
    5707            0 :     pub async fn shutdown(&self) {
    5708            0 :         self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
    5709            0 :             .await;
    5710              : 
    5711              :         // Background tasks hold gate guards: this notifies them of the cancellation and
    5712              :         // waits for them all to complete.
    5713            0 :         tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
    5714            0 :         self.cancel.cancel();
    5715            0 :         self.gate.close().await;
    5716            0 :     }
    5717              : 
    5718              :     /// Drain a node by moving away the shards for which it is the attached (primary) location.
    5719              :     /// This is a long running operation and it should run as a separate Tokio task.
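                      :     ///
                      :     /// Shards are inspected in batches of up to `MAX_RECONCILES_PER_OPERATION`, resuming from a
                      :     /// cursor, and each batch's reconciles are awaited before moving on to the next batch.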
    5720            0 :     pub(crate) async fn drain_node(
    5721            0 :         &self,
    5722            0 :         node_id: NodeId,
    5723            0 :         cancel: CancellationToken,
    5724            0 :     ) -> Result<(), OperationError> {
    5725            0 :         let mut last_inspected_shard: Option<TenantShardId> = None;
    5726            0 :         let mut inspected_all_shards = false;
    5727            0 :         let mut waiters = Vec::new();
    5728              : 
    5729            0 :         while !inspected_all_shards {
    5730            0 :             if cancel.is_cancelled() {
    5731            0 :                 match self
    5732            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    5733            0 :                     .await
    5734              :                 {
    5735            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    5736            0 :                     Err(err) => {
    5737            0 :                         return Err(OperationError::FinalizeError(
    5738            0 :                             format!(
    5739            0 :                                 "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
    5740            0 :                                 node_id, err
    5741            0 :                             )
    5742            0 :                             .into(),
    5743            0 :                         ));
    5744              :                     }
    5745              :                 }
    5746            0 :             }
    5747            0 : 
    5748            0 :             {
    5749            0 :                 let mut locked = self.inner.write().unwrap();
    5750            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    5751              : 
    5752            0 :                 let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
    5753            0 :                     format!("node {node_id} was removed").into(),
    5754            0 :                 ))?;
    5755              : 
    5756            0 :                 let current_policy = node.get_scheduling();
    5757            0 :                 if !matches!(current_policy, NodeSchedulingPolicy::Draining) {
    5758              :                     // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
    5759              :                     // about it
    5760            0 :                     return Err(OperationError::NodeStateChanged(
    5761            0 :                         format!("node {node_id} changed state to {current_policy:?}").into(),
    5762            0 :                     ));
    5763            0 :                 }
    5764            0 : 
    5765            0 :                 let mut cursor = tenants.iter_mut().skip_while({
    5766            0 :                     let skip_past = last_inspected_shard;
    5767            0 :                     move |(tid, _)| match skip_past {
    5768            0 :                         Some(last) => **tid != last,
    5769            0 :                         None => false,
    5770            0 :                     }
    5771            0 :                 });
    5772              : 
    5773            0 :                 while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    5774            0 :                     let (tid, tenant_shard) = match cursor.next() {
    5775            0 :                         Some(some) => some,
    5776              :                         None => {
    5777            0 :                             inspected_all_shards = true;
    5778            0 :                             break;
    5779              :                         }
    5780              :                     };
    5781              : 
    5782              :                     // If the shard is not attached to the node being drained, skip it.
    5783            0 :                     if *tenant_shard.intent.get_attached() != Some(node_id) {
    5784            0 :                         last_inspected_shard = Some(*tid);
    5785            0 :                         continue;
    5786            0 :                     }
    5787            0 : 
    5788            0 :                     match tenant_shard.reschedule_to_secondary(None, scheduler) {
    5789            0 :                         Err(e) => {
    5790            0 :                             tracing::warn!(
    5791            0 :                                 tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    5792            0 :                                 "Scheduling error when draining pageserver {} : {e}", node_id
    5793              :                             );
    5794              :                         }
    5795              :                         Ok(()) => {
    5796            0 :                             let scheduled_to = tenant_shard.intent.get_attached();
    5797            0 :                             tracing::info!(
    5798            0 :                                 tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    5799            0 :                                 "Rescheduled shard while draining node {}: {} -> {:?}",
    5800              :                                 node_id,
    5801              :                                 node_id,
    5802              :                                 scheduled_to
    5803              :                             );
    5804              : 
    5805            0 :                             let waiter = self.maybe_reconcile_shard(tenant_shard, nodes);
    5806            0 :                             if let Some(some) = waiter {
    5807            0 :                                 waiters.push(some);
    5808            0 :                             }
    5809              :                         }
    5810              :                     }
    5811              : 
    5812            0 :                     last_inspected_shard = Some(*tid);
    5813              :                 }
    5814              :             }
    5815              : 
    5816            0 :             waiters = self
    5817            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    5818            0 :                 .await;
    5819              : 
    5820            0 :             failpoint_support::sleep_millis_async!("sleepy-drain-loop");
    5821              :         }
    5822              : 
    5823            0 :         while !waiters.is_empty() {
    5824            0 :             if cancel.is_cancelled() {
    5825            0 :                 match self
    5826            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    5827            0 :                     .await
    5828              :                 {
    5829            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    5830            0 :                     Err(err) => {
    5831            0 :                         return Err(OperationError::FinalizeError(
    5832            0 :                             format!(
    5833            0 :                                 "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
    5834            0 :                                 node_id, err
    5835            0 :                             )
    5836            0 :                             .into(),
    5837            0 :                         ));
    5838              :                     }
    5839              :                 }
    5840            0 :             }
    5841            0 : 
    5842            0 :             tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
    5843              : 
    5844            0 :             waiters = self
    5845            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    5846            0 :                 .await;
    5847              :         }
    5848              : 
    5849              :         // At this point we have done the best we could to drain shards from this node.
    5850              :         // Set the node scheduling policy to `[NodeSchedulingPolicy::PauseForRestart]`
    5851              :         // to complete the drain.
    5852            0 :         if let Err(err) = self
    5853            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
    5854            0 :             .await
    5855              :         {
    5856              :             // This is not fatal. Anything that is polling the node scheduling policy to detect
    5857              :             // the end of the drain operations will hang, but all such places should enforce an
    5858              :             // overall timeout. The scheduling policy will be updated upon node re-attach and/or
    5859              :             // by the counterpart fill operation.
    5860            0 :             return Err(OperationError::FinalizeError(
    5861            0 :                 format!(
    5862            0 :                     "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
    5863            0 :                 )
    5864            0 :                 .into(),
    5865            0 :             ));
    5866            0 :         }
    5867            0 : 
    5868            0 :         Ok(())
    5869            0 :     }
    5870              : 
    5871              :     /// Create a node fill plan (pick secondaries to promote) that meets the following requirements:
    5872              :     /// 1. The node should be filled until it reaches the expected cluster average of
    5873              :     ///    attached shards. If there are not enough secondaries on the node, the plan stops early.
    5874              :     /// 2. Select tenant shards to promote such that the number of attached shards is balanced
    5875              :     ///    throughout the cluster. We achieve this by picking tenant shards from each node,
    5876              :     ///    starting from the ones with the largest number of attached shards, until the node
    5877              :     ///    reaches the expected cluster average.
    5878              :     /// 3. Avoid promoting more shards of the same tenant than required. The upper bound
    5879              :     ///    for the number of shards from the same tenant promoted to the node being filled is:
    5880              :     ///    shard count for the tenant divided by the number of nodes in the cluster.
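                      :     ///
                      :     ///    For example, an 8-shard tenant in a 4-node cluster contributes at most 8 / 4 = 2 shards
                      :     ///    to the plan for the node being filled (with a minimum of 1).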
    5881            0 :     fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
    5882            0 :         let mut locked = self.inner.write().unwrap();
    5883            0 :         let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
    5884            0 : 
    5885            0 :         let mut tids_by_node = locked
    5886            0 :             .tenants
    5887            0 :             .iter_mut()
    5888            0 :             .filter_map(|(tid, tenant_shard)| {
    5889            0 :                 if tenant_shard.intent.get_secondary().contains(&node_id) {
    5890            0 :                     if let Some(primary) = tenant_shard.intent.get_attached() {
    5891            0 :                         return Some((*primary, *tid));
    5892            0 :                     }
    5893            0 :                 }
    5894              : 
    5895            0 :                 None
    5896            0 :             })
    5897            0 :             .into_group_map();
    5898            0 : 
    5899            0 :         let expected_attached = locked.scheduler.expected_attached_shard_count();
    5900            0 :         let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
    5901            0 : 
    5902            0 :         let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
    5903            0 :         let mut plan = Vec::new();
    5904              : 
    5905            0 :         for (node_id, attached) in nodes_by_load {
    5906            0 :             let available = locked
    5907            0 :                 .nodes
    5908            0 :                 .get(&node_id)
    5909            0 :                 .map_or(false, |n| n.is_available());
    5910            0 :             if !available {
    5911            0 :                 continue;
    5912            0 :             }
    5913            0 : 
    5914            0 :             if plan.len() >= fill_requirement
    5915            0 :                 || tids_by_node.is_empty()
    5916            0 :                 || attached <= expected_attached
    5917              :             {
    5918            0 :                 break;
    5919            0 :             }
    5920            0 : 
    5921            0 :             let can_take = attached - expected_attached;
    5922            0 :             let needed = fill_requirement - plan.len();
    5923            0 :             let mut take = std::cmp::min(can_take, needed);
    5924            0 : 
    5925            0 :             let mut remove_node = false;
    5926            0 :             while take > 0 {
    5927            0 :                 match tids_by_node.get_mut(&node_id) {
    5928            0 :                     Some(tids) => match tids.pop() {
    5929            0 :                         Some(tid) => {
    5930            0 :                             let max_promote_for_tenant = std::cmp::max(
    5931            0 :                                 tid.shard_count.count() as usize / locked.nodes.len(),
    5932            0 :                                 1,
    5933            0 :                             );
    5934            0 :                             let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
    5935            0 :                             if *promoted < max_promote_for_tenant {
    5936            0 :                                 plan.push(tid);
    5937            0 :                                 *promoted += 1;
    5938            0 :                                 take -= 1;
    5939            0 :                             }
    5940              :                         }
    5941              :                         None => {
    5942            0 :                             remove_node = true;
    5943            0 :                             break;
    5944              :                         }
    5945              :                     },
    5946              :                     None => {
    5947            0 :                         break;
    5948              :                     }
    5949              :                 }
    5950              :             }
    5951              : 
    5952            0 :             if remove_node {
    5953            0 :                 tids_by_node.remove(&node_id);
    5954            0 :             }
    5955              :         }
    5956              : 
    5957            0 :         plan
    5958            0 :     }
    5959              : 
    5960              :     /// Fill a node by promoting its secondaries until the cluster is balanced
    5961              :     /// with regards to attached shard counts. Note that this operation only
    5962              :     /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
    5963              :     /// This is a long running operation and it should run as a separate Tokio task.
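                      :     ///
                      :     /// The plan is applied in batches of up to `MAX_RECONCILES_PER_OPERATION` promotions, and each
                      :     /// shard is re-checked (e.g. that the node is still one of its secondaries) before promotion.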
    5964            0 :     pub(crate) async fn fill_node(
    5965            0 :         &self,
    5966            0 :         node_id: NodeId,
    5967            0 :         cancel: CancellationToken,
    5968            0 :     ) -> Result<(), OperationError> {
    5969            0 :         // TODO(vlad): Currently this operates on the assumption that all
    5970            0 :         // secondaries are warm. This is not always true (e.g. we just migrated the
    5971            0 :         // tenant). Take that into consideration by checking the secondary status.
    5972            0 :         let mut tids_to_promote = self.fill_node_plan(node_id);
    5973            0 :         let mut waiters = Vec::new();
    5974              : 
    5975              :         // Execute the plan we've composed above. Before applying each move from the plan,
    5976              :         // we re-validate it to ensure that it has not gone stale in the meantime.
    5977            0 :         while !tids_to_promote.is_empty() {
    5978            0 :             if cancel.is_cancelled() {
    5979            0 :                 match self
    5980            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    5981            0 :                     .await
    5982              :                 {
    5983            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    5984            0 :                     Err(err) => {
    5985            0 :                         return Err(OperationError::FinalizeError(
    5986            0 :                             format!(
    5987            0 :                                 "Failed to finalise fill cancel of {} by setting scheduling policy to Active: {}",
    5988            0 :                                 node_id, err
    5989            0 :                             )
    5990            0 :                             .into(),
    5991            0 :                         ));
    5992              :                     }
    5993              :                 }
    5994            0 :             }
    5995            0 : 
    5996            0 :             {
    5997            0 :                 let mut locked = self.inner.write().unwrap();
    5998            0 :                 let (nodes, tenants, scheduler) = locked.parts_mut();
    5999              : 
    6000            0 :                 let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
    6001            0 :                     format!("node {node_id} was removed").into(),
    6002            0 :                 ))?;
    6003              : 
    6004            0 :                 let current_policy = node.get_scheduling();
    6005            0 :                 if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
    6006              :                     // TODO(vlad): consider cancelling pending reconciles before erroring out;
    6007              :                     // this needs more thought.
    6008            0 :                     return Err(OperationError::NodeStateChanged(
    6009            0 :                         format!("node {node_id} changed state to {current_policy:?}").into(),
    6010            0 :                     ));
    6011            0 :                 }
    6012              : 
    6013            0 :                 while waiters.len() < MAX_RECONCILES_PER_OPERATION {
    6014            0 :                     if let Some(tid) = tids_to_promote.pop() {
    6015            0 :                         if let Some(tenant_shard) = tenants.get_mut(&tid) {
    6016              :                             // If the node being filled is not a secondary anymore,
    6017              :                             // skip the promotion.
    6018            0 :                             if !tenant_shard.intent.get_secondary().contains(&node_id) {
    6019            0 :                                 continue;
    6020            0 :                             }
    6021            0 : 
    6022            0 :                             let previously_attached_to = *tenant_shard.intent.get_attached();
    6023            0 :                             match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
    6024            0 :                                 Err(e) => {
    6025            0 :                                     tracing::warn!(
    6026            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    6027            0 :                                         "Scheduling error when filling pageserver {}: {e}", node_id
    6028              :                                     );
    6029              :                                 }
    6030              :                                 Ok(()) => {
    6031            0 :                                     tracing::info!(
    6032            0 :                                         tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
    6033            0 :                                         "Rescheduled shard while filling node {}: {:?} -> {}",
    6034              :                                         node_id,
    6035              :                                         previously_attached_to,
    6036              :                                         node_id
    6037              :                                     );
    6038              : 
    6039            0 :                                     if let Some(waiter) =
    6040            0 :                                         self.maybe_reconcile_shard(tenant_shard, nodes)
    6041            0 :                                     {
    6042            0 :                                         waiters.push(waiter);
    6043            0 :                                     }
    6044              :                                 }
    6045              :                             }
    6046            0 :                         }
    6047              :                     } else {
    6048            0 :                         break;
    6049              :                     }
    6050              :                 }
    6051              :             }
    6052              : 
    6053            0 :             waiters = self
    6054            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    6055            0 :                 .await;
    6056              :         }
    6057              : 
    6058            0 :         while !waiters.is_empty() {
    6059            0 :             if cancel.is_cancelled() {
    6060            0 :                 match self
    6061            0 :                     .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    6062            0 :                     .await
    6063              :                 {
    6064            0 :                     Ok(()) => return Err(OperationError::Cancelled),
    6065            0 :                     Err(err) => {
    6066            0 :                         return Err(OperationError::FinalizeError(
    6067            0 :                             format!(
    6068            0 :                                 "Failed to finalise fill cancel of {} by setting scheduling policy to Active: {}",
    6069            0 :                                 node_id, err
    6070            0 :                             )
    6071            0 :                             .into(),
    6072            0 :                         ));
    6073              :                     }
    6074              :                 }
    6075            0 :             }
    6076            0 : 
    6077            0 :             tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
    6078              : 
    6079            0 :             waiters = self
    6080            0 :                 .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
    6081            0 :                 .await;
    6082              :         }
    6083              : 
    6084            0 :         if let Err(err) = self
    6085            0 :             .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
    6086            0 :             .await
    6087              :         {
    6088              :             // This isn't a huge issue since the filling process starts upon request. However, it
    6089              :             // will prevent the next drain from starting. The only case in which this can fail
    6090              :             // is database unavailability. Such a case will require manual intervention.
    6091            0 :             return Err(OperationError::FinalizeError(
    6092            0 :                 format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
    6093            0 :                     .into(),
    6094            0 :             ));
    6095            0 :         }
    6096            0 : 
    6097            0 :         Ok(())
    6098            0 :     }
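
As the doc comment above notes, fill_node is long-running and is meant to be driven from a separate Tokio task so it can be cancelled independently. A minimal sketch of that pattern, assuming a `service: Arc<Service>` handle, that the `CancellationToken` in the signature is `tokio_util::sync::CancellationToken`, and that `OperationError` derives Debug; `spawn_fill` itself is a hypothetical helper, not part of this module:

// Sketch only: spawn the fill as a background task and hand back the token so
// the operation can later be cancelled (e.g. if the node's policy changes).
use std::sync::Arc;
use tokio_util::sync::CancellationToken;

fn spawn_fill(service: Arc<Service>, node_id: NodeId) -> CancellationToken {
    let cancel = CancellationToken::new();
    let task_cancel = cancel.clone();
    tokio::spawn(async move {
        // fill_node resets the node to the Active scheduling policy on both
        // completion and cancellation, so no extra cleanup is needed here.
        if let Err(err) = service.fill_node(node_id, task_cancel).await {
            tracing::error!("Fill of node {node_id} failed: {err:?}");
        }
    });
    cancel
}
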
    6099              : 
    6100              :     /// Updates scrubber metadata health check results.
    6101            0 :     pub(crate) async fn metadata_health_update(
    6102            0 :         &self,
    6103            0 :         update_req: MetadataHealthUpdateRequest,
    6104            0 :     ) -> Result<(), ApiError> {
    6105            0 :         let now = chrono::offset::Utc::now();
    6106            0 :         let (healthy_records, unhealthy_records) = {
    6107            0 :             let locked = self.inner.read().unwrap();
    6108            0 :             let healthy_records = update_req
    6109            0 :                 .healthy_tenant_shards
    6110            0 :                 .into_iter()
    6111            0 :                 // Retain only health records associated with tenant shards managed by the storage controller.
    6112            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    6113            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
    6114            0 :                 .collect();
    6115            0 :             let unhealthy_records = update_req
    6116            0 :                 .unhealthy_tenant_shards
    6117            0 :                 .into_iter()
    6118            0 :                 .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
    6119            0 :                 .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
    6120            0 :                 .collect();
    6121            0 : 
    6122            0 :             (healthy_records, unhealthy_records)
    6123            0 :         };
    6124            0 : 
    6125            0 :         self.persistence
    6126            0 :             .update_metadata_health_records(healthy_records, unhealthy_records, now)
    6127            0 :             .await?;
    6128            0 :         Ok(())
    6129            0 :     }
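
A hedged sketch of how a caller (normally the scrubber, via the controller's HTTP API) might assemble the request handled above, assuming the module's existing imports are in scope. The two field names are the ones consumed by metadata_health_update; their container type is assumed to be Vec<TenantShardId> here, and the helper name is hypothetical:

// Sketch: forward scrub results for a batch of shards to the controller.
// Shards not managed by this storage controller are filtered out by
// metadata_health_update itself.
async fn report_scrub_results(
    service: &Service,
    healthy: Vec<TenantShardId>,
    unhealthy: Vec<TenantShardId>,
) -> Result<(), ApiError> {
    let update_req = MetadataHealthUpdateRequest {
        healthy_tenant_shards: healthy,
        unhealthy_tenant_shards: unhealthy,
    };
    service.metadata_health_update(update_req).await
}
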
    6130              : 
    6131              :     /// Lists the tenant shards that have unhealthy metadata status.
    6132            0 :     pub(crate) async fn metadata_health_list_unhealthy(
    6133            0 :         &self,
    6134            0 :     ) -> Result<Vec<TenantShardId>, ApiError> {
    6135            0 :         let result = self
    6136            0 :             .persistence
    6137            0 :             .list_unhealthy_metadata_health_records()
    6138            0 :             .await?
    6139            0 :             .iter()
    6140            0 :             .map(|p| p.get_tenant_shard_id().unwrap())
    6141            0 :             .collect();
    6142            0 : 
    6143            0 :         Ok(result)
    6144            0 :     }
    6145              : 
    6146              :     /// Lists the tenant shards that have not been scrubbed for at least the given duration.
    6147            0 :     pub(crate) async fn metadata_health_list_outdated(
    6148            0 :         &self,
    6149            0 :         not_scrubbed_for: Duration,
    6150            0 :     ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
    6151            0 :         let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
    6152            0 :         let result = self
    6153            0 :             .persistence
    6154            0 :             .list_outdated_metadata_health_records(earlier)
    6155            0 :             .await?
    6156            0 :             .into_iter()
    6157            0 :             .map(|record| record.into())
    6158            0 :             .collect();
    6159            0 :         Ok(result)
    6160            0 :     }
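
A sketch of how these two listing methods could be used together, e.g. to log shards whose scrub results are either flagged unhealthy or older than a day. The 24-hour window and the helper name are illustrative, and the module's existing imports (std::time::Duration, tracing) are assumed to be in scope:

// Sketch: query both metadata-health views and log a summary.
async fn log_metadata_health(service: &Service) -> Result<(), ApiError> {
    // Shards explicitly flagged unhealthy by the scrubber.
    let unhealthy = service.metadata_health_list_unhealthy().await?;
    tracing::info!("{} shards have unhealthy metadata", unhealthy.len());

    // Shards whose last scrub is older than 24 hours
    // (the cutoff is computed as now - not_scrubbed_for).
    let outdated = service
        .metadata_health_list_outdated(Duration::from_secs(24 * 3600))
        .await?;
    tracing::info!("{} shards have outdated scrub results", outdated.len());
    Ok(())
}
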
    6161              : 
    6162            0 :     pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
    6163            0 :         self.inner.read().unwrap().get_leadership_status()
    6164            0 :     }
    6165              : 
    6166            0 :     pub(crate) async fn step_down(&self) -> GlobalObservedState {
    6167            0 :         tracing::info!("Received step down request from peer");
    6168              : 
    6169            0 :         self.inner.write().unwrap().step_down();
    6170            0 :         // TODO: would it make sense to have a time-out for this?
    6171            0 :         self.stop_reconciliations(StopReconciliationsReason::SteppingDown)
    6172            0 :             .await;
    6173              : 
    6174            0 :         let mut global_observed = GlobalObservedState::default();
    6175            0 :         let locked = self.inner.read().unwrap();
    6176            0 :         for (tid, tenant_shard) in locked.tenants.iter() {
    6177            0 :             global_observed
    6178            0 :                 .0
    6179            0 :                 .insert(*tid, tenant_shard.observed.clone());
    6180            0 :         }
    6181              : 
    6182            0 :         global_observed
    6183            0 :     }
    6184              : }
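
The GlobalObservedState returned by step_down wraps a map from TenantShardId to that shard's last observed pageserver state (the `.0` field populated above); the peer requesting the step-down can seed its own in-memory state from it. A hedged sketch of inspecting the result, shown as a direct in-process call purely to illustrate the returned data (in practice a peer reaches this through the controller's HTTP API); Debug formatting of the ids and observed states is an assumption:

// Sketch: ask this controller to step down and inspect what it had observed.
async fn inspect_step_down(service: &Service) {
    let observed = service.step_down().await;
    for (tenant_shard_id, shard_observed) in observed.0.iter() {
        // shard_observed is the per-shard observed state cloned by step_down.
        tracing::debug!("{tenant_shard_id:?}: {shard_observed:?}");
    }
}
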
        

Generated by: LCOV version 2.1-beta