LCOV - code coverage report
Current view: top level - storage_controller/src - tenant_shard.rs (source / functions)
Test: 42f947419473a288706e86ecdf7c2863d760d5d7.info
Test Date: 2024-08-02 21:34:27

            Coverage   Total   Hit
Lines:      63.9 %     894     571
Functions:  40.7 %     91      37

            Line data    Source code
       1              : use std::{
       2              :     collections::{HashMap, HashSet},
       3              :     sync::Arc,
       4              :     time::Duration,
       5              : };
       6              : 
       7              : use crate::{
       8              :     metrics::{self, ReconcileCompleteLabelGroup, ReconcileOutcome},
       9              :     persistence::TenantShardPersistence,
      10              :     reconciler::ReconcileUnits,
      11              :     scheduler::{AffinityScore, MaySchedule, RefCountUpdate, ScheduleContext},
      12              :     service::ReconcileResultRequest,
      13              : };
      14              : use pageserver_api::controller_api::{
      15              :     NodeSchedulingPolicy, PlacementPolicy, ShardSchedulingPolicy,
      16              : };
      17              : use pageserver_api::{
      18              :     models::{LocationConfig, LocationConfigMode, TenantConfig},
      19              :     shard::{ShardIdentity, TenantShardId},
      20              : };
      21              : use serde::{Deserialize, Serialize};
      22              : use tokio::task::JoinHandle;
      23              : use tokio_util::sync::CancellationToken;
      24              : use tracing::{instrument, Instrument};
      25              : use utils::{
      26              :     generation::Generation,
      27              :     id::NodeId,
      28              :     seqwait::{SeqWait, SeqWaitError},
      29              :     sync::gate::GateGuard,
      30              : };
      31              : 
      32              : use crate::{
      33              :     compute_hook::ComputeHook,
      34              :     node::Node,
      35              :     persistence::{split_state::SplitState, Persistence},
      36              :     reconciler::{
      37              :         attached_location_conf, secondary_location_conf, ReconcileError, Reconciler, TargetState,
      38              :     },
      39              :     scheduler::{ScheduleError, Scheduler},
      40              :     service, Sequence,
      41              : };
      42              : 
      43              : /// Serialization helper
      44            0 : fn read_last_error<S, T>(v: &std::sync::Mutex<Option<T>>, serializer: S) -> Result<S::Ok, S::Error>
      45            0 : where
      46            0 :     S: serde::ser::Serializer,
      47            0 :     T: std::fmt::Display,
      48            0 : {
      49            0 :     serializer.collect_str(
      50            0 :         &v.lock()
      51            0 :             .unwrap()
      52            0 :             .as_ref()
      53            0 :             .map(|e| format!("{e}"))
      54            0 :             .unwrap_or("".to_string()),
      55            0 :     )
      56            0 : }
      57              : 
      58              : /// In-memory state for a particular tenant shard.
      59              : ///
       60              : /// This struct implements Serialize for debugging purposes, but is _not_ persisted
      61              : /// itself: see [`crate::persistence`] for the subset of tenant shard state that is persisted.
      62            0 : #[derive(Serialize)]
      63              : pub(crate) struct TenantShard {
      64              :     pub(crate) tenant_shard_id: TenantShardId,
      65              : 
      66              :     pub(crate) shard: ShardIdentity,
      67              : 
       68              :     // Runtime only: sequence used to coordinate updates to this object while
       69              :     // background reconcilers may be running.  A reconciler runs to a particular
      70              :     // sequence.
      71              :     pub(crate) sequence: Sequence,
      72              : 
      73              :     // Latest generation number: next time we attach, increment this
      74              :     // and use the incremented number when attaching.
      75              :     //
      76              :     // None represents an incompletely onboarded tenant via the [`Service::location_config`]
      77              :     // API, where this tenant may only run in PlacementPolicy::Secondary.
      78              :     pub(crate) generation: Option<Generation>,
      79              : 
      80              :     // High level description of how the tenant should be set up.  Provided
      81              :     // externally.
      82              :     pub(crate) policy: PlacementPolicy,
      83              : 
      84              :     // Low level description of exactly which pageservers should fulfil
      85              :     // which role.  Generated by `Self::schedule`.
      86              :     pub(crate) intent: IntentState,
      87              : 
      88              :     // Low level description of how the tenant is configured on pageservers:
      89              :     // if this does not match `Self::intent` then the tenant needs reconciliation
      90              :     // with `Self::reconcile`.
      91              :     pub(crate) observed: ObservedState,
      92              : 
      93              :     // Tenant configuration, passed through opaquely to the pageserver.  Identical
      94              :     // for all shards in a tenant.
      95              :     pub(crate) config: TenantConfig,
      96              : 
      97              :     /// If a reconcile task is currently in flight, it may be joined here (it is
      98              :     /// only safe to join if either the result has been received or the reconciler's
      99              :     /// cancellation token has been fired)
     100              :     #[serde(skip)]
     101              :     pub(crate) reconciler: Option<ReconcilerHandle>,
     102              : 
     103              :     /// If a tenant is being split, then all shards with that TenantId will have a
      104              :     /// SplitState set; this acts as a guard against other operations such as background
      105              :     /// reconciliation and timeline creation.
     106              :     pub(crate) splitting: SplitState,
     107              : 
     108              :     /// If a tenant was enqueued for later reconcile due to hitting concurrency limit, this flag
     109              :     /// is set. This flag is cleared when the tenant is popped off the delay queue.
     110              :     pub(crate) delayed_reconcile: bool,
     111              : 
     112              :     /// Optionally wait for reconciliation to complete up to a particular
     113              :     /// sequence number.
     114              :     #[serde(skip)]
     115              :     pub(crate) waiter: std::sync::Arc<SeqWait<Sequence, Sequence>>,
     116              : 
      117              :     /// Indicates the sequence number for which we have encountered an error reconciling.  If
     118              :     /// this advances ahead of [`Self::waiter`] then a reconciliation error has occurred,
     119              :     /// and callers should stop waiting for `waiter` and propagate the error.
     120              :     #[serde(skip)]
     121              :     pub(crate) error_waiter: std::sync::Arc<SeqWait<Sequence, Sequence>>,
     122              : 
     123              :     /// The most recent error from a reconcile on this tenant.  This is a nested Arc
     124              :     /// because:
     125              :     ///  - ReconcileWaiters need to Arc-clone the overall object to read it later
     126              :     ///  - ReconcileWaitError needs to use an `Arc<ReconcileError>` because we can construct
     127              :     ///    many waiters for one shard, and the underlying error types are not Clone.
     128              :     ///
     129              :     /// TODO: generalize to an array of recent events
      130              :     /// TODO: use an ArcSwap instead of a mutex for faster reads?
     131              :     #[serde(serialize_with = "read_last_error")]
     132              :     pub(crate) last_error: std::sync::Arc<std::sync::Mutex<Option<Arc<ReconcileError>>>>,
     133              : 
     134              :     /// If we have a pending compute notification that for some reason we weren't able to send,
     135              :     /// set this to true. If this is set, calls to [`Self::get_reconcile_needed`] will return Yes
     136              :     /// and trigger a Reconciler run.  This is the mechanism by which compute notifications are included in the scope
     137              :     /// of state that we publish externally in an eventually consistent way.
     138              :     pub(crate) pending_compute_notification: bool,
     139              : 
     140              :     // Support/debug tool: if something is going wrong or flapping with scheduling, this may
     141              :     // be set to a non-active state to avoid making changes while the issue is fixed.
     142              :     scheduling_policy: ShardSchedulingPolicy,
     143              : }
     144              : 
     145              : #[derive(Default, Clone, Debug, Serialize)]
     146              : pub(crate) struct IntentState {
     147              :     attached: Option<NodeId>,
     148              :     secondary: Vec<NodeId>,
     149              : }
     150              : 
     151              : impl IntentState {
     152            4 :     pub(crate) fn new() -> Self {
     153            4 :         Self {
     154            4 :             attached: None,
     155            4 :             secondary: vec![],
     156            4 :         }
     157            4 :     }
     158            0 :     pub(crate) fn single(scheduler: &mut Scheduler, node_id: Option<NodeId>) -> Self {
     159            0 :         if let Some(node_id) = node_id {
     160            0 :             scheduler.update_node_ref_counts(node_id, RefCountUpdate::Attach);
     161            0 :         }
     162            0 :         Self {
     163            0 :             attached: node_id,
     164            0 :             secondary: vec![],
     165            0 :         }
     166            0 :     }
     167              : 
     168           26 :     pub(crate) fn set_attached(&mut self, scheduler: &mut Scheduler, new_attached: Option<NodeId>) {
     169           26 :         if self.attached != new_attached {
     170           26 :             if let Some(old_attached) = self.attached.take() {
     171            0 :                 scheduler.update_node_ref_counts(old_attached, RefCountUpdate::Detach);
     172           26 :             }
     173           26 :             if let Some(new_attached) = &new_attached {
     174           26 :                 scheduler.update_node_ref_counts(*new_attached, RefCountUpdate::Attach);
     175           26 :             }
     176           26 :             self.attached = new_attached;
     177            0 :         }
     178           26 :     }
     179              : 
     180              :     /// Like set_attached, but the node is from [`Self::secondary`].  This swaps the node from
     181              :     /// secondary to attached while maintaining the scheduler's reference counts.
     182           10 :     pub(crate) fn promote_attached(
     183           10 :         &mut self,
     184           10 :         scheduler: &mut Scheduler,
     185           10 :         promote_secondary: NodeId,
     186           10 :     ) {
     187           10 :         // If we call this with a node that isn't in secondary, it would cause incorrect
     188           10 :         // scheduler reference counting, since we assume the node is already referenced as a secondary.
     189           10 :         debug_assert!(self.secondary.contains(&promote_secondary));
     190              : 
     191           20 :         self.secondary.retain(|n| n != &promote_secondary);
     192           10 : 
     193           10 :         let demoted = self.attached;
     194           10 :         self.attached = Some(promote_secondary);
     195           10 : 
     196           10 :         scheduler.update_node_ref_counts(promote_secondary, RefCountUpdate::PromoteSecondary);
     197           10 :         if let Some(demoted) = demoted {
     198            0 :             scheduler.update_node_ref_counts(demoted, RefCountUpdate::DemoteAttached);
     199           10 :         }
     200           10 :     }
     201              : 
     202           34 :     pub(crate) fn push_secondary(&mut self, scheduler: &mut Scheduler, new_secondary: NodeId) {
     203           34 :         debug_assert!(!self.secondary.contains(&new_secondary));
     204           34 :         scheduler.update_node_ref_counts(new_secondary, RefCountUpdate::AddSecondary);
     205           34 :         self.secondary.push(new_secondary);
     206           34 :     }
     207              : 
     208              :     /// It is legal to call this with a node that is not currently a secondary: that is a no-op
     209           10 :     pub(crate) fn remove_secondary(&mut self, scheduler: &mut Scheduler, node_id: NodeId) {
     210           10 :         let index = self.secondary.iter().position(|n| *n == node_id);
     211           10 :         if let Some(index) = index {
     212           10 :             scheduler.update_node_ref_counts(node_id, RefCountUpdate::RemoveSecondary);
     213           10 :             self.secondary.remove(index);
     214           10 :         }
     215           10 :     }
     216              : 
     217           24 :     pub(crate) fn clear_secondary(&mut self, scheduler: &mut Scheduler) {
     218           24 :         for secondary in self.secondary.drain(..) {
     219           24 :             scheduler.update_node_ref_counts(secondary, RefCountUpdate::RemoveSecondary);
     220           24 :         }
     221           24 :     }
     222              : 
     223              :     /// Remove the last secondary node from the list of secondaries
     224            0 :     pub(crate) fn pop_secondary(&mut self, scheduler: &mut Scheduler) {
     225            0 :         if let Some(node_id) = self.secondary.pop() {
     226            0 :             scheduler.update_node_ref_counts(node_id, RefCountUpdate::RemoveSecondary);
     227            0 :         }
     228            0 :     }
     229              : 
     230           24 :     pub(crate) fn clear(&mut self, scheduler: &mut Scheduler) {
     231           24 :         if let Some(old_attached) = self.attached.take() {
     232           24 :             scheduler.update_node_ref_counts(old_attached, RefCountUpdate::Detach);
     233           24 :         }
     234              : 
     235           24 :         self.clear_secondary(scheduler);
     236           24 :     }
     237              : 
     238          140 :     pub(crate) fn all_pageservers(&self) -> Vec<NodeId> {
     239          140 :         let mut result = Vec::new();
     240          140 :         if let Some(p) = self.attached {
     241          136 :             result.push(p)
     242            4 :         }
     243              : 
     244          140 :         result.extend(self.secondary.iter().copied());
     245          140 : 
     246          140 :         result
     247          140 :     }
     248              : 
     249          118 :     pub(crate) fn get_attached(&self) -> &Option<NodeId> {
     250          118 :         &self.attached
     251          118 :     }
     252              : 
     253           32 :     pub(crate) fn get_secondary(&self) -> &Vec<NodeId> {
     254           32 :         &self.secondary
     255           32 :     }
     256              : 
     257              :     /// If the node is in use as the attached location, demote it into
     258              :     /// the list of secondary locations.  This is used when a node goes offline,
     259              :     /// and we want to use a different node for attachment, but not permanently
     260              :     /// forget the location on the offline node.
     261              :     ///
     262              :     /// Returns true if a change was made
     263           10 :     pub(crate) fn demote_attached(&mut self, scheduler: &mut Scheduler, node_id: NodeId) -> bool {
     264           10 :         if self.attached == Some(node_id) {
     265           10 :             self.attached = None;
     266           10 :             self.secondary.push(node_id);
     267           10 :             scheduler.update_node_ref_counts(node_id, RefCountUpdate::DemoteAttached);
     268           10 :             true
     269              :         } else {
     270            0 :             false
     271              :         }
     272           10 :     }
     273              : }
     274              : 
     275              : impl Drop for IntentState {
     276           26 :     fn drop(&mut self) {
     277           26 :         // Must clear before dropping, to avoid leaving stale refcounts in the Scheduler.
     278           26 :         // We do not check this while panicking, to avoid polluting unit test failures or
     279           26 :         // other assertions with this assertion's output.  It's still wrong to leak these,
     280           26 :         // but if we already have a panic then we don't need to independently flag this case.
     281           26 :         if !(std::thread::panicking()) {
     282           26 :             debug_assert!(self.attached.is_none() && self.secondary.is_empty());
     283            0 :         }
     284           24 :     }
     285              : }
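
// A minimal lifecycle sketch (the helper name is hypothetical, and the Scheduler and
// NodeIds are assumed to be available, e.g. in a test): every IntentState mutation goes
// through the Scheduler so its reference counts stay consistent, and the intent must be
// cleared before it is dropped (see the Drop impl above).
fn intent_lifecycle_sketch(scheduler: &mut Scheduler, node_a: NodeId, node_b: NodeId) {
    let mut intent = IntentState::new();
    intent.set_attached(scheduler, Some(node_a));
    intent.push_secondary(scheduler, node_b);
    // Swap roles: node_a becomes a secondary, node_b becomes the attached location.
    intent.demote_attached(scheduler, node_a);
    intent.promote_attached(scheduler, node_b);
    // Release all scheduler refcounts before the IntentState is dropped.
    intent.clear(scheduler);
}
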
     286              : 
     287            0 : #[derive(Default, Clone, Serialize, Deserialize, Debug)]
     288              : pub(crate) struct ObservedState {
     289              :     pub(crate) locations: HashMap<NodeId, ObservedStateLocation>,
     290              : }
     291              : 
     292              : /// Our latest knowledge of how this tenant is configured in the outside world.
     293              : ///
     294              : /// Meaning:
     295              : ///     * No instance of this type exists for a node: we are certain that we have nothing configured on that
     296              : ///       node for this shard.
     297              : ///     * Instance exists with conf==None: we *might* have some state on that node, but we don't know
     298              : ///       what it is (e.g. we failed partway through configuring it)
     299              : ///     * Instance exists with conf==Some: this tells us what we last successfully configured on this node,
     300              : ///       and that configuration will still be present unless something external interfered.
     301            0 : #[derive(Clone, Serialize, Deserialize, Debug)]
     302              : pub(crate) struct ObservedStateLocation {
     303              :     /// If None, it means we do not know the status of this shard's location on this node, but
     304              :     /// we know that we might have some state on this node.
     305              :     pub(crate) conf: Option<LocationConfig>,
     306              : }
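
// A minimal sketch of how a caller might read back the three cases documented above
// (the helper name is hypothetical; `observed` and `node_id` stand in for real values):
fn describe_location_sketch(observed: &ObservedState, node_id: NodeId) -> &'static str {
    match observed.locations.get(&node_id) {
        // No entry at all: we are certain nothing is configured for this shard on that node.
        None => "nothing configured",
        // Entry with conf == None: there may be some state on the node, but we don't know what it is.
        Some(ObservedStateLocation { conf: None }) => "unknown state",
        // Entry with conf == Some: the configuration we last successfully applied on that node.
        Some(ObservedStateLocation { conf: Some(_) }) => "known configuration",
    }
}
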
     307              : pub(crate) struct ReconcilerWaiter {
     308              :     // For observability purposes, remember the ID of the shard we're
     309              :     // waiting for.
     310              :     pub(crate) tenant_shard_id: TenantShardId,
     311              : 
     312              :     seq_wait: std::sync::Arc<SeqWait<Sequence, Sequence>>,
     313              :     error_seq_wait: std::sync::Arc<SeqWait<Sequence, Sequence>>,
     314              :     error: std::sync::Arc<std::sync::Mutex<Option<Arc<ReconcileError>>>>,
     315              :     seq: Sequence,
     316              : }
     317              : 
     318              : pub(crate) enum ReconcilerStatus {
     319              :     Done,
     320              :     Failed,
     321              :     InProgress,
     322              : }
     323              : 
     324            0 : #[derive(thiserror::Error, Debug)]
     325              : pub(crate) enum ReconcileWaitError {
     326              :     #[error("Timeout waiting for shard {0}")]
     327              :     Timeout(TenantShardId),
     328              :     #[error("shutting down")]
     329              :     Shutdown,
     330              :     #[error("Reconcile error on shard {0}: {1}")]
     331              :     Failed(TenantShardId, Arc<ReconcileError>),
     332              : }
     333              : 
     334              : #[derive(Eq, PartialEq, Debug)]
     335              : pub(crate) struct ReplaceSecondary {
     336              :     old_node_id: NodeId,
     337              :     new_node_id: NodeId,
     338              : }
     339              : 
     340              : #[derive(Eq, PartialEq, Debug)]
     341              : pub(crate) struct MigrateAttachment {
     342              :     pub(crate) old_attached_node_id: NodeId,
     343              :     pub(crate) new_attached_node_id: NodeId,
     344              : }
     345              : 
     346              : #[derive(Eq, PartialEq, Debug)]
     347              : pub(crate) enum ScheduleOptimizationAction {
     348              :     // Replace one of our secondary locations with a different node
     349              :     ReplaceSecondary(ReplaceSecondary),
     350              :     // Migrate attachment to an existing secondary location
     351              :     MigrateAttachment(MigrateAttachment),
     352              : }
     353              : 
     354              : #[derive(Eq, PartialEq, Debug)]
     355              : pub(crate) struct ScheduleOptimization {
     356              :     // What was the reconcile sequence when we generated this optimization?  The optimization
     357              :     // should only be applied if the shard's sequence is still at this value, in case other changes
     358              :     // happened between planning the optimization and applying it.
     359              :     sequence: Sequence,
     360              : 
     361              :     pub(crate) action: ScheduleOptimizationAction,
     362              : }
     363              : 
     364              : impl ReconcilerWaiter {
     365            0 :     pub(crate) async fn wait_timeout(&self, timeout: Duration) -> Result<(), ReconcileWaitError> {
     366              :         tokio::select! {
     367              :             result = self.seq_wait.wait_for_timeout(self.seq, timeout)=> {
     368            0 :                 result.map_err(|e| match e {
     369            0 :                     SeqWaitError::Timeout => ReconcileWaitError::Timeout(self.tenant_shard_id),
     370            0 :                     SeqWaitError::Shutdown => ReconcileWaitError::Shutdown
     371            0 :                 })?;
     372              :             },
     373              :             result = self.error_seq_wait.wait_for(self.seq) => {
     374            0 :                 result.map_err(|e| match e {
     375            0 :                     SeqWaitError::Shutdown => ReconcileWaitError::Shutdown,
     376            0 :                     SeqWaitError::Timeout => unreachable!()
     377            0 :                 })?;
     378              : 
     379              :                 return Err(ReconcileWaitError::Failed(self.tenant_shard_id,
     380              :                     self.error.lock().unwrap().clone().expect("If error_seq_wait was advanced error was set").clone()))
     381              :             }
     382              :         }
     383              : 
     384            0 :         Ok(())
     385            0 :     }
     386              : 
     387            0 :     pub(crate) fn get_status(&self) -> ReconcilerStatus {
     388            0 :         if self.seq_wait.would_wait_for(self.seq).is_ok() {
     389            0 :             ReconcilerStatus::Done
     390            0 :         } else if self.error_seq_wait.would_wait_for(self.seq).is_ok() {
     391            0 :             ReconcilerStatus::Failed
     392              :         } else {
     393            0 :             ReconcilerStatus::InProgress
     394              :         }
     395            0 :     }
     396              : }
     397              : 
     398              : /// Having spawned a reconciler task, the tenant shard's state will carry enough
     399              : /// information to optionally cancel & await it later.
     400              : pub(crate) struct ReconcilerHandle {
     401              :     sequence: Sequence,
     402              :     handle: JoinHandle<()>,
     403              :     cancel: CancellationToken,
     404              : }
     405              : 
     406              : pub(crate) enum ReconcileNeeded {
     407              :     /// shard either doesn't need reconciliation, or is forbidden from spawning a reconciler
     408              :     /// in its current state (e.g. shard split in progress, or ShardSchedulingPolicy forbids it)
     409              :     No,
     410              :     /// shard has a reconciler running, and its intent hasn't changed since that one was
     411              :     /// spawned: wait for the existing reconciler rather than spawning a new one.
     412              :     WaitExisting(ReconcilerWaiter),
     413              :     /// shard needs reconciliation: call into [`TenantShard::spawn_reconciler`]
     414              :     Yes,
     415              : }
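
// A minimal caller-side sketch of how this value is intended to be dispatched (the helper
// name is hypothetical; the real dispatch lives in the storage controller service code,
// which is not part of this file):
fn dispatch_reconcile_needed_sketch(needed: ReconcileNeeded) {
    match needed {
        // Nothing to do, or the shard's current state forbids spawning a reconciler.
        ReconcileNeeded::No => {}
        // A reconciler matching the current intent is already running: wait on it instead
        // of spawning a second one.
        ReconcileNeeded::WaitExisting(_waiter) => { /* await the existing ReconcilerWaiter */ }
        // The shard needs reconciliation: the caller goes on to spawn a reconciler
        // (see TenantShard::spawn_reconciler).
        ReconcileNeeded::Yes => {}
    }
}
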
     416              : 
     417              : /// When a reconcile task completes, it sends this result object
     418              : /// to be applied to the primary TenantShard.
     419              : pub(crate) struct ReconcileResult {
     420              :     pub(crate) sequence: Sequence,
      421              :     /// On errors, `observed` should be treated as an incomplete description
     422              :     /// of state (i.e. any nodes present in the result should override nodes
     423              :     /// present in the parent tenant state, but any unmentioned nodes should
     424              :     /// not be removed from parent tenant state)
     425              :     pub(crate) result: Result<(), ReconcileError>,
     426              : 
     427              :     pub(crate) tenant_shard_id: TenantShardId,
     428              :     pub(crate) generation: Option<Generation>,
     429              :     pub(crate) observed: ObservedState,
     430              : 
     431              :     /// Set [`TenantShard::pending_compute_notification`] from this flag
     432              :     pub(crate) pending_compute_notification: bool,
     433              : }
     434              : 
     435              : impl ObservedState {
     436            0 :     pub(crate) fn new() -> Self {
     437            0 :         Self {
     438            0 :             locations: HashMap::new(),
     439            0 :         }
     440            0 :     }
     441              : }
     442              : 
     443              : impl TenantShard {
     444           22 :     pub(crate) fn new(
     445           22 :         tenant_shard_id: TenantShardId,
     446           22 :         shard: ShardIdentity,
     447           22 :         policy: PlacementPolicy,
     448           22 :     ) -> Self {
     449           22 :         Self {
     450           22 :             tenant_shard_id,
     451           22 :             policy,
     452           22 :             intent: IntentState::default(),
     453           22 :             generation: Some(Generation::new(0)),
     454           22 :             shard,
     455           22 :             observed: ObservedState::default(),
     456           22 :             config: TenantConfig::default(),
     457           22 :             reconciler: None,
     458           22 :             splitting: SplitState::Idle,
     459           22 :             sequence: Sequence(1),
     460           22 :             delayed_reconcile: false,
     461           22 :             waiter: Arc::new(SeqWait::new(Sequence(0))),
     462           22 :             error_waiter: Arc::new(SeqWait::new(Sequence(0))),
     463           22 :             last_error: Arc::default(),
     464           22 :             pending_compute_notification: false,
     465           22 :             scheduling_policy: ShardSchedulingPolicy::default(),
     466           22 :         }
     467           22 :     }
     468              : 
     469              :     /// For use on startup when learning state from pageservers: generate my [`IntentState`] from my
     470              :     /// [`ObservedState`], even if it violates my [`PlacementPolicy`].  Call [`Self::schedule`] next,
     471              :     /// to get an intent state that complies with placement policy.  The overall goal is to do scheduling
     472              :     /// in a way that makes use of any configured locations that already exist in the outside world.
     473            2 :     pub(crate) fn intent_from_observed(&mut self, scheduler: &mut Scheduler) {
     474            2 :         // Choose an attached location by filtering observed locations, and then sorting to get the highest
     475            2 :         // generation
     476            2 :         let mut attached_locs = self
     477            2 :             .observed
     478            2 :             .locations
     479            2 :             .iter()
     480            4 :             .filter_map(|(node_id, l)| {
     481            4 :                 if let Some(conf) = &l.conf {
     482            4 :                     if conf.mode == LocationConfigMode::AttachedMulti
     483            2 :                         || conf.mode == LocationConfigMode::AttachedSingle
     484            2 :                         || conf.mode == LocationConfigMode::AttachedStale
     485              :                     {
     486            4 :                         Some((node_id, conf.generation))
     487              :                     } else {
     488            0 :                         None
     489              :                     }
     490              :                 } else {
     491            0 :                     None
     492              :                 }
     493            4 :             })
     494            2 :             .collect::<Vec<_>>();
     495            2 : 
     496            4 :         attached_locs.sort_by_key(|i| i.1);
     497            2 :         if let Some((node_id, _gen)) = attached_locs.into_iter().last() {
     498            2 :             self.intent.set_attached(scheduler, Some(*node_id));
     499            2 :         }
     500              : 
     501              :         // All remaining observed locations generate secondary intents.  This includes None
     502              :         // observations, as these may well have some local content on disk that is usable (this
     503              :         // is an edge case that might occur if we restarted during a migration or other change)
     504              :         //
     505              :         // We may leave intent.attached empty if we didn't find any attached locations: [`Self::schedule`]
     506              :         // will take care of promoting one of these secondaries to be attached.
     507            4 :         self.observed.locations.keys().for_each(|node_id| {
     508            4 :             if Some(*node_id) != self.intent.attached {
     509            2 :                 self.intent.push_secondary(scheduler, *node_id);
     510            2 :             }
     511            4 :         });
     512            2 :     }
     513              : 
     514              :     /// Part of [`Self::schedule`] that is used to choose exactly one node to act as the
     515              :     /// attached pageserver for a shard.
     516              :     ///
     517              :     /// Returns whether we modified it, and the NodeId selected.
     518           14 :     fn schedule_attached(
     519           14 :         &mut self,
     520           14 :         scheduler: &mut Scheduler,
     521           14 :         context: &ScheduleContext,
     522           14 :     ) -> Result<(bool, NodeId), ScheduleError> {
     523              :         // No work to do if we already have an attached tenant
     524           14 :         if let Some(node_id) = self.intent.attached {
     525            0 :             return Ok((false, node_id));
     526           14 :         }
     527              : 
     528           14 :         if let Some(promote_secondary) = scheduler.node_preferred(&self.intent.secondary) {
     529              :             // Promote a secondary
     530            2 :             tracing::debug!("Promoted secondary {} to attached", promote_secondary);
     531            2 :             self.intent.promote_attached(scheduler, promote_secondary);
     532            2 :             Ok((true, promote_secondary))
     533              :         } else {
     534              :             // Pick a fresh node: either we had no secondaries or none were schedulable
     535           12 :             let node_id = scheduler.schedule_shard(&self.intent.secondary, context)?;
     536           12 :             tracing::debug!("Selected {} as attached", node_id);
     537           12 :             self.intent.set_attached(scheduler, Some(node_id));
     538           12 :             Ok((true, node_id))
     539              :         }
     540           14 :     }
     541              : 
     542           16 :     pub(crate) fn schedule(
     543           16 :         &mut self,
     544           16 :         scheduler: &mut Scheduler,
     545           16 :         context: &mut ScheduleContext,
     546           16 :     ) -> Result<(), ScheduleError> {
     547           16 :         let r = self.do_schedule(scheduler, context);
     548           16 : 
     549           16 :         context.avoid(&self.intent.all_pageservers());
     550           16 :         if let Some(attached) = self.intent.get_attached() {
     551           14 :             context.push_attached(*attached);
     552           14 :         }
     553              : 
     554           16 :         r
     555           16 :     }
     556              : 
     557           16 :     pub(crate) fn do_schedule(
     558           16 :         &mut self,
     559           16 :         scheduler: &mut Scheduler,
     560           16 :         context: &ScheduleContext,
     561           16 :     ) -> Result<(), ScheduleError> {
     562           16 :         // TODO: before scheduling new nodes, check if any existing content in
     563           16 :         // self.intent refers to pageservers that are offline, and pick other
     564           16 :         // pageservers if so.
     565           16 : 
     566           16 :         // TODO: respect the splitting bit on tenants: if they are currently splitting then we may not
     567           16 :         // change their attach location.
     568           16 : 
     569           16 :         match self.scheduling_policy {
     570           14 :             ShardSchedulingPolicy::Active | ShardSchedulingPolicy::Essential => {}
     571              :             ShardSchedulingPolicy::Pause | ShardSchedulingPolicy::Stop => {
     572              :                 // Warn to make it obvious why other things aren't happening/working, if we skip scheduling
     573            2 :                 tracing::warn!(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug(),
     574            0 :                     "Scheduling is disabled by policy {:?}", self.scheduling_policy);
     575            2 :                 return Ok(());
     576              :             }
     577              :         }
     578              : 
     579              :         // Build the set of pageservers already in use by this tenant, to avoid scheduling
     580              :         // more work on the same pageservers we're already using.
     581           14 :         let mut modified = false;
     582           14 : 
     583           14 :         // Add/remove nodes to fulfil policy
     584           14 :         use PlacementPolicy::*;
     585           14 :         match self.policy {
     586           14 :             Attached(secondary_count) => {
     587           14 :                 let retain_secondaries = if self.intent.attached.is_none()
     588           14 :                     && scheduler.node_preferred(&self.intent.secondary).is_some()
     589              :                 {
     590              :                     // If we have no attached, and one of the secondaries is elegible to be promoted, retain
     591              :                     // one more secondary than we usually would, as one of them will become attached futher down this function.
     592            2 :                     secondary_count + 1
     593              :                 } else {
     594           12 :                     secondary_count
     595              :                 };
     596              : 
     597           14 :                 while self.intent.secondary.len() > retain_secondaries {
     598            0 :                     // We have no particular preference for one secondary location over another: just
     599            0 :                     // arbitrarily drop from the end
     600            0 :                     self.intent.pop_secondary(scheduler);
     601            0 :                     modified = true;
     602            0 :                 }
     603              : 
     604              :                 // Should have exactly one attached, and N secondaries
     605           14 :                 let (modified_attached, attached_node_id) =
     606           14 :                     self.schedule_attached(scheduler, context)?;
     607           14 :                 modified |= modified_attached;
     608           14 : 
     609           14 :                 let mut used_pageservers = vec![attached_node_id];
     610           26 :                 while self.intent.secondary.len() < secondary_count {
     611           12 :                     let node_id = scheduler.schedule_shard(&used_pageservers, context)?;
     612           12 :                     self.intent.push_secondary(scheduler, node_id);
     613           12 :                     used_pageservers.push(node_id);
     614           12 :                     modified = true;
     615              :                 }
     616              :             }
     617              :             Secondary => {
     618            0 :                 if let Some(node_id) = self.intent.get_attached() {
     619            0 :                     // Populate secondary by demoting the attached node
     620            0 :                     self.intent.demote_attached(scheduler, *node_id);
     621            0 :                     modified = true;
     622            0 :                 } else if self.intent.secondary.is_empty() {
     623            0 :                     // Populate secondary by scheduling a fresh node
     624            0 :                     let node_id = scheduler.schedule_shard(&[], context)?;
     625            0 :                     self.intent.push_secondary(scheduler, node_id);
     626            0 :                     modified = true;
     627            0 :                 }
     628            0 :                 while self.intent.secondary.len() > 1 {
     629            0 :                     // We have no particular preference for one secondary location over another: just
     630            0 :                     // arbitrarily drop from the end
     631            0 :                     self.intent.pop_secondary(scheduler);
     632            0 :                     modified = true;
     633            0 :                 }
     634              :             }
     635              :             Detached => {
     636              :                 // Never add locations in this mode
     637            0 :                 if self.intent.get_attached().is_some() || !self.intent.get_secondary().is_empty() {
     638            0 :                     self.intent.clear(scheduler);
     639            0 :                     modified = true;
     640            0 :                 }
     641              :             }
     642              :         }
     643              : 
     644           14 :         if modified {
     645           14 :             self.sequence.0 += 1;
     646           14 :         }
     647              : 
     648           14 :         Ok(())
     649           16 :     }
     650              : 
     651              :     /// Reschedule this tenant shard to one of its secondary locations. Returns a scheduling error
     652              :     /// if the swap is not possible and leaves the intent state in its original state.
     653              :     ///
     654              :     /// Arguments:
     655              :     /// `attached_to`: the currently attached location matching the intent state (may be None if the
     656              :     /// shard is not attached)
     657              :     /// `promote_to`: an optional secondary location of this tenant shard. If set to None, we ask
     658              :     /// the scheduler to recommend a node
     659            0 :     pub(crate) fn reschedule_to_secondary(
     660            0 :         &mut self,
     661            0 :         promote_to: Option<NodeId>,
     662            0 :         scheduler: &mut Scheduler,
     663            0 :     ) -> Result<(), ScheduleError> {
     664            0 :         let promote_to = match promote_to {
     665            0 :             Some(node) => node,
     666            0 :             None => match scheduler.node_preferred(self.intent.get_secondary()) {
     667            0 :                 Some(node) => node,
     668              :                 None => {
     669            0 :                     return Err(ScheduleError::ImpossibleConstraint);
     670              :                 }
     671              :             },
     672              :         };
     673              : 
     674            0 :         assert!(self.intent.get_secondary().contains(&promote_to));
     675              : 
     676            0 :         if let Some(node) = self.intent.get_attached() {
     677            0 :             let demoted = self.intent.demote_attached(scheduler, *node);
     678            0 :             if !demoted {
     679            0 :                 return Err(ScheduleError::ImpossibleConstraint);
     680            0 :             }
     681            0 :         }
     682              : 
     683            0 :         self.intent.promote_attached(scheduler, promote_to);
     684            0 : 
     685            0 :         // Increment the sequence number for the edge case where a
     686            0 :         // reconciler is already running to avoid waiting on the
     687            0 :         // current reconcile instead of spawning a new one.
     688            0 :         self.sequence = self.sequence.next();
     689            0 : 
     690            0 :         Ok(())
     691            0 :     }
     692              : 
     693              :     /// Optimize attachments: if a shard has a secondary location that is preferable to
     694              :     /// its primary location based on soft constraints, switch that secondary location
     695              :     /// to be attached.
     696           30 :     #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
     697              :     pub(crate) fn optimize_attachment(
     698              :         &self,
     699              :         nodes: &HashMap<NodeId, Node>,
     700              :         schedule_context: &ScheduleContext,
     701              :     ) -> Option<ScheduleOptimization> {
     702              :         let attached = (*self.intent.get_attached())?;
     703              :         if self.intent.secondary.is_empty() {
     704              :             // We can only do useful work if we have both attached and secondary locations: this
     705              :             // function doesn't schedule new locations, only swaps between attached and secondaries.
     706              :             return None;
     707              :         }
     708              : 
     709              :         let current_affinity_score = schedule_context.get_node_affinity(attached);
     710              :         let current_attachment_count = schedule_context.get_node_attachments(attached);
     711              : 
     712              :         // Generate score for each node, dropping any un-schedulable nodes.
     713              :         let all_pageservers = self.intent.all_pageservers();
     714              :         let mut scores = all_pageservers
     715              :             .iter()
     716           60 :             .flat_map(|node_id| {
     717           60 :                 let node = nodes.get(node_id);
     718           60 :                 if node.is_none() {
     719            0 :                     None
     720           60 :                 } else if matches!(
     721           60 :                     node.unwrap().get_scheduling(),
     722              :                     NodeSchedulingPolicy::Filling
     723              :                 ) {
      724              :                     // If the node is currently filling, don't count it as a candidate, to avoid
     725              :                     // racing with the background fill.
     726            0 :                     None
     727           60 :                 } else if matches!(node.unwrap().may_schedule(), MaySchedule::No) {
     728            0 :                     None
     729              :                 } else {
     730           60 :                     let affinity_score = schedule_context.get_node_affinity(*node_id);
     731           60 :                     let attachment_count = schedule_context.get_node_attachments(*node_id);
     732           60 :                     Some((*node_id, affinity_score, attachment_count))
     733              :                 }
     734           60 :             })
     735              :             .collect::<Vec<_>>();
     736              : 
     737              :         // Sort precedence:
     738              :         //  1st - prefer nodes with the lowest total affinity score
     739              :         //  2nd - prefer nodes with the lowest number of attachments in this context
     740              :         //  3rd - if all else is equal, sort by node ID for determinism in tests.
     741           60 :         scores.sort_by_key(|i| (i.1, i.2, i.0));
     742              : 
     743              :         if let Some((preferred_node, preferred_affinity_score, preferred_attachment_count)) =
     744              :             scores.first()
     745              :         {
     746              :             if attached != *preferred_node {
     747              :                 // The best alternative must be more than 1 better than us, otherwise we could end
     748              :                 // up flapping back next time we're called (e.g. there's no point migrating from
     749              :                 // a location with score 1 to a score zero, because on next location the situation
     750              :                 // would be the same, but in reverse).
     751              :                 if current_affinity_score > *preferred_affinity_score + AffinityScore(1)
     752              :                     || current_attachment_count > *preferred_attachment_count + 1
     753              :                 {
     754              :                     tracing::info!(
     755              :                         "Identified optimization: migrate attachment {attached}->{preferred_node} (secondaries {:?})",
     756              :                         self.intent.get_secondary()
     757              :                     );
     758              :                     return Some(ScheduleOptimization {
     759              :                         sequence: self.sequence,
     760              :                         action: ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
     761              :                             old_attached_node_id: attached,
     762              :                             new_attached_node_id: *preferred_node,
     763              :                         }),
     764              :                     });
     765              :                 }
     766              :             } else {
     767              :                 tracing::debug!(
     768              :                     "Node {} is already preferred (score {:?})",
     769              :                     preferred_node,
     770              :                     preferred_affinity_score
     771              :                 );
     772              :             }
     773              :         }
     774              : 
     775              :         // Fall-through: we didn't find an optimization
     776              :         None
     777              :     }
     778              : 
     779           24 :     #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
     780              :     pub(crate) fn optimize_secondary(
     781              :         &self,
     782              :         scheduler: &Scheduler,
     783              :         schedule_context: &ScheduleContext,
     784              :     ) -> Option<ScheduleOptimization> {
     785              :         if self.intent.secondary.is_empty() {
     786              :             // We can only do useful work if we have both attached and secondary locations: this
     787              :             // function doesn't schedule new locations, only swaps between attached and secondaries.
     788              :             return None;
     789              :         }
     790              : 
     791              :         for secondary in self.intent.get_secondary() {
     792              :             let Some(affinity_score) = schedule_context.nodes.get(secondary) else {
     793              :                 // We're already on a node unaffected any affinity constraints,
     794              :                 // so we won't change it.
     795              :                 continue;
     796              :             };
     797              : 
     798              :             // Let the scheduler suggest a node, where it would put us if we were scheduling afresh
     799              :             // This implicitly limits the choice to nodes that are available, and prefers nodes
     800              :             // with lower utilization.
     801              :             let Ok(candidate_node) =
     802              :                 scheduler.schedule_shard(&self.intent.all_pageservers(), schedule_context)
     803              :             else {
     804              :                 // A scheduling error means we have no possible candidate replacements
     805              :                 continue;
     806              :             };
     807              : 
     808              :             let candidate_affinity_score = schedule_context
     809              :                 .nodes
     810              :                 .get(&candidate_node)
     811              :                 .unwrap_or(&AffinityScore::FREE);
     812              : 
     813              :             // The best alternative must be more than 1 better than us, otherwise we could end
     814              :             // up flapping back next time we're called.
     815              :             if *candidate_affinity_score + AffinityScore(1) < *affinity_score {
     816              :                 // If some other node is available and has a lower score than this node, then
     817              :                 // that other node is a good place to migrate to.
     818              :                 tracing::info!(
     819              :                     "Identified optimization: replace secondary {secondary}->{candidate_node} (current secondaries {:?})",
     820              :                     self.intent.get_secondary()
     821              :                 );
     822              :                 return Some(ScheduleOptimization {
     823              :                     sequence: self.sequence,
     824              :                     action: ScheduleOptimizationAction::ReplaceSecondary(ReplaceSecondary {
     825              :                         old_node_id: *secondary,
     826              :                         new_node_id: candidate_node,
     827              :                     }),
     828              :                 });
     829              :             }
     830              :         }
     831              : 
     832              :         None
     833              :     }
     834              : 
     835              :     /// Return true if the optimization was really applied: it will not be applied if the optimization's
     836              :     /// sequence is behind this tenant shard's
     837           18 :     pub(crate) fn apply_optimization(
     838           18 :         &mut self,
     839           18 :         scheduler: &mut Scheduler,
     840           18 :         optimization: ScheduleOptimization,
     841           18 :     ) -> bool {
     842           18 :         if optimization.sequence != self.sequence {
     843            0 :             return false;
     844           18 :         }
     845           18 : 
     846           18 :         metrics::METRICS_REGISTRY
     847           18 :             .metrics_group
     848           18 :             .storage_controller_schedule_optimization
     849           18 :             .inc();
     850           18 : 
     851           18 :         match optimization.action {
     852              :             ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
     853            8 :                 old_attached_node_id,
     854            8 :                 new_attached_node_id,
     855            8 :             }) => {
     856            8 :                 self.intent.demote_attached(scheduler, old_attached_node_id);
     857            8 :                 self.intent
     858            8 :                     .promote_attached(scheduler, new_attached_node_id);
     859            8 :             }
     860              :             ScheduleOptimizationAction::ReplaceSecondary(ReplaceSecondary {
     861           10 :                 old_node_id,
     862           10 :                 new_node_id,
     863           10 :             }) => {
     864           10 :                 self.intent.remove_secondary(scheduler, old_node_id);
     865           10 :                 self.intent.push_secondary(scheduler, new_node_id);
     866           10 :             }
     867              :         }
     868              : 
     869           18 :         true
     870           18 :     }
     871              : 
      872              :     /// Query whether the tenant's observed state for the attached node matches its intent state, and if so,
     873              :     /// yield the node ID.  This is appropriate for emitting compute hook notifications: we are checking that
     874              :     /// the node in question is not only where we intend to attach, but that the tenant is indeed already attached there.
     875              :     ///
     876              :     /// Reconciliation may still be needed for other aspects of state such as secondaries (see [`Self::dirty`]): this
      877              :     /// function should not be used to decide whether to reconcile.
     878            0 :     pub(crate) fn stably_attached(&self) -> Option<NodeId> {
     879            0 :         if let Some(attach_intent) = self.intent.attached {
     880            0 :             match self.observed.locations.get(&attach_intent) {
     881            0 :                 Some(loc) => match &loc.conf {
     882            0 :                     Some(conf) => match conf.mode {
     883              :                         LocationConfigMode::AttachedMulti
     884              :                         | LocationConfigMode::AttachedSingle
     885              :                         | LocationConfigMode::AttachedStale => {
     886              :                             // Our intent and observed state agree that this node is in an attached state.
     887            0 :                             Some(attach_intent)
     888              :                         }
     889              :                         // Our observed config is not an attached state
     890            0 :                         _ => None,
     891              :                     },
     892              :                     // Our observed state is None, i.e. in flux
     893            0 :                     None => None,
     894              :                 },
     895              :                 // We have no observed state for this node
     896            0 :                 None => None,
     897              :             }
     898              :         } else {
     899              :             // Our intent is not to attach
     900            0 :             None
     901              :         }
     902            0 :     }
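                       :
                       :     // Added note: a rough functional equivalent of the nested match above, shown only for
                       :     // illustration (an assumption, not part of the original source):
                       :     //
                       :     //     self.intent.attached.filter(|node_id| {
                       :     //         self.observed.locations.get(node_id)
                       :     //             .and_then(|loc| loc.conf.as_ref())
                       :     //             .map_or(false, |conf| matches!(conf.mode,
                       :     //                 LocationConfigMode::AttachedMulti
                       :     //                     | LocationConfigMode::AttachedSingle
                       :     //                     | LocationConfigMode::AttachedStale))
                       :     //     })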
     903              : 
     904            0 :     fn dirty(&self, nodes: &Arc<HashMap<NodeId, Node>>) -> bool {
     905            0 :         let mut dirty_nodes = HashSet::new();
     906              : 
     907            0 :         if let Some(node_id) = self.intent.attached {
      908              :             // May panic: it is a severe bug if we try to attach while generation is null.
     909            0 :             let generation = self
     910            0 :                 .generation
     911            0 :                 .expect("Attempted to enter attached state without a generation");
     912            0 : 
     913            0 :             let wanted_conf =
     914            0 :                 attached_location_conf(generation, &self.shard, &self.config, &self.policy);
     915            0 :             match self.observed.locations.get(&node_id) {
     916            0 :                 Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {}
     917            0 :                 Some(_) | None => {
     918            0 :                     dirty_nodes.insert(node_id);
     919            0 :                 }
     920              :             }
     921            0 :         }
     922              : 
     923            0 :         for node_id in &self.intent.secondary {
     924            0 :             let wanted_conf = secondary_location_conf(&self.shard, &self.config);
     925            0 :             match self.observed.locations.get(node_id) {
     926            0 :                 Some(conf) if conf.conf.as_ref() == Some(&wanted_conf) => {}
     927            0 :                 Some(_) | None => {
     928            0 :                     dirty_nodes.insert(*node_id);
     929            0 :                 }
     930              :             }
     931              :         }
     932              : 
     933            0 :         for node_id in self.observed.locations.keys() {
     934            0 :             if self.intent.attached != Some(*node_id) && !self.intent.secondary.contains(node_id) {
     935            0 :                 // We have observed state that isn't part of our intent: need to clean it up.
     936            0 :                 dirty_nodes.insert(*node_id);
     937            0 :             }
     938              :         }
     939              : 
     940            0 :         dirty_nodes.retain(|node_id| {
     941            0 :             nodes
     942            0 :                 .get(node_id)
     943            0 :                 .map(|n| n.is_available())
     944            0 :                 .unwrap_or(false)
     945            0 :         });
     946            0 : 
     947            0 :         !dirty_nodes.is_empty()
     948            0 :     }
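                       :
                       :     // Added summary comment: a node counts as dirty if (a) it is the intended attached location
                       :     // but its observed config differs from the wanted attached config, (b) it is an intended
                       :     // secondary with a mismatched observed config, or (c) it appears in observed state without
                       :     // being part of the intent at all.  Unavailable nodes are then filtered out, since
                       :     // reconciliation cannot act on them anyway.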
     949              : 
     950              :     #[allow(clippy::too_many_arguments)]
     951            0 :     #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
     952              :     pub(crate) fn get_reconcile_needed(
     953              :         &mut self,
     954              :         pageservers: &Arc<HashMap<NodeId, Node>>,
     955              :     ) -> ReconcileNeeded {
     956              :         // If there are any ambiguous observed states, and the nodes they refer to are available,
     957              :         // we should reconcile to clean them up.
     958              :         let mut dirty_observed = false;
     959              :         for (node_id, observed_loc) in &self.observed.locations {
     960              :             let node = pageservers
     961              :                 .get(node_id)
     962              :                 .expect("Nodes may not be removed while referenced");
     963              :             if observed_loc.conf.is_none() && node.is_available() {
     964              :                 dirty_observed = true;
     965              :                 break;
     966              :             }
     967              :         }
     968              : 
     969              :         let active_nodes_dirty = self.dirty(pageservers);
     970              : 
     971              :         // Even if there is no pageserver work to be done, if we have a pending notification to computes,
     972              :         // wake up a reconciler to send it.
     973              :         let do_reconcile =
     974              :             active_nodes_dirty || dirty_observed || self.pending_compute_notification;
     975              : 
     976              :         if !do_reconcile {
     977              :             tracing::debug!("Not dirty, no reconciliation needed.");
     978              :             return ReconcileNeeded::No;
     979              :         }
     980              : 
     981              :         // If we are currently splitting, then never start a reconciler task: the splitting logic
     982              :         // requires that shards are not interfered with while it runs. Do this check here rather than
     983              :         // up top, so that we only log this message if we would otherwise have done a reconciliation.
     984              :         if !matches!(self.splitting, SplitState::Idle) {
     985              :             tracing::info!("Refusing to reconcile, splitting in progress");
     986              :             return ReconcileNeeded::No;
     987              :         }
     988              : 
     989              :         // Reconcile already in flight for the current sequence?
     990              :         if let Some(handle) = &self.reconciler {
     991              :             if handle.sequence == self.sequence {
     992              :                 tracing::info!(
     993              :                     "Reconciliation already in progress for sequence {:?}",
     994              :                     self.sequence,
     995              :                 );
     996              :                 return ReconcileNeeded::WaitExisting(ReconcilerWaiter {
     997              :                     tenant_shard_id: self.tenant_shard_id,
     998              :                     seq_wait: self.waiter.clone(),
     999              :                     error_seq_wait: self.error_waiter.clone(),
    1000              :                     error: self.last_error.clone(),
    1001              :                     seq: self.sequence,
    1002              :                 });
    1003              :             }
    1004              :         }
    1005              : 
    1006              :         // Pre-checks done: finally check whether we may actually do the work
    1007              :         match self.scheduling_policy {
    1008              :             ShardSchedulingPolicy::Active
    1009              :             | ShardSchedulingPolicy::Essential
    1010              :             | ShardSchedulingPolicy::Pause => {}
    1011              :             ShardSchedulingPolicy::Stop => {
    1012              :                 // We only reach this point if there is work to do and we're going to skip
     1013              :                 // doing it: warn so that it is obvious why this tenant isn't doing what it ought to.
    1014              :                 tracing::warn!("Skipping reconcile for policy {:?}", self.scheduling_policy);
    1015              :                 return ReconcileNeeded::No;
    1016              :             }
    1017              :         }
    1018              : 
    1019              :         ReconcileNeeded::Yes
    1020              :     }
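                       :
                       :     // Added summary comment: reconciliation is requested only when observed state is ambiguous
                       :     // on an available node, intent and observed state disagree (see `dirty`), or a compute
                       :     // notification is pending -- and even then never while a shard split is in progress, never
                       :     // when a reconciler for the current sequence is already running (a waiter for it is returned
                       :     // instead), and never under ShardSchedulingPolicy::Stop.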
    1021              : 
    1022              :     /// Ensure the sequence number is set to a value where waiting for this value will make us wait
    1023              :     /// for the next reconcile: i.e. it is ahead of all completed or running reconcilers.
    1024              :     ///
    1025              :     /// Constructing a ReconcilerWaiter with the resulting sequence number gives the property
    1026              :     /// that the waiter will not complete until some future Reconciler is constructed and run.
    1027            0 :     fn ensure_sequence_ahead(&mut self) {
    1028            0 :         // Find the highest sequence for which a Reconciler has previously run or is currently
    1029            0 :         // running
    1030            0 :         let max_seen = std::cmp::max(
    1031            0 :             self.reconciler
    1032            0 :                 .as_ref()
    1033            0 :                 .map(|r| r.sequence)
    1034            0 :                 .unwrap_or(Sequence(0)),
    1035            0 :             std::cmp::max(self.waiter.load(), self.error_waiter.load()),
    1036            0 :         );
    1037            0 : 
    1038            0 :         if self.sequence <= max_seen {
    1039            0 :             self.sequence = max_seen.next();
    1040            0 :         }
    1041            0 :     }
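                       :
                       :     // Added worked example (illustrative numbers only): if the most recently spawned reconciler
                       :     // ran at Sequence(5) and both waiters have also advanced to Sequence(5), then max_seen ==
                       :     // Sequence(5); a current self.sequence of 5 or lower is bumped to Sequence(6), so a waiter
                       :     // constructed on the new sequence can only be satisfied by a reconciler spawned after this
                       :     // point.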
    1042              : 
    1043              :     /// Create a waiter that will wait for some future Reconciler that hasn't been spawned yet.
    1044              :     ///
    1045              :     /// This is appropriate when you can't spawn a reconciler (e.g. due to resource limits), but
    1046              :     /// you would like to wait on the next reconciler that gets spawned in the background.
    1047            0 :     pub(crate) fn future_reconcile_waiter(&mut self) -> ReconcilerWaiter {
    1048            0 :         self.ensure_sequence_ahead();
    1049            0 : 
    1050            0 :         ReconcilerWaiter {
    1051            0 :             tenant_shard_id: self.tenant_shard_id,
    1052            0 :             seq_wait: self.waiter.clone(),
    1053            0 :             error_seq_wait: self.error_waiter.clone(),
    1054            0 :             error: self.last_error.clone(),
    1055            0 :             seq: self.sequence,
    1056            0 :         }
    1057            0 :     }
    1058              : 
    1059              :     #[allow(clippy::too_many_arguments)]
    1060            0 :     #[instrument(skip_all, fields(tenant_id=%self.tenant_shard_id.tenant_id, shard_id=%self.tenant_shard_id.shard_slug()))]
    1061              :     pub(crate) fn spawn_reconciler(
    1062              :         &mut self,
    1063              :         result_tx: &tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
    1064              :         pageservers: &Arc<HashMap<NodeId, Node>>,
    1065              :         compute_hook: &Arc<ComputeHook>,
    1066              :         service_config: &service::Config,
    1067              :         persistence: &Arc<Persistence>,
    1068              :         units: ReconcileUnits,
    1069              :         gate_guard: GateGuard,
    1070              :         cancel: &CancellationToken,
    1071              :     ) -> Option<ReconcilerWaiter> {
    1072              :         // Reconcile in flight for a stale sequence?  Our sequence's task will wait for it before
    1073              :         // doing our sequence's work.
    1074              :         let old_handle = self.reconciler.take();
    1075              : 
    1076              :         // Build list of nodes from which the reconciler should detach
    1077              :         let mut detach = Vec::new();
    1078              :         for node_id in self.observed.locations.keys() {
    1079              :             if self.intent.get_attached() != &Some(*node_id)
    1080              :                 && !self.intent.secondary.contains(node_id)
    1081              :             {
    1082              :                 detach.push(
    1083              :                     pageservers
    1084              :                         .get(node_id)
    1085              :                         .expect("Intent references non-existent pageserver")
    1086              :                         .clone(),
    1087              :                 )
    1088              :             }
    1089              :         }
    1090              : 
    1091              :         // Advance the sequence before spawning a reconciler, so that sequence waiters
     1092              :         // can distinguish between before and after the reconcile completes.
    1093              :         self.ensure_sequence_ahead();
    1094              : 
    1095              :         let reconciler_cancel = cancel.child_token();
    1096              :         let reconciler_intent = TargetState::from_intent(pageservers, &self.intent);
    1097              :         let mut reconciler = Reconciler {
    1098              :             tenant_shard_id: self.tenant_shard_id,
    1099              :             shard: self.shard,
    1100              :             placement_policy: self.policy.clone(),
    1101              :             generation: self.generation,
    1102              :             intent: reconciler_intent,
    1103              :             detach,
    1104              :             config: self.config.clone(),
    1105              :             observed: self.observed.clone(),
    1106              :             compute_hook: compute_hook.clone(),
    1107              :             service_config: service_config.clone(),
    1108              :             _gate_guard: gate_guard,
    1109              :             _resource_units: units,
    1110              :             cancel: reconciler_cancel.clone(),
    1111              :             persistence: persistence.clone(),
    1112              :             compute_notify_failure: false,
    1113              :         };
    1114              : 
    1115              :         let reconcile_seq = self.sequence;
    1116              : 
    1117              :         tracing::info!(seq=%reconcile_seq, "Spawning Reconciler for sequence {}", self.sequence);
    1118              :         let must_notify = self.pending_compute_notification;
    1119              :         let reconciler_span = tracing::info_span!(parent: None, "reconciler", seq=%reconcile_seq,
    1120              :                                                         tenant_id=%reconciler.tenant_shard_id.tenant_id,
    1121              :                                                         shard_id=%reconciler.tenant_shard_id.shard_slug());
    1122              :         metrics::METRICS_REGISTRY
    1123              :             .metrics_group
    1124              :             .storage_controller_reconcile_spawn
    1125              :             .inc();
    1126              :         let result_tx = result_tx.clone();
    1127              :         let join_handle = tokio::task::spawn(
    1128            0 :             async move {
    1129              :                 // Wait for any previous reconcile task to complete before we start
    1130            0 :                 if let Some(old_handle) = old_handle {
    1131            0 :                     old_handle.cancel.cancel();
    1132            0 :                     if let Err(e) = old_handle.handle.await {
    1133              :                         // We can't do much with this other than log it: the task is done, so
    1134              :                         // we may proceed with our work.
    1135            0 :                         tracing::error!("Unexpected join error waiting for reconcile task: {e}");
    1136            0 :                     }
    1137            0 :                 }
    1138              : 
    1139              :                 // Early check for cancellation before doing any work
    1140              :                 // TODO: wrap all remote API operations in cancellation check
    1141              :                 // as well.
    1142            0 :                 if reconciler.cancel.is_cancelled() {
    1143            0 :                     metrics::METRICS_REGISTRY
    1144            0 :                         .metrics_group
    1145            0 :                         .storage_controller_reconcile_complete
    1146            0 :                         .inc(ReconcileCompleteLabelGroup {
    1147            0 :                             status: ReconcileOutcome::Cancel,
    1148            0 :                         });
    1149            0 :                     return;
    1150            0 :                 }
    1151              : 
    1152              :                 // Attempt to make observed state match intent state
    1153            0 :                 let result = reconciler.reconcile().await;
    1154              : 
    1155              :                 // If we know we had a pending compute notification from some previous action, send a notification irrespective
    1156              :                 // of whether the above reconcile() did any work
    1157            0 :                 if result.is_ok() && must_notify {
     1158              :                     // If this fails we will signal the need to retry via [`ReconcileResult::pending_compute_notification`]
    1159            0 :                     reconciler.compute_notify().await.ok();
    1160            0 :                 }
    1161              : 
    1162              :                 // Update result counter
    1163            0 :                 let outcome_label = match &result {
    1164            0 :                     Ok(_) => ReconcileOutcome::Success,
    1165            0 :                     Err(ReconcileError::Cancel) => ReconcileOutcome::Cancel,
    1166            0 :                     Err(_) => ReconcileOutcome::Error,
    1167              :                 };
    1168              : 
    1169            0 :                 metrics::METRICS_REGISTRY
    1170            0 :                     .metrics_group
    1171            0 :                     .storage_controller_reconcile_complete
    1172            0 :                     .inc(ReconcileCompleteLabelGroup {
    1173            0 :                         status: outcome_label,
    1174            0 :                     });
    1175            0 : 
    1176            0 :                 // Constructing result implicitly drops Reconciler, freeing any ReconcileUnits before the Service might
    1177            0 :                 // try and schedule more work in response to our result.
    1178            0 :                 let result = ReconcileResult {
    1179            0 :                     sequence: reconcile_seq,
    1180            0 :                     result,
    1181            0 :                     tenant_shard_id: reconciler.tenant_shard_id,
    1182            0 :                     generation: reconciler.generation,
    1183            0 :                     observed: reconciler.observed,
    1184            0 :                     pending_compute_notification: reconciler.compute_notify_failure,
    1185            0 :                 };
    1186            0 : 
    1187            0 :                 result_tx
    1188            0 :                     .send(ReconcileResultRequest::ReconcileResult(result))
    1189            0 :                     .ok();
    1190            0 :             }
    1191              :             .instrument(reconciler_span),
    1192              :         );
    1193              : 
    1194              :         self.reconciler = Some(ReconcilerHandle {
    1195              :             sequence: self.sequence,
    1196              :             handle: join_handle,
    1197              :             cancel: reconciler_cancel,
    1198              :         });
    1199              : 
    1200              :         Some(ReconcilerWaiter {
    1201              :             tenant_shard_id: self.tenant_shard_id,
    1202              :             seq_wait: self.waiter.clone(),
    1203              :             error_seq_wait: self.error_waiter.clone(),
    1204              :             error: self.last_error.clone(),
    1205              :             seq: self.sequence,
    1206              :         })
    1207              :     }
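                       :
                       :     // Added lifecycle note: the task spawned above first cancels and joins any stale reconciler,
                       :     // bails out early if its own token is already cancelled, runs reconcile(), optionally
                       :     // re-sends a pending compute notification, records the outcome metric, and finally reports a
                       :     // ReconcileResult back over `result_tx`.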
    1208              : 
    1209              :     /// Get a waiter for any reconciliation in flight, but do not start reconciliation
    1210              :     /// if it is not already running
    1211            0 :     pub(crate) fn get_waiter(&self) -> Option<ReconcilerWaiter> {
    1212            0 :         if self.reconciler.is_some() {
    1213            0 :             Some(ReconcilerWaiter {
    1214            0 :                 tenant_shard_id: self.tenant_shard_id,
    1215            0 :                 seq_wait: self.waiter.clone(),
    1216            0 :                 error_seq_wait: self.error_waiter.clone(),
    1217            0 :                 error: self.last_error.clone(),
    1218            0 :                 seq: self.sequence,
    1219            0 :             })
    1220              :         } else {
    1221            0 :             None
    1222              :         }
    1223            0 :     }
    1224              : 
    1225              :     /// Called when a ReconcileResult has been emitted and the service is updating
     1226              :     /// our state: if the result is from a sequence >= my ReconcilerHandle's sequence, then drop
    1227              :     /// the handle to indicate there is no longer a reconciliation in progress.
    1228            0 :     pub(crate) fn reconcile_complete(&mut self, sequence: Sequence) {
    1229            0 :         if let Some(reconcile_handle) = &self.reconciler {
    1230            0 :             if reconcile_handle.sequence <= sequence {
    1231            0 :                 self.reconciler = None;
    1232            0 :             }
    1233            0 :         }
    1234            0 :     }
    1235              : 
    1236              :     /// If we had any state at all referring to this node ID, drop it.  Does not
    1237              :     /// attempt to reschedule.
    1238              :     ///
    1239              :     /// Returns true if we modified the node's intent state.
    1240            0 :     pub(crate) fn deref_node(&mut self, node_id: NodeId) -> bool {
    1241            0 :         let mut intent_modified = false;
    1242            0 : 
    1243            0 :         // Drop if this node was our attached intent
    1244            0 :         if self.intent.attached == Some(node_id) {
    1245            0 :             self.intent.attached = None;
    1246            0 :             intent_modified = true;
    1247            0 :         }
    1248              : 
    1249              :         // Drop from the list of secondaries, and check if we modified it
    1250            0 :         let had_secondaries = self.intent.secondary.len();
    1251            0 :         self.intent.secondary.retain(|n| n != &node_id);
    1252            0 :         intent_modified |= self.intent.secondary.len() != had_secondaries;
    1253            0 : 
    1254            0 :         debug_assert!(!self.intent.all_pageservers().contains(&node_id));
    1255              : 
    1256            0 :         intent_modified
    1257            0 :     }
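                       :
                       :     // Added note: a hypothetical caller-side sketch (an assumption about usage, not part of the
                       :     // original source) -- dropping a node from the intent is typically followed by a re-schedule
                       :     // so a replacement location can be chosen:
                       :     //
                       :     //     if shard.deref_node(removed_node_id) {
                       :     //         shard.schedule(&mut scheduler, &mut ScheduleContext::default())?;
                       :     //     }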
    1258              : 
    1259            0 :     pub(crate) fn set_scheduling_policy(&mut self, p: ShardSchedulingPolicy) {
    1260            0 :         self.scheduling_policy = p;
    1261            0 :     }
    1262              : 
    1263            0 :     pub(crate) fn get_scheduling_policy(&self) -> &ShardSchedulingPolicy {
    1264            0 :         &self.scheduling_policy
    1265            0 :     }
    1266              : 
    1267            0 :     pub(crate) fn set_last_error(&mut self, sequence: Sequence, error: ReconcileError) {
    1268            0 :         // Ordering: always set last_error before advancing sequence, so that sequence
    1269            0 :         // waiters are guaranteed to see a Some value when they see an error.
    1270            0 :         *(self.last_error.lock().unwrap()) = Some(Arc::new(error));
    1271            0 :         self.error_waiter.advance(sequence);
    1272            0 :     }
    1273              : 
    1274            0 :     pub(crate) fn from_persistent(
    1275            0 :         tsp: TenantShardPersistence,
    1276            0 :         intent: IntentState,
    1277            0 :     ) -> anyhow::Result<Self> {
    1278            0 :         let tenant_shard_id = tsp.get_tenant_shard_id()?;
    1279            0 :         let shard_identity = tsp.get_shard_identity()?;
    1280              : 
    1281            0 :         Ok(Self {
    1282            0 :             tenant_shard_id,
    1283            0 :             shard: shard_identity,
    1284            0 :             sequence: Sequence::initial(),
    1285            0 :             generation: tsp.generation.map(|g| Generation::new(g as u32)),
    1286            0 :             policy: serde_json::from_str(&tsp.placement_policy).unwrap(),
    1287            0 :             intent,
    1288            0 :             observed: ObservedState::new(),
    1289            0 :             config: serde_json::from_str(&tsp.config).unwrap(),
    1290            0 :             reconciler: None,
    1291            0 :             splitting: tsp.splitting,
    1292            0 :             waiter: Arc::new(SeqWait::new(Sequence::initial())),
    1293            0 :             error_waiter: Arc::new(SeqWait::new(Sequence::initial())),
    1294            0 :             last_error: Arc::default(),
    1295            0 :             pending_compute_notification: false,
    1296            0 :             delayed_reconcile: false,
    1297            0 :             scheduling_policy: serde_json::from_str(&tsp.scheduling_policy).unwrap(),
    1298            0 :         })
    1299            0 :     }
    1300              : 
    1301            0 :     pub(crate) fn to_persistent(&self) -> TenantShardPersistence {
    1302            0 :         TenantShardPersistence {
    1303            0 :             tenant_id: self.tenant_shard_id.tenant_id.to_string(),
    1304            0 :             shard_number: self.tenant_shard_id.shard_number.0 as i32,
    1305            0 :             shard_count: self.tenant_shard_id.shard_count.literal() as i32,
    1306            0 :             shard_stripe_size: self.shard.stripe_size.0 as i32,
    1307            0 :             generation: self.generation.map(|g| g.into().unwrap_or(0) as i32),
    1308            0 :             generation_pageserver: self.intent.get_attached().map(|n| n.0 as i64),
    1309            0 :             placement_policy: serde_json::to_string(&self.policy).unwrap(),
    1310            0 :             config: serde_json::to_string(&self.config).unwrap(),
    1311            0 :             splitting: SplitState::default(),
    1312            0 :             scheduling_policy: serde_json::to_string(&self.scheduling_policy).unwrap(),
    1313            0 :         }
    1314            0 :     }
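                       :
                       :     // Added note on the persistence round trip: in-memory-only fields (sequence, waiters,
                       :     // reconciler handle, observed state, pending_compute_notification, delayed_reconcile) are
                       :     // reset to defaults by from_persistent, and to_persistent always writes SplitState::default()
                       :     // for `splitting`; identity, generation, placement policy, config and scheduling policy are
                       :     // what carry through.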
    1315              : }
    1316              : 
    1317              : #[cfg(test)]
    1318              : pub(crate) mod tests {
    1319              :     use pageserver_api::{
    1320              :         controller_api::NodeAvailability,
    1321              :         shard::{ShardCount, ShardNumber},
    1322              :     };
    1323              :     use utils::id::TenantId;
    1324              : 
    1325              :     use crate::scheduler::test_utils::make_test_nodes;
    1326              : 
    1327              :     use super::*;
    1328              : 
    1329           14 :     fn make_test_tenant_shard(policy: PlacementPolicy) -> TenantShard {
    1330           14 :         let tenant_id = TenantId::generate();
    1331           14 :         let shard_number = ShardNumber(0);
    1332           14 :         let shard_count = ShardCount::new(1);
    1333           14 : 
    1334           14 :         let tenant_shard_id = TenantShardId {
    1335           14 :             tenant_id,
    1336           14 :             shard_number,
    1337           14 :             shard_count,
    1338           14 :         };
    1339           14 :         TenantShard::new(
    1340           14 :             tenant_shard_id,
    1341           14 :             ShardIdentity::new(
    1342           14 :                 shard_number,
    1343           14 :                 shard_count,
    1344           14 :                 pageserver_api::shard::ShardStripeSize(32768),
    1345           14 :             )
    1346           14 :             .unwrap(),
    1347           14 :             policy,
    1348           14 :         )
    1349           14 :     }
    1350              : 
    1351            2 :     fn make_test_tenant(policy: PlacementPolicy, shard_count: ShardCount) -> Vec<TenantShard> {
    1352            2 :         let tenant_id = TenantId::generate();
    1353            2 : 
    1354            2 :         (0..shard_count.count())
    1355            8 :             .map(|i| {
    1356            8 :                 let shard_number = ShardNumber(i);
    1357            8 : 
    1358            8 :                 let tenant_shard_id = TenantShardId {
    1359            8 :                     tenant_id,
    1360            8 :                     shard_number,
    1361            8 :                     shard_count,
    1362            8 :                 };
    1363            8 :                 TenantShard::new(
    1364            8 :                     tenant_shard_id,
    1365            8 :                     ShardIdentity::new(
    1366            8 :                         shard_number,
    1367            8 :                         shard_count,
    1368            8 :                         pageserver_api::shard::ShardStripeSize(32768),
    1369            8 :                     )
    1370            8 :                     .unwrap(),
    1371            8 :                     policy.clone(),
    1372            8 :                 )
    1373            8 :             })
    1374            2 :             .collect()
    1375            2 :     }
    1376              : 
    1377              :     /// Test the scheduling behaviors used when a tenant configured for HA is subject
    1378              :     /// to nodes being marked offline.
    1379              :     #[test]
    1380            2 :     fn tenant_ha_scheduling() -> anyhow::Result<()> {
    1381            2 :         // Start with three nodes.  Our tenant will only use two.  The third one is
    1382            2 :         // expected to remain unused.
    1383            2 :         let mut nodes = make_test_nodes(3);
    1384            2 : 
    1385            2 :         let mut scheduler = Scheduler::new(nodes.values());
    1386            2 :         let mut context = ScheduleContext::default();
    1387            2 : 
    1388            2 :         let mut tenant_shard = make_test_tenant_shard(PlacementPolicy::Attached(1));
    1389            2 :         tenant_shard
    1390            2 :             .schedule(&mut scheduler, &mut context)
    1391            2 :             .expect("we have enough nodes, scheduling should work");
    1392            2 : 
     1393            2 :         // Expect the attached and secondary locations to initially be scheduled onto different nodes
    1394            2 :         assert_eq!(tenant_shard.intent.secondary.len(), 1);
    1395            2 :         assert!(tenant_shard.intent.attached.is_some());
    1396              : 
    1397            2 :         let attached_node_id = tenant_shard.intent.attached.unwrap();
    1398            2 :         let secondary_node_id = *tenant_shard.intent.secondary.iter().last().unwrap();
    1399            2 :         assert_ne!(attached_node_id, secondary_node_id);
    1400              : 
     1401              :         // Notifying that the attached node is offline should demote it to a secondary
    1402            2 :         let changed = tenant_shard
    1403            2 :             .intent
    1404            2 :             .demote_attached(&mut scheduler, attached_node_id);
    1405            2 :         assert!(changed);
    1406            2 :         assert!(tenant_shard.intent.attached.is_none());
    1407            2 :         assert_eq!(tenant_shard.intent.secondary.len(), 2);
    1408              : 
    1409              :         // Update the scheduler state to indicate the node is offline
    1410            2 :         nodes
    1411            2 :             .get_mut(&attached_node_id)
    1412            2 :             .unwrap()
    1413            2 :             .set_availability(NodeAvailability::Offline);
    1414            2 :         scheduler.node_upsert(nodes.get(&attached_node_id).unwrap());
    1415            2 : 
     1416            2 :         // Re-scheduling the shard should promote the still-available secondary node to attached
    1417            2 :         tenant_shard
    1418            2 :             .schedule(&mut scheduler, &mut context)
    1419            2 :             .expect("active nodes are available");
    1420            2 :         assert_eq!(tenant_shard.intent.attached.unwrap(), secondary_node_id);
    1421              : 
    1422              :         // The original attached node should have been retained as a secondary
    1423            2 :         assert_eq!(
    1424            2 :             *tenant_shard.intent.secondary.iter().last().unwrap(),
    1425            2 :             attached_node_id
    1426            2 :         );
    1427              : 
    1428            2 :         tenant_shard.intent.clear(&mut scheduler);
    1429            2 : 
    1430            2 :         Ok(())
    1431            2 :     }
    1432              : 
    1433              :     #[test]
    1434            2 :     fn intent_from_observed() -> anyhow::Result<()> {
    1435            2 :         let nodes = make_test_nodes(3);
    1436            2 :         let mut scheduler = Scheduler::new(nodes.values());
    1437            2 : 
    1438            2 :         let mut tenant_shard = make_test_tenant_shard(PlacementPolicy::Attached(1));
    1439            2 : 
    1440            2 :         tenant_shard.observed.locations.insert(
    1441            2 :             NodeId(3),
    1442            2 :             ObservedStateLocation {
    1443            2 :                 conf: Some(LocationConfig {
    1444            2 :                     mode: LocationConfigMode::AttachedMulti,
    1445            2 :                     generation: Some(2),
    1446            2 :                     secondary_conf: None,
    1447            2 :                     shard_number: tenant_shard.shard.number.0,
    1448            2 :                     shard_count: tenant_shard.shard.count.literal(),
    1449            2 :                     shard_stripe_size: tenant_shard.shard.stripe_size.0,
    1450            2 :                     tenant_conf: TenantConfig::default(),
    1451            2 :                 }),
    1452            2 :             },
    1453            2 :         );
    1454            2 : 
    1455            2 :         tenant_shard.observed.locations.insert(
    1456            2 :             NodeId(2),
    1457            2 :             ObservedStateLocation {
    1458            2 :                 conf: Some(LocationConfig {
    1459            2 :                     mode: LocationConfigMode::AttachedStale,
    1460            2 :                     generation: Some(1),
    1461            2 :                     secondary_conf: None,
    1462            2 :                     shard_number: tenant_shard.shard.number.0,
    1463            2 :                     shard_count: tenant_shard.shard.count.literal(),
    1464            2 :                     shard_stripe_size: tenant_shard.shard.stripe_size.0,
    1465            2 :                     tenant_conf: TenantConfig::default(),
    1466            2 :                 }),
    1467            2 :             },
    1468            2 :         );
    1469            2 : 
    1470            2 :         tenant_shard.intent_from_observed(&mut scheduler);
    1471            2 : 
     1472            2 :         // The attached location with the highest generation gets used as attached
    1473            2 :         assert_eq!(tenant_shard.intent.attached, Some(NodeId(3)));
    1474              :         // Other locations get used as secondary
    1475            2 :         assert_eq!(tenant_shard.intent.secondary, vec![NodeId(2)]);
    1476              : 
    1477            2 :         scheduler.consistency_check(nodes.values(), [&tenant_shard].into_iter())?;
    1478              : 
    1479            2 :         tenant_shard.intent.clear(&mut scheduler);
    1480            2 :         Ok(())
    1481            2 :     }
    1482              : 
    1483              :     #[test]
    1484            2 :     fn scheduling_mode() -> anyhow::Result<()> {
    1485            2 :         let nodes = make_test_nodes(3);
    1486            2 :         let mut scheduler = Scheduler::new(nodes.values());
    1487            2 : 
    1488            2 :         let mut tenant_shard = make_test_tenant_shard(PlacementPolicy::Attached(1));
    1489            2 : 
    1490            2 :         // In pause mode, schedule() shouldn't do anything
    1491            2 :         tenant_shard.scheduling_policy = ShardSchedulingPolicy::Pause;
    1492            2 :         assert!(tenant_shard
    1493            2 :             .schedule(&mut scheduler, &mut ScheduleContext::default())
    1494            2 :             .is_ok());
    1495            2 :         assert!(tenant_shard.intent.all_pageservers().is_empty());
    1496              : 
    1497              :         // In active mode, schedule() works
    1498            2 :         tenant_shard.scheduling_policy = ShardSchedulingPolicy::Active;
    1499            2 :         assert!(tenant_shard
    1500            2 :             .schedule(&mut scheduler, &mut ScheduleContext::default())
    1501            2 :             .is_ok());
    1502            2 :         assert!(!tenant_shard.intent.all_pageservers().is_empty());
    1503              : 
    1504            2 :         tenant_shard.intent.clear(&mut scheduler);
    1505            2 :         Ok(())
    1506            2 :     }
    1507              : 
    1508              :     #[test]
    1509            2 :     fn optimize_attachment() -> anyhow::Result<()> {
    1510            2 :         let nodes = make_test_nodes(3);
    1511            2 :         let mut scheduler = Scheduler::new(nodes.values());
    1512            2 : 
    1513            2 :         let mut shard_a = make_test_tenant_shard(PlacementPolicy::Attached(1));
    1514            2 :         let mut shard_b = make_test_tenant_shard(PlacementPolicy::Attached(1));
    1515            2 : 
     1516            2 :         // Initially: both shards are attached on node 1, and each has a secondary location
     1517            2 :         // on a different node.
    1518            2 :         shard_a.intent.set_attached(&mut scheduler, Some(NodeId(1)));
    1519            2 :         shard_a.intent.push_secondary(&mut scheduler, NodeId(2));
    1520            2 :         shard_b.intent.set_attached(&mut scheduler, Some(NodeId(1)));
    1521            2 :         shard_b.intent.push_secondary(&mut scheduler, NodeId(3));
    1522            2 : 
    1523            2 :         let mut schedule_context = ScheduleContext::default();
    1524            2 :         schedule_context.avoid(&shard_a.intent.all_pageservers());
    1525            2 :         schedule_context.push_attached(shard_a.intent.get_attached().unwrap());
    1526            2 :         schedule_context.avoid(&shard_b.intent.all_pageservers());
    1527            2 :         schedule_context.push_attached(shard_b.intent.get_attached().unwrap());
    1528            2 : 
    1529            2 :         let optimization_a = shard_a.optimize_attachment(&nodes, &schedule_context);
    1530            2 : 
    1531            2 :         // Either shard should recognize that it has the option to switch to a secondary location where there
    1532            2 :         // would be no other shards from the same tenant, and request to do so.
    1533            2 :         assert_eq!(
    1534            2 :             optimization_a,
    1535            2 :             Some(ScheduleOptimization {
    1536            2 :                 sequence: shard_a.sequence,
    1537            2 :                 action: ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    1538            2 :                     old_attached_node_id: NodeId(1),
    1539            2 :                     new_attached_node_id: NodeId(2)
    1540            2 :                 })
    1541            2 :             })
    1542            2 :         );
    1543              : 
     1544              :         // Note that optimizing two shards in the same tenant with the same ScheduleContext is
     1545              :         // mutually exclusive (applying one optimization invalidates the other's stats) -- it is
     1546              :         // the responsibility of [`Service::optimize_all`] to avoid trying to do optimizations
     1547              :         // for multiple shards in the same tenant at the same time.  Generating both
     1548              :         // optimizations here is done only for test purposes.
    1549            2 :         let optimization_b = shard_b.optimize_attachment(&nodes, &schedule_context);
    1550            2 :         assert_eq!(
    1551            2 :             optimization_b,
    1552            2 :             Some(ScheduleOptimization {
    1553            2 :                 sequence: shard_b.sequence,
    1554            2 :                 action: ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
    1555            2 :                     old_attached_node_id: NodeId(1),
    1556            2 :                     new_attached_node_id: NodeId(3)
    1557            2 :                 })
    1558            2 :             })
    1559            2 :         );
    1560              : 
    1561              :         // Applying these optimizations should result in the end state proposed
    1562            2 :         shard_a.apply_optimization(&mut scheduler, optimization_a.unwrap());
    1563            2 :         assert_eq!(shard_a.intent.get_attached(), &Some(NodeId(2)));
    1564            2 :         assert_eq!(shard_a.intent.get_secondary(), &vec![NodeId(1)]);
    1565            2 :         shard_b.apply_optimization(&mut scheduler, optimization_b.unwrap());
    1566            2 :         assert_eq!(shard_b.intent.get_attached(), &Some(NodeId(3)));
    1567            2 :         assert_eq!(shard_b.intent.get_secondary(), &vec![NodeId(1)]);
    1568              : 
    1569            2 :         shard_a.intent.clear(&mut scheduler);
    1570            2 :         shard_b.intent.clear(&mut scheduler);
    1571            2 : 
    1572            2 :         Ok(())
    1573            2 :     }
    1574              : 
    1575              :     #[test]
    1576            2 :     fn optimize_secondary() -> anyhow::Result<()> {
    1577            2 :         let nodes = make_test_nodes(4);
    1578            2 :         let mut scheduler = Scheduler::new(nodes.values());
    1579            2 : 
    1580            2 :         let mut shard_a = make_test_tenant_shard(PlacementPolicy::Attached(1));
    1581            2 :         let mut shard_b = make_test_tenant_shard(PlacementPolicy::Attached(1));
    1582            2 : 
     1583            2 :         // Initially: the shards are attached on different nodes, and both have their
     1584            2 :         // secondary location on node 3.
    1585            2 :         shard_a.intent.set_attached(&mut scheduler, Some(NodeId(1)));
    1586            2 :         shard_a.intent.push_secondary(&mut scheduler, NodeId(3));
    1587            2 :         shard_b.intent.set_attached(&mut scheduler, Some(NodeId(2)));
    1588            2 :         shard_b.intent.push_secondary(&mut scheduler, NodeId(3));
    1589            2 : 
    1590            2 :         let mut schedule_context = ScheduleContext::default();
    1591            2 :         schedule_context.avoid(&shard_a.intent.all_pageservers());
    1592            2 :         schedule_context.push_attached(shard_a.intent.get_attached().unwrap());
    1593            2 :         schedule_context.avoid(&shard_b.intent.all_pageservers());
    1594            2 :         schedule_context.push_attached(shard_b.intent.get_attached().unwrap());
    1595            2 : 
    1596            2 :         let optimization_a = shard_a.optimize_secondary(&scheduler, &schedule_context);
    1597            2 : 
     1598            2 :         // Since there is a node with no locations available, the shard whose secondary is on the
     1599            2 :         // node holding two locations should generate an optimization to move that secondary away
    1600            2 :         assert_eq!(
    1601            2 :             optimization_a,
    1602            2 :             Some(ScheduleOptimization {
    1603            2 :                 sequence: shard_a.sequence,
    1604            2 :                 action: ScheduleOptimizationAction::ReplaceSecondary(ReplaceSecondary {
    1605            2 :                     old_node_id: NodeId(3),
    1606            2 :                     new_node_id: NodeId(4)
    1607            2 :                 })
    1608            2 :             })
    1609            2 :         );
    1610              : 
    1611            2 :         shard_a.apply_optimization(&mut scheduler, optimization_a.unwrap());
    1612            2 :         assert_eq!(shard_a.intent.get_attached(), &Some(NodeId(1)));
    1613            2 :         assert_eq!(shard_a.intent.get_secondary(), &vec![NodeId(4)]);
    1614              : 
    1615            2 :         shard_a.intent.clear(&mut scheduler);
    1616            2 :         shard_b.intent.clear(&mut scheduler);
    1617            2 : 
    1618            2 :         Ok(())
    1619            2 :     }
    1620              : 
     1621              :     // Optimize until quiescent: this emulates what Service::optimize_all does when
     1622              :     // called repeatedly in the background.
    1623            2 :     fn optimize_til_idle(
    1624            2 :         nodes: &HashMap<NodeId, Node>,
    1625            2 :         scheduler: &mut Scheduler,
    1626            2 :         shards: &mut [TenantShard],
    1627            2 :     ) {
    1628            2 :         let mut loop_n = 0;
    1629              :         loop {
    1630           14 :             let mut schedule_context = ScheduleContext::default();
    1631           14 :             let mut any_changed = false;
    1632              : 
    1633           56 :             for shard in shards.iter() {
    1634           56 :                 schedule_context.avoid(&shard.intent.all_pageservers());
    1635           56 :                 if let Some(attached) = shard.intent.get_attached() {
    1636           56 :                     schedule_context.push_attached(*attached);
    1637           56 :                 }
    1638              :             }
    1639              : 
    1640           26 :             for shard in shards.iter_mut() {
    1641           26 :                 let optimization = shard.optimize_attachment(nodes, &schedule_context);
    1642           26 :                 if let Some(optimization) = optimization {
    1643            4 :                     shard.apply_optimization(scheduler, optimization);
    1644            4 :                     any_changed = true;
    1645            4 :                     break;
    1646           22 :                 }
    1647           22 : 
    1648           22 :                 let optimization = shard.optimize_secondary(scheduler, &schedule_context);
    1649           22 :                 if let Some(optimization) = optimization {
    1650            8 :                     shard.apply_optimization(scheduler, optimization);
    1651            8 :                     any_changed = true;
    1652            8 :                     break;
    1653           14 :                 }
    1654              :             }
    1655              : 
    1656           14 :             if !any_changed {
    1657            2 :                 break;
    1658           12 :             }
    1659           12 : 
    1660           12 :             // Assert no infinite loop
    1661           12 :             loop_n += 1;
    1662           12 :             assert!(loop_n < 1000);
    1663              :         }
    1664            2 :     }
    1665              : 
    1666              :     /// Test the balancing behavior of shard scheduling: that it achieves a balance, and
    1667              :     /// that it converges.
    1668              :     #[test]
    1669            2 :     fn optimize_add_nodes() -> anyhow::Result<()> {
    1670            2 :         let nodes = make_test_nodes(4);
    1671            2 : 
    1672            2 :         // Only show the scheduler a couple of nodes
    1673            2 :         let mut scheduler = Scheduler::new([].iter());
    1674            2 :         scheduler.node_upsert(nodes.get(&NodeId(1)).unwrap());
    1675            2 :         scheduler.node_upsert(nodes.get(&NodeId(2)).unwrap());
    1676            2 : 
    1677            2 :         let mut shards = make_test_tenant(PlacementPolicy::Attached(1), ShardCount::new(4));
    1678            2 :         let mut schedule_context = ScheduleContext::default();
    1679           10 :         for shard in &mut shards {
    1680            8 :             assert!(shard
    1681            8 :                 .schedule(&mut scheduler, &mut schedule_context)
    1682            8 :                 .is_ok());
    1683              :         }
    1684              : 
    1685              :         // We should see equal number of locations on the two nodes.
     1686              :         // We should see an equal number of locations on the two nodes.
    1687            2 :         assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 2);
    1688              : 
    1689            2 :         assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 4);
    1690            2 :         assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 2);
    1691              : 
    1692              :         // Add another two nodes: we should see the shards spread out when their optimize
    1693              :         // methods are called
    1694            2 :         scheduler.node_upsert(nodes.get(&NodeId(3)).unwrap());
    1695            2 :         scheduler.node_upsert(nodes.get(&NodeId(4)).unwrap());
    1696            2 :         optimize_til_idle(&nodes, &mut scheduler, &mut shards);
    1697            2 : 
    1698            2 :         assert_eq!(scheduler.get_node_shard_count(NodeId(1)), 2);
    1699            2 :         assert_eq!(scheduler.get_node_attached_shard_count(NodeId(1)), 1);
    1700              : 
    1701            2 :         assert_eq!(scheduler.get_node_shard_count(NodeId(2)), 2);
    1702            2 :         assert_eq!(scheduler.get_node_attached_shard_count(NodeId(2)), 1);
    1703              : 
    1704            2 :         assert_eq!(scheduler.get_node_shard_count(NodeId(3)), 2);
    1705            2 :         assert_eq!(scheduler.get_node_attached_shard_count(NodeId(3)), 1);
    1706              : 
    1707            2 :         assert_eq!(scheduler.get_node_shard_count(NodeId(4)), 2);
    1708            2 :         assert_eq!(scheduler.get_node_attached_shard_count(NodeId(4)), 1);
    1709              : 
    1710            8 :         for shard in shards.iter_mut() {
    1711            8 :             shard.intent.clear(&mut scheduler);
    1712            8 :         }
    1713              : 
    1714            2 :         Ok(())
    1715            2 :     }
    1716              : }
        

Generated by: LCOV version 2.1-beta