LCOV - code coverage report
Current view: storage_controller/src/persistence.rs
Test: 6aa1d3391fe07ff9b962ba08ababc18abe4bbab3.info    Test Date: 2025-01-31 18:59:31
Coverage:   Lines: 0.0 % (0 of 794 hit)   Functions: 0.0 % (0 of 349 hit)

            Line data    Source code
       1              : pub(crate) mod split_state;
       2              : use std::collections::HashMap;
       3              : use std::str::FromStr;
       4              : use std::time::Duration;
       5              : use std::time::Instant;
       6              : 
       7              : use self::split_state::SplitState;
       8              : use diesel::pg::PgConnection;
       9              : use diesel::prelude::*;
      10              : use diesel::Connection;
      11              : use itertools::Itertools;
      12              : use pageserver_api::controller_api::AvailabilityZone;
      13              : use pageserver_api::controller_api::MetadataHealthRecord;
      14              : use pageserver_api::controller_api::SafekeeperDescribeResponse;
      15              : use pageserver_api::controller_api::ShardSchedulingPolicy;
      16              : use pageserver_api::controller_api::SkSchedulingPolicy;
      17              : use pageserver_api::controller_api::{NodeSchedulingPolicy, PlacementPolicy};
      18              : use pageserver_api::models::TenantConfig;
      19              : use pageserver_api::shard::ShardConfigError;
      20              : use pageserver_api::shard::ShardIdentity;
      21              : use pageserver_api::shard::ShardStripeSize;
      22              : use pageserver_api::shard::{ShardCount, ShardNumber, TenantShardId};
      23              : use serde::{Deserialize, Serialize};
      24              : use utils::generation::Generation;
      25              : use utils::id::{NodeId, TenantId};
      26              : 
      27              : use crate::metrics::{
      28              :     DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
      29              : };
      30              : use crate::node::Node;
      31              : 
      32              : use diesel_migrations::{embed_migrations, EmbeddedMigrations};
      33              : const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
      34              : 
      35              : /// ## What do we store?
      36              : ///
      37              : /// The storage controller service does not store most of its state durably.
      38              : ///
      39              : /// The essential things to store durably are:
      40              : /// - generation numbers, as these must always advance monotonically to ensure data safety.
      41              : /// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
      42              : /// - Node's scheduling policies, as the source of truth for these is something external.
      43              : ///
      44              : /// Other things we store durably as an implementation detail:
       45              : /// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
      46              : ///   but it is operationally simpler to make this service the authority for which nodes
      47              : ///   it talks to.
      48              : ///
      49              : /// ## Performance/efficiency
      50              : ///
      51              : /// The storage controller service does not go via the database for most things: there are
      52              : /// a couple of places where we must, and where efficiency matters:
      53              : /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
      54              : ///   before it can attach a tenant, so this acts as a bound on how fast things like
      55              : ///   failover can happen.
      56              : /// - Pageserver re-attach: we will increment many shards' generations when this happens,
      57              : ///   so it is important to avoid e.g. issuing O(N) queries.
      58              : ///
      59              : /// Database calls relating to nodes have low performance requirements, as they are very rarely
      60              : /// updated, and reads of nodes are always from memory, not the database.  We only require that
      61              : /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
      62              : pub struct Persistence {
      63              :     connection_pool: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<PgConnection>>,
      64              : }
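
A minimal sketch of the startup sequence implied by the comments above: wait for the database (tolerating concurrent restarts), build the pool, then run the embedded migrations before anything else uses the connections. The helper name and the 30-second timeout are assumptions for illustration, not taken from this file; the `Persistence` methods it calls are defined further down in this module.

    // Hypothetical startup helper (sketch only); relies on the Persistence API below.
    async fn startup(database_url: String) -> anyhow::Result<Persistence> {
        // The database may be restarting alongside us: poll until it accepts connections.
        Persistence::await_connection(&database_url, std::time::Duration::from_secs(30)).await?;

        // Build the r2d2 connection pool.
        let persistence = Persistence::new(database_url);

        // Apply the diesel migrations embedded in this binary.
        persistence.migration_run().await?;

        Ok(persistence)
    }
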
      65              : 
      66              : /// Legacy format, for use in JSON compat objects in test environment
      67            0 : #[derive(Serialize, Deserialize)]
      68              : struct JsonPersistence {
      69              :     tenants: HashMap<TenantShardId, TenantShardPersistence>,
      70              : }
      71              : 
      72              : #[derive(thiserror::Error, Debug)]
      73              : pub(crate) enum DatabaseError {
      74              :     #[error(transparent)]
      75              :     Query(#[from] diesel::result::Error),
      76              :     #[error(transparent)]
      77              :     Connection(#[from] diesel::result::ConnectionError),
      78              :     #[error(transparent)]
      79              :     ConnectionPool(#[from] r2d2::Error),
      80              :     #[error("Logical error: {0}")]
      81              :     Logical(String),
      82              :     #[error("Migration error: {0}")]
      83              :     Migration(String),
      84              : }
      85              : 
      86              : #[derive(measured::FixedCardinalityLabel, Copy, Clone)]
      87              : pub(crate) enum DatabaseOperation {
      88              :     InsertNode,
      89              :     UpdateNode,
      90              :     DeleteNode,
      91              :     ListNodes,
      92              :     BeginShardSplit,
      93              :     CompleteShardSplit,
      94              :     AbortShardSplit,
      95              :     Detach,
      96              :     ReAttach,
      97              :     IncrementGeneration,
      98              :     TenantGenerations,
      99              :     ShardGenerations,
     100              :     ListTenantShards,
     101              :     LoadTenant,
     102              :     InsertTenantShards,
     103              :     UpdateTenantShard,
     104              :     DeleteTenant,
     105              :     UpdateTenantConfig,
     106              :     UpdateMetadataHealth,
     107              :     ListMetadataHealth,
     108              :     ListMetadataHealthUnhealthy,
     109              :     ListMetadataHealthOutdated,
     110              :     ListSafekeepers,
     111              :     GetLeader,
     112              :     UpdateLeader,
     113              :     SetPreferredAzs,
     114              : }
     115              : 
     116              : #[must_use]
     117              : pub(crate) enum AbortShardSplitStatus {
     118              :     /// We aborted the split in the database by reverting to the parent shards
     119              :     Aborted,
     120              :     /// The split had already been persisted.
     121              :     Complete,
     122              : }
     123              : 
     124              : pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
     125              : 
     126              : /// Some methods can operate on either a whole tenant or a single shard
     127              : pub(crate) enum TenantFilter {
     128              :     Tenant(TenantId),
     129              :     Shard(TenantShardId),
     130              : }
     131              : 
     132              : /// Represents the results of looking up generation+pageserver for the shards of a tenant
     133              : pub(crate) struct ShardGenerationState {
     134              :     pub(crate) tenant_shard_id: TenantShardId,
     135              :     pub(crate) generation: Option<Generation>,
     136              :     pub(crate) generation_pageserver: Option<NodeId>,
     137              : }
     138              : 
     139              : impl Persistence {
     140              :     // The default postgres connection limit is 100.  We use up to 99, to leave one free for a human admin under
     141              :     // normal circumstances.  This assumes we have exclusive use of the database cluster to which we connect.
     142              :     pub const MAX_CONNECTIONS: u32 = 99;
     143              : 
     144              :     // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
     145              :     const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
     146              :     const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
     147              : 
     148            0 :     pub fn new(database_url: String) -> Self {
     149            0 :         let manager = diesel::r2d2::ConnectionManager::<PgConnection>::new(database_url);
     150            0 : 
     151            0 :         // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
     152            0 :         // to execute queries (database queries are not generally on latency-sensitive paths).
     153            0 :         let connection_pool = diesel::r2d2::Pool::builder()
     154            0 :             .max_size(Self::MAX_CONNECTIONS)
     155            0 :             .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
     156            0 :             .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
     157            0 :             // Always keep at least one connection ready to go
     158            0 :             .min_idle(Some(1))
     159            0 :             .test_on_check_out(true)
     160            0 :             .build(manager)
     161            0 :             .expect("Could not build connection pool");
     162            0 : 
     163            0 :         Self { connection_pool }
     164            0 :     }
     165              : 
     166              :     /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
      167              :     /// database and the storage controller, so the database might not be available right away
     168            0 :     pub async fn await_connection(
     169            0 :         database_url: &str,
     170            0 :         timeout: Duration,
     171            0 :     ) -> Result<(), diesel::ConnectionError> {
     172            0 :         let started_at = Instant::now();
     173              :         loop {
     174            0 :             match PgConnection::establish(database_url) {
     175              :                 Ok(_) => {
     176            0 :                     tracing::info!("Connected to database.");
     177            0 :                     return Ok(());
     178              :                 }
     179            0 :                 Err(e) => {
     180            0 :                     if started_at.elapsed() > timeout {
     181            0 :                         return Err(e);
     182              :                     } else {
     183            0 :                         tracing::info!("Database not yet available, waiting... ({e})");
     184            0 :                         tokio::time::sleep(Duration::from_millis(100)).await;
     185              :                     }
     186              :                 }
     187              :             }
     188              :         }
     189            0 :     }
     190              : 
     191              :     /// Execute the diesel migrations that are built into this binary
     192            0 :     pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
     193              :         use diesel_migrations::{HarnessWithOutput, MigrationHarness};
     194              : 
     195            0 :         self.with_conn(move |conn| -> DatabaseResult<()> {
     196            0 :             HarnessWithOutput::write_to_stdout(conn)
     197            0 :                 .run_pending_migrations(MIGRATIONS)
     198            0 :                 .map(|_| ())
     199            0 :                 .map_err(|e| DatabaseError::Migration(e.to_string()))
     200            0 :         })
     201            0 :         .await
     202            0 :     }
     203              : 
     204              :     /// Wraps `with_conn` in order to collect latency and error metrics
     205            0 :     async fn with_measured_conn<F, R>(&self, op: DatabaseOperation, func: F) -> DatabaseResult<R>
     206            0 :     where
     207            0 :         F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
     208            0 :         R: Send + 'static,
     209            0 :     {
     210            0 :         let latency = &METRICS_REGISTRY
     211            0 :             .metrics_group
     212            0 :             .storage_controller_database_query_latency;
     213            0 :         let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });
     214              : 
     215            0 :         let res = self.with_conn(func).await;
     216              : 
     217            0 :         if let Err(err) = &res {
     218            0 :             let error_counter = &METRICS_REGISTRY
     219            0 :                 .metrics_group
     220            0 :                 .storage_controller_database_query_error;
     221            0 :             error_counter.inc(DatabaseQueryErrorLabelGroup {
     222            0 :                 error_type: err.error_label(),
     223            0 :                 operation: op,
     224            0 :             })
     225            0 :         }
     226              : 
     227            0 :         res
     228            0 :     }
     229              : 
     230              :     /// Call the provided function in a tokio blocking thread, with a Diesel database connection.
     231            0 :     async fn with_conn<F, R>(&self, func: F) -> DatabaseResult<R>
     232            0 :     where
     233            0 :         F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
     234            0 :         R: Send + 'static,
     235            0 :     {
     236              :         // A generous allowance for how many times we may retry serializable transactions
     237              :         // before giving up.  This is not expected to be hit: it is a defensive measure in case we
     238              :         // somehow engineer a situation where duelling transactions might otherwise live-lock.
     239              :         const MAX_RETRIES: usize = 128;
     240              : 
     241            0 :         let mut conn = self.connection_pool.get()?;
     242            0 :         tokio::task::spawn_blocking(move || -> DatabaseResult<R> {
     243            0 :             let mut retry_count = 0;
     244              :             loop {
     245            0 :                 match conn.build_transaction().serializable().run(|c| func(c)) {
     246            0 :                     Ok(r) => break Ok(r),
     247              :                     Err(
     248            0 :                         err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
     249            0 :                             diesel::result::DatabaseErrorKind::SerializationFailure,
     250            0 :                             _,
     251            0 :                         )),
     252            0 :                     ) => {
     253            0 :                         retry_count += 1;
     254            0 :                         if retry_count > MAX_RETRIES {
     255            0 :                             tracing::error!(
     256            0 :                                 "Exceeded max retries on SerializationFailure errors: {err:?}"
     257              :                             );
     258            0 :                             break Err(err);
     259              :                         } else {
     260              :                             // Retry on serialization errors: these are expected, because even though our
     261              :                             // transactions don't fight for the same rows, they will occasionally collide
     262              :                             // on index pages (e.g. increment_generation for unrelated shards can collide)
     263            0 :                             tracing::debug!(
     264            0 :                                 "Retrying transaction on serialization failure {err:?}"
     265              :                             );
     266            0 :                             continue;
     267              :                         }
     268              :                     }
     269            0 :                     Err(e) => break Err(e),
     270              :                 }
     271              :             }
     272            0 :         })
     273            0 :         .await
     274            0 :         .expect("Task panic")
     275            0 :     }
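
The retry logic above, shown in isolation: a minimal sketch of retrying a SERIALIZABLE diesel transaction on serialization failure, with the retry cap and logging omitted for brevity. The helper name is illustrative and not part of this module.

    // Sketch: retry a serializable transaction until it stops failing with a
    // serialization error (real code should also cap the retries, as above).
    fn run_serializable<T>(
        conn: &mut PgConnection,
        func: impl Fn(&mut PgConnection) -> Result<T, diesel::result::Error>,
    ) -> Result<T, diesel::result::Error> {
        use diesel::result::{DatabaseErrorKind, Error};
        loop {
            match conn.build_transaction().serializable().run(|c| func(c)) {
                // Serialization failures are expected under SERIALIZABLE isolation
                // (e.g. index page collisions) and are safe to retry.
                Err(Error::DatabaseError(DatabaseErrorKind::SerializationFailure, _)) => continue,
                other => return other,
            }
        }
    }
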
     276              : 
     277              :     /// When a node is first registered, persist it before using it for anything
     278            0 :     pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
     279            0 :         let np = node.to_persistent();
     280            0 :         self.with_measured_conn(
     281            0 :             DatabaseOperation::InsertNode,
     282            0 :             move |conn| -> DatabaseResult<()> {
     283            0 :                 diesel::insert_into(crate::schema::nodes::table)
     284            0 :                     .values(&np)
     285            0 :                     .execute(conn)?;
     286            0 :                 Ok(())
     287            0 :             },
     288            0 :         )
     289            0 :         .await
     290            0 :     }
     291              : 
     292              :     /// At startup, populate the list of nodes which our shards may be placed on
     293            0 :     pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
     294            0 :         let nodes: Vec<NodePersistence> = self
     295            0 :             .with_measured_conn(
     296            0 :                 DatabaseOperation::ListNodes,
     297            0 :                 move |conn| -> DatabaseResult<_> {
     298            0 :                     Ok(crate::schema::nodes::table.load::<NodePersistence>(conn)?)
     299            0 :                 },
     300            0 :             )
     301            0 :             .await?;
     302              : 
     303            0 :         tracing::info!("list_nodes: loaded {} nodes", nodes.len());
     304              : 
     305            0 :         Ok(nodes)
     306            0 :     }
     307              : 
     308            0 :     pub(crate) async fn update_node(
     309            0 :         &self,
     310            0 :         input_node_id: NodeId,
     311            0 :         input_scheduling: NodeSchedulingPolicy,
     312            0 :     ) -> DatabaseResult<()> {
     313              :         use crate::schema::nodes::dsl::*;
     314            0 :         let updated = self
     315            0 :             .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
     316            0 :                 let updated = diesel::update(nodes)
     317            0 :                     .filter(node_id.eq(input_node_id.0 as i64))
     318            0 :                     .set((scheduling_policy.eq(String::from(input_scheduling)),))
     319            0 :                     .execute(conn)?;
     320            0 :                 Ok(updated)
     321            0 :             })
     322            0 :             .await?;
     323              : 
     324            0 :         if updated != 1 {
     325            0 :             Err(DatabaseError::Logical(format!(
     326            0 :                 "Node {node_id:?} not found for update",
     327            0 :             )))
     328              :         } else {
     329            0 :             Ok(())
     330              :         }
     331            0 :     }
     332              : 
     333              :     /// At startup, load the high level state for shards, such as their config + policy.  This will
     334              :     /// be enriched at runtime with state discovered on pageservers.
     335              :     ///
     336              :     /// We exclude shards configured to be detached.  During startup, if we see any attached locations
     337              :     /// for such shards, they will automatically be detached as 'orphans'.
     338            0 :     pub(crate) async fn load_active_tenant_shards(
     339            0 :         &self,
     340            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     341              :         use crate::schema::tenant_shards::dsl::*;
     342            0 :         self.with_measured_conn(
     343            0 :             DatabaseOperation::ListTenantShards,
     344            0 :             move |conn| -> DatabaseResult<_> {
     345            0 :                 let query = tenant_shards.filter(
     346            0 :                     placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
     347            0 :                 );
     348            0 :                 let result = query.load::<TenantShardPersistence>(conn)?;
     349              : 
     350            0 :                 Ok(result)
     351            0 :             },
     352            0 :         )
     353            0 :         .await
     354            0 :     }
     355              : 
     356              :     /// When restoring a previously detached tenant into memory, load it from the database
     357            0 :     pub(crate) async fn load_tenant(
     358            0 :         &self,
     359            0 :         filter_tenant_id: TenantId,
     360            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     361              :         use crate::schema::tenant_shards::dsl::*;
     362            0 :         self.with_measured_conn(
     363            0 :             DatabaseOperation::LoadTenant,
     364            0 :             move |conn| -> DatabaseResult<_> {
     365            0 :                 let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
     366            0 :                 let result = query.load::<TenantShardPersistence>(conn)?;
     367              : 
     368            0 :                 Ok(result)
     369            0 :             },
     370            0 :         )
     371            0 :         .await
     372            0 :     }
     373              : 
     374              :     /// Tenants must be persisted before we schedule them for the first time.  This enables us
     375              :     /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
     376            0 :     pub(crate) async fn insert_tenant_shards(
     377            0 :         &self,
     378            0 :         shards: Vec<TenantShardPersistence>,
     379            0 :     ) -> DatabaseResult<()> {
     380              :         use crate::schema::metadata_health;
     381              :         use crate::schema::tenant_shards;
     382              : 
     383            0 :         let now = chrono::Utc::now();
     384            0 : 
     385            0 :         let metadata_health_records = shards
     386            0 :             .iter()
     387            0 :             .map(|t| MetadataHealthPersistence {
     388            0 :                 tenant_id: t.tenant_id.clone(),
     389            0 :                 shard_number: t.shard_number,
     390            0 :                 shard_count: t.shard_count,
     391            0 :                 healthy: true,
     392            0 :                 last_scrubbed_at: now,
     393            0 :             })
     394            0 :             .collect::<Vec<_>>();
     395            0 : 
     396            0 :         self.with_measured_conn(
     397            0 :             DatabaseOperation::InsertTenantShards,
     398            0 :             move |conn| -> DatabaseResult<()> {
     399            0 :                 diesel::insert_into(tenant_shards::table)
     400            0 :                     .values(&shards)
     401            0 :                     .execute(conn)?;
     402              : 
     403            0 :                 diesel::insert_into(metadata_health::table)
     404            0 :                     .values(&metadata_health_records)
     405            0 :                     .execute(conn)?;
     406            0 :                 Ok(())
     407            0 :             },
     408            0 :         )
     409            0 :         .await
     410            0 :     }
     411              : 
     412              :     /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
     413              :     /// the tenant from memory on this server.
     414            0 :     pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
     415              :         use crate::schema::tenant_shards::dsl::*;
     416            0 :         self.with_measured_conn(
     417            0 :             DatabaseOperation::DeleteTenant,
     418            0 :             move |conn| -> DatabaseResult<()> {
     419            0 :                 // `metadata_health` status (if exists) is also deleted based on the cascade behavior.
     420            0 :                 diesel::delete(tenant_shards)
     421            0 :                     .filter(tenant_id.eq(del_tenant_id.to_string()))
     422            0 :                     .execute(conn)?;
     423            0 :                 Ok(())
     424            0 :             },
     425            0 :         )
     426            0 :         .await
     427            0 :     }
     428              : 
     429            0 :     pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
     430              :         use crate::schema::nodes::dsl::*;
     431            0 :         self.with_measured_conn(
     432            0 :             DatabaseOperation::DeleteNode,
     433            0 :             move |conn| -> DatabaseResult<()> {
     434            0 :                 diesel::delete(nodes)
     435            0 :                     .filter(node_id.eq(del_node_id.0 as i64))
     436            0 :                     .execute(conn)?;
     437              : 
     438            0 :                 Ok(())
     439            0 :             },
     440            0 :         )
     441            0 :         .await
     442            0 :     }
     443              : 
      444              :     /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
     445              :     /// batched increment of the generations of all tenants whose generation_pageserver is equal to
     446              :     /// the node that called /re-attach.
     447              :     #[tracing::instrument(skip_all, fields(node_id))]
     448              :     pub(crate) async fn re_attach(
     449              :         &self,
     450              :         input_node_id: NodeId,
     451              :     ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
     452              :         use crate::schema::nodes::dsl::scheduling_policy;
     453              :         use crate::schema::nodes::dsl::*;
     454              :         use crate::schema::tenant_shards::dsl::*;
     455              :         let updated = self
     456            0 :             .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
     457            0 :                 let rows_updated = diesel::update(tenant_shards)
     458            0 :                     .filter(generation_pageserver.eq(input_node_id.0 as i64))
     459            0 :                     .set(generation.eq(generation + 1))
     460            0 :                     .execute(conn)?;
     461              : 
     462            0 :                 tracing::info!("Incremented {} tenants' generations", rows_updated);
     463              : 
     464              :                 // TODO: UPDATE+SELECT in one query
     465              : 
     466            0 :                 let updated = tenant_shards
     467            0 :                     .filter(generation_pageserver.eq(input_node_id.0 as i64))
     468            0 :                     .select(TenantShardPersistence::as_select())
     469            0 :                     .load(conn)?;
     470              : 
     471              :                 // If the node went through a drain and restart phase before re-attaching,
      472              :                 // then reset its node scheduling policy to active.
     473            0 :                 diesel::update(nodes)
     474            0 :                     .filter(node_id.eq(input_node_id.0 as i64))
     475            0 :                     .filter(
     476            0 :                         scheduling_policy
     477            0 :                             .eq(String::from(NodeSchedulingPolicy::PauseForRestart))
     478            0 :                             .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Draining)))
     479            0 :                             .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Filling))),
     480            0 :                     )
     481            0 :                     .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active)))
     482            0 :                     .execute(conn)?;
     483              : 
     484            0 :                 Ok(updated)
     485            0 :             })
     486              :             .await?;
     487              : 
     488              :         let mut result = HashMap::new();
     489              :         for tsp in updated {
     490              :             let tenant_shard_id = TenantShardId {
     491              :                 tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
     492            0 :                     .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
     493              :                 shard_number: ShardNumber(tsp.shard_number as u8),
     494              :                 shard_count: ShardCount::new(tsp.shard_count as u8),
     495              :             };
     496              : 
     497              :             let Some(g) = tsp.generation else {
     498              :                 // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
     499              :                 // we only set generation_pageserver when setting generation.
     500              :                 return Err(DatabaseError::Logical(
     501              :                     "Generation should always be set after incrementing".to_string(),
     502              :                 ));
     503              :             };
     504              :             result.insert(tenant_shard_id, Generation::new(g as u32));
     505              :         }
     506              : 
     507              :         Ok(result)
     508              :     }
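
Regarding the `TODO: UPDATE+SELECT in one query` above: one plausible shape is a single raw `UPDATE ... RETURNING` statement run inside the same closure (where `conn` and `input_node_id` are in scope). This is a hedged sketch, not what the code currently does; it leans on `TenantShardPersistence` being loadable from a raw query, which `shard_generations` below already relies on.

    // Hypothetical replacement for the UPDATE followed by SELECT (sketch only):
    let updated: Vec<TenantShardPersistence> = diesel::sql_query(
        "UPDATE tenant_shards SET generation = generation + 1 \
         WHERE generation_pageserver = $1 RETURNING *",
    )
    .bind::<diesel::sql_types::BigInt, _>(input_node_id.0 as i64)
    .load(conn)?;
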
     509              : 
     510              :     /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
     511              :     /// advancing generation number.  We also store the NodeId for which the generation was issued, so that in
     512              :     /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
     513            0 :     pub(crate) async fn increment_generation(
     514            0 :         &self,
     515            0 :         tenant_shard_id: TenantShardId,
     516            0 :         node_id: NodeId,
     517            0 :     ) -> anyhow::Result<Generation> {
     518              :         use crate::schema::tenant_shards::dsl::*;
     519            0 :         let updated = self
     520            0 :             .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
     521            0 :                 let updated = diesel::update(tenant_shards)
     522            0 :                     .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     523            0 :                     .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     524            0 :                     .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     525            0 :                     .set((
     526            0 :                         generation.eq(generation + 1),
     527            0 :                         generation_pageserver.eq(node_id.0 as i64),
     528            0 :                     ))
     529            0 :                     // TODO: only returning() the generation column
     530            0 :                     .returning(TenantShardPersistence::as_returning())
     531            0 :                     .get_result(conn)?;
     532              : 
     533            0 :                 Ok(updated)
     534            0 :             })
     535            0 :             .await?;
     536              : 
      537              :         // Generation is always non-null in the result: if the generation column had been NULL, then we
      538              :         // should have experienced an SQL conflict error while executing a query that tries to increment it.
     539            0 :         debug_assert!(updated.generation.is_some());
     540            0 :         let Some(g) = updated.generation else {
     541            0 :             return Err(DatabaseError::Logical(
     542            0 :                 "Generation should always be set after incrementing".to_string(),
     543            0 :             )
     544            0 :             .into());
     545              :         };
     546              : 
     547            0 :         Ok(Generation::new(g as u32))
     548            0 :     }
     549              : 
     550              :     /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
     551              :     /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
     552              :     /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
     553              :     /// latest generation)
     554              :     ///
     555              :     /// If the tenant doesn't exist, an empty vector is returned.
     556              :     ///
     557              :     /// Output is sorted by shard number
     558            0 :     pub(crate) async fn tenant_generations(
     559            0 :         &self,
     560            0 :         filter_tenant_id: TenantId,
     561            0 :     ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
     562              :         use crate::schema::tenant_shards::dsl::*;
     563            0 :         let rows = self
     564            0 :             .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
     565            0 :                 let result = tenant_shards
     566            0 :                     .filter(tenant_id.eq(filter_tenant_id.to_string()))
     567            0 :                     .select(TenantShardPersistence::as_select())
     568            0 :                     .order(shard_number)
     569            0 :                     .load(conn)?;
     570            0 :                 Ok(result)
     571            0 :             })
     572            0 :             .await?;
     573              : 
     574            0 :         Ok(rows
     575            0 :             .into_iter()
     576            0 :             .map(|p| ShardGenerationState {
     577            0 :                 tenant_shard_id: p
     578            0 :                     .get_tenant_shard_id()
     579            0 :                     .expect("Corrupt tenant shard id in database"),
     580            0 :                 generation: p.generation.map(|g| Generation::new(g as u32)),
     581            0 :                 generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
     582            0 :             })
     583            0 :             .collect())
     584            0 :     }
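
A sketch of the re-check pattern described in the comment above: read the generations, perform the timeline operation, then read them again and require that no shard's generation advanced. The helper name is illustrative; pairing the two result vectors by position assumes the shard set did not change in between, which is acceptable for a sketch since the output is sorted by shard number.

    async fn timeline_op_with_generation_recheck(
        persistence: &Persistence,
        tenant_id: TenantId,
    ) -> anyhow::Result<()> {
        let before = persistence.tenant_generations(tenant_id).await?;

        // ... issue the timeline CRUD requests to the currently attached pageservers ...

        let after = persistence.tenant_generations(tenant_id).await?;
        for (b, a) in before.iter().zip(after.iter()) {
            // If a generation advanced, the operation may have gone to a stale
            // attachment and cannot be considered durable.
            anyhow::ensure!(
                b.generation == a.generation,
                "generation changed for {} during timeline operation",
                b.tenant_shard_id
            );
        }
        Ok(())
    }
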
     585              : 
     586              :     /// Read the generation number of specific tenant shards
     587              :     ///
     588              :     /// Output is unsorted.  Output may not include values for all inputs, if they are missing in the database.
     589            0 :     pub(crate) async fn shard_generations(
     590            0 :         &self,
     591            0 :         mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
     592            0 :     ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
     593            0 :         let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);
     594              : 
     595              :         // We will chunk our input to avoid composing arbitrarily long `IN` clauses.  Typically we are
     596              :         // called with a single digit number of IDs, but in principle we could be called with tens
     597              :         // of thousands (all the shards on one pageserver) from the generation validation API.
     598            0 :         loop {
     599            0 :             // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
     600            0 :             // large query strings.
     601            0 :             let chunk_ids = tenant_shard_ids.by_ref().take(32);
     602            0 : 
     603            0 :             // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
     604            0 :             let in_clause = chunk_ids
     605            0 :                 .map(|tsid| {
     606            0 :                     format!(
     607            0 :                         "('{}', {}, {})",
     608            0 :                         tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
     609            0 :                     )
     610            0 :                 })
     611            0 :                 .join(",");
     612            0 : 
     613            0 :             // We are done when our iterator gives us nothing to filter on
     614            0 :             if in_clause.is_empty() {
     615            0 :                 break;
     616            0 :             }
     617              : 
     618            0 :             let chunk_rows = self
     619            0 :                 .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
     620              :                     // diesel doesn't support multi-column IN queries, so we compose raw SQL.  No escaping is required because
     621              :                     // the inputs are strongly typed and cannot carry any user-supplied raw string content.
     622            0 :                     let result : Vec<TenantShardPersistence> = diesel::sql_query(
     623            0 :                         format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
     624            0 :                     ).load(conn)?;
     625              : 
     626            0 :                     Ok(result)
     627            0 :                 })
     628            0 :                 .await?;
     629            0 :             rows.extend(chunk_rows.into_iter())
     630              :         }
     631              : 
     632            0 :         Ok(rows
     633            0 :             .into_iter()
     634            0 :             .map(|tsp| {
     635            0 :                 (
     636            0 :                     tsp.get_tenant_shard_id()
     637            0 :                         .expect("Bad tenant ID in database"),
     638            0 :                     tsp.generation.map(|g| Generation::new(g as u32)),
     639            0 :                 )
     640            0 :             })
     641            0 :             .collect())
     642            0 :     }
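
The chunking idiom used above, shown in isolation: `by_ref().take(32)` repeatedly drains up to 32 items from the same iterator until nothing is left. This standalone helper is an illustration, not part of this module.

    fn chunks_of<T>(mut items: impl Iterator<Item = T>, chunk_size: usize) -> Vec<Vec<T>> {
        let mut chunks = Vec::new();
        loop {
            // take() borrows the shared iterator, so each pass resumes where the last stopped.
            let chunk: Vec<T> = items.by_ref().take(chunk_size).collect();
            if chunk.is_empty() {
                break;
            }
            chunks.push(chunk);
        }
        chunks
    }

For example, `chunks_of(0..70, 32)` yields chunks of 32, 32 and 6 items, which mirrors how the composed `IN` clause above stays bounded regardless of how many shard IDs are passed in.
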
     643              : 
     644              :     #[allow(non_local_definitions)]
     645              :     /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
     646              :     ///
      647              :     /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
     648              :     /// API: use [`Self::increment_generation`] instead.  Setting the generation via this route is a one-time thing
     649              :     /// that we only do the first time a tenant is set to an attached policy via /location_config.
     650            0 :     pub(crate) async fn update_tenant_shard(
     651            0 :         &self,
     652            0 :         tenant: TenantFilter,
     653            0 :         input_placement_policy: Option<PlacementPolicy>,
     654            0 :         input_config: Option<TenantConfig>,
     655            0 :         input_generation: Option<Generation>,
     656            0 :         input_scheduling_policy: Option<ShardSchedulingPolicy>,
     657            0 :     ) -> DatabaseResult<()> {
     658              :         use crate::schema::tenant_shards::dsl::*;
     659              : 
     660            0 :         self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
     661            0 :             let query = match tenant {
     662            0 :                 TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
     663            0 :                     .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     664            0 :                     .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     665            0 :                     .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     666            0 :                     .into_boxed(),
     667            0 :                 TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
     668            0 :                     .filter(tenant_id.eq(input_tenant_id.to_string()))
     669            0 :                     .into_boxed(),
     670              :             };
     671              : 
     672              :             // Clear generation_pageserver if we are moving into a state where we won't have
     673              :             // any attached pageservers.
     674            0 :             let input_generation_pageserver = match input_placement_policy {
     675            0 :                 None | Some(PlacementPolicy::Attached(_)) => None,
     676            0 :                 Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
     677              :             };
     678              : 
     679            0 :             #[derive(AsChangeset)]
     680              :             #[diesel(table_name = crate::schema::tenant_shards)]
     681              :             struct ShardUpdate {
     682              :                 generation: Option<i32>,
     683              :                 placement_policy: Option<String>,
     684              :                 config: Option<String>,
     685              :                 scheduling_policy: Option<String>,
     686              :                 generation_pageserver: Option<Option<i64>>,
     687              :             }
     688              : 
     689            0 :             let update = ShardUpdate {
     690            0 :                 generation: input_generation.map(|g| g.into().unwrap() as i32),
     691            0 :                 placement_policy: input_placement_policy
     692            0 :                     .as_ref()
     693            0 :                     .map(|p| serde_json::to_string(&p).unwrap()),
     694            0 :                 config: input_config
     695            0 :                     .as_ref()
     696            0 :                     .map(|c| serde_json::to_string(&c).unwrap()),
     697            0 :                 scheduling_policy: input_scheduling_policy
     698            0 :                     .map(|p| serde_json::to_string(&p).unwrap()),
     699            0 :                 generation_pageserver: input_generation_pageserver,
     700            0 :             };
     701            0 : 
     702            0 :             query.set(update).execute(conn)?;
     703              : 
     704            0 :             Ok(())
     705            0 :         })
     706            0 :         .await?;
     707              : 
     708            0 :         Ok(())
     709            0 :     }
     710              : 
     711              :     /// Note that passing None for a shard clears the preferred AZ (rather than leaving it unmodified)
     712            0 :     pub(crate) async fn set_tenant_shard_preferred_azs(
     713            0 :         &self,
     714            0 :         preferred_azs: Vec<(TenantShardId, Option<AvailabilityZone>)>,
     715            0 :     ) -> DatabaseResult<Vec<(TenantShardId, Option<AvailabilityZone>)>> {
     716              :         use crate::schema::tenant_shards::dsl::*;
     717              : 
     718            0 :         self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
     719            0 :             let mut shards_updated = Vec::default();
     720              : 
     721            0 :             for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
     722            0 :                 let updated = diesel::update(tenant_shards)
     723            0 :                     .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     724            0 :                     .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     725            0 :                     .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     726            0 :                     .set(preferred_az_id.eq(preferred_az.as_ref().map(|az| az.0.clone())))
     727            0 :                     .execute(conn)?;
     728              : 
     729            0 :                 if updated == 1 {
     730            0 :                     shards_updated.push((*tenant_shard_id, preferred_az.clone()));
     731            0 :                 }
     732              :             }
     733              : 
     734            0 :             Ok(shards_updated)
     735            0 :         })
     736            0 :         .await
     737            0 :     }
     738              : 
     739            0 :     pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
     740              :         use crate::schema::tenant_shards::dsl::*;
     741            0 :         self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
     742            0 :             let updated = diesel::update(tenant_shards)
     743            0 :                 .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     744            0 :                 .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     745            0 :                 .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     746            0 :                 .set((
     747            0 :                     generation_pageserver.eq(Option::<i64>::None),
     748            0 :                     placement_policy.eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
     749            0 :                 ))
     750            0 :                 .execute(conn)?;
     751              : 
     752            0 :             Ok(updated)
     753            0 :         })
     754            0 :         .await?;
     755              : 
     756            0 :         Ok(())
     757            0 :     }
     758              : 
     759              :     // When we start shard splitting, we must durably mark the tenant so that
     760              :     // on restart, we know that we must go through recovery.
     761              :     //
     762              :     // We create the child shards here, so that they will be available for increment_generation calls
     763              :     // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
     764            0 :     pub(crate) async fn begin_shard_split(
     765            0 :         &self,
     766            0 :         old_shard_count: ShardCount,
     767            0 :         split_tenant_id: TenantId,
     768            0 :         parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
     769            0 :     ) -> DatabaseResult<()> {
     770              :         use crate::schema::tenant_shards::dsl::*;
     771            0 :         self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| -> DatabaseResult<()> {
     772              :             // Mark parent shards as splitting
     773              : 
     774            0 :             let updated = diesel::update(tenant_shards)
     775            0 :                 .filter(tenant_id.eq(split_tenant_id.to_string()))
     776            0 :                 .filter(shard_count.eq(old_shard_count.literal() as i32))
     777            0 :                 .set((splitting.eq(1),))
     778            0 :                 .execute(conn)?;
     779            0 :             if u8::try_from(updated)
     780            0 :                 .map_err(|_| DatabaseError::Logical(
     781            0 :                     format!("Overflow existing shard count {} while splitting", updated))
     782            0 :                 )? != old_shard_count.count() {
     783              :                 // Perhaps a deletion or another split raced with this attempt to split, mutating
     784              :                 // the parent shards that we intend to split. In this case the split request should fail.
     785            0 :                 return Err(DatabaseError::Logical(
     786            0 :                     format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
     787            0 :                 ));
     788            0 :             }
     789            0 : 
     790            0 :             // FIXME: spurious clone to sidestep closure move rules
     791            0 :             let parent_to_children = parent_to_children.clone();
     792              : 
     793              :             // Insert child shards
     794            0 :             for (parent_shard_id, children) in parent_to_children {
     795            0 :                 let mut parent = crate::schema::tenant_shards::table
     796            0 :                     .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
     797            0 :                     .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
     798            0 :                     .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
     799            0 :                     .load::<TenantShardPersistence>(conn)?;
     800            0 :                 let parent = if parent.len() != 1 {
     801            0 :                     return Err(DatabaseError::Logical(format!(
     802            0 :                         "Parent shard {parent_shard_id} not found"
     803            0 :                     )));
     804              :                 } else {
     805            0 :                     parent.pop().unwrap()
     806              :                 };
     807            0 :                 for mut shard in children {
     808              :                     // Carry the parent's generation into the child
     809            0 :                     shard.generation = parent.generation;
     810            0 : 
     811            0 :                     debug_assert!(shard.splitting == SplitState::Splitting);
     812            0 :                     diesel::insert_into(tenant_shards)
     813            0 :                         .values(shard)
     814            0 :                         .execute(conn)?;
     815              :                 }
     816              :             }
     817              : 
     818            0 :             Ok(())
     819            0 :         })
     820            0 :         .await
     821            0 :     }
     822              : 
     823              :     // When we finish shard splitting, we must atomically clean up the old shards
     824              :     // and insert the new shards, and clear the splitting marker.
     825            0 :     pub(crate) async fn complete_shard_split(
     826            0 :         &self,
     827            0 :         split_tenant_id: TenantId,
     828            0 :         old_shard_count: ShardCount,
     829            0 :     ) -> DatabaseResult<()> {
     830              :         use crate::schema::tenant_shards::dsl::*;
     831            0 :         self.with_measured_conn(
     832            0 :             DatabaseOperation::CompleteShardSplit,
     833            0 :             move |conn| -> DatabaseResult<()> {
     834            0 :                 // Drop parent shards
     835            0 :                 diesel::delete(tenant_shards)
     836            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     837            0 :                     .filter(shard_count.eq(old_shard_count.literal() as i32))
     838            0 :                     .execute(conn)?;
     839              : 
      840              :                 // Clear the splitting flag on the remaining (child) shards
     841            0 :                 let updated = diesel::update(tenant_shards)
     842            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     843            0 :                     .set((splitting.eq(0),))
     844            0 :                     .execute(conn)?;
     845            0 :                 debug_assert!(updated > 0);
     846              : 
     847            0 :                 Ok(())
     848            0 :             },
     849            0 :         )
     850            0 :         .await
     851            0 :     }
     852              : 
     853              :     /// Used when the remote part of a shard split failed: we will revert the database state to have only
     854              :     /// the parent shards, with SplitState::Idle.
     855            0 :     pub(crate) async fn abort_shard_split(
     856            0 :         &self,
     857            0 :         split_tenant_id: TenantId,
     858            0 :         new_shard_count: ShardCount,
     859            0 :     ) -> DatabaseResult<AbortShardSplitStatus> {
     860              :         use crate::schema::tenant_shards::dsl::*;
     861            0 :         self.with_measured_conn(
     862            0 :             DatabaseOperation::AbortShardSplit,
     863            0 :             move |conn| -> DatabaseResult<AbortShardSplitStatus> {
     864              :                 // Clear the splitting state on parent shards
     865            0 :                 let updated = diesel::update(tenant_shards)
     866            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     867            0 :                     .filter(shard_count.ne(new_shard_count.literal() as i32))
     868            0 :                     .set((splitting.eq(0),))
     869            0 :                     .execute(conn)?;
     870              : 
     871              :                 // Parent shards are already gone: we cannot abort.
     872            0 :                 if updated == 0 {
     873            0 :                     return Ok(AbortShardSplitStatus::Complete);
     874            0 :                 }
     875            0 : 
     876            0 :                 // Sanity check: if parent shards were present, their cardinality should
     877            0 :                 // be less than the number of child shards.
     878            0 :                 if updated >= new_shard_count.count() as usize {
     879            0 :                     return Err(DatabaseError::Logical(format!(
     880            0 :                         "Unexpected parent shard count {updated} while aborting split to \
     881            0 :                             count {new_shard_count:?} on tenant {split_tenant_id}"
     882            0 :                     )));
     883            0 :                 }
     884            0 : 
     885            0 :                 // Erase child shards
     886            0 :                 diesel::delete(tenant_shards)
     887            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     888            0 :                     .filter(shard_count.eq(new_shard_count.literal() as i32))
     889            0 :                     .execute(conn)?;
     890              : 
     891            0 :                 Ok(AbortShardSplitStatus::Aborted)
     892            0 :             },
     893            0 :         )
     894            0 :         .await
     895            0 :     }
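A sketch of the recovery path when the remote part of a split fails, using the same illustrative `persistence` and `tenant_id` names; the returned status tells the caller whether anything was actually undone:

    match persistence
        // 4 is the made-up child shard count of the failed split
        .abort_shard_split(tenant_id, ShardCount::new(4))
        .await?
    {
        // Parents were still present: child rows were erased and the parents
        // were reset to SplitState::Idle.
        AbortShardSplitStatus::Aborted => { /* safe to retry the split later */ }
        // Parents were already gone, so the split had in fact completed and
        // there is nothing left to revert.
        AbortShardSplitStatus::Complete => { /* treat the split as done */ }
    }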
     896              : 
     897              :     /// Stores the latest metadata health updates durably, updating the existing entry on conflict.
     898              :     ///
     899              :     /// **Correctness:** `healthy_records` and `unhealthy_records` should all belong to tenant shards managed by the storage controller.
     900              :     #[allow(dead_code)]
     901            0 :     pub(crate) async fn update_metadata_health_records(
     902            0 :         &self,
     903            0 :         healthy_records: Vec<MetadataHealthPersistence>,
     904            0 :         unhealthy_records: Vec<MetadataHealthPersistence>,
     905            0 :         now: chrono::DateTime<chrono::Utc>,
     906            0 :     ) -> DatabaseResult<()> {
     907              :         use crate::schema::metadata_health::dsl::*;
     908              : 
     909            0 :         self.with_measured_conn(
     910            0 :             DatabaseOperation::UpdateMetadataHealth,
     911            0 :             move |conn| -> DatabaseResult<_> {
     912            0 :                 diesel::insert_into(metadata_health)
     913            0 :                     .values(&healthy_records)
     914            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
     915            0 :                     .do_update()
     916            0 :                     .set((healthy.eq(true), last_scrubbed_at.eq(now)))
     917            0 :                     .execute(conn)?;
     918              : 
     919            0 :                 diesel::insert_into(metadata_health)
     920            0 :                     .values(&unhealthy_records)
     921            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
     922            0 :                     .do_update()
     923            0 :                     .set((healthy.eq(false), last_scrubbed_at.eq(now)))
     924            0 :                     .execute(conn)?;
     925            0 :                 Ok(())
     926            0 :             },
     927            0 :         )
     928            0 :         .await
     929            0 :     }
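A sketch of how a scrubber-style caller might feed this upsert; `persistence`, `shard_ok` and `shard_bad` are made-up names, the latter two being `TenantShardId`s owned by this controller, and the same `now` timestamp is stamped onto every record:

    let now = chrono::Utc::now();
    let healthy = vec![MetadataHealthPersistence::new(shard_ok, true, now)];
    let unhealthy = vec![MetadataHealthPersistence::new(shard_bad, false, now)];
    persistence
        .update_metadata_health_records(healthy, unhealthy, now)
        .await?;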
     930              : 
     931              :     /// Lists all the metadata health records.
     932              :     #[allow(dead_code)]
     933            0 :     pub(crate) async fn list_metadata_health_records(
     934            0 :         &self,
     935            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
     936            0 :         self.with_measured_conn(
     937            0 :             DatabaseOperation::ListMetadataHealth,
     938            0 :             move |conn| -> DatabaseResult<_> {
     939            0 :                 Ok(
     940            0 :                     crate::schema::metadata_health::table
     941            0 :                         .load::<MetadataHealthPersistence>(conn)?,
     942              :                 )
     943            0 :             },
     944            0 :         )
     945            0 :         .await
     946            0 :     }
     947              : 
     948              :     /// Lists all the metadata health records that are unhealthy.
     949              :     #[allow(dead_code)]
     950            0 :     pub(crate) async fn list_unhealthy_metadata_health_records(
     951            0 :         &self,
     952            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
     953              :         use crate::schema::metadata_health::dsl::*;
     954            0 :         self.with_measured_conn(
     955            0 :             DatabaseOperation::ListMetadataHealthUnhealthy,
     956            0 :             move |conn| -> DatabaseResult<_> {
     957            0 :                 Ok(crate::schema::metadata_health::table
     958            0 :                     .filter(healthy.eq(false))
     959            0 :                     .load::<MetadataHealthPersistence>(conn)?)
     960            0 :             },
     961            0 :         )
     962            0 :         .await
     963            0 :     }
     964              : 
     965              :     /// Lists all the metadata health records that have not been scrubbed since the given `earlier` timestamp.
     966              :     #[allow(dead_code)]
     967            0 :     pub(crate) async fn list_outdated_metadata_health_records(
     968            0 :         &self,
     969            0 :         earlier: chrono::DateTime<chrono::Utc>,
     970            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
     971              :         use crate::schema::metadata_health::dsl::*;
     972              : 
     973            0 :         self.with_measured_conn(
     974            0 :             DatabaseOperation::ListMetadataHealthOutdated,
     975            0 :             move |conn| -> DatabaseResult<_> {
     976            0 :                 let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
     977            0 :                 let res = query.load::<MetadataHealthPersistence>(conn)?;
     978              : 
     979            0 :                 Ok(res)
     980            0 :             },
     981            0 :         )
     982            0 :         .await
     983            0 :     }
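For example, a hypothetical staleness sweep could ask for every record that has not been scrubbed within the last 24 hours:

    let cutoff = chrono::Utc::now() - chrono::Duration::hours(24);
    let stale = persistence
        .list_outdated_metadata_health_records(cutoff)
        .await?;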
     984              : 
     985              :     /// Get the current entry from the `leader` table if one exists.
     986              :     /// It is an error for the table to contain more than one entry.
     987            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
     988            0 :         let mut leader: Vec<ControllerPersistence> = self
     989            0 :             .with_measured_conn(
     990            0 :                 DatabaseOperation::GetLeader,
     991            0 :                 move |conn| -> DatabaseResult<_> {
     992            0 :                     Ok(crate::schema::controllers::table.load::<ControllerPersistence>(conn)?)
     993            0 :                 },
     994            0 :             )
     995            0 :             .await?;
     996              : 
     997            0 :         if leader.len() > 1 {
     998            0 :             return Err(DatabaseError::Logical(format!(
     999            0 :                 "More than one entry present in the leader table: {leader:?}"
    1000            0 :             )));
    1001            0 :         }
    1002            0 : 
    1003            0 :         Ok(leader.pop())
    1004            0 :     }
    1005              : 
    1006              :     /// Update the new leader with compare-exchange semantics. If `prev` does not
    1007              :     /// match the current leader entry, then the update is treated as a failure.
    1008              :     /// When `prev` is not specified, the update is forced.
    1009            0 :     pub(crate) async fn update_leader(
    1010            0 :         &self,
    1011            0 :         prev: Option<ControllerPersistence>,
    1012            0 :         new: ControllerPersistence,
    1013            0 :     ) -> DatabaseResult<()> {
    1014              :         use crate::schema::controllers::dsl::*;
    1015              : 
    1016            0 :         let updated = self
    1017            0 :             .with_measured_conn(
    1018            0 :                 DatabaseOperation::UpdateLeader,
    1019            0 :                 move |conn| -> DatabaseResult<usize> {
    1020            0 :                     let updated = match &prev {
    1021            0 :                         Some(prev) => diesel::update(controllers)
    1022            0 :                             .filter(address.eq(prev.address.clone()))
    1023            0 :                             .filter(started_at.eq(prev.started_at))
    1024            0 :                             .set((
    1025            0 :                                 address.eq(new.address.clone()),
    1026            0 :                                 started_at.eq(new.started_at),
    1027            0 :                             ))
    1028            0 :                             .execute(conn)?,
    1029            0 :                         None => diesel::insert_into(controllers)
    1030            0 :                             .values(new.clone())
    1031            0 :                             .execute(conn)?,
    1032              :                     };
    1033              : 
    1034            0 :                     Ok(updated)
    1035            0 :                 },
    1036            0 :             )
    1037            0 :             .await?;
    1038              : 
    1039            0 :         if updated == 0 {
    1040            0 :             return Err(DatabaseError::Logical(
    1041            0 :                 "Leader table update failed".to_string(),
    1042            0 :             ));
    1043            0 :         }
    1044            0 : 
    1045            0 :         Ok(())
    1046            0 :     }
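A sketch of a leadership takeover that pairs `get_leader` with the compare-exchange update; the address is made up, and losing the race surfaces as the `Logical` error constructed above:

    let prev = persistence.get_leader().await?;
    let new = ControllerPersistence {
        address: "storage-controller-1:1234".to_string(),
        started_at: chrono::Utc::now(),
    };
    match persistence.update_leader(prev, new).await {
        Ok(()) => { /* this instance is now the recorded leader */ }
        Err(DatabaseError::Logical(_)) => { /* another controller won the race */ }
        Err(other) => return Err(other), // assumes the caller returns DatabaseResult
    }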
    1047              : 
    1048              :     /// At startup, load the list of safekeepers from the database.
    1049            0 :     pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
    1050            0 :         let safekeepers: Vec<SafekeeperPersistence> = self
    1051            0 :             .with_measured_conn(
    1052            0 :                 DatabaseOperation::ListNodes,
    1053            0 :                 move |conn| -> DatabaseResult<_> {
    1054            0 :                     Ok(crate::schema::safekeepers::table.load::<SafekeeperPersistence>(conn)?)
    1055            0 :                 },
    1056            0 :             )
    1057            0 :             .await?;
    1058              : 
    1059            0 :         tracing::info!("list_safekeepers: loaded {} safekeepers", safekeepers.len());
    1060              : 
    1061            0 :         Ok(safekeepers)
    1062            0 :     }
    1063              : 
    1064            0 :     pub(crate) async fn safekeeper_get(
    1065            0 :         &self,
    1066            0 :         id: i64,
    1067            0 :     ) -> Result<SafekeeperPersistence, DatabaseError> {
    1068              :         use crate::schema::safekeepers::dsl::{id as id_column, safekeepers};
    1069            0 :         self.with_conn(move |conn| -> DatabaseResult<SafekeeperPersistence> {
    1070            0 :             Ok(safekeepers
    1071            0 :                 .filter(id_column.eq(&id))
    1072            0 :                 .select(SafekeeperPersistence::as_select())
    1073            0 :                 .get_result(conn)?)
    1074            0 :         })
    1075            0 :         .await
    1076            0 :     }
    1077              : 
    1078            0 :     pub(crate) async fn safekeeper_upsert(
    1079            0 :         &self,
    1080            0 :         record: SafekeeperUpsert,
    1081            0 :     ) -> Result<(), DatabaseError> {
    1082              :         use crate::schema::safekeepers::dsl::*;
    1083              : 
    1084            0 :         self.with_conn(move |conn| -> DatabaseResult<()> {
    1085            0 :             let bind = record
    1086            0 :                 .as_insert_or_update()
    1087            0 :                 .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
    1088              : 
    1089            0 :             let inserted_updated = diesel::insert_into(safekeepers)
    1090            0 :                 .values(&bind)
    1091            0 :                 .on_conflict(id)
    1092            0 :                 .do_update()
    1093            0 :                 .set(&bind)
    1094            0 :                 .execute(conn)?;
    1095              : 
    1096            0 :             if inserted_updated != 1 {
    1097            0 :                 return Err(DatabaseError::Logical(format!(
    1098            0 :                     "unexpected number of rows ({})",
    1099            0 :                     inserted_updated
    1100            0 :                 )));
    1101            0 :             }
    1102            0 : 
    1103            0 :             Ok(())
    1104            0 :         })
    1105            0 :         .await
    1106            0 :     }
    1107              : 
    1108            0 :     pub(crate) async fn set_safekeeper_scheduling_policy(
    1109            0 :         &self,
    1110            0 :         id_: i64,
    1111            0 :         scheduling_policy_: SkSchedulingPolicy,
    1112            0 :     ) -> Result<(), DatabaseError> {
    1113              :         use crate::schema::safekeepers::dsl::*;
    1114              : 
    1115            0 :         self.with_conn(move |conn| -> DatabaseResult<()> {
    1116            0 :             #[derive(Insertable, AsChangeset)]
    1117              :             #[diesel(table_name = crate::schema::safekeepers)]
    1118              :             struct UpdateSkSchedulingPolicy<'a> {
    1119              :                 id: i64,
    1120              :                 scheduling_policy: &'a str,
    1121              :             }
    1122            0 :             let scheduling_policy_ = String::from(scheduling_policy_);
    1123              : 
    1124            0 :             let rows_affected = diesel::update(safekeepers.filter(id.eq(id_)))
    1125            0 :                 .set(scheduling_policy.eq(scheduling_policy_))
    1126            0 :                 .execute(conn)?;
    1127              : 
    1128            0 :             if rows_affected != 1 {
    1129            0 :                 return Err(DatabaseError::Logical(format!(
    1130            0 :                     "unexpected number of rows ({rows_affected})",
    1131            0 :                 )));
    1132            0 :             }
    1133            0 : 
    1134            0 :             Ok(())
    1135            0 :         })
    1136            0 :         .await
    1137            0 :     }
    1138              : }
    1139              : 
    1140              : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
    1141              : #[derive(
    1142            0 :     QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
    1143              : )]
    1144              : #[diesel(table_name = crate::schema::tenant_shards)]
    1145              : pub(crate) struct TenantShardPersistence {
    1146              :     #[serde(default)]
    1147              :     pub(crate) tenant_id: String,
    1148              :     #[serde(default)]
    1149              :     pub(crate) shard_number: i32,
    1150              :     #[serde(default)]
    1151              :     pub(crate) shard_count: i32,
    1152              :     #[serde(default)]
    1153              :     pub(crate) shard_stripe_size: i32,
    1154              : 
    1155              :     // Latest generation number: next time we attach, increment this
    1156              :     // and use the incremented number when attaching.
    1157              :     //
    1158              :     // Generation is only None when first onboarding a tenant, where it may
    1159              :     // be in PlacementPolicy::Secondary and therefore have no valid generation state.
    1160              :     pub(crate) generation: Option<i32>,
    1161              : 
    1162              :     // Currently attached pageserver
    1163              :     #[serde(rename = "pageserver")]
    1164              :     pub(crate) generation_pageserver: Option<i64>,
    1165              : 
    1166              :     #[serde(default)]
    1167              :     pub(crate) placement_policy: String,
    1168              :     #[serde(default)]
    1169              :     pub(crate) splitting: SplitState,
    1170              :     #[serde(default)]
    1171              :     pub(crate) config: String,
    1172              :     #[serde(default)]
    1173              :     pub(crate) scheduling_policy: String,
    1174              : 
    1175              :     // Hint that we should attempt to schedule this tenant shard in the given
    1176              :     // availability zone in order to minimise the chances of cross-AZ communication
    1177              :     // with compute.
    1178              :     pub(crate) preferred_az_id: Option<String>,
    1179              : }
    1180              : 
    1181              : impl TenantShardPersistence {
    1182            0 :     pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
    1183            0 :         if self.shard_count == 0 {
    1184            0 :             Ok(ShardIdentity::unsharded())
    1185              :         } else {
    1186            0 :             Ok(ShardIdentity::new(
    1187            0 :                 ShardNumber(self.shard_number as u8),
    1188            0 :                 ShardCount::new(self.shard_count as u8),
    1189            0 :                 ShardStripeSize(self.shard_stripe_size as u32),
    1190            0 :             )?)
    1191              :         }
    1192            0 :     }
    1193              : 
    1194            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    1195            0 :         Ok(TenantShardId {
    1196            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1197            0 :             shard_number: ShardNumber(self.shard_number as u8),
    1198            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    1199              :         })
    1200            0 :     }
    1201              : }
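Illustrative decoding of one persisted row, where `row` stands for a loaded `TenantShardPersistence`; a stored `shard_count` of zero is the unsharded special case, otherwise the stored number, count and stripe size are validated into a `ShardIdentity`:

    let identity = row.get_shard_identity().expect("stored shard parameters should be valid");
    let shard_id = row.get_tenant_shard_id().expect("stored tenant id should be valid hex");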
    1202              : 
    1203              : /// Parts of [`crate::node::Node`] that are stored durably
    1204            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
    1205              : #[diesel(table_name = crate::schema::nodes)]
    1206              : pub(crate) struct NodePersistence {
    1207              :     pub(crate) node_id: i64,
    1208              :     pub(crate) scheduling_policy: String,
    1209              :     pub(crate) listen_http_addr: String,
    1210              :     pub(crate) listen_http_port: i32,
    1211              :     pub(crate) listen_pg_addr: String,
    1212              :     pub(crate) listen_pg_port: i32,
    1213              :     pub(crate) availability_zone_id: String,
    1214              : }
    1215              : 
    1216              : /// Tenant metadata health status that is stored durably.
    1217            0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
    1218              : #[diesel(table_name = crate::schema::metadata_health)]
    1219              : pub(crate) struct MetadataHealthPersistence {
    1220              :     #[serde(default)]
    1221              :     pub(crate) tenant_id: String,
    1222              :     #[serde(default)]
    1223              :     pub(crate) shard_number: i32,
    1224              :     #[serde(default)]
    1225              :     pub(crate) shard_count: i32,
    1226              : 
    1227              :     pub(crate) healthy: bool,
    1228              :     pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1229              : }
    1230              : 
    1231              : impl MetadataHealthPersistence {
    1232            0 :     pub fn new(
    1233            0 :         tenant_shard_id: TenantShardId,
    1234            0 :         healthy: bool,
    1235            0 :         last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1236            0 :     ) -> Self {
    1237            0 :         let tenant_id = tenant_shard_id.tenant_id.to_string();
    1238            0 :         let shard_number = tenant_shard_id.shard_number.0 as i32;
    1239            0 :         let shard_count = tenant_shard_id.shard_count.literal() as i32;
    1240            0 : 
    1241            0 :         MetadataHealthPersistence {
    1242            0 :             tenant_id,
    1243            0 :             shard_number,
    1244            0 :             shard_count,
    1245            0 :             healthy,
    1246            0 :             last_scrubbed_at,
    1247            0 :         }
    1248            0 :     }
    1249              : 
    1250              :     #[allow(dead_code)]
    1251            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    1252            0 :         Ok(TenantShardId {
    1253            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1254            0 :             shard_number: ShardNumber(self.shard_number as u8),
    1255            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    1256              :         })
    1257            0 :     }
    1258              : }
    1259              : 
    1260              : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
    1261            0 :     fn from(value: MetadataHealthPersistence) -> Self {
    1262            0 :         MetadataHealthRecord {
    1263            0 :             tenant_shard_id: value
    1264            0 :                 .get_tenant_shard_id()
    1265            0 :                 .expect("stored tenant id should be valid"),
    1266            0 :             healthy: value.healthy,
    1267            0 :             last_scrubbed_at: value.last_scrubbed_at,
    1268            0 :         }
    1269            0 :     }
    1270              : }
    1271              : 
    1272              : #[derive(
    1273            0 :     Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
    1274              : )]
    1275              : #[diesel(table_name = crate::schema::controllers)]
    1276              : pub(crate) struct ControllerPersistence {
    1277              :     pub(crate) address: String,
    1278              :     pub(crate) started_at: chrono::DateTime<chrono::Utc>,
    1279              : }
    1280              : 
    1281              : // What we store in the database
    1282            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
    1283              : #[diesel(table_name = crate::schema::safekeepers)]
    1284              : pub(crate) struct SafekeeperPersistence {
    1285              :     pub(crate) id: i64,
    1286              :     pub(crate) region_id: String,
    1287              :     /// 1 is special: it means just created (not currently posted to storcon).
    1288              :     /// Zero or negative is not really expected.
    1289              :     /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    1290              :     pub(crate) version: i64,
    1291              :     pub(crate) host: String,
    1292              :     pub(crate) port: i32,
    1293              :     pub(crate) http_port: i32,
    1294              :     pub(crate) availability_zone_id: String,
    1295              :     pub(crate) scheduling_policy: String,
    1296              : }
    1297              : 
    1298              : impl SafekeeperPersistence {
    1299            0 :     pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
    1300            0 :         let scheduling_policy =
    1301            0 :             SkSchedulingPolicy::from_str(&self.scheduling_policy).map_err(|e| {
    1302            0 :                 DatabaseError::Logical(format!("can't construct SkSchedulingPolicy: {e:?}"))
    1303            0 :             })?;
    1304            0 :         Ok(SafekeeperDescribeResponse {
    1305            0 :             id: NodeId(self.id as u64),
    1306            0 :             region_id: self.region_id.clone(),
    1307            0 :             version: self.version,
    1308            0 :             host: self.host.clone(),
    1309            0 :             port: self.port,
    1310            0 :             http_port: self.http_port,
    1311            0 :             availability_zone_id: self.availability_zone_id.clone(),
    1312            0 :             scheduling_policy,
    1313            0 :         })
    1314            0 :     }
    1315              : }
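Turning a loaded row into the external API type is a single fallible step, because the stored `scheduling_policy` string is re-parsed; here `stored` is a hypothetical `SafekeeperPersistence` value inside a function that returns a `DatabaseResult`:

    let describe: SafekeeperDescribeResponse = stored.as_describe_response()?;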
    1316              : 
    1317              : /// What we expect from the upsert http api
    1318            0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
    1319              : pub(crate) struct SafekeeperUpsert {
    1320              :     pub(crate) id: i64,
    1321              :     pub(crate) region_id: String,
    1322              :     /// 1 is special: it means just created (not currently posted to storcon).
    1323              :     /// Zero or negative is not really expected.
    1324              :     /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    1325              :     pub(crate) version: i64,
    1326              :     pub(crate) host: String,
    1327              :     pub(crate) port: i32,
    1328              :     /// The active flag will not be stored in the database and will be ignored.
    1329              :     pub(crate) active: Option<bool>,
    1330              :     pub(crate) http_port: i32,
    1331              :     pub(crate) availability_zone_id: String,
    1332              : }
    1333              : 
    1334              : impl SafekeeperUpsert {
    1335            0 :     fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
    1336            0 :         if self.version < 0 {
    1337            0 :             anyhow::bail!("negative version: {}", self.version);
    1338            0 :         }
    1339            0 :         Ok(InsertUpdateSafekeeper {
    1340            0 :             id: self.id,
    1341            0 :             region_id: &self.region_id,
    1342            0 :             version: self.version,
    1343            0 :             host: &self.host,
    1344            0 :             port: self.port,
    1345            0 :             http_port: self.http_port,
    1346            0 :             availability_zone_id: &self.availability_zone_id,
    1347            0 :             // None means this column is not updated here; it is updated via other means (see set_safekeeper_scheduling_policy).
    1348            0 :             scheduling_policy: None,
    1349            0 :         })
    1350            0 :     }
    1351              : }
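An illustrative upsert payload as it might arrive from the HTTP API (all field values are made up); `active` is accepted for compatibility but never persisted, and a negative `version` is rejected by `as_insert_or_update`:

    let upsert = SafekeeperUpsert {
        id: 42,
        region_id: "eu-west-1".to_string(),
        version: 5957,
        host: "safekeeper-42.local".to_string(),
        port: 6500,
        active: None,
        http_port: 7676,
        availability_zone_id: "eu-west-1a".to_string(),
    };
    persistence.safekeeper_upsert(upsert).await?;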
    1352              : 
    1353            0 : #[derive(Insertable, AsChangeset)]
    1354              : #[diesel(table_name = crate::schema::safekeepers)]
    1355              : struct InsertUpdateSafekeeper<'a> {
    1356              :     id: i64,
    1357              :     region_id: &'a str,
    1358              :     version: i64,
    1359              :     host: &'a str,
    1360              :     port: i32,
    1361              :     http_port: i32,
    1362              :     availability_zone_id: &'a str,
    1363              :     scheduling_policy: Option<&'a str>,
    1364              : }
        

Generated by: LCOV version 2.1-beta