LCOV - code coverage report
Current view: top level - storage_controller/src - persistence.rs (source / functions)
Test: b9728233c33232dfae45024a493738ef141ccd5d.info
Test Date: 2025-01-10 20:41:15

              Coverage    Total    Hit
Lines:        0.0 %       774      0
Functions:    0.0 %       337      0

pub(crate) mod split_state;
use std::collections::HashMap;
use std::str::FromStr;
use std::time::Duration;
use std::time::Instant;

use self::split_state::SplitState;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel::Connection;
use itertools::Itertools;
use pageserver_api::controller_api::AvailabilityZone;
use pageserver_api::controller_api::MetadataHealthRecord;
use pageserver_api::controller_api::SafekeeperDescribeResponse;
use pageserver_api::controller_api::ShardSchedulingPolicy;
use pageserver_api::controller_api::SkSchedulingPolicy;
use pageserver_api::controller_api::{NodeSchedulingPolicy, PlacementPolicy};
use pageserver_api::models::TenantConfig;
use pageserver_api::shard::ShardConfigError;
use pageserver_api::shard::ShardIdentity;
use pageserver_api::shard::ShardStripeSize;
use pageserver_api::shard::{ShardCount, ShardNumber, TenantShardId};
use serde::{Deserialize, Serialize};
use utils::generation::Generation;
use utils::id::{NodeId, TenantId};

use crate::metrics::{
    DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
};
use crate::node::Node;

use diesel_migrations::{embed_migrations, EmbeddedMigrations};
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");

/// ## What do we store?
///
/// The storage controller service does not store most of its state durably.
///
/// The essential things to store durably are:
/// - generation numbers, as these must always advance monotonically to ensure data safety.
/// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
/// - Node's scheduling policies, as the source of truth for these is something external.
///
/// Other things we store durably as an implementation detail:
/// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
///   but it is operationally simpler to make this service the authority for which nodes
///   it talks to.
///
/// ## Performance/efficiency
///
/// The storage controller service does not go via the database for most things: there are
/// a couple of places where we must, and where efficiency matters:
/// - Incrementing generation numbers: the Reconciler has to wait for this to complete
///   before it can attach a tenant, so this acts as a bound on how fast things like
///   failover can happen.
/// - Pageserver re-attach: we will increment many shards' generations when this happens,
///   so it is important to avoid e.g. issuing O(N) queries.
///
/// Database calls relating to nodes have low performance requirements, as they are very rarely
/// updated, and reads of nodes are always from memory, not the database.  We only require that
/// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
pub struct Persistence {
    connection_pool: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<PgConnection>>,
}
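
// A minimal startup sketch (hypothetical helper, not part of the service itself): wait for
// the database to come up (tolerating concurrent restarts), run migrations, then build the
// Persistence handle.  Assumes `database_url` points at a reachable Postgres instance.
#[allow(dead_code)]
async fn persistence_startup_example(database_url: String) -> DatabaseResult<Persistence> {
    Persistence::await_connection(&database_url, Duration::from_secs(30)).await?;
    let persistence = Persistence::new(database_url);
    persistence.migration_run().await?;
    Ok(persistence)
}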

/// Legacy format, for use in JSON compat objects in test environment
#[derive(Serialize, Deserialize)]
struct JsonPersistence {
    tenants: HashMap<TenantShardId, TenantShardPersistence>,
}

#[derive(thiserror::Error, Debug)]
pub(crate) enum DatabaseError {
    #[error(transparent)]
    Query(#[from] diesel::result::Error),
    #[error(transparent)]
    Connection(#[from] diesel::result::ConnectionError),
    #[error(transparent)]
    ConnectionPool(#[from] r2d2::Error),
    #[error("Logical error: {0}")]
    Logical(String),
    #[error("Migration error: {0}")]
    Migration(String),
}

#[derive(measured::FixedCardinalityLabel, Copy, Clone)]
pub(crate) enum DatabaseOperation {
    InsertNode,
    UpdateNode,
    DeleteNode,
    ListNodes,
    BeginShardSplit,
    CompleteShardSplit,
    AbortShardSplit,
    Detach,
    ReAttach,
    IncrementGeneration,
    TenantGenerations,
    ShardGenerations,
    ListTenantShards,
    LoadTenant,
    InsertTenantShards,
    UpdateTenantShard,
    DeleteTenant,
    UpdateTenantConfig,
    UpdateMetadataHealth,
    ListMetadataHealth,
    ListMetadataHealthUnhealthy,
    ListMetadataHealthOutdated,
    ListSafekeepers,
    GetLeader,
    UpdateLeader,
    SetPreferredAzs,
}

#[must_use]
pub(crate) enum AbortShardSplitStatus {
    /// We aborted the split in the database by reverting to the parent shards
    Aborted,
    /// The split had already been persisted.
    Complete,
}

pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;

/// Some methods can operate on either a whole tenant or a single shard
pub(crate) enum TenantFilter {
    Tenant(TenantId),
    Shard(TenantShardId),
}

/// Represents the results of looking up generation+pageserver for the shards of a tenant
pub(crate) struct ShardGenerationState {
    pub(crate) tenant_shard_id: TenantShardId,
    pub(crate) generation: Option<Generation>,
    pub(crate) generation_pageserver: Option<NodeId>,
}

impl Persistence {
    // The default postgres connection limit is 100.  We use up to 99, to leave one free for a human admin under
    // normal circumstances.  This assumes we have exclusive use of the database cluster to which we connect.
    pub const MAX_CONNECTIONS: u32 = 99;

    // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
    const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
    const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);

    pub fn new(database_url: String) -> Self {
        let manager = diesel::r2d2::ConnectionManager::<PgConnection>::new(database_url);

        // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
        // to execute queries (database queries are not generally on latency-sensitive paths).
        let connection_pool = diesel::r2d2::Pool::builder()
            .max_size(Self::MAX_CONNECTIONS)
            .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
            .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
            // Always keep at least one connection ready to go
            .min_idle(Some(1))
            .test_on_check_out(true)
            .build(manager)
            .expect("Could not build connection pool");

        Self { connection_pool }
    }

    /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
    /// database and the storage controller, so the database might not be available right away.
    pub async fn await_connection(
        database_url: &str,
        timeout: Duration,
    ) -> Result<(), diesel::ConnectionError> {
        let started_at = Instant::now();
        loop {
            match PgConnection::establish(database_url) {
                Ok(_) => {
                    tracing::info!("Connected to database.");
                    return Ok(());
                }
                Err(e) => {
                    if started_at.elapsed() > timeout {
                        return Err(e);
                    } else {
                        tracing::info!("Database not yet available, waiting... ({e})");
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                }
            }
        }
    }

    /// Execute the diesel migrations that are built into this binary
    pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
        use diesel_migrations::{HarnessWithOutput, MigrationHarness};

        self.with_conn(move |conn| -> DatabaseResult<()> {
            HarnessWithOutput::write_to_stdout(conn)
                .run_pending_migrations(MIGRATIONS)
                .map(|_| ())
                .map_err(|e| DatabaseError::Migration(e.to_string()))
        })
        .await
    }

    /// Wraps `with_conn` in order to collect latency and error metrics
    async fn with_measured_conn<F, R>(&self, op: DatabaseOperation, func: F) -> DatabaseResult<R>
    where
        F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
        R: Send + 'static,
    {
        let latency = &METRICS_REGISTRY
            .metrics_group
            .storage_controller_database_query_latency;
        let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });

        let res = self.with_conn(func).await;

        if let Err(err) = &res {
            let error_counter = &METRICS_REGISTRY
                .metrics_group
                .storage_controller_database_query_error;
            error_counter.inc(DatabaseQueryErrorLabelGroup {
                error_type: err.error_label(),
                operation: op,
            })
        }

        res
    }

    /// Call the provided function in a tokio blocking thread, with a Diesel database connection.
    async fn with_conn<F, R>(&self, func: F) -> DatabaseResult<R>
    where
        F: Fn(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
        R: Send + 'static,
    {
        // A generous allowance for how many times we may retry serializable transactions
        // before giving up.  This is not expected to be hit: it is a defensive measure in case we
        // somehow engineer a situation where duelling transactions might otherwise live-lock.
        const MAX_RETRIES: usize = 128;

        let mut conn = self.connection_pool.get()?;
        tokio::task::spawn_blocking(move || -> DatabaseResult<R> {
            let mut retry_count = 0;
            loop {
                match conn.build_transaction().serializable().run(|c| func(c)) {
                    Ok(r) => break Ok(r),
                    Err(
                        err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
                            diesel::result::DatabaseErrorKind::SerializationFailure,
                            _,
                        )),
                    ) => {
                        retry_count += 1;
                        if retry_count > MAX_RETRIES {
                            tracing::error!(
                                "Exceeded max retries on SerializationFailure errors: {err:?}"
                            );
                            break Err(err);
                        } else {
                            // Retry on serialization errors: these are expected, because even though our
                            // transactions don't fight for the same rows, they will occasionally collide
                            // on index pages (e.g. increment_generation for unrelated shards can collide)
                            tracing::debug!(
                                "Retrying transaction on serialization failure {err:?}"
                            );
                            continue;
                        }
                    }
                    Err(e) => break Err(e),
                }
            }
        })
        .await
        .expect("Task panic")
    }
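
    // Usage sketch (hypothetical helper, not used by the service): every query method in this
    // file follows this shape, handing a blocking closure to `with_conn`/`with_measured_conn`,
    // which runs it inside a serializable transaction and retries on serialization failures.
    #[allow(dead_code)]
    async fn count_nodes_example(&self) -> DatabaseResult<i64> {
        self.with_conn(move |conn| -> DatabaseResult<i64> {
            // Trivial query standing in for the real helpers below.
            Ok(crate::schema::nodes::table.count().get_result(conn)?)
        })
        .await
    }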

    /// When a node is first registered, persist it before using it for anything
    pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
        let np = node.to_persistent();
        self.with_measured_conn(
            DatabaseOperation::InsertNode,
            move |conn| -> DatabaseResult<()> {
                diesel::insert_into(crate::schema::nodes::table)
                    .values(&np)
                    .execute(conn)?;
                Ok(())
            },
        )
        .await
    }

    /// At startup, populate the list of nodes which our shards may be placed on
    pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
        let nodes: Vec<NodePersistence> = self
            .with_measured_conn(
                DatabaseOperation::ListNodes,
                move |conn| -> DatabaseResult<_> {
                    Ok(crate::schema::nodes::table.load::<NodePersistence>(conn)?)
                },
            )
            .await?;

        tracing::info!("list_nodes: loaded {} nodes", nodes.len());

        Ok(nodes)
    }

    pub(crate) async fn update_node(
        &self,
        input_node_id: NodeId,
        input_scheduling: NodeSchedulingPolicy,
    ) -> DatabaseResult<()> {
        use crate::schema::nodes::dsl::*;
        let updated = self
            .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
                let updated = diesel::update(nodes)
                    .filter(node_id.eq(input_node_id.0 as i64))
                    .set((scheduling_policy.eq(String::from(input_scheduling)),))
                    .execute(conn)?;
                Ok(updated)
            })
            .await?;

        if updated != 1 {
            Err(DatabaseError::Logical(format!(
                "Node {input_node_id:?} not found for update",
            )))
        } else {
            Ok(())
        }
    }
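
    // Sketch (hypothetical caller): the "mark a bad node offline" path from the module docs is
    // just a fast scheduling-policy UPDATE.  `NodeSchedulingPolicy::Pause` is assumed here to be
    // the variant that stops new work being scheduled onto the node.
    #[allow(dead_code)]
    async fn mark_node_offline_example(&self, bad_node: NodeId) -> DatabaseResult<()> {
        self.update_node(bad_node, NodeSchedulingPolicy::Pause).await
    }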

    /// At startup, load the high level state for shards, such as their config + policy.  This will
    /// be enriched at runtime with state discovered on pageservers.
    ///
    /// We exclude shards configured to be detached.  During startup, if we see any attached locations
    /// for such shards, they will automatically be detached as 'orphans'.
    pub(crate) async fn load_active_tenant_shards(
        &self,
    ) -> DatabaseResult<Vec<TenantShardPersistence>> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::ListTenantShards,
            move |conn| -> DatabaseResult<_> {
                let query = tenant_shards.filter(
                    placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
                );
                let result = query.load::<TenantShardPersistence>(conn)?;

                Ok(result)
            },
        )
        .await
    }

    /// When restoring a previously detached tenant into memory, load it from the database
    pub(crate) async fn load_tenant(
        &self,
        filter_tenant_id: TenantId,
    ) -> DatabaseResult<Vec<TenantShardPersistence>> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::LoadTenant,
            move |conn| -> DatabaseResult<_> {
                let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
                let result = query.load::<TenantShardPersistence>(conn)?;

                Ok(result)
            },
        )
        .await
    }

    /// Tenants must be persisted before we schedule them for the first time.  This enables us
    /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
    pub(crate) async fn insert_tenant_shards(
        &self,
        shards: Vec<TenantShardPersistence>,
    ) -> DatabaseResult<()> {
        use crate::schema::metadata_health;
        use crate::schema::tenant_shards;

        let now = chrono::Utc::now();

        let metadata_health_records = shards
            .iter()
            .map(|t| MetadataHealthPersistence {
                tenant_id: t.tenant_id.clone(),
                shard_number: t.shard_number,
                shard_count: t.shard_count,
                healthy: true,
                last_scrubbed_at: now,
            })
            .collect::<Vec<_>>();

        self.with_measured_conn(
            DatabaseOperation::InsertTenantShards,
            move |conn| -> DatabaseResult<()> {
                diesel::insert_into(tenant_shards::table)
                    .values(&shards)
                    .execute(conn)?;

                diesel::insert_into(metadata_health::table)
                    .values(&metadata_health_records)
                    .execute(conn)?;
                Ok(())
            },
        )
        .await
    }

    /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
    /// the tenant from memory on this server.
    pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::DeleteTenant,
            move |conn| -> DatabaseResult<()> {
                // The `metadata_health` status (if it exists) is also deleted via cascade behavior.
                diesel::delete(tenant_shards)
                    .filter(tenant_id.eq(del_tenant_id.to_string()))
                    .execute(conn)?;
                Ok(())
            },
        )
        .await
    }

    pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
        use crate::schema::nodes::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::DeleteNode,
            move |conn| -> DatabaseResult<()> {
                diesel::delete(nodes)
                    .filter(node_id.eq(del_node_id.0 as i64))
                    .execute(conn)?;

                Ok(())
            },
        )
        .await
    }

    /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
    /// batched increment of the generations of all tenants whose generation_pageserver is equal to
    /// the node that called /re-attach.
    #[tracing::instrument(skip_all, fields(node_id))]
    pub(crate) async fn re_attach(
        &self,
        input_node_id: NodeId,
    ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
        use crate::schema::nodes::dsl::scheduling_policy;
        use crate::schema::nodes::dsl::*;
        use crate::schema::tenant_shards::dsl::*;
        let updated = self
            .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
                let rows_updated = diesel::update(tenant_shards)
                    .filter(generation_pageserver.eq(input_node_id.0 as i64))
                    .set(generation.eq(generation + 1))
                    .execute(conn)?;

                tracing::info!("Incremented {} tenants' generations", rows_updated);

                // TODO: UPDATE+SELECT in one query

                let updated = tenant_shards
                    .filter(generation_pageserver.eq(input_node_id.0 as i64))
                    .select(TenantShardPersistence::as_select())
                    .load(conn)?;

                // If the node went through a drain and restart phase before re-attaching,
                // then reset its node scheduling policy to active.
                diesel::update(nodes)
                    .filter(node_id.eq(input_node_id.0 as i64))
                    .filter(
                        scheduling_policy
                            .eq(String::from(NodeSchedulingPolicy::PauseForRestart))
                            .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Draining)))
                            .or(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Filling))),
                    )
                    .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active)))
                    .execute(conn)?;

                Ok(updated)
            })
            .await?;

        let mut result = HashMap::new();
        for tsp in updated {
            let tenant_shard_id = TenantShardId {
                tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
                    .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
                shard_number: ShardNumber(tsp.shard_number as u8),
                shard_count: ShardCount::new(tsp.shard_count as u8),
            };

            let Some(g) = tsp.generation else {
                // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
                // we only set generation_pageserver when setting generation.
                return Err(DatabaseError::Logical(
                    "Generation should always be set after incrementing".to_string(),
                ));
            };
            result.insert(tenant_shard_id, Generation::new(g as u32));
        }

        Ok(result)
    }

    /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
    /// advancing generation number.  We also store the NodeId for which the generation was issued, so that in
    /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
    pub(crate) async fn increment_generation(
        &self,
        tenant_shard_id: TenantShardId,
        node_id: NodeId,
    ) -> anyhow::Result<Generation> {
        use crate::schema::tenant_shards::dsl::*;
        let updated = self
            .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
                let updated = diesel::update(tenant_shards)
                    .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
                    .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
                    .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
                    .set((
                        generation.eq(generation + 1),
                        generation_pageserver.eq(node_id.0 as i64),
                    ))
                    // TODO: only returning() the generation column
                    .returning(TenantShardPersistence::as_returning())
                    .get_result(conn)?;

                Ok(updated)
            })
            .await?;

        // Generation is always non-null in the result: if the generation column had been NULL, then we
        // should have experienced an SQL conflict error while executing a query that tries to increment it.
        debug_assert!(updated.generation.is_some());
        let Some(g) = updated.generation else {
            return Err(DatabaseError::Logical(
                "Generation should always be set after incrementing".to_string(),
            )
            .into());
        };

        Ok(Generation::new(g as u32))
    }
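
    // Sketch (hypothetical reconciler step, not part of this module): acquire a fresh generation
    // immediately before issuing the attach request, so the attachment always carries the latest
    // generation and stale attachments can be detected later.
    #[allow(dead_code)]
    async fn attach_with_generation_example(
        &self,
        tenant_shard_id: TenantShardId,
        node_id: NodeId,
    ) -> anyhow::Result<Generation> {
        let generation = self.increment_generation(tenant_shard_id, node_id).await?;
        // ... send the location_config (attach) request to `node_id`, carrying `generation` ...
        Ok(generation)
    }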

    /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
    /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
    /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
    /// latest generation)
    ///
    /// If the tenant doesn't exist, an empty vector is returned.
    ///
    /// Output is sorted by shard number
    pub(crate) async fn tenant_generations(
        &self,
        filter_tenant_id: TenantId,
    ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
        use crate::schema::tenant_shards::dsl::*;
        let rows = self
            .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
                let result = tenant_shards
                    .filter(tenant_id.eq(filter_tenant_id.to_string()))
                    .select(TenantShardPersistence::as_select())
                    .order(shard_number)
                    .load(conn)?;
                Ok(result)
            })
            .await?;

        Ok(rows
            .into_iter()
            .map(|p| ShardGenerationState {
                tenant_shard_id: p
                    .get_tenant_shard_id()
                    .expect("Corrupt tenant shard id in database"),
                generation: p.generation.map(|g| Generation::new(g as u32)),
                generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
            })
            .collect())
    }
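
    // Sketch of the re-check pattern described above (hypothetical caller): read the shard
    // generations before a timeline CRUD operation and again afterwards; if any generation moved,
    // the operation may have landed on a stale attachment and must not be treated as durable.
    #[allow(dead_code)]
    async fn timeline_crud_validation_example(&self, tenant: TenantId) -> DatabaseResult<bool> {
        let before = self.tenant_generations(tenant).await?;
        // ... perform the timeline CRUD operation against the attached pageservers ...
        let after = self.tenant_generations(tenant).await?;

        // Both lists are sorted by shard number, so they can be compared pairwise.
        let still_valid = before.len() == after.len()
            && before
                .iter()
                .zip(after.iter())
                .all(|(b, a)| b.tenant_shard_id == a.tenant_shard_id && b.generation == a.generation);
        Ok(still_valid)
    }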

    /// Read the generation number of specific tenant shards
    ///
    /// Output is unsorted.  Output may not include values for all inputs, if they are missing in the database.
    pub(crate) async fn shard_generations(
        &self,
        mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
    ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
        let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);

        // We will chunk our input to avoid composing arbitrarily long `IN` clauses.  Typically we are
        // called with a single digit number of IDs, but in principle we could be called with tens
        // of thousands (all the shards on one pageserver) from the generation validation API.
        loop {
            // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
            // large query strings.
            let chunk_ids = tenant_shard_ids.by_ref().take(32);

            // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
            let in_clause = chunk_ids
                .map(|tsid| {
                    format!(
                        "('{}', {}, {})",
                        tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
                    )
                })
                .join(",");

            // We are done when our iterator gives us nothing to filter on
            if in_clause.is_empty() {
                break;
            }

            let chunk_rows = self
                .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
                    // diesel doesn't support multi-column IN queries, so we compose raw SQL.  No escaping is required because
                    // the inputs are strongly typed and cannot carry any user-supplied raw string content.
                    let result: Vec<TenantShardPersistence> = diesel::sql_query(
                        format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
                    ).load(conn)?;

                    Ok(result)
                })
                .await?;
            rows.extend(chunk_rows.into_iter())
        }

        Ok(rows
            .into_iter()
            .map(|tsp| {
                (
                    tsp.get_tenant_shard_id()
                        .expect("Bad tenant ID in database"),
                    tsp.generation.map(|g| Generation::new(g as u32)),
                )
            })
            .collect())
    }

    #[allow(non_local_definitions)]
    /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
    ///
    /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
    /// API: use [`Self::increment_generation`] instead.  Setting the generation via this route is a one-time thing
    /// that we only do the first time a tenant is set to an attached policy via /location_config.
    pub(crate) async fn update_tenant_shard(
        &self,
        tenant: TenantFilter,
        input_placement_policy: Option<PlacementPolicy>,
        input_config: Option<TenantConfig>,
        input_generation: Option<Generation>,
        input_scheduling_policy: Option<ShardSchedulingPolicy>,
    ) -> DatabaseResult<()> {
        use crate::schema::tenant_shards::dsl::*;

        self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
            let query = match tenant {
                TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
                    .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
                    .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
                    .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
                    .into_boxed(),
                TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
                    .filter(tenant_id.eq(input_tenant_id.to_string()))
                    .into_boxed(),
            };

            // Clear generation_pageserver if we are moving into a state where we won't have
            // any attached pageservers.
            let input_generation_pageserver = match input_placement_policy {
                None | Some(PlacementPolicy::Attached(_)) => None,
                Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
            };

            #[derive(AsChangeset)]
            #[diesel(table_name = crate::schema::tenant_shards)]
            struct ShardUpdate {
                generation: Option<i32>,
                placement_policy: Option<String>,
                config: Option<String>,
                scheduling_policy: Option<String>,
                generation_pageserver: Option<Option<i64>>,
            }

            let update = ShardUpdate {
                generation: input_generation.map(|g| g.into().unwrap() as i32),
                placement_policy: input_placement_policy
                    .as_ref()
                    .map(|p| serde_json::to_string(&p).unwrap()),
                config: input_config
                    .as_ref()
                    .map(|c| serde_json::to_string(&c).unwrap()),
                scheduling_policy: input_scheduling_policy
                    .map(|p| serde_json::to_string(&p).unwrap()),
                generation_pageserver: input_generation_pageserver,
            };

            query.set(update).execute(conn)?;

            Ok(())
        })
        .await?;

        Ok(())
    }
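
    // Usage sketch (hypothetical caller): pause scheduling for every shard of a tenant, passing
    // `None` for the fields that must stay untouched.  `ShardSchedulingPolicy::Pause` is assumed
    // to be the variant that suspends scheduling for the shards.
    #[allow(dead_code)]
    async fn pause_tenant_scheduling_example(&self, tenant: TenantId) -> DatabaseResult<()> {
        self.update_tenant_shard(
            TenantFilter::Tenant(tenant),
            None, // placement_policy: unchanged
            None, // config: unchanged
            None, // generation: never set via this route outside onboarding
            Some(ShardSchedulingPolicy::Pause),
        )
        .await
    }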

    pub(crate) async fn set_tenant_shard_preferred_azs(
        &self,
        preferred_azs: Vec<(TenantShardId, AvailabilityZone)>,
    ) -> DatabaseResult<Vec<(TenantShardId, AvailabilityZone)>> {
        use crate::schema::tenant_shards::dsl::*;

        self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
            let mut shards_updated = Vec::default();

            for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
                let updated = diesel::update(tenant_shards)
                    .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
                    .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
                    .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
                    .set(preferred_az_id.eq(preferred_az.0.clone()))
                    .execute(conn)?;

                if updated == 1 {
                    shards_updated.push((*tenant_shard_id, preferred_az.clone()));
                }
            }

            Ok(shards_updated)
        })
        .await
    }

    pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
            let updated = diesel::update(tenant_shards)
                .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
                .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
                .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
                .set((
                    generation_pageserver.eq(Option::<i64>::None),
                    placement_policy.eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
                ))
                .execute(conn)?;

            Ok(updated)
        })
        .await?;

        Ok(())
    }
     757              : 
     758              :     // When we start shard splitting, we must durably mark the tenant so that
     759              :     // on restart, we know that we must go through recovery.
     760              :     //
     761              :     // We create the child shards here, so that they will be available for increment_generation calls
     762              :     // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
     763            0 :     pub(crate) async fn begin_shard_split(
     764            0 :         &self,
     765            0 :         old_shard_count: ShardCount,
     766            0 :         split_tenant_id: TenantId,
     767            0 :         parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
     768            0 :     ) -> DatabaseResult<()> {
     769              :         use crate::schema::tenant_shards::dsl::*;
     770            0 :         self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| -> DatabaseResult<()> {
     771              :             // Mark parent shards as splitting
     772              : 
     773            0 :             let updated = diesel::update(tenant_shards)
     774            0 :                 .filter(tenant_id.eq(split_tenant_id.to_string()))
     775            0 :                 .filter(shard_count.eq(old_shard_count.literal() as i32))
     776            0 :                 .set((splitting.eq(1),))
     777            0 :                 .execute(conn)?;
     778            0 :             if u8::try_from(updated)
     779            0 :                 .map_err(|_| DatabaseError::Logical(
     780            0 :                     format!("Overflow existing shard count {} while splitting", updated))
     781            0 :                 )? != old_shard_count.count() {
     782              :                 // Perhaps a deletion or another split raced with this attempt to split, mutating
     783              :                 // the parent shards that we intend to split. In this case the split request should fail.
     784            0 :                 return Err(DatabaseError::Logical(
     785            0 :                     format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
     786            0 :                 ));
     787            0 :             }
     788            0 : 
     789            0 :             // FIXME: spurious clone to sidestep closure move rules
     790            0 :             let parent_to_children = parent_to_children.clone();
     791              : 
     792              :             // Insert child shards
     793            0 :             for (parent_shard_id, children) in parent_to_children {
     794            0 :                 let mut parent = crate::schema::tenant_shards::table
     795            0 :                     .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
     796            0 :                     .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
     797            0 :                     .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
     798            0 :                     .load::<TenantShardPersistence>(conn)?;
     799            0 :                 let parent = if parent.len() != 1 {
     800            0 :                     return Err(DatabaseError::Logical(format!(
     801            0 :                         "Parent shard {parent_shard_id} not found"
     802            0 :                     )));
     803              :                 } else {
     804            0 :                     parent.pop().unwrap()
     805              :                 };
     806            0 :                 for mut shard in children {
     807              :                     // Carry the parent's generation into the child
     808            0 :                     shard.generation = parent.generation;
     809            0 : 
     810            0 :                     debug_assert!(shard.splitting == SplitState::Splitting);
     811            0 :                     diesel::insert_into(tenant_shards)
     812            0 :                         .values(shard)
     813            0 :                         .execute(conn)?;
     814              :                 }
     815              :             }
     816              : 
     817            0 :             Ok(())
     818            0 :         })
     819            0 :         .await
     820            0 :     }
     821              : 
     822              :     // When we finish shard splitting, we must atomically clean up the old shards
     823              :     // and insert the new shards, and clear the splitting marker.
     824            0 :     pub(crate) async fn complete_shard_split(
     825            0 :         &self,
     826            0 :         split_tenant_id: TenantId,
     827            0 :         old_shard_count: ShardCount,
     828            0 :     ) -> DatabaseResult<()> {
     829              :         use crate::schema::tenant_shards::dsl::*;
     830            0 :         self.with_measured_conn(
     831            0 :             DatabaseOperation::CompleteShardSplit,
     832            0 :             move |conn| -> DatabaseResult<()> {
     833            0 :                 // Drop parent shards
     834            0 :                 diesel::delete(tenant_shards)
     835            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     836            0 :                     .filter(shard_count.eq(old_shard_count.literal() as i32))
     837            0 :                     .execute(conn)?;
     838              : 
     839              :                 // Clear sharding flag
     840            0 :                 let updated = diesel::update(tenant_shards)
     841            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     842            0 :                     .set((splitting.eq(0),))
     843            0 :                     .execute(conn)?;
     844            0 :                 debug_assert!(updated > 0);
     845              : 
     846            0 :                 Ok(())
     847            0 :             },
     848            0 :         )
     849            0 :         .await
     850            0 :     }
     851              : 
     852              :     /// Used when the remote part of a shard split failed: we will revert the database state to have only
     853              :     /// the parent shards, with SplitState::Idle.
     854            0 :     pub(crate) async fn abort_shard_split(
     855            0 :         &self,
     856            0 :         split_tenant_id: TenantId,
     857            0 :         new_shard_count: ShardCount,
     858            0 :     ) -> DatabaseResult<AbortShardSplitStatus> {
     859              :         use crate::schema::tenant_shards::dsl::*;
     860            0 :         self.with_measured_conn(
     861            0 :             DatabaseOperation::AbortShardSplit,
     862            0 :             move |conn| -> DatabaseResult<AbortShardSplitStatus> {
     863              :                 // Clear the splitting state on parent shards
     864            0 :                 let updated = diesel::update(tenant_shards)
     865            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     866            0 :                     .filter(shard_count.ne(new_shard_count.literal() as i32))
     867            0 :                     .set((splitting.eq(0),))
     868            0 :                     .execute(conn)?;
     869              : 
     870              :                 // Parent shards are already gone: we cannot abort.
     871            0 :                 if updated == 0 {
     872            0 :                     return Ok(AbortShardSplitStatus::Complete);
     873            0 :                 }
     874            0 : 
     875            0 :                 // Sanity check: if parent shards were present, their cardinality should
     876            0 :                 // be less than the number of child shards.
     877            0 :                 if updated >= new_shard_count.count() as usize {
     878            0 :                     return Err(DatabaseError::Logical(format!(
     879            0 :                         "Unexpected parent shard count {updated} while aborting split to \
     880            0 :                             count {new_shard_count:?} on tenant {split_tenant_id}"
     881            0 :                     )));
     882            0 :                 }
     883            0 : 
     884            0 :                 // Erase child shards
     885            0 :                 diesel::delete(tenant_shards)
     886            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     887            0 :                     .filter(shard_count.eq(new_shard_count.literal() as i32))
     888            0 :                     .execute(conn)?;
     889              : 
     890            0 :                 Ok(AbortShardSplitStatus::Aborted)
     891            0 :             },
     892            0 :         )
     893            0 :         .await
     894            0 :     }
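
    // A minimal sketch of how a caller might tie the split lifecycle together
    // once the remote (pageserver-side) split work has succeeded or failed.
    // The method name and the `remote_ok` flag are illustrative, not part of
    // the controller's API.
    #[allow(dead_code)]
    pub(crate) async fn finish_shard_split(
        &self,
        split_tenant_id: TenantId,
        old_shard_count: ShardCount,
        new_shard_count: ShardCount,
        remote_ok: bool,
    ) -> DatabaseResult<()> {
        if remote_ok {
            // Atomically drop parents and clear the children's splitting marker.
            self.complete_shard_split(split_tenant_id, old_shard_count)
                .await
        } else {
            // Revert to parents-only. A returned `AbortShardSplitStatus::Complete`
            // would mean the parents were already gone, i.e. a completion won the race.
            self.abort_shard_split(split_tenant_id, new_shard_count)
                .await?;
            Ok(())
        }
    }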
     895              : 
     896              :     /// Stores the latest metadata health updates durably, updating existing entries on conflict.
     897              :     ///
     898              :     /// **Correctness:** the records in `healthy_records` and `unhealthy_records` must all belong to tenant shards managed by the storage controller.
     899              :     #[allow(dead_code)]
     900            0 :     pub(crate) async fn update_metadata_health_records(
     901            0 :         &self,
     902            0 :         healthy_records: Vec<MetadataHealthPersistence>,
     903            0 :         unhealthy_records: Vec<MetadataHealthPersistence>,
     904            0 :         now: chrono::DateTime<chrono::Utc>,
     905            0 :     ) -> DatabaseResult<()> {
     906              :         use crate::schema::metadata_health::dsl::*;
     907              : 
     908            0 :         self.with_measured_conn(
     909            0 :             DatabaseOperation::UpdateMetadataHealth,
     910            0 :             move |conn| -> DatabaseResult<_> {
     911            0 :                 diesel::insert_into(metadata_health)
     912            0 :                     .values(&healthy_records)
     913            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
     914            0 :                     .do_update()
     915            0 :                     .set((healthy.eq(true), last_scrubbed_at.eq(now)))
     916            0 :                     .execute(conn)?;
     917              : 
     918            0 :                 diesel::insert_into(metadata_health)
     919            0 :                     .values(&unhealthy_records)
     920            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
     921            0 :                     .do_update()
     922            0 :                     .set((healthy.eq(false), last_scrubbed_at.eq(now)))
     923            0 :                     .execute(conn)?;
     924            0 :                 Ok(())
     925            0 :             },
     926            0 :         )
     927            0 :         .await
     928            0 :     }
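
    // A minimal sketch of how a scrubber-style caller might persist one round
    // of health results using the method above; the method name and shard-id
    // inputs are illustrative.
    #[allow(dead_code)]
    pub(crate) async fn record_scrub_results(
        &self,
        healthy: Vec<TenantShardId>,
        unhealthy: Vec<TenantShardId>,
    ) -> DatabaseResult<()> {
        let now = chrono::Utc::now();
        let healthy = healthy
            .into_iter()
            .map(|id| MetadataHealthPersistence::new(id, true, now))
            .collect();
        let unhealthy = unhealthy
            .into_iter()
            .map(|id| MetadataHealthPersistence::new(id, false, now))
            .collect();
        self.update_metadata_health_records(healthy, unhealthy, now)
            .await
    }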
     929              : 
     930              :     /// Lists all the metadata health records.
     931              :     #[allow(dead_code)]
     932            0 :     pub(crate) async fn list_metadata_health_records(
     933            0 :         &self,
     934            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
     935            0 :         self.with_measured_conn(
     936            0 :             DatabaseOperation::ListMetadataHealth,
     937            0 :             move |conn| -> DatabaseResult<_> {
     938            0 :                 Ok(
     939            0 :                     crate::schema::metadata_health::table
     940            0 :                         .load::<MetadataHealthPersistence>(conn)?,
     941              :                 )
     942            0 :             },
     943            0 :         )
     944            0 :         .await
     945            0 :     }
     946              : 
     947              :     /// Lists all the metadata health records that are unhealthy.
     948              :     #[allow(dead_code)]
     949            0 :     pub(crate) async fn list_unhealthy_metadata_health_records(
     950            0 :         &self,
     951            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
     952              :         use crate::schema::metadata_health::dsl::*;
     953            0 :         self.with_measured_conn(
     954            0 :             DatabaseOperation::ListMetadataHealthUnhealthy,
     955            0 :             move |conn| -> DatabaseResult<_> {
     956            0 :                 Ok(crate::schema::metadata_health::table
     957            0 :                     .filter(healthy.eq(false))
     958            0 :                     .load::<MetadataHealthPersistence>(conn)?)
     959            0 :             },
     960            0 :         )
     961            0 :         .await
     962            0 :     }
     963              : 
     964              :     /// Lists all the metadata health records that have not been scrubbed since the given `earlier` timestamp.
     965              :     #[allow(dead_code)]
     966            0 :     pub(crate) async fn list_outdated_metadata_health_records(
     967            0 :         &self,
     968            0 :         earlier: chrono::DateTime<chrono::Utc>,
     969            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
     970              :         use crate::schema::metadata_health::dsl::*;
     971              : 
     972            0 :         self.with_measured_conn(
     973            0 :             DatabaseOperation::ListMetadataHealthOutdated,
     974            0 :             move |conn| -> DatabaseResult<_> {
     975            0 :                 let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
     976            0 :                 let res = query.load::<MetadataHealthPersistence>(conn)?;
     977              : 
     978            0 :                 Ok(res)
     979            0 :             },
     980            0 :         )
     981            0 :         .await
     982            0 :     }
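
    // A minimal sketch of the expected query shape for the method above:
    // "anything not scrubbed within the last 24 hours". The wrapper name and
    // the window length are illustrative.
    #[allow(dead_code)]
    pub(crate) async fn list_stale_metadata_health_records(
        &self,
    ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
        let earlier = chrono::Utc::now() - chrono::Duration::hours(24);
        self.list_outdated_metadata_health_records(earlier).await
    }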
     983              : 
     984              :     /// Get the current entry from the `leader` table if one exists.
     985              :     /// It is an error for the table to contain more than one entry.
     986            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
     987            0 :         let mut leader: Vec<ControllerPersistence> = self
     988            0 :             .with_measured_conn(
     989            0 :                 DatabaseOperation::GetLeader,
     990            0 :                 move |conn| -> DatabaseResult<_> {
     991            0 :                     Ok(crate::schema::controllers::table.load::<ControllerPersistence>(conn)?)
     992            0 :                 },
     993            0 :             )
     994            0 :             .await?;
     995              : 
     996            0 :         if leader.len() > 1 {
     997            0 :             return Err(DatabaseError::Logical(format!(
     998            0 :                 "More than one entry present in the leader table: {leader:?}"
     999            0 :             )));
    1000            0 :         }
    1001            0 : 
    1002            0 :         Ok(leader.pop())
    1003            0 :     }
    1004              : 
    1005              :     /// Update the leader entry with compare-exchange semantics. If `prev` does not
    1006              :     /// match the current leader entry, the update fails with a logical error.
    1007              :     /// When `prev` is `None`, a fresh entry is inserted instead.
    1008            0 :     pub(crate) async fn update_leader(
    1009            0 :         &self,
    1010            0 :         prev: Option<ControllerPersistence>,
    1011            0 :         new: ControllerPersistence,
    1012            0 :     ) -> DatabaseResult<()> {
    1013              :         use crate::schema::controllers::dsl::*;
    1014              : 
    1015            0 :         let updated = self
    1016            0 :             .with_measured_conn(
    1017            0 :                 DatabaseOperation::UpdateLeader,
    1018            0 :                 move |conn| -> DatabaseResult<usize> {
    1019            0 :                     let updated = match &prev {
    1020            0 :                         Some(prev) => diesel::update(controllers)
    1021            0 :                             .filter(address.eq(prev.address.clone()))
    1022            0 :                             .filter(started_at.eq(prev.started_at))
    1023            0 :                             .set((
    1024            0 :                                 address.eq(new.address.clone()),
    1025            0 :                                 started_at.eq(new.started_at),
    1026            0 :                             ))
    1027            0 :                             .execute(conn)?,
    1028            0 :                         None => diesel::insert_into(controllers)
    1029            0 :                             .values(new.clone())
    1030            0 :                             .execute(conn)?,
    1031              :                     };
    1032              : 
    1033            0 :                     Ok(updated)
    1034            0 :                 },
    1035            0 :             )
    1036            0 :             .await?;
    1037              : 
    1038            0 :         if updated == 0 {
    1039            0 :             return Err(DatabaseError::Logical(
    1040            0 :                 "Leader table update failed".to_string(),
    1041            0 :             ));
    1042            0 :         }
    1043            0 : 
    1044            0 :         Ok(())
    1045            0 :     }
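
    // A minimal sketch of a compare-exchange step-up built from `get_leader`
    // and `update_leader`. If another controller changes the row between the
    // read and the write, the filtered UPDATE matches zero rows and
    // `update_leader` returns a logical error. The method name is illustrative.
    #[allow(dead_code)]
    pub(crate) async fn try_become_leader(
        &self,
        new: ControllerPersistence,
    ) -> DatabaseResult<()> {
        let prev = self.get_leader().await?;
        self.update_leader(prev, new).await
    }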
    1046              : 
    1047              :     /// At startup, load the full set of safekeepers known to the storage controller
    1048            0 :     pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
    1049            0 :         let safekeepers: Vec<SafekeeperPersistence> = self
    1050            0 :             .with_measured_conn(
    1051            0 :                 DatabaseOperation::ListNodes,
    1052            0 :                 move |conn| -> DatabaseResult<_> {
    1053            0 :                     Ok(crate::schema::safekeepers::table.load::<SafekeeperPersistence>(conn)?)
    1054            0 :                 },
    1055            0 :             )
    1056            0 :             .await?;
    1057              : 
    1058            0 :         tracing::info!("list_safekeepers: loaded {} safekeepers", safekeepers.len());
    1059              : 
    1060            0 :         Ok(safekeepers)
    1061            0 :     }
    1062              : 
    1063            0 :     pub(crate) async fn safekeeper_get(
    1064            0 :         &self,
    1065            0 :         id: i64,
    1066            0 :     ) -> Result<SafekeeperPersistence, DatabaseError> {
    1067              :         use crate::schema::safekeepers::dsl::{id as id_column, safekeepers};
    1068            0 :         self.with_conn(move |conn| -> DatabaseResult<SafekeeperPersistence> {
    1069            0 :             Ok(safekeepers
    1070            0 :                 .filter(id_column.eq(&id))
    1071            0 :                 .select(SafekeeperPersistence::as_select())
    1072            0 :                 .get_result(conn)?)
    1073            0 :         })
    1074            0 :         .await
    1075            0 :     }
    1076              : 
    1077            0 :     pub(crate) async fn safekeeper_upsert(
    1078            0 :         &self,
    1079            0 :         record: SafekeeperUpsert,
    1080            0 :     ) -> Result<(), DatabaseError> {
    1081              :         use crate::schema::safekeepers::dsl::*;
    1082              : 
    1083            0 :         self.with_conn(move |conn| -> DatabaseResult<()> {
    1084            0 :             let bind = record
    1085            0 :                 .as_insert_or_update()
    1086            0 :                 .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
    1087              : 
    1088            0 :             let inserted_updated = diesel::insert_into(safekeepers)
    1089            0 :                 .values(&bind)
    1090            0 :                 .on_conflict(id)
    1091            0 :                 .do_update()
    1092            0 :                 .set(&bind)
    1093            0 :                 .execute(conn)?;
    1094              : 
    1095            0 :             if inserted_updated != 1 {
    1096            0 :                 return Err(DatabaseError::Logical(format!(
    1097            0 :                     "unexpected number of rows ({})",
    1098            0 :                     inserted_updated
    1099            0 :                 )));
    1100            0 :             }
    1101            0 : 
    1102            0 :             Ok(())
    1103            0 :         })
    1104            0 :         .await
    1105            0 :     }
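
    // A minimal sketch showing that the upsert and the point lookup above
    // compose: register a safekeeper, then read the stored row back. The
    // method name is illustrative.
    #[allow(dead_code)]
    pub(crate) async fn safekeeper_register(
        &self,
        record: SafekeeperUpsert,
    ) -> Result<SafekeeperPersistence, DatabaseError> {
        let id = record.id;
        self.safekeeper_upsert(record).await?;
        self.safekeeper_get(id).await
    }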
    1106              : }
    1107              : 
    1108              : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
    1109              : #[derive(
    1110            0 :     QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
    1111              : )]
    1112              : #[diesel(table_name = crate::schema::tenant_shards)]
    1113              : pub(crate) struct TenantShardPersistence {
    1114              :     #[serde(default)]
    1115              :     pub(crate) tenant_id: String,
    1116              :     #[serde(default)]
    1117              :     pub(crate) shard_number: i32,
    1118              :     #[serde(default)]
    1119              :     pub(crate) shard_count: i32,
    1120              :     #[serde(default)]
    1121              :     pub(crate) shard_stripe_size: i32,
    1122              : 
    1123              :     // Latest generation number: next time we attach, increment this
    1124              :     // and use the incremented number when attaching.
    1125              :     //
    1126              :     // Generation is only None when first onboarding a tenant, where it may
    1127              :     // be in PlacementPolicy::Secondary and therefore have no valid generation state.
    1128              :     pub(crate) generation: Option<i32>,
    1129              : 
    1130              :     // Currently attached pageserver
    1131              :     #[serde(rename = "pageserver")]
    1132              :     pub(crate) generation_pageserver: Option<i64>,
    1133              : 
    1134              :     #[serde(default)]
    1135              :     pub(crate) placement_policy: String,
    1136              :     #[serde(default)]
    1137              :     pub(crate) splitting: SplitState,
    1138              :     #[serde(default)]
    1139              :     pub(crate) config: String,
    1140              :     #[serde(default)]
    1141              :     pub(crate) scheduling_policy: String,
    1142              : 
    1143              :     // Hint that we should attempt to schedule this tenant shard in the given
    1144              :     // availability zone in order to minimise the chances of cross-AZ communication
    1145              :     // with compute.
    1146              :     pub(crate) preferred_az_id: Option<String>,
    1147              : }
    1148              : 
    1149              : impl TenantShardPersistence {
    1150            0 :     pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
    1151            0 :         if self.shard_count == 0 {
    1152            0 :             Ok(ShardIdentity::unsharded())
    1153              :         } else {
    1154            0 :             Ok(ShardIdentity::new(
    1155            0 :                 ShardNumber(self.shard_number as u8),
    1156            0 :                 ShardCount::new(self.shard_count as u8),
    1157            0 :                 ShardStripeSize(self.shard_stripe_size as u32),
    1158            0 :             )?)
    1159              :         }
    1160            0 :     }
    1161              : 
    1162            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    1163            0 :         Ok(TenantShardId {
    1164            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1165            0 :             shard_number: ShardNumber(self.shard_number as u8),
    1166            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    1167              :         })
    1168            0 :     }
    1169              : }
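
// A minimal sketch of decoding a stored row back into in-memory shard
// coordinates via the two accessors above. The function name is illustrative;
// the string-typed `placement_policy`/`config` columns are deserialized
// elsewhere and are not touched here.
#[allow(dead_code)]
fn decode_tenant_shard_row(
    row: &TenantShardPersistence,
) -> anyhow::Result<(TenantShardId, ShardIdentity)> {
    let tenant_shard_id = row.get_tenant_shard_id()?;
    let shard_identity = row.get_shard_identity()?;
    Ok((tenant_shard_id, shard_identity))
}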
    1170              : 
    1171              : /// Parts of [`crate::node::Node`] that are stored durably
    1172            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
    1173              : #[diesel(table_name = crate::schema::nodes)]
    1174              : pub(crate) struct NodePersistence {
    1175              :     pub(crate) node_id: i64,
    1176              :     pub(crate) scheduling_policy: String,
    1177              :     pub(crate) listen_http_addr: String,
    1178              :     pub(crate) listen_http_port: i32,
    1179              :     pub(crate) listen_pg_addr: String,
    1180              :     pub(crate) listen_pg_port: i32,
    1181              :     pub(crate) availability_zone_id: String,
    1182              : }
    1183              : 
    1184              : /// Tenant metadata health status that is stored durably.
    1185            0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
    1186              : #[diesel(table_name = crate::schema::metadata_health)]
    1187              : pub(crate) struct MetadataHealthPersistence {
    1188              :     #[serde(default)]
    1189              :     pub(crate) tenant_id: String,
    1190              :     #[serde(default)]
    1191              :     pub(crate) shard_number: i32,
    1192              :     #[serde(default)]
    1193              :     pub(crate) shard_count: i32,
    1194              : 
    1195              :     pub(crate) healthy: bool,
    1196              :     pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1197              : }
    1198              : 
    1199              : impl MetadataHealthPersistence {
    1200            0 :     pub fn new(
    1201            0 :         tenant_shard_id: TenantShardId,
    1202            0 :         healthy: bool,
    1203            0 :         last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1204            0 :     ) -> Self {
    1205            0 :         let tenant_id = tenant_shard_id.tenant_id.to_string();
    1206            0 :         let shard_number = tenant_shard_id.shard_number.0 as i32;
    1207            0 :         let shard_count = tenant_shard_id.shard_count.literal() as i32;
    1208            0 : 
    1209            0 :         MetadataHealthPersistence {
    1210            0 :             tenant_id,
    1211            0 :             shard_number,
    1212            0 :             shard_count,
    1213            0 :             healthy,
    1214            0 :             last_scrubbed_at,
    1215            0 :         }
    1216            0 :     }
    1217              : 
    1218              :     #[allow(dead_code)]
    1219            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    1220            0 :         Ok(TenantShardId {
    1221            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1222            0 :             shard_number: ShardNumber(self.shard_number as u8),
    1223            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    1224              :         })
    1225            0 :     }
    1226              : }
    1227              : 
    1228              : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
    1229            0 :     fn from(value: MetadataHealthPersistence) -> Self {
    1230            0 :         MetadataHealthRecord {
    1231            0 :             tenant_shard_id: value
    1232            0 :                 .get_tenant_shard_id()
    1233            0 :                 .expect("stored tenant id should be valid"),
    1234            0 :             healthy: value.healthy,
    1235            0 :             last_scrubbed_at: value.last_scrubbed_at,
    1236            0 :         }
    1237            0 :     }
    1238              : }
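
// A minimal sketch of mapping stored health rows to the API type via the
// `From` impl above, as a response handler might before serializing. The
// function name is illustrative.
#[allow(dead_code)]
fn to_health_records(rows: Vec<MetadataHealthPersistence>) -> Vec<MetadataHealthRecord> {
    rows.into_iter().map(MetadataHealthRecord::from).collect()
}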
    1239              : 
    1240              : #[derive(
    1241            0 :     Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
    1242              : )]
    1243              : #[diesel(table_name = crate::schema::controllers)]
    1244              : pub(crate) struct ControllerPersistence {
    1245              :     pub(crate) address: String,
    1246              :     pub(crate) started_at: chrono::DateTime<chrono::Utc>,
    1247              : }
    1248              : 
    1249              : // What we store in the database for each safekeeper
    1250            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
    1251              : #[diesel(table_name = crate::schema::safekeepers)]
    1252              : pub(crate) struct SafekeeperPersistence {
    1253              :     pub(crate) id: i64,
    1254              :     pub(crate) region_id: String,
    1255              :     /// Version 1 is special: it means the safekeeper was just created and has
    1256              :     /// not yet posted to storcon. Zero or negative values are not expected.
    1257              :     /// Otherwise this is the number from the `release-$(number_of_commits_on_branch)` tag.
    1258              :     pub(crate) version: i64,
    1259              :     pub(crate) host: String,
    1260              :     pub(crate) port: i32,
    1261              :     pub(crate) active: bool,
    1262              :     pub(crate) http_port: i32,
    1263              :     pub(crate) availability_zone_id: String,
    1264              :     pub(crate) scheduling_policy: String,
    1265              : }
    1266              : 
    1267              : impl SafekeeperPersistence {
    1268            0 :     pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
    1269            0 :         let scheduling_policy =
    1270            0 :             SkSchedulingPolicy::from_str(&self.scheduling_policy).map_err(|e| {
    1271            0 :                 DatabaseError::Logical(format!("can't construct SkSchedulingPolicy: {e:?}"))
    1272            0 :             })?;
    1273              :         // omit the `active` flag on purpose: it is deprecated.
    1274            0 :         Ok(SafekeeperDescribeResponse {
    1275            0 :             id: NodeId(self.id as u64),
    1276            0 :             region_id: self.region_id.clone(),
    1277            0 :             version: self.version,
    1278            0 :             host: self.host.clone(),
    1279            0 :             port: self.port,
    1280            0 :             http_port: self.http_port,
    1281            0 :             availability_zone_id: self.availability_zone_id.clone(),
    1282            0 :             scheduling_policy,
    1283            0 :         })
    1284            0 :     }
    1285              : }
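
// A minimal sketch of converting a batch of loaded safekeeper rows into
// describe responses, failing on the first row whose stored scheduling policy
// does not parse. The function name is illustrative.
#[allow(dead_code)]
fn describe_all_safekeepers(
    rows: &[SafekeeperPersistence],
) -> Result<Vec<SafekeeperDescribeResponse>, DatabaseError> {
    rows.iter().map(|r| r.as_describe_response()).collect()
}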
    1286              : 
    1287              : /// What we expect from the upsert HTTP API
    1288            0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
    1289              : pub(crate) struct SafekeeperUpsert {
    1290              :     pub(crate) id: i64,
    1291              :     pub(crate) region_id: String,
    1292              :     /// Version 1 is special: it means the safekeeper was just created and has
    1293              :     /// not yet posted to storcon. Zero or negative values are not expected.
    1294              :     /// Otherwise this is the number from the `release-$(number_of_commits_on_branch)` tag.
    1295              :     pub(crate) version: i64,
    1296              :     pub(crate) host: String,
    1297              :     pub(crate) port: i32,
    1298              :     pub(crate) active: bool,
    1299              :     pub(crate) http_port: i32,
    1300              :     pub(crate) availability_zone_id: String,
    1301              : }
    1302              : 
    1303              : impl SafekeeperUpsert {
    1304            0 :     fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
    1305            0 :         if self.version < 0 {
    1306            0 :             anyhow::bail!("negative version: {}", self.version);
    1307            0 :         }
    1308            0 :         Ok(InsertUpdateSafekeeper {
    1309            0 :             id: self.id,
    1310            0 :             region_id: &self.region_id,
    1311            0 :             version: self.version,
    1312            0 :             host: &self.host,
    1313            0 :             port: self.port,
    1314            0 :             active: self.active,
    1315            0 :             http_port: self.http_port,
    1316            0 :             availability_zone_id: &self.availability_zone_id,
    1317            0 :             // `None` means this column should not be updated here; it is updated via other means.
    1318            0 :             scheduling_policy: None,
    1319            0 :         })
    1320            0 :     }
    1321              : }
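
// A minimal sketch of an upsert payload as a freshly created safekeeper might
// send it: version 1 denotes "just created", per the comment on `version`
// above. The function name and all field values are illustrative.
#[allow(dead_code)]
fn example_safekeeper_upsert() -> SafekeeperUpsert {
    SafekeeperUpsert {
        id: 1,
        region_id: "aws-us-east-2".to_string(),
        version: 1,
        host: "sk-1.local".to_string(),
        port: 5454,
        active: true,
        http_port: 7676,
        availability_zone_id: "us-east-2b".to_string(),
    }
}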
    1322              : 
    1323            0 : #[derive(Insertable, AsChangeset)]
    1324              : #[diesel(table_name = crate::schema::safekeepers)]
    1325              : struct InsertUpdateSafekeeper<'a> {
    1326              :     id: i64,
    1327              :     region_id: &'a str,
    1328              :     version: i64,
    1329              :     host: &'a str,
    1330              :     port: i32,
    1331              :     active: bool,
    1332              :     http_port: i32,
    1333              :     availability_zone_id: &'a str,
    1334              :     scheduling_policy: Option<&'a str>,
    1335              : }
        

Generated by: LCOV version 2.1-beta