LCOV - code coverage report
Current view: top level - storage_controller/src - persistence.rs (source / functions)
Test:      feead26e04cdef6e988ff1765b1cb7075eb48d3d.info
Test Date: 2025-02-28 12:11:00

Coverage          Total    Hit
Lines:      0.7 %  1032      7
Functions:  0.3 %   379      1

            Line data    Source code
       1              : pub(crate) mod split_state;
       2              : use std::collections::HashMap;
       3              : use std::str::FromStr;
       4              : use std::sync::Arc;
       5              : use std::time::{Duration, Instant};
       6              : 
       7              : use diesel::deserialize::{FromSql, FromSqlRow};
       8              : use diesel::pg::Pg;
       9              : use diesel::prelude::*;
      10              : use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
      11              : use diesel_async::pooled_connection::bb8::Pool;
      12              : use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
      13              : use diesel_async::{AsyncPgConnection, RunQueryDsl};
      14              : use diesel_migrations::{EmbeddedMigrations, embed_migrations};
      15              : use futures::FutureExt;
      16              : use futures::future::BoxFuture;
      17              : use itertools::Itertools;
      18              : use pageserver_api::controller_api::{
      19              :     AvailabilityZone, MetadataHealthRecord, NodeSchedulingPolicy, PlacementPolicy,
      20              :     SafekeeperDescribeResponse, ShardSchedulingPolicy, SkSchedulingPolicy,
      21              : };
      22              : use pageserver_api::models::TenantConfig;
      23              : use pageserver_api::shard::{
      24              :     ShardConfigError, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      25              : };
      26              : use rustls::client::WebPkiServerVerifier;
      27              : use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
      28              : use rustls::crypto::ring;
      29              : use scoped_futures::ScopedBoxFuture;
      30              : use serde::{Deserialize, Serialize};
      31              : use utils::generation::Generation;
      32              : use utils::id::{NodeId, TenantId};
      33              : 
      34              : use self::split_state::SplitState;
      35              : use crate::metrics::{
      36              :     DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
      37              : };
      38              : use crate::node::Node;
      39              : const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
      40              : 
      41              : /// ## What do we store?
      42              : ///
      43              : /// The storage controller service does not store most of its state durably.
      44              : ///
      45              : /// The essential things to store durably are:
      46              : /// - generation numbers, as these must always advance monotonically to ensure data safety.
      47              : /// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
      48              : /// - Node's scheduling policies, as the source of truth for these is something external.
      49              : ///
      50              : /// Other things we store durably as an implementation detail:
       51              : /// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
      52              : ///   but it is operationally simpler to make this service the authority for which nodes
      53              : ///   it talks to.
      54              : ///
      55              : /// ## Performance/efficiency
      56              : ///
      57              : /// The storage controller service does not go via the database for most things: there are
      58              : /// a couple of places where we must, and where efficiency matters:
      59              : /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
      60              : ///   before it can attach a tenant, so this acts as a bound on how fast things like
      61              : ///   failover can happen.
      62              : /// - Pageserver re-attach: we will increment many shards' generations when this happens,
      63              : ///   so it is important to avoid e.g. issuing O(N) queries.
      64              : ///
      65              : /// Database calls relating to nodes have low performance requirements, as they are very rarely
      66              : /// updated, and reads of nodes are always from memory, not the database.  We only require that
      67              : /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
      68              : pub struct Persistence {
      69              :     connection_pool: Pool<AsyncPgConnection>,
      70              : }
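
// A minimal, illustrative startup sketch (not part of persistence.rs): it only strings
// together the entry points defined in this file, in the order the doc comment above
// implies. It assumes it lives inside the storage_controller crate (most of these
// methods are pub(crate)); `example_startup` is a hypothetical name and the timeout
// value is arbitrary.
async fn example_startup(database_url: String) -> anyhow::Result<()> {
    // Tolerate the database coming up concurrently with the storage controller.
    Persistence::await_connection(&database_url, Duration::from_secs(30)).await?;

    // Build the connection pool, then apply any pending diesel migrations.
    let persistence = Persistence::new(database_url).await;
    persistence.migration_run().await?;

    // Load the durable state described above: nodes, then non-detached tenant shards.
    let _nodes = persistence.list_nodes().await?;
    let _shards = persistence.load_active_tenant_shards().await?;
    Ok(())
}
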
      71              : 
      72              : /// Legacy format, for use in JSON compat objects in test environment
      73            0 : #[derive(Serialize, Deserialize)]
      74              : struct JsonPersistence {
      75              :     tenants: HashMap<TenantShardId, TenantShardPersistence>,
      76              : }
      77              : 
      78              : #[derive(thiserror::Error, Debug)]
      79              : pub(crate) enum DatabaseError {
      80              :     #[error(transparent)]
      81              :     Query(#[from] diesel::result::Error),
      82              :     #[error(transparent)]
      83              :     Connection(#[from] diesel::result::ConnectionError),
      84              :     #[error(transparent)]
      85              :     ConnectionPool(#[from] diesel_async::pooled_connection::bb8::RunError),
      86              :     #[error("Logical error: {0}")]
      87              :     Logical(String),
      88              :     #[error("Migration error: {0}")]
      89              :     Migration(String),
      90              : }
      91              : 
      92              : #[derive(measured::FixedCardinalityLabel, Copy, Clone)]
      93              : pub(crate) enum DatabaseOperation {
      94              :     InsertNode,
      95              :     UpdateNode,
      96              :     DeleteNode,
      97              :     ListNodes,
      98              :     BeginShardSplit,
      99              :     CompleteShardSplit,
     100              :     AbortShardSplit,
     101              :     Detach,
     102              :     ReAttach,
     103              :     IncrementGeneration,
     104              :     TenantGenerations,
     105              :     ShardGenerations,
     106              :     ListTenantShards,
     107              :     LoadTenant,
     108              :     InsertTenantShards,
     109              :     UpdateTenantShard,
     110              :     DeleteTenant,
     111              :     UpdateTenantConfig,
     112              :     UpdateMetadataHealth,
     113              :     ListMetadataHealth,
     114              :     ListMetadataHealthUnhealthy,
     115              :     ListMetadataHealthOutdated,
     116              :     ListSafekeepers,
     117              :     GetLeader,
     118              :     UpdateLeader,
     119              :     SetPreferredAzs,
     120              : }
     121              : 
     122              : #[must_use]
     123              : pub(crate) enum AbortShardSplitStatus {
     124              :     /// We aborted the split in the database by reverting to the parent shards
     125              :     Aborted,
     126              :     /// The split had already been persisted.
     127              :     Complete,
     128              : }
     129              : 
     130              : pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
     131              : 
     132              : /// Some methods can operate on either a whole tenant or a single shard
     133              : #[derive(Clone)]
     134              : pub(crate) enum TenantFilter {
     135              :     Tenant(TenantId),
     136              :     Shard(TenantShardId),
     137              : }
     138              : 
     139              : /// Represents the results of looking up generation+pageserver for the shards of a tenant
     140              : pub(crate) struct ShardGenerationState {
     141              :     pub(crate) tenant_shard_id: TenantShardId,
     142              :     pub(crate) generation: Option<Generation>,
     143              :     pub(crate) generation_pageserver: Option<NodeId>,
     144              : }
     145              : 
     146              : // A generous allowance for how many times we may retry serializable transactions
     147              : // before giving up.  This is not expected to be hit: it is a defensive measure in case we
     148              : // somehow engineer a situation where duelling transactions might otherwise live-lock.
     149              : const MAX_RETRIES: usize = 128;
     150              : 
     151              : impl Persistence {
     152              :     // The default postgres connection limit is 100.  We use up to 99, to leave one free for a human admin under
     153              :     // normal circumstances.  This assumes we have exclusive use of the database cluster to which we connect.
     154              :     pub const MAX_CONNECTIONS: u32 = 99;
     155              : 
     156              :     // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
     157              :     const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
     158              :     const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
     159              : 
     160            0 :     pub async fn new(database_url: String) -> Self {
     161            0 :         let mut mgr_config = ManagerConfig::default();
     162            0 :         mgr_config.custom_setup = Box::new(establish_connection_rustls);
     163            0 : 
     164            0 :         let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
     165            0 :             database_url,
     166            0 :             mgr_config,
     167            0 :         );
     168              : 
     169              :         // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
     170              :         // to execute queries (database queries are not generally on latency-sensitive paths).
     171            0 :         let connection_pool = Pool::builder()
     172            0 :             .max_size(Self::MAX_CONNECTIONS)
     173            0 :             .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
     174            0 :             .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
     175            0 :             // Always keep at least one connection ready to go
     176            0 :             .min_idle(Some(1))
     177            0 :             .test_on_check_out(true)
     178            0 :             .build(manager)
     179            0 :             .await
     180            0 :             .expect("Could not build connection pool");
     181            0 : 
     182            0 :         Self { connection_pool }
     183            0 :     }
     184              : 
     185              :     /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
      186              :     /// database and the storage controller, so the database might not be available right away
     187            0 :     pub async fn await_connection(
     188            0 :         database_url: &str,
     189            0 :         timeout: Duration,
     190            0 :     ) -> Result<(), diesel::ConnectionError> {
     191            0 :         let started_at = Instant::now();
     192            0 :         log_postgres_connstr_info(database_url)
     193            0 :             .map_err(|e| diesel::ConnectionError::InvalidConnectionUrl(e.to_string()))?;
     194              :         loop {
     195            0 :             match establish_connection_rustls(database_url).await {
     196              :                 Ok(_) => {
     197            0 :                     tracing::info!("Connected to database.");
     198            0 :                     return Ok(());
     199              :                 }
     200            0 :                 Err(e) => {
     201            0 :                     if started_at.elapsed() > timeout {
     202            0 :                         return Err(e);
     203              :                     } else {
     204            0 :                         tracing::info!("Database not yet available, waiting... ({e})");
     205            0 :                         tokio::time::sleep(Duration::from_millis(100)).await;
     206              :                     }
     207              :                 }
     208              :             }
     209              :         }
     210            0 :     }
     211              : 
     212              :     /// Execute the diesel migrations that are built into this binary
     213            0 :     pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
     214              :         use diesel_migrations::{HarnessWithOutput, MigrationHarness};
     215              : 
      216              :         // Can't use self.with_conn here as we do spawn_blocking, which requires 'static.
     217            0 :         let conn = self
     218            0 :             .connection_pool
     219            0 :             .dedicated_connection()
     220            0 :             .await
     221            0 :             .map_err(|e| DatabaseError::Migration(e.to_string()))?;
     222            0 :         let mut async_wrapper: AsyncConnectionWrapper<AsyncPgConnection> =
     223            0 :             AsyncConnectionWrapper::from(conn);
     224            0 :         tokio::task::spawn_blocking(move || {
     225            0 :             let mut retry_count = 0;
     226            0 :             loop {
     227            0 :                 let result = HarnessWithOutput::write_to_stdout(&mut async_wrapper)
     228            0 :                     .run_pending_migrations(MIGRATIONS)
     229            0 :                     .map(|_| ())
     230            0 :                     .map_err(|e| DatabaseError::Migration(e.to_string()));
     231            0 :                 match result {
     232            0 :                     Ok(r) => break Ok(r),
     233              :                     Err(
     234            0 :                         err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
     235            0 :                             diesel::result::DatabaseErrorKind::SerializationFailure,
     236            0 :                             _,
     237            0 :                         )),
     238            0 :                     ) => {
     239            0 :                         retry_count += 1;
     240            0 :                         if retry_count > MAX_RETRIES {
     241            0 :                             tracing::error!(
     242            0 :                                 "Exceeded max retries on SerializationFailure errors: {err:?}"
     243              :                             );
     244            0 :                             break Err(err);
     245              :                         } else {
     246              :                             // Retry on serialization errors: these are expected, because even though our
     247              :                             // transactions don't fight for the same rows, they will occasionally collide
     248              :                             // on index pages (e.g. increment_generation for unrelated shards can collide)
     249            0 :                             tracing::debug!(
     250            0 :                                 "Retrying transaction on serialization failure {err:?}"
     251              :                             );
     252            0 :                             continue;
     253              :                         }
     254              :                     }
     255            0 :                     Err(e) => break Err(e),
     256              :                 }
     257              :             }
     258            0 :         })
     259            0 :         .await
     260            0 :         .map_err(|e| DatabaseError::Migration(e.to_string()))??;
     261            0 :         Ok(())
     262            0 :     }
     263              : 
     264              :     /// Wraps `with_conn` in order to collect latency and error metrics
     265            0 :     async fn with_measured_conn<'a, 'b, F, R>(
     266            0 :         &self,
     267            0 :         op: DatabaseOperation,
     268            0 :         func: F,
     269            0 :     ) -> DatabaseResult<R>
     270            0 :     where
     271            0 :         F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
     272            0 :             + Send
     273            0 :             + std::marker::Sync
     274            0 :             + 'a,
     275            0 :         R: Send + 'b,
     276            0 :     {
     277            0 :         let latency = &METRICS_REGISTRY
     278            0 :             .metrics_group
     279            0 :             .storage_controller_database_query_latency;
     280            0 :         let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });
     281              : 
     282            0 :         let res = self.with_conn(func).await;
     283              : 
     284            0 :         if let Err(err) = &res {
     285            0 :             let error_counter = &METRICS_REGISTRY
     286            0 :                 .metrics_group
     287            0 :                 .storage_controller_database_query_error;
     288            0 :             error_counter.inc(DatabaseQueryErrorLabelGroup {
     289            0 :                 error_type: err.error_label(),
     290            0 :                 operation: op,
     291            0 :             })
     292            0 :         }
     293              : 
     294            0 :         res
     295            0 :     }
     296              : 
     297              :     /// Call the provided function with a Diesel database connection in a retry loop
     298            0 :     async fn with_conn<'a, 'b, F, R>(&self, func: F) -> DatabaseResult<R>
     299            0 :     where
     300            0 :         F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
     301            0 :             + Send
     302            0 :             + std::marker::Sync
     303            0 :             + 'a,
     304            0 :         R: Send + 'b,
     305            0 :     {
     306            0 :         let mut retry_count = 0;
     307              :         loop {
     308            0 :             let mut conn = self.connection_pool.get().await?;
     309            0 :             match conn
     310            0 :                 .build_transaction()
     311            0 :                 .serializable()
     312            0 :                 .run(|c| func(c))
     313            0 :                 .await
     314              :             {
     315            0 :                 Ok(r) => break Ok(r),
     316              :                 Err(
     317            0 :                     err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
     318            0 :                         diesel::result::DatabaseErrorKind::SerializationFailure,
     319            0 :                         _,
     320            0 :                     )),
     321            0 :                 ) => {
     322            0 :                     retry_count += 1;
     323            0 :                     if retry_count > MAX_RETRIES {
     324            0 :                         tracing::error!(
     325            0 :                             "Exceeded max retries on SerializationFailure errors: {err:?}"
     326              :                         );
     327            0 :                         break Err(err);
     328              :                     } else {
     329              :                         // Retry on serialization errors: these are expected, because even though our
     330              :                         // transactions don't fight for the same rows, they will occasionally collide
     331              :                         // on index pages (e.g. increment_generation for unrelated shards can collide)
     332            0 :                         tracing::debug!("Retrying transaction on serialization failure {err:?}");
     333            0 :                         continue;
     334              :                     }
     335              :                 }
     336            0 :                 Err(e) => break Err(e),
     337              :             }
     338              :         }
     339            0 :     }
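
// Hypothetical sketch of how a new query method would use the retry wrapper above,
// mirroring `insert_node` below; `count_nodes` is not a real method and the
// `ListNodes` label is reused purely for illustration. Because serializable
// transactions are retried, the closure may be invoked several times: it must be a
// `Fn` whose only side effects happen inside the transaction it is given.
pub(crate) async fn count_nodes(&self) -> DatabaseResult<i64> {
    self.with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
        Box::pin(async move {
            // A read-only query; safe to re-run on a serialization failure.
            let n = crate::schema::nodes::table.count().get_result(conn).await?;
            Ok(n)
        })
    })
    .await
}
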
     340              : 
     341              :     /// When a node is first registered, persist it before using it for anything
     342            0 :     pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
     343            0 :         let np = &node.to_persistent();
     344            0 :         self.with_measured_conn(DatabaseOperation::InsertNode, move |conn| {
     345            0 :             Box::pin(async move {
     346            0 :                 diesel::insert_into(crate::schema::nodes::table)
     347            0 :                     .values(np)
     348            0 :                     .execute(conn)
     349            0 :                     .await?;
     350            0 :                 Ok(())
     351            0 :             })
     352            0 :         })
     353            0 :         .await
     354            0 :     }
     355              : 
     356              :     /// At startup, populate the list of nodes which our shards may be placed on
     357            0 :     pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
     358            0 :         let nodes: Vec<NodePersistence> = self
     359            0 :             .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
     360            0 :                 Box::pin(async move {
     361            0 :                     Ok(crate::schema::nodes::table
     362            0 :                         .load::<NodePersistence>(conn)
     363            0 :                         .await?)
     364            0 :                 })
     365            0 :             })
     366            0 :             .await?;
     367              : 
     368            0 :         tracing::info!("list_nodes: loaded {} nodes", nodes.len());
     369              : 
     370            0 :         Ok(nodes)
     371            0 :     }
     372              : 
     373            0 :     pub(crate) async fn update_node<V>(
     374            0 :         &self,
     375            0 :         input_node_id: NodeId,
     376            0 :         values: V,
     377            0 :     ) -> DatabaseResult<()>
     378            0 :     where
     379            0 :         V: diesel::AsChangeset<Target = crate::schema::nodes::table> + Clone + Send + Sync,
     380            0 :         V::Changeset: diesel::query_builder::QueryFragment<diesel::pg::Pg> + Send, // valid Postgres SQL
     381            0 :     {
     382              :         use crate::schema::nodes::dsl::*;
     383            0 :         let updated = self
     384            0 :             .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
     385            0 :                 let values = values.clone();
     386            0 :                 Box::pin(async move {
     387            0 :                     let updated = diesel::update(nodes)
     388            0 :                         .filter(node_id.eq(input_node_id.0 as i64))
     389            0 :                         .set(values)
     390            0 :                         .execute(conn)
     391            0 :                         .await?;
     392            0 :                     Ok(updated)
     393            0 :                 })
     394            0 :             })
     395            0 :             .await?;
     396              : 
     397            0 :         if updated != 1 {
     398            0 :             Err(DatabaseError::Logical(format!(
      399            0 :                 "Node {input_node_id:?} not found for update",
     400            0 :             )))
     401              :         } else {
     402            0 :             Ok(())
     403              :         }
     404            0 :     }
     405              : 
     406            0 :     pub(crate) async fn update_node_scheduling_policy(
     407            0 :         &self,
     408            0 :         input_node_id: NodeId,
     409            0 :         input_scheduling: NodeSchedulingPolicy,
     410            0 :     ) -> DatabaseResult<()> {
     411              :         use crate::schema::nodes::dsl::*;
     412            0 :         self.update_node(
     413            0 :             input_node_id,
     414            0 :             scheduling_policy.eq(String::from(input_scheduling)),
     415            0 :         )
     416            0 :         .await
     417            0 :     }
     418              : 
     419            0 :     pub(crate) async fn update_node_on_registration(
     420            0 :         &self,
     421            0 :         input_node_id: NodeId,
     422            0 :         input_https_port: Option<u16>,
     423            0 :     ) -> DatabaseResult<()> {
     424              :         use crate::schema::nodes::dsl::*;
     425            0 :         self.update_node(
     426            0 :             input_node_id,
     427            0 :             listen_https_port.eq(input_https_port.map(|x| x as i32)),
     428            0 :         )
     429            0 :         .await
     430            0 :     }
     431              : 
     432              :     /// At startup, load the high level state for shards, such as their config + policy.  This will
     433              :     /// be enriched at runtime with state discovered on pageservers.
     434              :     ///
     435              :     /// We exclude shards configured to be detached.  During startup, if we see any attached locations
     436              :     /// for such shards, they will automatically be detached as 'orphans'.
     437            0 :     pub(crate) async fn load_active_tenant_shards(
     438            0 :         &self,
     439            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     440              :         use crate::schema::tenant_shards::dsl::*;
     441            0 :         self.with_measured_conn(DatabaseOperation::ListTenantShards, move |conn| {
     442            0 :             Box::pin(async move {
     443            0 :                 let query = tenant_shards.filter(
     444            0 :                     placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
     445            0 :                 );
     446            0 :                 let result = query.load::<TenantShardPersistence>(conn).await?;
     447              : 
     448            0 :                 Ok(result)
     449            0 :             })
     450            0 :         })
     451            0 :         .await
     452            0 :     }
     453              : 
     454              :     /// When restoring a previously detached tenant into memory, load it from the database
     455            0 :     pub(crate) async fn load_tenant(
     456            0 :         &self,
     457            0 :         filter_tenant_id: TenantId,
     458            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     459              :         use crate::schema::tenant_shards::dsl::*;
     460            0 :         self.with_measured_conn(DatabaseOperation::LoadTenant, move |conn| {
     461            0 :             Box::pin(async move {
     462            0 :                 let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
     463            0 :                 let result = query.load::<TenantShardPersistence>(conn).await?;
     464              : 
     465            0 :                 Ok(result)
     466            0 :             })
     467            0 :         })
     468            0 :         .await
     469            0 :     }
     470              : 
     471              :     /// Tenants must be persisted before we schedule them for the first time.  This enables us
     472              :     /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
     473            0 :     pub(crate) async fn insert_tenant_shards(
     474            0 :         &self,
     475            0 :         shards: Vec<TenantShardPersistence>,
     476            0 :     ) -> DatabaseResult<()> {
     477              :         use crate::schema::{metadata_health, tenant_shards};
     478              : 
     479            0 :         let now = chrono::Utc::now();
     480            0 : 
     481            0 :         let metadata_health_records = shards
     482            0 :             .iter()
     483            0 :             .map(|t| MetadataHealthPersistence {
     484            0 :                 tenant_id: t.tenant_id.clone(),
     485            0 :                 shard_number: t.shard_number,
     486            0 :                 shard_count: t.shard_count,
     487            0 :                 healthy: true,
     488            0 :                 last_scrubbed_at: now,
     489            0 :             })
     490            0 :             .collect::<Vec<_>>();
     491            0 : 
     492            0 :         let shards = &shards;
     493            0 :         let metadata_health_records = &metadata_health_records;
     494            0 :         self.with_measured_conn(DatabaseOperation::InsertTenantShards, move |conn| {
     495            0 :             Box::pin(async move {
     496            0 :                 diesel::insert_into(tenant_shards::table)
     497            0 :                     .values(shards)
     498            0 :                     .execute(conn)
     499            0 :                     .await?;
     500              : 
     501            0 :                 diesel::insert_into(metadata_health::table)
     502            0 :                     .values(metadata_health_records)
     503            0 :                     .execute(conn)
     504            0 :                     .await?;
     505            0 :                 Ok(())
     506            0 :             })
     507            0 :         })
     508            0 :         .await
     509            0 :     }
     510              : 
     511              :     /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
     512              :     /// the tenant from memory on this server.
     513            0 :     pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
     514              :         use crate::schema::tenant_shards::dsl::*;
     515            0 :         self.with_measured_conn(DatabaseOperation::DeleteTenant, move |conn| {
     516            0 :             Box::pin(async move {
     517            0 :                 // `metadata_health` status (if exists) is also deleted based on the cascade behavior.
     518            0 :                 diesel::delete(tenant_shards)
     519            0 :                     .filter(tenant_id.eq(del_tenant_id.to_string()))
     520            0 :                     .execute(conn)
     521            0 :                     .await?;
     522            0 :                 Ok(())
     523            0 :             })
     524            0 :         })
     525            0 :         .await
     526            0 :     }
     527              : 
     528            0 :     pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
     529              :         use crate::schema::nodes::dsl::*;
     530            0 :         self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| {
     531            0 :             Box::pin(async move {
     532            0 :                 diesel::delete(nodes)
     533            0 :                     .filter(node_id.eq(del_node_id.0 as i64))
     534            0 :                     .execute(conn)
     535            0 :                     .await?;
     536              : 
     537            0 :                 Ok(())
     538            0 :             })
     539            0 :         })
     540            0 :         .await
     541            0 :     }
     542              : 
      543              :     /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
     544              :     /// batched increment of the generations of all tenants whose generation_pageserver is equal to
     545              :     /// the node that called /re-attach.
     546              :     #[tracing::instrument(skip_all, fields(node_id))]
     547              :     pub(crate) async fn re_attach(
     548              :         &self,
     549              :         input_node_id: NodeId,
     550              :     ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
     551              :         use crate::schema::nodes::dsl::{scheduling_policy, *};
     552              :         use crate::schema::tenant_shards::dsl::*;
     553              :         let updated = self
     554            0 :             .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
     555            0 :                 Box::pin(async move {
     556            0 :                     let rows_updated = diesel::update(tenant_shards)
     557            0 :                         .filter(generation_pageserver.eq(input_node_id.0 as i64))
     558            0 :                         .set(generation.eq(generation + 1))
     559            0 :                         .execute(conn)
     560            0 :                         .await?;
     561              : 
     562            0 :                     tracing::info!("Incremented {} tenants' generations", rows_updated);
     563              : 
     564              :                     // TODO: UPDATE+SELECT in one query
     565              : 
     566            0 :                     let updated = tenant_shards
     567            0 :                         .filter(generation_pageserver.eq(input_node_id.0 as i64))
     568            0 :                         .select(TenantShardPersistence::as_select())
     569            0 :                         .load(conn)
     570            0 :                         .await?;
     571              : 
     572              :                     // If the node went through a drain and restart phase before re-attaching,
      573              :                     // then reset its node scheduling policy to active.
     574            0 :                     diesel::update(nodes)
     575            0 :                         .filter(node_id.eq(input_node_id.0 as i64))
     576            0 :                         .filter(
     577            0 :                             scheduling_policy
     578            0 :                                 .eq(String::from(NodeSchedulingPolicy::PauseForRestart))
     579            0 :                                 .or(scheduling_policy
     580            0 :                                     .eq(String::from(NodeSchedulingPolicy::Draining)))
     581            0 :                                 .or(scheduling_policy
     582            0 :                                     .eq(String::from(NodeSchedulingPolicy::Filling))),
     583            0 :                         )
     584            0 :                         .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active)))
     585            0 :                         .execute(conn)
     586            0 :                         .await?;
     587              : 
     588            0 :                     Ok(updated)
     589            0 :                 })
     590            0 :             })
     591              :             .await?;
     592              : 
     593              :         let mut result = HashMap::new();
     594              :         for tsp in updated {
     595              :             let tenant_shard_id = TenantShardId {
     596              :                 tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
     597            0 :                     .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
     598              :                 shard_number: ShardNumber(tsp.shard_number as u8),
     599              :                 shard_count: ShardCount::new(tsp.shard_count as u8),
     600              :             };
     601              : 
     602              :             let Some(g) = tsp.generation else {
     603              :                 // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
     604              :                 // we only set generation_pageserver when setting generation.
     605              :                 return Err(DatabaseError::Logical(
     606              :                     "Generation should always be set after incrementing".to_string(),
     607              :                 ));
     608              :             };
     609              :             result.insert(tenant_shard_id, Generation::new(g as u32));
     610              :         }
     611              : 
     612              :         Ok(result)
     613              :     }
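
// Illustrative caller-side sketch (not from this file): the /re-attach HTTP handler
// would hand the freshly incremented generations straight back to the pageserver so
// it can reattach its shards. `handle_re_attach` is a hypothetical name.
async fn handle_re_attach(
    persistence: &Persistence,
    node_id: NodeId,
) -> DatabaseResult<Vec<(TenantShardId, Generation)>> {
    // One batched UPDATE per node, rather than one query per shard.
    let generations = persistence.re_attach(node_id).await?;
    Ok(generations.into_iter().collect())
}
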
     614              : 
     615              :     /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
     616              :     /// advancing generation number.  We also store the NodeId for which the generation was issued, so that in
     617              :     /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
     618            0 :     pub(crate) async fn increment_generation(
     619            0 :         &self,
     620            0 :         tenant_shard_id: TenantShardId,
     621            0 :         node_id: NodeId,
     622            0 :     ) -> anyhow::Result<Generation> {
     623              :         use crate::schema::tenant_shards::dsl::*;
     624            0 :         let updated = self
     625            0 :             .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
     626            0 :                 Box::pin(async move {
     627            0 :                     let updated = diesel::update(tenant_shards)
     628            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     629            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     630            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     631            0 :                         .set((
     632            0 :                             generation.eq(generation + 1),
     633            0 :                             generation_pageserver.eq(node_id.0 as i64),
     634            0 :                         ))
     635            0 :                         // TODO: only returning() the generation column
     636            0 :                         .returning(TenantShardPersistence::as_returning())
     637            0 :                         .get_result(conn)
     638            0 :                         .await?;
     639              : 
     640            0 :                     Ok(updated)
     641            0 :                 })
     642            0 :             })
     643            0 :             .await?;
     644              : 
      645              :         // Generation is always non-null in the result: if the generation column had been NULL, then we
      646              :         // should have experienced an SQL conflict error while executing a query that tries to increment it.
     647            0 :         debug_assert!(updated.generation.is_some());
     648            0 :         let Some(g) = updated.generation else {
     649            0 :             return Err(DatabaseError::Logical(
     650            0 :                 "Generation should always be set after incrementing".to_string(),
     651            0 :             )
     652            0 :             .into());
     653              :         };
     654              : 
     655            0 :         Ok(Generation::new(g as u32))
     656            0 :     }
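
// Sketch of the ordering described in the doc comment above (hypothetical helper,
// not the real Reconciler code): the generation must be durably incremented *before*
// the shard is attached to the new node.
async fn attach_shard(
    persistence: &Persistence,
    tenant_shard_id: TenantShardId,
    node_id: NodeId,
) -> anyhow::Result<Generation> {
    // 1. Durably issue a new generation, recording which node it is issued to.
    let generation = persistence.increment_generation(tenant_shard_id, node_id).await?;
    // 2. Only now is it safe to send the location_config request carrying `generation`
    //    to the pageserver (elided here).
    Ok(generation)
}
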
     657              : 
     658              :     /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
     659              :     /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
     660              :     /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
     661              :     /// latest generation)
     662              :     ///
     663              :     /// If the tenant doesn't exist, an empty vector is returned.
     664              :     ///
     665              :     /// Output is sorted by shard number
     666            0 :     pub(crate) async fn tenant_generations(
     667            0 :         &self,
     668            0 :         filter_tenant_id: TenantId,
     669            0 :     ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
     670              :         use crate::schema::tenant_shards::dsl::*;
     671            0 :         let rows = self
     672            0 :             .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
     673            0 :                 Box::pin(async move {
     674            0 :                     let result = tenant_shards
     675            0 :                         .filter(tenant_id.eq(filter_tenant_id.to_string()))
     676            0 :                         .select(TenantShardPersistence::as_select())
     677            0 :                         .order(shard_number)
     678            0 :                         .load(conn)
     679            0 :                         .await?;
     680            0 :                     Ok(result)
     681            0 :                 })
     682            0 :             })
     683            0 :             .await?;
     684              : 
     685            0 :         Ok(rows
     686            0 :             .into_iter()
     687            0 :             .map(|p| ShardGenerationState {
     688            0 :                 tenant_shard_id: p
     689            0 :                     .get_tenant_shard_id()
     690            0 :                     .expect("Corrupt tenant shard id in database"),
     691            0 :                 generation: p.generation.map(|g| Generation::new(g as u32)),
     692            0 :                 generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
     693            0 :             })
     694            0 :             .collect())
     695            0 :     }
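
// Sketch of the re-check pattern described above (hypothetical helper, simplified):
// read the generations, perform the timeline operation against the attached
// locations, then read them again. If anything moved, the operation may not be
// persistent in the latest generation and must be retried. Assumes both snapshots
// come from `tenant_generations`, so they share its shard-number ordering.
async fn generations_unchanged(
    persistence: &Persistence,
    tenant_id: TenantId,
    before: &[ShardGenerationState],
) -> DatabaseResult<bool> {
    let after = persistence.tenant_generations(tenant_id).await?;
    Ok(before.len() == after.len()
        && before.iter().zip(after.iter()).all(|(b, a)| {
            b.generation == a.generation && b.generation_pageserver == a.generation_pageserver
        }))
}
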
     696              : 
     697              :     /// Read the generation number of specific tenant shards
     698              :     ///
     699              :     /// Output is unsorted.  Output may not include values for all inputs, if they are missing in the database.
     700            0 :     pub(crate) async fn shard_generations(
     701            0 :         &self,
     702            0 :         mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
     703            0 :     ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
     704            0 :         let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);
     705              : 
     706              :         // We will chunk our input to avoid composing arbitrarily long `IN` clauses.  Typically we are
     707              :         // called with a single digit number of IDs, but in principle we could be called with tens
     708              :         // of thousands (all the shards on one pageserver) from the generation validation API.
     709            0 :         loop {
     710            0 :             // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
     711            0 :             // large query strings.
     712            0 :             let chunk_ids = tenant_shard_ids.by_ref().take(32);
     713            0 : 
     714            0 :             // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
     715            0 :             let in_clause = chunk_ids
     716            0 :                 .map(|tsid| {
     717            0 :                     format!(
     718            0 :                         "('{}', {}, {})",
     719            0 :                         tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
     720            0 :                     )
     721            0 :                 })
     722            0 :                 .join(",");
     723            0 : 
     724            0 :             // We are done when our iterator gives us nothing to filter on
     725            0 :             if in_clause.is_empty() {
     726            0 :                 break;
     727            0 :             }
     728            0 : 
     729            0 :             let in_clause = &in_clause;
     730            0 :             let chunk_rows = self
     731            0 :                 .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
     732            0 :                     Box::pin(async move {
     733              :                         // diesel doesn't support multi-column IN queries, so we compose raw SQL.  No escaping is required because
     734              :                         // the inputs are strongly typed and cannot carry any user-supplied raw string content.
     735            0 :                         let result : Vec<TenantShardPersistence> = diesel::sql_query(
     736            0 :                             format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
     737            0 :                         ).load(conn).await?;
     738              : 
     739            0 :                         Ok(result)
     740            0 :                     })
     741            0 :                 })
     742            0 :                 .await?;
     743            0 :             rows.extend(chunk_rows.into_iter())
     744              :         }
     745              : 
     746            0 :         Ok(rows
     747            0 :             .into_iter()
     748            0 :             .map(|tsp| {
     749            0 :                 (
     750            0 :                     tsp.get_tenant_shard_id()
     751            0 :                         .expect("Bad tenant ID in database"),
     752            0 :                     tsp.generation.map(|g| Generation::new(g as u32)),
     753            0 :                 )
     754            0 :             })
     755            0 :             .collect())
     756            0 :     }
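
// Stand-alone sketch of the chunking strategy used above: take the IDs 32 at a time
// and render each chunk as the comma-separated tuple list that goes into the raw IN
// clause. Purely illustrative; the real method interleaves this with the queries.
fn in_clauses(ids: &[TenantShardId]) -> Vec<String> {
    ids.chunks(32)
        .map(|chunk| {
            chunk
                .iter()
                .map(|tsid| {
                    format!(
                        "('{}', {}, {})",
                        tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
                    )
                })
                .collect::<Vec<_>>()
                .join(",")
        })
        .collect()
}
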
     757              : 
     758              :     #[allow(non_local_definitions)]
     759              :     /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
     760              :     ///
      761              :     /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
     762              :     /// API: use [`Self::increment_generation`] instead.  Setting the generation via this route is a one-time thing
     763              :     /// that we only do the first time a tenant is set to an attached policy via /location_config.
     764            0 :     pub(crate) async fn update_tenant_shard(
     765            0 :         &self,
     766            0 :         tenant: TenantFilter,
     767            0 :         input_placement_policy: Option<PlacementPolicy>,
     768            0 :         input_config: Option<TenantConfig>,
     769            0 :         input_generation: Option<Generation>,
     770            0 :         input_scheduling_policy: Option<ShardSchedulingPolicy>,
     771            0 :     ) -> DatabaseResult<()> {
     772              :         use crate::schema::tenant_shards::dsl::*;
     773              : 
     774            0 :         let tenant = &tenant;
     775            0 :         let input_placement_policy = &input_placement_policy;
     776            0 :         let input_config = &input_config;
     777            0 :         let input_generation = &input_generation;
     778            0 :         let input_scheduling_policy = &input_scheduling_policy;
     779            0 :         self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
     780            0 :             Box::pin(async move {
     781            0 :                 let query = match tenant {
     782            0 :                     TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
     783            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     784            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     785            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     786            0 :                         .into_boxed(),
     787            0 :                     TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
     788            0 :                         .filter(tenant_id.eq(input_tenant_id.to_string()))
     789            0 :                         .into_boxed(),
     790              :                 };
     791              : 
     792              :                 // Clear generation_pageserver if we are moving into a state where we won't have
     793              :                 // any attached pageservers.
     794            0 :                 let input_generation_pageserver = match input_placement_policy {
     795            0 :                     None | Some(PlacementPolicy::Attached(_)) => None,
     796            0 :                     Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
     797              :                 };
     798              : 
     799            0 :                 #[derive(AsChangeset)]
     800              :                 #[diesel(table_name = crate::schema::tenant_shards)]
     801              :                 struct ShardUpdate {
     802              :                     generation: Option<i32>,
     803              :                     placement_policy: Option<String>,
     804              :                     config: Option<String>,
     805              :                     scheduling_policy: Option<String>,
     806              :                     generation_pageserver: Option<Option<i64>>,
     807              :                 }
     808              : 
     809            0 :                 let update = ShardUpdate {
     810            0 :                     generation: input_generation.map(|g| g.into().unwrap() as i32),
     811            0 :                     placement_policy: input_placement_policy
     812            0 :                         .as_ref()
     813            0 :                         .map(|p| serde_json::to_string(&p).unwrap()),
     814            0 :                     config: input_config
     815            0 :                         .as_ref()
     816            0 :                         .map(|c| serde_json::to_string(&c).unwrap()),
     817            0 :                     scheduling_policy: input_scheduling_policy
     818            0 :                         .map(|p| serde_json::to_string(&p).unwrap()),
     819            0 :                     generation_pageserver: input_generation_pageserver,
     820            0 :                 };
     821            0 : 
     822            0 :                 query.set(update).execute(conn).await?;
     823              : 
     824            0 :                 Ok(())
     825            0 :             })
     826            0 :         })
     827            0 :         .await?;
     828              : 
     829            0 :         Ok(())
     830            0 :     }
     831              : 
     832              :     /// Note that passing None for a shard clears the preferred AZ (rather than leaving it unmodified)
     833            0 :     pub(crate) async fn set_tenant_shard_preferred_azs(
     834            0 :         &self,
     835            0 :         preferred_azs: Vec<(TenantShardId, Option<AvailabilityZone>)>,
     836            0 :     ) -> DatabaseResult<Vec<(TenantShardId, Option<AvailabilityZone>)>> {
     837              :         use crate::schema::tenant_shards::dsl::*;
     838              : 
     839            0 :         let preferred_azs = preferred_azs.as_slice();
     840            0 :         self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
     841            0 :             Box::pin(async move {
     842            0 :                 let mut shards_updated = Vec::default();
     843              : 
     844            0 :                 for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
     845            0 :                     let updated = diesel::update(tenant_shards)
     846            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     847            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     848            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     849            0 :                         .set(preferred_az_id.eq(preferred_az.as_ref().map(|az| az.0.clone())))
     850            0 :                         .execute(conn)
     851            0 :                         .await?;
     852              : 
     853            0 :                     if updated == 1 {
     854            0 :                         shards_updated.push((*tenant_shard_id, preferred_az.clone()));
     855            0 :                     }
     856              :                 }
     857              : 
     858            0 :                 Ok(shards_updated)
     859            0 :             })
     860            0 :         })
     861            0 :         .await
     862            0 :     }
     863              : 
     864            0 :     pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
     865              :         use crate::schema::tenant_shards::dsl::*;
     866            0 :         self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
     867            0 :             Box::pin(async move {
     868            0 :                 let updated = diesel::update(tenant_shards)
     869            0 :                     .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     870            0 :                     .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     871            0 :                     .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     872            0 :                     .set((
     873            0 :                         generation_pageserver.eq(Option::<i64>::None),
     874            0 :                         placement_policy
     875            0 :                             .eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
     876            0 :                     ))
     877            0 :                     .execute(conn)
     878            0 :                     .await?;
     879              : 
     880            0 :                 Ok(updated)
     881            0 :             })
     882            0 :         })
     883            0 :         .await?;
     884              : 
     885            0 :         Ok(())
     886            0 :     }
     887              : 
     888              :     // When we start shard splitting, we must durably mark the tenant so that
     889              :     // on restart, we know that we must go through recovery.
     890              :     //
     891              :     // We create the child shards here, so that they will be available for increment_generation calls
     892              :     // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
     893            0 :     pub(crate) async fn begin_shard_split(
     894            0 :         &self,
     895            0 :         old_shard_count: ShardCount,
     896            0 :         split_tenant_id: TenantId,
     897            0 :         parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
     898            0 :     ) -> DatabaseResult<()> {
     899              :         use crate::schema::tenant_shards::dsl::*;
     900            0 :         let parent_to_children = parent_to_children.as_slice();
     901            0 :         self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| {
     902            0 :             Box::pin(async move {
     903              :             // Mark parent shards as splitting
     904              : 
     905            0 :             let updated = diesel::update(tenant_shards)
     906            0 :                 .filter(tenant_id.eq(split_tenant_id.to_string()))
     907            0 :                 .filter(shard_count.eq(old_shard_count.literal() as i32))
     908            0 :                 .set((splitting.eq(1),))
     909            0 :                 .execute(conn).await?;
     910            0 :             if u8::try_from(updated)
     911            0 :                 .map_err(|_| DatabaseError::Logical(
     912            0 :                     format!("Overflow existing shard count {} while splitting", updated))
     913            0 :                 )? != old_shard_count.count() {
     914              :                 // Perhaps a deletion or another split raced with this attempt to split, mutating
     915              :                 // the parent shards that we intend to split. In this case the split request should fail.
     916            0 :                 return Err(DatabaseError::Logical(
     917            0 :                     format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
     918            0 :                 ));
     919            0 :             }
     920            0 : 
     921            0 :             // FIXME: spurious clone to sidestep closure move rules
     922            0 :             let parent_to_children = parent_to_children.to_vec();
     923              : 
     924              :             // Insert child shards
     925            0 :             for (parent_shard_id, children) in parent_to_children {
     926            0 :                 let mut parent = crate::schema::tenant_shards::table
     927            0 :                     .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
     928            0 :                     .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
     929            0 :                     .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
     930            0 :                     .load::<TenantShardPersistence>(conn).await?;
     931            0 :                 let parent = if parent.len() != 1 {
     932            0 :                     return Err(DatabaseError::Logical(format!(
     933            0 :                         "Parent shard {parent_shard_id} not found"
     934            0 :                     )));
     935              :                 } else {
     936            0 :                     parent.pop().unwrap()
     937              :                 };
     938            0 :                 for mut shard in children {
     939              :                     // Carry the parent's generation into the child
     940            0 :                     shard.generation = parent.generation;
     941            0 : 
     942            0 :                     debug_assert!(shard.splitting == SplitState::Splitting);
     943            0 :                     diesel::insert_into(tenant_shards)
     944            0 :                         .values(shard)
     945            0 :                         .execute(conn).await?;
     946              :                 }
     947              :             }
     948              : 
     949            0 :             Ok(())
     950            0 :         })
     951            0 :         })
     952            0 :         .await
     953            0 :     }
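                      : 
                      :     // Illustrative sketch, not part of the original file: how a caller is expected to drive
                      :     // the three split-related methods. `remote_split_succeeded` stands in for the outcome of
                      :     // the pageserver-side split work; only the database calls are real, the method itself is
                      :     // hypothetical.
                      :     #[allow(dead_code)]
                      :     async fn example_shard_split_lifecycle(
                      :         &self,
                      :         split_tenant_id: TenantId,
                      :         old_shard_count: ShardCount,
                      :         new_shard_count: ShardCount,
                      :         parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
                      :         remote_split_succeeded: bool,
                      :     ) -> DatabaseResult<()> {
                      :         // 1. Durably mark parents as splitting and insert child rows carrying the parents' generations.
                      :         self.begin_shard_split(old_shard_count, split_tenant_id, parent_to_children)
                      :             .await?;
                      : 
                      :         if remote_split_succeeded {
                      :             // 2a. Drop the parent rows and clear the splitting marker on the children.
                      :             self.complete_shard_split(split_tenant_id, old_shard_count)
                      :                 .await?;
                      :         } else {
                      :             // 2b. Delete the child rows and return the parents to SplitState::Idle.
                      :             let _status = self
                      :                 .abort_shard_split(split_tenant_id, new_shard_count)
                      :                 .await?;
                      :         }
                      :         Ok(())
                      :     }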
     954              : 
     955              :     // When we finish shard splitting, we must atomically clean up the old (parent) shards
     956              :     // and clear the splitting marker on the child shards.
     957            0 :     pub(crate) async fn complete_shard_split(
     958            0 :         &self,
     959            0 :         split_tenant_id: TenantId,
     960            0 :         old_shard_count: ShardCount,
     961            0 :     ) -> DatabaseResult<()> {
     962              :         use crate::schema::tenant_shards::dsl::*;
     963            0 :         self.with_measured_conn(DatabaseOperation::CompleteShardSplit, move |conn| {
     964            0 :             Box::pin(async move {
     965            0 :                 // Drop parent shards
     966            0 :                 diesel::delete(tenant_shards)
     967            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     968            0 :                     .filter(shard_count.eq(old_shard_count.literal() as i32))
     969            0 :                     .execute(conn)
     970            0 :                     .await?;
     971              : 
     972            0 :                 // Clear splitting flag
     973            0 :                 let updated = diesel::update(tenant_shards)
     974            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     975            0 :                     .set((splitting.eq(0),))
     976            0 :                     .execute(conn)
     977            0 :                     .await?;
     978            0 :                 debug_assert!(updated > 0);
     979              : 
     980            0 :                 Ok(())
     981            0 :             })
     982            0 :         })
     983            0 :         .await
     984            0 :     }
     985              : 
     986              :     /// Used when the remote part of a shard split failed: we will revert the database state to have only
     987              :     /// the parent shards, with SplitState::Idle.
     988            0 :     pub(crate) async fn abort_shard_split(
     989            0 :         &self,
     990            0 :         split_tenant_id: TenantId,
     991            0 :         new_shard_count: ShardCount,
     992            0 :     ) -> DatabaseResult<AbortShardSplitStatus> {
     993              :         use crate::schema::tenant_shards::dsl::*;
     994            0 :         self.with_measured_conn(DatabaseOperation::AbortShardSplit, move |conn| {
     995            0 :             Box::pin(async move {
     996              :                 // Clear the splitting state on parent shards
     997            0 :                 let updated = diesel::update(tenant_shards)
     998            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     999            0 :                     .filter(shard_count.ne(new_shard_count.literal() as i32))
    1000            0 :                     .set((splitting.eq(0),))
    1001            0 :                     .execute(conn)
    1002            0 :                     .await?;
    1003              : 
    1004              :                 // Parent shards are already gone: we cannot abort.
    1005            0 :                 if updated == 0 {
    1006            0 :                     return Ok(AbortShardSplitStatus::Complete);
    1007            0 :                 }
    1008            0 : 
    1009            0 :                 // Sanity check: if parent shards were present, their cardinality should
    1010            0 :                 // be less than the number of child shards.
    1011            0 :                 if updated >= new_shard_count.count() as usize {
    1012            0 :                     return Err(DatabaseError::Logical(format!(
    1013            0 :                         "Unexpected parent shard count {updated} while aborting split to \
    1014            0 :                             count {new_shard_count:?} on tenant {split_tenant_id}"
    1015            0 :                     )));
    1016            0 :                 }
    1017            0 : 
    1018            0 :                 // Erase child shards
    1019            0 :                 diesel::delete(tenant_shards)
    1020            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1021            0 :                     .filter(shard_count.eq(new_shard_count.literal() as i32))
    1022            0 :                     .execute(conn)
    1023            0 :                     .await?;
    1024              : 
    1025            0 :                 Ok(AbortShardSplitStatus::Aborted)
    1026            0 :             })
    1027            0 :         })
    1028            0 :         .await
    1029            0 :     }
    1030              : 
    1031              :     /// Stores all the latest metadata health updates durably. Updates existing entries on conflict.
    1032              :     ///
    1033              :     /// **Correctness:** `healthy_records` and `unhealthy_records` should all belong to tenant shards managed by the storage controller.
    1034              :     #[allow(dead_code)]
    1035            0 :     pub(crate) async fn update_metadata_health_records(
    1036            0 :         &self,
    1037            0 :         healthy_records: Vec<MetadataHealthPersistence>,
    1038            0 :         unhealthy_records: Vec<MetadataHealthPersistence>,
    1039            0 :         now: chrono::DateTime<chrono::Utc>,
    1040            0 :     ) -> DatabaseResult<()> {
    1041              :         use crate::schema::metadata_health::dsl::*;
    1042              : 
    1043            0 :         let healthy_records = healthy_records.as_slice();
    1044            0 :         let unhealthy_records = unhealthy_records.as_slice();
    1045            0 :         self.with_measured_conn(DatabaseOperation::UpdateMetadataHealth, move |conn| {
    1046            0 :             Box::pin(async move {
    1047            0 :                 diesel::insert_into(metadata_health)
    1048            0 :                     .values(healthy_records)
    1049            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
    1050            0 :                     .do_update()
    1051            0 :                     .set((healthy.eq(true), last_scrubbed_at.eq(now)))
    1052            0 :                     .execute(conn)
    1053            0 :                     .await?;
    1054              : 
    1055            0 :                 diesel::insert_into(metadata_health)
    1056            0 :                     .values(unhealthy_records)
    1057            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
    1058            0 :                     .do_update()
    1059            0 :                     .set((healthy.eq(false), last_scrubbed_at.eq(now)))
    1060            0 :                     .execute(conn)
    1061            0 :                     .await?;
    1062            0 :                 Ok(())
    1063            0 :             })
    1064            0 :         })
    1065            0 :         .await
    1066            0 :     }
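                      : 
                      :     // Illustrative sketch, not part of the original file: building the two record lists with
                      :     // `MetadataHealthPersistence::new` (defined further down) and persisting them via the
                      :     // upsert above, all stamped with the same scrub timestamp. The method name is hypothetical.
                      :     #[allow(dead_code)]
                      :     async fn example_record_scrub_results(
                      :         &self,
                      :         healthy: Vec<TenantShardId>,
                      :         unhealthy: Vec<TenantShardId>,
                      :     ) -> DatabaseResult<()> {
                      :         let now = chrono::Utc::now();
                      :         let healthy_records = healthy
                      :             .into_iter()
                      :             .map(|id| MetadataHealthPersistence::new(id, true, now))
                      :             .collect();
                      :         let unhealthy_records = unhealthy
                      :             .into_iter()
                      :             .map(|id| MetadataHealthPersistence::new(id, false, now))
                      :             .collect();
                      :         self.update_metadata_health_records(healthy_records, unhealthy_records, now)
                      :             .await
                      :     }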
    1067              : 
    1068              :     /// Lists all the metadata health records.
    1069              :     #[allow(dead_code)]
    1070            0 :     pub(crate) async fn list_metadata_health_records(
    1071            0 :         &self,
    1072            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1073            0 :         self.with_measured_conn(DatabaseOperation::ListMetadataHealth, move |conn| {
    1074            0 :             Box::pin(async {
    1075            0 :                 Ok(crate::schema::metadata_health::table
    1076            0 :                     .load::<MetadataHealthPersistence>(conn)
    1077            0 :                     .await?)
    1078            0 :             })
    1079            0 :         })
    1080            0 :         .await
    1081            0 :     }
    1082              : 
    1083              :     /// Lists all the metadata health records that are unhealthy.
    1084              :     #[allow(dead_code)]
    1085            0 :     pub(crate) async fn list_unhealthy_metadata_health_records(
    1086            0 :         &self,
    1087            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1088              :         use crate::schema::metadata_health::dsl::*;
    1089            0 :         self.with_measured_conn(
    1090            0 :             DatabaseOperation::ListMetadataHealthUnhealthy,
    1091            0 :             move |conn| {
    1092            0 :                 Box::pin(async {
    1093            0 :                     DatabaseResult::Ok(
    1094            0 :                         crate::schema::metadata_health::table
    1095            0 :                             .filter(healthy.eq(false))
    1096            0 :                             .load::<MetadataHealthPersistence>(conn)
    1097            0 :                             .await?,
    1098              :                     )
    1099            0 :                 })
    1100            0 :             },
    1101            0 :         )
    1102            0 :         .await
    1103            0 :     }
    1104              : 
    1105              :     /// Lists all the metadata health records that have not been updated since an `earlier` time.
    1106              :     #[allow(dead_code)]
    1107            0 :     pub(crate) async fn list_outdated_metadata_health_records(
    1108            0 :         &self,
    1109            0 :         earlier: chrono::DateTime<chrono::Utc>,
    1110            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1111              :         use crate::schema::metadata_health::dsl::*;
    1112              : 
    1113            0 :         self.with_measured_conn(DatabaseOperation::ListMetadataHealthOutdated, move |conn| {
    1114            0 :             Box::pin(async move {
    1115            0 :                 let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
    1116            0 :                 let res = query.load::<MetadataHealthPersistence>(conn).await?;
    1117              : 
    1118            0 :                 Ok(res)
    1119            0 :             })
    1120            0 :         })
    1121            0 :         .await
    1122            0 :     }
    1123              : 
    1124              :     /// Get the current entry from the `leader` table if one exists.
    1125              :     /// It is an error for the table to contain more than one entry.
    1126            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    1127            0 :         let mut leader: Vec<ControllerPersistence> = self
    1128            0 :             .with_measured_conn(DatabaseOperation::GetLeader, move |conn| {
    1129            0 :                 Box::pin(async move {
    1130            0 :                     Ok(crate::schema::controllers::table
    1131            0 :                         .load::<ControllerPersistence>(conn)
    1132            0 :                         .await?)
    1133            0 :                 })
    1134            0 :             })
    1135            0 :             .await?;
    1136              : 
    1137            0 :         if leader.len() > 1 {
    1138            0 :             return Err(DatabaseError::Logical(format!(
    1139            0 :                 "More than one entry present in the leader table: {leader:?}"
    1140            0 :             )));
    1141            0 :         }
    1142            0 : 
    1143            0 :         Ok(leader.pop())
    1144            0 :     }
    1145              : 
    1146              :     /// Update the leader entry with compare-exchange semantics. If `prev` does not
    1147              :     /// match the current leader entry, the update is treated as a failure.
    1148              :     /// When `prev` is not specified, a fresh entry is inserted unconditionally.
    1149            0 :     pub(crate) async fn update_leader(
    1150            0 :         &self,
    1151            0 :         prev: Option<ControllerPersistence>,
    1152            0 :         new: ControllerPersistence,
    1153            0 :     ) -> DatabaseResult<()> {
    1154              :         use crate::schema::controllers::dsl::*;
    1155              : 
    1156            0 :         let updated = self
    1157            0 :             .with_measured_conn(DatabaseOperation::UpdateLeader, move |conn| {
    1158            0 :                 let prev = prev.clone();
    1159            0 :                 let new = new.clone();
    1160            0 :                 Box::pin(async move {
    1161            0 :                     let updated = match &prev {
    1162            0 :                         Some(prev) => {
    1163            0 :                             diesel::update(controllers)
    1164            0 :                                 .filter(address.eq(prev.address.clone()))
    1165            0 :                                 .filter(started_at.eq(prev.started_at))
    1166            0 :                                 .set((
    1167            0 :                                     address.eq(new.address.clone()),
    1168            0 :                                     started_at.eq(new.started_at),
    1169            0 :                                 ))
    1170            0 :                                 .execute(conn)
    1171            0 :                                 .await?
    1172              :                         }
    1173              :                         None => {
    1174            0 :                             diesel::insert_into(controllers)
    1175            0 :                                 .values(new.clone())
    1176            0 :                                 .execute(conn)
    1177            0 :                                 .await?
    1178              :                         }
    1179              :                     };
    1180              : 
    1181            0 :                     Ok(updated)
    1182            0 :                 })
    1183            0 :             })
    1184            0 :             .await?;
    1185              : 
    1186            0 :         if updated == 0 {
    1187            0 :             return Err(DatabaseError::Logical(
    1188            0 :                 "Leader table update failed".to_string(),
    1189            0 :             ));
    1190            0 :         }
    1191            0 : 
    1192            0 :         Ok(())
    1193            0 :     }
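                      : 
                      :     // Illustrative sketch, not part of the original file: the compare-exchange pattern that
                      :     // `get_leader` and `update_leader` are designed for. A starting controller observes the
                      :     // current leader and only installs itself if that observation still holds at write time.
                      :     // The method name and the bool return value are assumptions for illustration.
                      :     #[allow(dead_code)]
                      :     async fn example_try_become_leader(&self, my_address: String) -> DatabaseResult<bool> {
                      :         // Observe the current leader entry, if any.
                      :         let observed = self.get_leader().await?;
                      : 
                      :         let candidate = ControllerPersistence {
                      :             address: my_address,
                      :             started_at: chrono::Utc::now(),
                      :         };
                      : 
                      :         // The update only takes effect if the leader row still matches `observed`;
                      :         // losing the race surfaces as a logical error from `update_leader`.
                      :         match self.update_leader(observed, candidate).await {
                      :             Ok(()) => Ok(true),
                      :             Err(DatabaseError::Logical(_)) => Ok(false),
                      :             Err(e) => Err(e),
                      :         }
                      :     }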
    1194              : 
    1195              :     /// At startup, load the full set of safekeepers from the database.
    1196            0 :     pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
    1197            0 :         let safekeepers: Vec<SafekeeperPersistence> = self
    1198            0 :             .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
    1199            0 :                 Box::pin(async move {
    1200            0 :                     Ok(crate::schema::safekeepers::table
    1201            0 :                         .load::<SafekeeperPersistence>(conn)
    1202            0 :                         .await?)
    1203            0 :                 })
    1204            0 :             })
    1205            0 :             .await?;
    1206              : 
    1207            0 :         tracing::info!("list_safekeepers: loaded {} nodes", safekeepers.len());
    1208              : 
    1209            0 :         Ok(safekeepers)
    1210            0 :     }
    1211              : 
    1212            0 :     pub(crate) async fn safekeeper_upsert(
    1213            0 :         &self,
    1214            0 :         record: SafekeeperUpsert,
    1215            0 :     ) -> Result<(), DatabaseError> {
    1216              :         use crate::schema::safekeepers::dsl::*;
    1217              : 
    1218            0 :         self.with_conn(move |conn| {
    1219            0 :             let record = record.clone();
    1220            0 :             Box::pin(async move {
    1221            0 :                 let bind = record
    1222            0 :                     .as_insert_or_update()
    1223            0 :                     .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
    1224              : 
    1225            0 :                 let inserted_updated = diesel::insert_into(safekeepers)
    1226            0 :                     .values(&bind)
    1227            0 :                     .on_conflict(id)
    1228            0 :                     .do_update()
    1229            0 :                     .set(&bind)
    1230            0 :                     .execute(conn)
    1231            0 :                     .await?;
    1232              : 
    1233            0 :                 if inserted_updated != 1 {
    1234            0 :                     return Err(DatabaseError::Logical(format!(
    1235            0 :                         "unexpected number of rows ({})",
    1236            0 :                         inserted_updated
    1237            0 :                     )));
    1238            0 :                 }
    1239            0 : 
    1240            0 :                 Ok(())
    1241            0 :             })
    1242            0 :         })
    1243            0 :         .await
    1244            0 :     }
    1245              : 
    1246            0 :     pub(crate) async fn set_safekeeper_scheduling_policy(
    1247            0 :         &self,
    1248            0 :         id_: i64,
    1249            0 :         scheduling_policy_: SkSchedulingPolicy,
    1250            0 :     ) -> Result<(), DatabaseError> {
    1251              :         use crate::schema::safekeepers::dsl::*;
    1252              : 
    1253            0 :         self.with_conn(move |conn| {
    1254            0 :             Box::pin(async move {
    1255            0 :                 #[derive(Insertable, AsChangeset)]
    1256              :                 #[diesel(table_name = crate::schema::safekeepers)]
    1257              :                 struct UpdateSkSchedulingPolicy<'a> {
    1258              :                     id: i64,
    1259              :                     scheduling_policy: &'a str,
    1260              :                 }
    1261            0 :                 let scheduling_policy_ = String::from(scheduling_policy_);
    1262              : 
    1263            0 :                 let rows_affected = diesel::update(safekeepers.filter(id.eq(id_)))
    1264            0 :                     .set(scheduling_policy.eq(scheduling_policy_))
    1265            0 :                     .execute(conn)
    1266            0 :                     .await?;
    1267              : 
    1268            0 :                 if rows_affected != 1 {
    1269            0 :                     return Err(DatabaseError::Logical(format!(
    1270            0 :                         "unexpected number of rows ({rows_affected})",
    1271            0 :                     )));
    1272            0 :                 }
    1273            0 : 
    1274            0 :                 Ok(())
    1275            0 :             })
    1276            0 :         })
    1277            0 :         .await
    1278            0 :     }
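                      : 
                      :     // Illustrative sketch, not part of the original file: registering a safekeeper with the
                      :     // upsert above and then setting its scheduling policy explicitly, since the upsert
                      :     // deliberately never touches that column. The method name is hypothetical.
                      :     #[allow(dead_code)]
                      :     async fn example_register_safekeeper(
                      :         &self,
                      :         record: SafekeeperUpsert,
                      :         initial_policy: SkSchedulingPolicy,
                      :     ) -> Result<(), DatabaseError> {
                      :         let safekeeper_id = record.id;
                      : 
                      :         // Insert the row, or update the existing row with the same id.
                      :         self.safekeeper_upsert(record).await?;
                      : 
                      :         // Scheduling policy is managed separately from the upsert payload.
                      :         self.set_safekeeper_scheduling_policy(safekeeper_id, initial_policy)
                      :             .await
                      :     }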
    1279              : }
    1280              : 
    1281            0 : pub(crate) fn load_certs() -> anyhow::Result<Arc<rustls::RootCertStore>> {
    1282            0 :     let der_certs = rustls_native_certs::load_native_certs();
    1283            0 : 
    1284            0 :     if !der_certs.errors.is_empty() {
    1285            0 :         anyhow::bail!("could not parse certificates: {:?}", der_certs.errors);
    1286            0 :     }
    1287            0 : 
    1288            0 :     let mut store = rustls::RootCertStore::empty();
    1289            0 :     store.add_parsable_certificates(der_certs.certs);
    1290            0 :     Ok(Arc::new(store))
    1291            0 : }
    1292              : 
    1293              : #[derive(Debug)]
    1294              : /// A verifier that accepts all certificates (but still logs any validation error)
    1295              : struct AcceptAll(Arc<WebPkiServerVerifier>);
    1296              : impl ServerCertVerifier for AcceptAll {
    1297            0 :     fn verify_server_cert(
    1298            0 :         &self,
    1299            0 :         end_entity: &rustls::pki_types::CertificateDer<'_>,
    1300            0 :         intermediates: &[rustls::pki_types::CertificateDer<'_>],
    1301            0 :         server_name: &rustls::pki_types::ServerName<'_>,
    1302            0 :         ocsp_response: &[u8],
    1303            0 :         now: rustls::pki_types::UnixTime,
    1304            0 :     ) -> Result<ServerCertVerified, rustls::Error> {
    1305            0 :         let r =
    1306            0 :             self.0
    1307            0 :                 .verify_server_cert(end_entity, intermediates, server_name, ocsp_response, now);
    1308            0 :         if let Err(err) = r {
    1309            0 :             tracing::info!(
    1310              :                 ?server_name,
    1311            0 :                 "ignoring db connection TLS validation error: {err:?}"
    1312              :             );
    1313            0 :             return Ok(ServerCertVerified::assertion());
    1314            0 :         }
    1315            0 :         r
    1316            0 :     }
    1317            0 :     fn verify_tls12_signature(
    1318            0 :         &self,
    1319            0 :         message: &[u8],
    1320            0 :         cert: &rustls::pki_types::CertificateDer<'_>,
    1321            0 :         dss: &rustls::DigitallySignedStruct,
    1322            0 :     ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
    1323            0 :         self.0.verify_tls12_signature(message, cert, dss)
    1324            0 :     }
    1325            0 :     fn verify_tls13_signature(
    1326            0 :         &self,
    1327            0 :         message: &[u8],
    1328            0 :         cert: &rustls::pki_types::CertificateDer<'_>,
    1329            0 :         dss: &rustls::DigitallySignedStruct,
    1330            0 :     ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
    1331            0 :         self.0.verify_tls13_signature(message, cert, dss)
    1332            0 :     }
    1333            0 :     fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
    1334            0 :         self.0.supported_verify_schemes()
    1335            0 :     }
    1336              : }
    1337              : 
    1338              : /// Loads the root certificates and constructs a client config suitable for connecting.
    1339              : /// This function is blocking.
    1340            0 : fn client_config_with_root_certs() -> anyhow::Result<rustls::ClientConfig> {
    1341            0 :     let client_config =
    1342            0 :         rustls::ClientConfig::builder_with_provider(Arc::new(ring::default_provider()))
    1343            0 :             .with_safe_default_protocol_versions()
    1344            0 :             .expect("ring should support the default protocol versions");
    1345              :     static DO_CERT_CHECKS: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
    1346            0 :     let do_cert_checks =
    1347            0 :         DO_CERT_CHECKS.get_or_init(|| std::env::var("STORCON_DB_CERT_CHECKS").is_ok());
    1348            0 :     Ok(if *do_cert_checks {
    1349            0 :         client_config
    1350            0 :             .with_root_certificates(load_certs()?)
    1351            0 :             .with_no_client_auth()
    1352              :     } else {
    1353            0 :         let verifier = AcceptAll(
    1354              :             WebPkiServerVerifier::builder_with_provider(
    1355            0 :                 load_certs()?,
    1356            0 :                 Arc::new(ring::default_provider()),
    1357            0 :             )
    1358            0 :             .build()?,
    1359              :         );
    1360            0 :         client_config
    1361            0 :             .dangerous()
    1362            0 :             .with_custom_certificate_verifier(Arc::new(verifier))
    1363            0 :             .with_no_client_auth()
    1364              :     })
    1365            0 : }
    1366              : 
    1367            0 : fn establish_connection_rustls(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
    1368            0 :     let fut = async {
    1369              :         // We first set up the way we want rustls to work.
    1370            0 :         let rustls_config = client_config_with_root_certs()
    1371            0 :             .map_err(|err| ConnectionError::BadConnection(format!("{err:?}")))?;
    1372            0 :         let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
    1373            0 :         let (client, conn) = tokio_postgres::connect(config, tls)
    1374            0 :             .await
    1375            0 :             .map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
    1376              : 
    1377            0 :         AsyncPgConnection::try_from_client_and_connection(client, conn).await
    1378            0 :     };
    1379            0 :     fut.boxed()
    1380            0 : }
    1381              : 
    1382              : #[cfg_attr(test, test)]
    1383            1 : fn test_config_debug_censors_password() {
    1384            1 :     let has_pw =
    1385            1 :         "host=/var/lib/postgresql,localhost port=1234 user=specialuser password='NOT ALLOWED TAG'";
    1386            1 :     let has_pw_cfg = has_pw.parse::<tokio_postgres::Config>().unwrap();
    1387            1 :     assert!(format!("{has_pw_cfg:?}").contains("specialuser"));
    1388              :     // Ensure that the password is not leaked by the debug impl
    1389            1 :     assert!(!format!("{has_pw_cfg:?}").contains("NOT ALLOWED TAG"));
    1390            1 : }
    1391              : 
    1392            0 : fn log_postgres_connstr_info(config_str: &str) -> anyhow::Result<()> {
    1393            0 :     let config = config_str
    1394            0 :         .parse::<tokio_postgres::Config>()
    1395            0 :         .map_err(|_e| anyhow::anyhow!("Couldn't parse config str"))?;
    1396              :     // We use debug formatting here, and use a unit test to ensure that we don't leak the password.
    1397              :     // To make extra sure the test gets ran, run it every time the function is called
    1398              :     // To make extra sure the test gets run, run it every time the function is called
    1399              :     #[cfg(not(test))]
    1400            0 :     test_config_debug_censors_password();
    1401            0 :     tracing::info!("database connection config: {config:?}");
    1402            0 :     Ok(())
    1403            0 : }
    1404              : 
    1405              : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
    1406              : #[derive(
    1407            0 :     QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
    1408              : )]
    1409              : #[diesel(table_name = crate::schema::tenant_shards)]
    1410              : pub(crate) struct TenantShardPersistence {
    1411              :     #[serde(default)]
    1412              :     pub(crate) tenant_id: String,
    1413              :     #[serde(default)]
    1414              :     pub(crate) shard_number: i32,
    1415              :     #[serde(default)]
    1416              :     pub(crate) shard_count: i32,
    1417              :     #[serde(default)]
    1418              :     pub(crate) shard_stripe_size: i32,
    1419              : 
    1420              :     // Latest generation number: next time we attach, increment this
    1421              :     // and use the incremented number when attaching.
    1422              :     //
    1423              :     // Generation is only None when first onboarding a tenant, where it may
    1424              :     // be in PlacementPolicy::Secondary and therefore have no valid generation state.
    1425              :     pub(crate) generation: Option<i32>,
    1426              : 
    1427              :     // Currently attached pageserver
    1428              :     #[serde(rename = "pageserver")]
    1429              :     pub(crate) generation_pageserver: Option<i64>,
    1430              : 
    1431              :     #[serde(default)]
    1432              :     pub(crate) placement_policy: String,
    1433              :     #[serde(default)]
    1434              :     pub(crate) splitting: SplitState,
    1435              :     #[serde(default)]
    1436              :     pub(crate) config: String,
    1437              :     #[serde(default)]
    1438              :     pub(crate) scheduling_policy: String,
    1439              : 
    1440              :     // Hint that we should attempt to schedule this tenant shard in the given
    1441              :     // availability zone in order to minimise the chances of cross-AZ communication
    1442              :     // with compute.
    1443              :     pub(crate) preferred_az_id: Option<String>,
    1444              : }
    1445              : 
    1446              : impl TenantShardPersistence {
    1447            0 :     pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
    1448            0 :         if self.shard_count == 0 {
    1449            0 :             Ok(ShardIdentity::unsharded())
    1450              :         } else {
    1451            0 :             Ok(ShardIdentity::new(
    1452            0 :                 ShardNumber(self.shard_number as u8),
    1453            0 :                 ShardCount::new(self.shard_count as u8),
    1454            0 :                 ShardStripeSize(self.shard_stripe_size as u32),
    1455            0 :             )?)
    1456              :         }
    1457            0 :     }
    1458              : 
    1459            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    1460            0 :         Ok(TenantShardId {
    1461            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1462            0 :             shard_number: ShardNumber(self.shard_number as u8),
    1463            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    1464              :         })
    1465            0 :     }
    1466              : }
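                      : 
                      : // Minimal illustrative test, not part of the original file: a persisted row with
                      : // shard_count == 0 is interpreted as an unsharded tenant, and the identifier fields
                      : // round-trip through `get_tenant_shard_id`. The field values are placeholders, and the
                      : // test assumes the usual `TenantId::generate()` helper from `utils::id` is available.
                      : #[cfg(test)]
                      : #[test]
                      : fn test_tenant_shard_persistence_unsharded() {
                      :     let tenant_id = TenantId::generate();
                      :     let tsp = TenantShardPersistence {
                      :         tenant_id: tenant_id.to_string(),
                      :         shard_number: 0,
                      :         shard_count: 0,
                      :         shard_stripe_size: 0,
                      :         generation: Some(1),
                      :         generation_pageserver: None,
                      :         placement_policy: String::new(),
                      :         splitting: SplitState::default(),
                      :         config: String::new(),
                      :         scheduling_policy: String::new(),
                      :         preferred_az_id: None,
                      :     };
                      : 
                      :     // shard_count == 0 denotes an unsharded tenant.
                      :     assert!(tsp.get_shard_identity().is_ok());
                      : 
                      :     let tenant_shard_id = tsp.get_tenant_shard_id().unwrap();
                      :     assert_eq!(tenant_shard_id.tenant_id, tenant_id);
                      :     assert_eq!(tenant_shard_id.shard_number.0, 0);
                      :     assert_eq!(tenant_shard_id.shard_count.literal(), 0);
                      : }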
    1467              : 
    1468              : /// Parts of [`crate::node::Node`] that are stored durably
    1469            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
    1470              : #[diesel(table_name = crate::schema::nodes)]
    1471              : pub(crate) struct NodePersistence {
    1472              :     pub(crate) node_id: i64,
    1473              :     pub(crate) scheduling_policy: String,
    1474              :     pub(crate) listen_http_addr: String,
    1475              :     pub(crate) listen_http_port: i32,
    1476              :     pub(crate) listen_pg_addr: String,
    1477              :     pub(crate) listen_pg_port: i32,
    1478              :     pub(crate) availability_zone_id: String,
    1479              :     pub(crate) listen_https_port: Option<i32>,
    1480              : }
    1481              : 
    1482              : /// Tenant metadata health status that is stored durably.
    1483            0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
    1484              : #[diesel(table_name = crate::schema::metadata_health)]
    1485              : pub(crate) struct MetadataHealthPersistence {
    1486              :     #[serde(default)]
    1487              :     pub(crate) tenant_id: String,
    1488              :     #[serde(default)]
    1489              :     pub(crate) shard_number: i32,
    1490              :     #[serde(default)]
    1491              :     pub(crate) shard_count: i32,
    1492              : 
    1493              :     pub(crate) healthy: bool,
    1494              :     pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1495              : }
    1496              : 
    1497              : impl MetadataHealthPersistence {
    1498            0 :     pub fn new(
    1499            0 :         tenant_shard_id: TenantShardId,
    1500            0 :         healthy: bool,
    1501            0 :         last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1502            0 :     ) -> Self {
    1503            0 :         let tenant_id = tenant_shard_id.tenant_id.to_string();
    1504            0 :         let shard_number = tenant_shard_id.shard_number.0 as i32;
    1505            0 :         let shard_count = tenant_shard_id.shard_count.literal() as i32;
    1506            0 : 
    1507            0 :         MetadataHealthPersistence {
    1508            0 :             tenant_id,
    1509            0 :             shard_number,
    1510            0 :             shard_count,
    1511            0 :             healthy,
    1512            0 :             last_scrubbed_at,
    1513            0 :         }
    1514            0 :     }
    1515              : 
    1516              :     #[allow(dead_code)]
    1517            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    1518            0 :         Ok(TenantShardId {
    1519            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1520            0 :             shard_number: ShardNumber(self.shard_number as u8),
    1521            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    1522              :         })
    1523            0 :     }
    1524              : }
    1525              : 
    1526              : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
    1527            0 :     fn from(value: MetadataHealthPersistence) -> Self {
    1528            0 :         MetadataHealthRecord {
    1529            0 :             tenant_shard_id: value
    1530            0 :                 .get_tenant_shard_id()
    1531            0 :                 .expect("stored tenant id should be valid"),
    1532            0 :             healthy: value.healthy,
    1533            0 :             last_scrubbed_at: value.last_scrubbed_at,
    1534            0 :         }
    1535            0 :     }
    1536              : }
    1537              : 
    1538              : #[derive(
    1539            0 :     Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
    1540              : )]
    1541              : #[diesel(table_name = crate::schema::controllers)]
    1542              : pub(crate) struct ControllerPersistence {
    1543              :     pub(crate) address: String,
    1544              :     pub(crate) started_at: chrono::DateTime<chrono::Utc>,
    1545              : }
    1546              : 
    1547              : // What we store in the database
    1548            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
    1549              : #[diesel(table_name = crate::schema::safekeepers)]
    1550              : pub(crate) struct SafekeeperPersistence {
    1551              :     pub(crate) id: i64,
    1552              :     pub(crate) region_id: String,
    1553              :     /// 1 is special, it means just created (not currently posted to storcon).
    1554              :     /// Zero or negative is not really expected.
    1555              :     /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    1556              :     pub(crate) version: i64,
    1557              :     pub(crate) host: String,
    1558              :     pub(crate) port: i32,
    1559              :     pub(crate) http_port: i32,
    1560              :     pub(crate) availability_zone_id: String,
    1561              :     pub(crate) scheduling_policy: SkSchedulingPolicyFromSql,
    1562              : }
    1563              : 
    1564              : /// Wrapper struct around [`SkSchedulingPolicy`]: both it and [`FromSql`] live in foreign crates, so the
    1565              : /// orphan rule prevents implementing the trait directly, and we don't want to make [`safekeeper_api`] depend on [`diesel`].
    1566            0 : #[derive(Serialize, Deserialize, FromSqlRow, Eq, PartialEq, Debug, Copy, Clone)]
    1567              : pub(crate) struct SkSchedulingPolicyFromSql(pub(crate) SkSchedulingPolicy);
    1568              : 
    1569              : impl From<SkSchedulingPolicy> for SkSchedulingPolicyFromSql {
    1570            0 :     fn from(value: SkSchedulingPolicy) -> Self {
    1571            0 :         SkSchedulingPolicyFromSql(value)
    1572            0 :     }
    1573              : }
    1574              : 
    1575              : impl FromSql<diesel::sql_types::VarChar, Pg> for SkSchedulingPolicyFromSql {
    1576            0 :     fn from_sql(
    1577            0 :         bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
    1578            0 :     ) -> diesel::deserialize::Result<Self> {
    1579            0 :         let bytes = bytes.as_bytes();
    1580            0 :         match core::str::from_utf8(bytes) {
    1581            0 :             Ok(s) => match SkSchedulingPolicy::from_str(s) {
    1582            0 :                 Ok(policy) => Ok(SkSchedulingPolicyFromSql(policy)),
    1583            0 :                 Err(e) => Err(format!("can't parse: {e}").into()),
    1584              :             },
    1585            0 :             Err(e) => Err(format!("invalid UTF-8 for scheduling policy: {e}").into()),
    1586              :         }
    1587            0 :     }
    1588              : }
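                      : 
                      : // Minimal illustrative test, not part of the original file: the string form written by
                      : // `set_safekeeper_scheduling_policy` (via `String::from`) parses back with
                      : // `SkSchedulingPolicy::from_str`, which is what the `FromSql` impl above relies on.
                      : // "active" is assumed to be an accepted policy string; if it is not, the check is skipped.
                      : #[cfg(test)]
                      : #[test]
                      : fn test_sk_scheduling_policy_string_round_trip() {
                      :     if let Ok(policy) = SkSchedulingPolicy::from_str("active") {
                      :         let stored = String::from(policy);
                      :         assert_eq!(SkSchedulingPolicy::from_str(&stored).ok(), Some(policy));
                      :     }
                      : }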
    1589              : 
    1590              : impl SafekeeperPersistence {
    1591            0 :     pub(crate) fn from_upsert(
    1592            0 :         upsert: SafekeeperUpsert,
    1593            0 :         scheduling_policy: SkSchedulingPolicy,
    1594            0 :     ) -> Self {
    1595            0 :         crate::persistence::SafekeeperPersistence {
    1596            0 :             id: upsert.id,
    1597            0 :             region_id: upsert.region_id,
    1598            0 :             version: upsert.version,
    1599            0 :             host: upsert.host,
    1600            0 :             port: upsert.port,
    1601            0 :             http_port: upsert.http_port,
    1602            0 :             availability_zone_id: upsert.availability_zone_id,
    1603            0 :             scheduling_policy: SkSchedulingPolicyFromSql(scheduling_policy),
    1604            0 :         }
    1605            0 :     }
    1606            0 :     pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
    1607            0 :         Ok(SafekeeperDescribeResponse {
    1608            0 :             id: NodeId(self.id as u64),
    1609            0 :             region_id: self.region_id.clone(),
    1610            0 :             version: self.version,
    1611            0 :             host: self.host.clone(),
    1612            0 :             port: self.port,
    1613            0 :             http_port: self.http_port,
    1614            0 :             availability_zone_id: self.availability_zone_id.clone(),
    1615            0 :             scheduling_policy: self.scheduling_policy.0,
    1616            0 :         })
    1617            0 :     }
    1618              : }
    1619              : 
    1620              : /// What we expect from the upsert HTTP API
    1621            0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
    1622              : pub(crate) struct SafekeeperUpsert {
    1623              :     pub(crate) id: i64,
    1624              :     pub(crate) region_id: String,
    1625              :     /// 1 is special, it means just created (not currently posted to storcon).
    1626              :     /// Zero or negative is not really expected.
    1627              :     /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    1628              :     pub(crate) version: i64,
    1629              :     pub(crate) host: String,
    1630              :     pub(crate) port: i32,
    1631              :     /// The active flag will not be stored in the database and will be ignored.
    1632              :     pub(crate) active: Option<bool>,
    1633              :     pub(crate) http_port: i32,
    1634              :     pub(crate) availability_zone_id: String,
    1635              : }
    1636              : 
    1637              : impl SafekeeperUpsert {
    1638            0 :     fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
    1639            0 :         if self.version < 0 {
    1640            0 :             anyhow::bail!("negative version: {}", self.version);
    1641            0 :         }
    1642            0 :         Ok(InsertUpdateSafekeeper {
    1643            0 :             id: self.id,
    1644            0 :             region_id: &self.region_id,
    1645            0 :             version: self.version,
    1646            0 :             host: &self.host,
    1647            0 :             port: self.port,
    1648            0 :             http_port: self.http_port,
    1649            0 :             availability_zone_id: &self.availability_zone_id,
    1650            0 :             // None means we don't want to update this column here; it can be updated via other code paths.
    1651            0 :             scheduling_policy: None,
    1652            0 :         })
    1653            0 :     }
    1654              : }
    1655              : 
    1656            0 : #[derive(Insertable, AsChangeset)]
    1657              : #[diesel(table_name = crate::schema::safekeepers)]
    1658              : struct InsertUpdateSafekeeper<'a> {
    1659              :     id: i64,
    1660              :     region_id: &'a str,
    1661              :     version: i64,
    1662              :     host: &'a str,
    1663              :     port: i32,
    1664              :     http_port: i32,
    1665              :     availability_zone_id: &'a str,
    1666              :     scheduling_policy: Option<&'a str>,
    1667              : }
        

Generated by: LCOV version 2.1-beta