LCOV - code coverage report
Current view: top level - storage_controller/src - persistence.rs (source / functions)
Test:         5445d246133daeceb0507e6cc0797ab7c1c70cb8.info
Test Date:    2025-03-12 18:05:02
Coverage:     Lines: 0.6 % (7 of 1246 hit)    Functions: 0.2 % (1 of 488 hit)

            Line data    Source code
       1              : pub(crate) mod split_state;
       2              : use std::collections::HashMap;
       3              : use std::io::Write;
       4              : use std::str::FromStr;
       5              : use std::sync::Arc;
       6              : use std::time::{Duration, Instant};
       7              : 
       8              : use diesel::deserialize::{FromSql, FromSqlRow};
       9              : use diesel::expression::AsExpression;
      10              : use diesel::pg::Pg;
      11              : use diesel::prelude::*;
      12              : use diesel::serialize::{IsNull, ToSql};
      13              : use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
      14              : use diesel_async::pooled_connection::bb8::Pool;
      15              : use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
      16              : use diesel_async::{AsyncPgConnection, RunQueryDsl};
      17              : use diesel_migrations::{EmbeddedMigrations, embed_migrations};
      18              : use futures::FutureExt;
      19              : use futures::future::BoxFuture;
      20              : use itertools::Itertools;
      21              : use pageserver_api::controller_api::{
      22              :     AvailabilityZone, MetadataHealthRecord, NodeSchedulingPolicy, PlacementPolicy,
      23              :     SafekeeperDescribeResponse, ShardSchedulingPolicy, SkSchedulingPolicy,
      24              : };
      25              : use pageserver_api::models::TenantConfig;
      26              : use pageserver_api::shard::{
      27              :     ShardConfigError, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      28              : };
      29              : use rustls::client::WebPkiServerVerifier;
      30              : use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
      31              : use rustls::crypto::ring;
      32              : use scoped_futures::ScopedBoxFuture;
      33              : use serde::{Deserialize, Serialize};
      34              : use utils::generation::Generation;
      35              : use utils::id::{NodeId, TenantId, TimelineId};
      36              : use utils::lsn::Lsn;
      37              : 
      38              : use self::split_state::SplitState;
      39              : use crate::metrics::{
      40              :     DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
      41              : };
      42              : use crate::node::Node;
      43              : const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
      44              : 
      45              : /// ## What do we store?
      46              : ///
      47              : /// The storage controller service does not store most of its state durably.
      48              : ///
      49              : /// The essential things to store durably are:
      50              : /// - generation numbers, as these must always advance monotonically to ensure data safety.
      51              : /// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
      52              : /// - Node's scheduling policies, as the source of truth for these is something external.
      53              : ///
      54              : /// Other things we store durably as an implementation detail:
      55              : /// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
      56              : ///   but it is operationally simpler to make this service the authority for which nodes
      57              : ///   it talks to.
      58              : ///
      59              : /// ## Performance/efficiency
      60              : ///
      61              : /// The storage controller service does not go via the database for most things: there are
      62              : /// a couple of places where we must, and where efficiency matters:
      63              : /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
      64              : ///   before it can attach a tenant, so this acts as a bound on how fast things like
      65              : ///   failover can happen.
      66              : /// - Pageserver re-attach: we will increment many shards' generations when this happens,
      67              : ///   so it is important to avoid e.g. issuing O(N) queries.
      68              : ///
      69              : /// Database calls relating to nodes have low performance requirements, as they are very rarely
      70              : /// updated, and reads of nodes are always from memory, not the database.  We only require that
      71              : /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
      72              : pub struct Persistence {
      73              :     connection_pool: Pool<AsyncPgConnection>,
      74              : }
      75              : 
      76              : /// Legacy format, for use in JSON compat objects in test environment
      77            0 : #[derive(Serialize, Deserialize)]
      78              : struct JsonPersistence {
      79              :     tenants: HashMap<TenantShardId, TenantShardPersistence>,
      80              : }
      81              : 
      82              : #[derive(thiserror::Error, Debug)]
      83              : pub(crate) enum DatabaseError {
      84              :     #[error(transparent)]
      85              :     Query(#[from] diesel::result::Error),
      86              :     #[error(transparent)]
      87              :     Connection(#[from] diesel::result::ConnectionError),
      88              :     #[error(transparent)]
      89              :     ConnectionPool(#[from] diesel_async::pooled_connection::bb8::RunError),
      90              :     #[error("Logical error: {0}")]
      91              :     Logical(String),
      92              :     #[error("Migration error: {0}")]
      93              :     Migration(String),
      94              : }
      95              : 
      96              : #[derive(measured::FixedCardinalityLabel, Copy, Clone)]
      97              : pub(crate) enum DatabaseOperation {
      98              :     InsertNode,
      99              :     UpdateNode,
     100              :     DeleteNode,
     101              :     ListNodes,
     102              :     BeginShardSplit,
     103              :     CompleteShardSplit,
     104              :     AbortShardSplit,
     105              :     Detach,
     106              :     ReAttach,
     107              :     IncrementGeneration,
     108              :     TenantGenerations,
     109              :     ShardGenerations,
     110              :     ListTenantShards,
     111              :     LoadTenant,
     112              :     InsertTenantShards,
     113              :     UpdateTenantShard,
     114              :     DeleteTenant,
     115              :     UpdateTenantConfig,
     116              :     UpdateMetadataHealth,
     117              :     ListMetadataHealth,
     118              :     ListMetadataHealthUnhealthy,
     119              :     ListMetadataHealthOutdated,
     120              :     ListSafekeepers,
     121              :     GetLeader,
     122              :     UpdateLeader,
     123              :     SetPreferredAzs,
     124              :     InsertTimeline,
     125              :     GetTimeline,
     126              :     InsertTimelineReconcile,
     127              :     RemoveTimelineReconcile,
     128              :     ListTimelineReconcile,
     129              : }
     130              : 
     131              : #[must_use]
     132              : pub(crate) enum AbortShardSplitStatus {
     133              :     /// We aborted the split in the database by reverting to the parent shards
     134              :     Aborted,
     135              :     /// The split had already been persisted.
     136              :     Complete,
     137              : }
     138              : 
     139              : pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
     140              : 
     141              : /// Some methods can operate on either a whole tenant or a single shard
     142              : #[derive(Clone)]
     143              : pub(crate) enum TenantFilter {
     144              :     Tenant(TenantId),
     145              :     Shard(TenantShardId),
     146              : }
     147              : 
     148              : /// Represents the results of looking up generation+pageserver for the shards of a tenant
     149              : pub(crate) struct ShardGenerationState {
     150              :     pub(crate) tenant_shard_id: TenantShardId,
     151              :     pub(crate) generation: Option<Generation>,
     152              :     pub(crate) generation_pageserver: Option<NodeId>,
     153              : }
     154              : 
     155              : // A generous allowance for how many times we may retry serializable transactions
     156              : // before giving up.  This is not expected to be hit: it is a defensive measure in case we
     157              : // somehow engineer a situation where duelling transactions might otherwise live-lock.
     158              : const MAX_RETRIES: usize = 128;
     159              : 
     160              : impl Persistence {
     161              :     // The default postgres connection limit is 100.  We use up to 99, to leave one free for a human admin under
     162              :     // normal circumstances.  This assumes we have exclusive use of the database cluster to which we connect.
     163              :     pub const MAX_CONNECTIONS: u32 = 99;
     164              : 
     165              :     // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
     166              :     const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
     167              :     const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
     168              : 
     169            0 :     pub async fn new(database_url: String) -> Self {
     170            0 :         let mut mgr_config = ManagerConfig::default();
     171            0 :         mgr_config.custom_setup = Box::new(establish_connection_rustls);
     172            0 : 
     173            0 :         let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
     174            0 :             database_url,
     175            0 :             mgr_config,
     176            0 :         );
     177              : 
     178              :         // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
     179              :         // to execute queries (database queries are not generally on latency-sensitive paths).
     180            0 :         let connection_pool = Pool::builder()
     181            0 :             .max_size(Self::MAX_CONNECTIONS)
     182            0 :             .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
     183            0 :             .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
     184            0 :             // Always keep at least one connection ready to go
     185            0 :             .min_idle(Some(1))
     186            0 :             .test_on_check_out(true)
     187            0 :             .build(manager)
     188            0 :             .await
     189            0 :             .expect("Could not build connection pool");
     190            0 : 
     191            0 :         Self { connection_pool }
     192            0 :     }
     193              : 
     194              :     /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
     195              :     /// database and the storage controller, so the database might not be available right away
     196            0 :     pub async fn await_connection(
     197            0 :         database_url: &str,
     198            0 :         timeout: Duration,
     199            0 :     ) -> Result<(), diesel::ConnectionError> {
     200            0 :         let started_at = Instant::now();
     201            0 :         log_postgres_connstr_info(database_url)
     202            0 :             .map_err(|e| diesel::ConnectionError::InvalidConnectionUrl(e.to_string()))?;
     203              :         loop {
     204            0 :             match establish_connection_rustls(database_url).await {
     205              :                 Ok(_) => {
     206            0 :                     tracing::info!("Connected to database.");
     207            0 :                     return Ok(());
     208              :                 }
     209            0 :                 Err(e) => {
     210            0 :                     if started_at.elapsed() > timeout {
     211            0 :                         return Err(e);
     212              :                     } else {
     213            0 :                         tracing::info!("Database not yet available, waiting... ({e})");
     214            0 :                         tokio::time::sleep(Duration::from_millis(100)).await;
     215              :                     }
     216              :                 }
     217              :             }
     218              :         }
     219            0 :     }
     220              : 
     221              :     /// Execute the diesel migrations that are built into this binary
     222            0 :     pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
     223              :         use diesel_migrations::{HarnessWithOutput, MigrationHarness};
     224              : 
     225              :         // Can't use self.with_conn here as we do spawn_blocking which requires static.
     226            0 :         let conn = self
     227            0 :             .connection_pool
     228            0 :             .dedicated_connection()
     229            0 :             .await
     230            0 :             .map_err(|e| DatabaseError::Migration(e.to_string()))?;
     231            0 :         let mut async_wrapper: AsyncConnectionWrapper<AsyncPgConnection> =
     232            0 :             AsyncConnectionWrapper::from(conn);
     233            0 :         tokio::task::spawn_blocking(move || {
     234            0 :             let mut retry_count = 0;
     235            0 :             loop {
     236            0 :                 let result = HarnessWithOutput::write_to_stdout(&mut async_wrapper)
     237            0 :                     .run_pending_migrations(MIGRATIONS)
     238            0 :                     .map(|_| ())
     239            0 :                     .map_err(|e| DatabaseError::Migration(e.to_string()));
     240            0 :                 match result {
     241            0 :                     Ok(r) => break Ok(r),
     242              :                     Err(
     243            0 :                         err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
     244            0 :                             diesel::result::DatabaseErrorKind::SerializationFailure,
     245            0 :                             _,
     246            0 :                         )),
     247            0 :                     ) => {
     248            0 :                         retry_count += 1;
     249            0 :                         if retry_count > MAX_RETRIES {
     250            0 :                             tracing::error!(
     251            0 :                                 "Exceeded max retries on SerializationFailure errors: {err:?}"
     252              :                             );
     253            0 :                             break Err(err);
     254              :                         } else {
     255              :                             // Retry on serialization errors: these are expected, because even though our
     256              :                             // transactions don't fight for the same rows, they will occasionally collide
     257              :                             // on index pages (e.g. increment_generation for unrelated shards can collide)
     258            0 :                             tracing::debug!(
     259            0 :                                 "Retrying transaction on serialization failure {err:?}"
     260              :                             );
     261            0 :                             continue;
     262              :                         }
     263              :                     }
     264            0 :                     Err(e) => break Err(e),
     265              :                 }
     266              :             }
     267            0 :         })
     268            0 :         .await
     269            0 :         .map_err(|e| DatabaseError::Migration(e.to_string()))??;
     270            0 :         Ok(())
     271            0 :     }
     272              : 
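
The three entry points above (`new`, `await_connection`, `migration_run`) are composed at service startup. Below is a minimal sketch of that flow, not part of this file; the function name `startup_example` and the 30-second timeout are illustrative assumptions, and the imports come from the top of the file.

async fn startup_example(database_url: String) -> anyhow::Result<()> {
    // Tolerate the database restarting concurrently with the storage controller.
    Persistence::await_connection(&database_url, Duration::from_secs(30)).await?;
    // Build the bb8 pool; this limits connection count rather than optimizing latency.
    let persistence = Persistence::new(database_url).await;
    // Apply any diesel migrations embedded in this binary.
    persistence.migration_run().await?;
    Ok(())
}
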
     273              :     /// Wraps `with_conn` in order to collect latency and error metrics
     274            0 :     async fn with_measured_conn<'a, 'b, F, R>(
     275            0 :         &self,
     276            0 :         op: DatabaseOperation,
     277            0 :         func: F,
     278            0 :     ) -> DatabaseResult<R>
     279            0 :     where
     280            0 :         F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
     281            0 :             + Send
     282            0 :             + std::marker::Sync
     283            0 :             + 'a,
     284            0 :         R: Send + 'b,
     285            0 :     {
     286            0 :         let latency = &METRICS_REGISTRY
     287            0 :             .metrics_group
     288            0 :             .storage_controller_database_query_latency;
     289            0 :         let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });
     290              : 
     291            0 :         let res = self.with_conn(func).await;
     292              : 
     293            0 :         if let Err(err) = &res {
     294            0 :             let error_counter = &METRICS_REGISTRY
     295            0 :                 .metrics_group
     296            0 :                 .storage_controller_database_query_error;
     297            0 :             error_counter.inc(DatabaseQueryErrorLabelGroup {
     298            0 :                 error_type: err.error_label(),
     299            0 :                 operation: op,
     300            0 :             })
     301            0 :         }
     302              : 
     303            0 :         res
     304            0 :     }
     305              : 
     306              :     /// Call the provided function with a Diesel database connection in a retry loop
     307            0 :     async fn with_conn<'a, 'b, F, R>(&self, func: F) -> DatabaseResult<R>
     308            0 :     where
     309            0 :         F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
     310            0 :             + Send
     311            0 :             + std::marker::Sync
     312            0 :             + 'a,
     313            0 :         R: Send + 'b,
     314            0 :     {
     315            0 :         let mut retry_count = 0;
     316              :         loop {
     317            0 :             let mut conn = self.connection_pool.get().await?;
     318            0 :             match conn
     319            0 :                 .build_transaction()
     320            0 :                 .serializable()
     321            0 :                 .run(|c| func(c))
     322            0 :                 .await
     323              :             {
     324            0 :                 Ok(r) => break Ok(r),
     325              :                 Err(
     326            0 :                     err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
     327            0 :                         diesel::result::DatabaseErrorKind::SerializationFailure,
     328            0 :                         _,
     329            0 :                     )),
     330            0 :                 ) => {
     331            0 :                     retry_count += 1;
     332            0 :                     if retry_count > MAX_RETRIES {
     333            0 :                         tracing::error!(
     334            0 :                             "Exceeded max retries on SerializationFailure errors: {err:?}"
     335              :                         );
     336            0 :                         break Err(err);
     337              :                     } else {
     338              :                         // Retry on serialization errors: these are expected, because even though our
     339              :                         // transactions don't fight for the same rows, they will occasionally collide
     340              :                         // on index pages (e.g. increment_generation for unrelated shards can collide)
     341            0 :                         tracing::debug!("Retrying transaction on serialization failure {err:?}");
     342            0 :                         continue;
     343              :                     }
     344              :                 }
     345            0 :                 Err(e) => break Err(e),
     346              :             }
     347              :         }
     348            0 :     }
     349              : 
     350              :     /// When a node is first registered, persist it before using it for anything
     351            0 :     pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
     352            0 :         let np = &node.to_persistent();
     353            0 :         self.with_measured_conn(DatabaseOperation::InsertNode, move |conn| {
     354            0 :             Box::pin(async move {
     355            0 :                 diesel::insert_into(crate::schema::nodes::table)
     356            0 :                     .values(np)
     357            0 :                     .execute(conn)
     358            0 :                     .await?;
     359            0 :                 Ok(())
     360            0 :             })
     361            0 :         })
     362            0 :         .await
     363            0 :     }
     364              : 
     365              :     /// At startup, populate the list of nodes which our shards may be placed on
     366            0 :     pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
     367            0 :         let nodes: Vec<NodePersistence> = self
     368            0 :             .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
     369            0 :                 Box::pin(async move {
     370            0 :                     Ok(crate::schema::nodes::table
     371            0 :                         .load::<NodePersistence>(conn)
     372            0 :                         .await?)
     373            0 :                 })
     374            0 :             })
     375            0 :             .await?;
     376              : 
     377            0 :         tracing::info!("list_nodes: loaded {} nodes", nodes.len());
     378              : 
     379            0 :         Ok(nodes)
     380            0 :     }
     381              : 
     382            0 :     pub(crate) async fn update_node<V>(
     383            0 :         &self,
     384            0 :         input_node_id: NodeId,
     385            0 :         values: V,
     386            0 :     ) -> DatabaseResult<()>
     387            0 :     where
     388            0 :         V: diesel::AsChangeset<Target = crate::schema::nodes::table> + Clone + Send + Sync,
     389            0 :         V::Changeset: diesel::query_builder::QueryFragment<diesel::pg::Pg> + Send, // valid Postgres SQL
     390            0 :     {
     391              :         use crate::schema::nodes::dsl::*;
     392            0 :         let updated = self
     393            0 :             .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
     394            0 :                 let values = values.clone();
     395            0 :                 Box::pin(async move {
     396            0 :                     let updated = diesel::update(nodes)
     397            0 :                         .filter(node_id.eq(input_node_id.0 as i64))
     398            0 :                         .set(values)
     399            0 :                         .execute(conn)
     400            0 :                         .await?;
     401            0 :                     Ok(updated)
     402            0 :                 })
     403            0 :             })
     404            0 :             .await?;
     405              : 
     406            0 :         if updated != 1 {
     407            0 :             Err(DatabaseError::Logical(format!(
     408            0 :                 "Node {input_node_id:?} not found for update",
     409            0 :             )))
     410              :         } else {
     411            0 :             Ok(())
     412              :         }
     413            0 :     }
     414              : 
     415            0 :     pub(crate) async fn update_node_scheduling_policy(
     416            0 :         &self,
     417            0 :         input_node_id: NodeId,
     418            0 :         input_scheduling: NodeSchedulingPolicy,
     419            0 :     ) -> DatabaseResult<()> {
     420              :         use crate::schema::nodes::dsl::*;
     421            0 :         self.update_node(
     422            0 :             input_node_id,
     423            0 :             scheduling_policy.eq(String::from(input_scheduling)),
     424            0 :         )
     425            0 :         .await
     426            0 :     }
     427              : 
     428            0 :     pub(crate) async fn update_node_on_registration(
     429            0 :         &self,
     430            0 :         input_node_id: NodeId,
     431            0 :         input_https_port: Option<u16>,
     432            0 :     ) -> DatabaseResult<()> {
     433              :         use crate::schema::nodes::dsl::*;
     434            0 :         self.update_node(
     435            0 :             input_node_id,
     436            0 :             listen_https_port.eq(input_https_port.map(|x| x as i32)),
     437            0 :         )
     438            0 :         .await
     439            0 :     }
     440              : 
     441              :     /// At startup, load the high level state for shards, such as their config + policy.  This will
     442              :     /// be enriched at runtime with state discovered on pageservers.
     443              :     ///
     444              :     /// We exclude shards configured to be detached.  During startup, if we see any attached locations
     445              :     /// for such shards, they will automatically be detached as 'orphans'.
     446            0 :     pub(crate) async fn load_active_tenant_shards(
     447            0 :         &self,
     448            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     449              :         use crate::schema::tenant_shards::dsl::*;
     450            0 :         self.with_measured_conn(DatabaseOperation::ListTenantShards, move |conn| {
     451            0 :             Box::pin(async move {
     452            0 :                 let query = tenant_shards.filter(
     453            0 :                     placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
     454            0 :                 );
     455            0 :                 let result = query.load::<TenantShardPersistence>(conn).await?;
     456              : 
     457            0 :                 Ok(result)
     458            0 :             })
     459            0 :         })
     460            0 :         .await
     461            0 :     }
     462              : 
     463              :     /// When restoring a previously detached tenant into memory, load it from the database
     464            0 :     pub(crate) async fn load_tenant(
     465            0 :         &self,
     466            0 :         filter_tenant_id: TenantId,
     467            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     468              :         use crate::schema::tenant_shards::dsl::*;
     469            0 :         self.with_measured_conn(DatabaseOperation::LoadTenant, move |conn| {
     470            0 :             Box::pin(async move {
     471            0 :                 let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
     472            0 :                 let result = query.load::<TenantShardPersistence>(conn).await?;
     473              : 
     474            0 :                 Ok(result)
     475            0 :             })
     476            0 :         })
     477            0 :         .await
     478            0 :     }
     479              : 
     480              :     /// Tenants must be persisted before we schedule them for the first time.  This enables us
     481              :     /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
     482            0 :     pub(crate) async fn insert_tenant_shards(
     483            0 :         &self,
     484            0 :         shards: Vec<TenantShardPersistence>,
     485            0 :     ) -> DatabaseResult<()> {
     486              :         use crate::schema::{metadata_health, tenant_shards};
     487              : 
     488            0 :         let now = chrono::Utc::now();
     489            0 : 
     490            0 :         let metadata_health_records = shards
     491            0 :             .iter()
     492            0 :             .map(|t| MetadataHealthPersistence {
     493            0 :                 tenant_id: t.tenant_id.clone(),
     494            0 :                 shard_number: t.shard_number,
     495            0 :                 shard_count: t.shard_count,
     496            0 :                 healthy: true,
     497            0 :                 last_scrubbed_at: now,
     498            0 :             })
     499            0 :             .collect::<Vec<_>>();
     500            0 : 
     501            0 :         let shards = &shards;
     502            0 :         let metadata_health_records = &metadata_health_records;
     503            0 :         self.with_measured_conn(DatabaseOperation::InsertTenantShards, move |conn| {
     504            0 :             Box::pin(async move {
     505            0 :                 diesel::insert_into(tenant_shards::table)
     506            0 :                     .values(shards)
     507            0 :                     .execute(conn)
     508            0 :                     .await?;
     509              : 
     510            0 :                 diesel::insert_into(metadata_health::table)
     511            0 :                     .values(metadata_health_records)
     512            0 :                     .execute(conn)
     513            0 :                     .await?;
     514            0 :                 Ok(())
     515            0 :             })
     516            0 :         })
     517            0 :         .await
     518            0 :     }
     519              : 
     520              :     /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
     521              :     /// the tenant from memory on this server.
     522            0 :     pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
     523              :         use crate::schema::tenant_shards::dsl::*;
     524            0 :         self.with_measured_conn(DatabaseOperation::DeleteTenant, move |conn| {
     525            0 :             Box::pin(async move {
     526            0 :                 // `metadata_health` status (if exists) is also deleted based on the cascade behavior.
     527            0 :                 diesel::delete(tenant_shards)
     528            0 :                     .filter(tenant_id.eq(del_tenant_id.to_string()))
     529            0 :                     .execute(conn)
     530            0 :                     .await?;
     531            0 :                 Ok(())
     532            0 :             })
     533            0 :         })
     534            0 :         .await
     535            0 :     }
     536              : 
     537            0 :     pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
     538              :         use crate::schema::nodes::dsl::*;
     539            0 :         self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| {
     540            0 :             Box::pin(async move {
     541            0 :                 diesel::delete(nodes)
     542            0 :                     .filter(node_id.eq(del_node_id.0 as i64))
     543            0 :                     .execute(conn)
     544            0 :                     .await?;
     545              : 
     546            0 :                 Ok(())
     547            0 :             })
     548            0 :         })
     549            0 :         .await
     550            0 :     }
     551              : 
     552              :     /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
     553              :     /// batched increment of the generations of all tenants whose generation_pageserver is equal to
     554              :     /// the node that called /re-attach.
     555              :     #[tracing::instrument(skip_all, fields(node_id))]
     556              :     pub(crate) async fn re_attach(
     557              :         &self,
     558              :         input_node_id: NodeId,
     559              :     ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
     560              :         use crate::schema::nodes::dsl::{scheduling_policy, *};
     561              :         use crate::schema::tenant_shards::dsl::*;
     562              :         let updated = self
     563            0 :             .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
     564            0 :                 Box::pin(async move {
     565            0 :                     let rows_updated = diesel::update(tenant_shards)
     566            0 :                         .filter(generation_pageserver.eq(input_node_id.0 as i64))
     567            0 :                         .set(generation.eq(generation + 1))
     568            0 :                         .execute(conn)
     569            0 :                         .await?;
     570              : 
     571            0 :                     tracing::info!("Incremented {} tenants' generations", rows_updated);
     572              : 
     573              :                     // TODO: UPDATE+SELECT in one query
     574              : 
     575            0 :                     let updated = tenant_shards
     576            0 :                         .filter(generation_pageserver.eq(input_node_id.0 as i64))
     577            0 :                         .select(TenantShardPersistence::as_select())
     578            0 :                         .load(conn)
     579            0 :                         .await?;
     580              : 
     581              :                     // If the node went through a drain and restart phase before re-attaching,
     582              :                     // then reset it's node scheduling policy to active.
     583            0 :                     diesel::update(nodes)
     584            0 :                         .filter(node_id.eq(input_node_id.0 as i64))
     585            0 :                         .filter(
     586            0 :                             scheduling_policy
     587            0 :                                 .eq(String::from(NodeSchedulingPolicy::PauseForRestart))
     588            0 :                                 .or(scheduling_policy
     589            0 :                                     .eq(String::from(NodeSchedulingPolicy::Draining)))
     590            0 :                                 .or(scheduling_policy
     591            0 :                                     .eq(String::from(NodeSchedulingPolicy::Filling))),
     592            0 :                         )
     593            0 :                         .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active)))
     594            0 :                         .execute(conn)
     595            0 :                         .await?;
     596              : 
     597            0 :                     Ok(updated)
     598            0 :                 })
     599            0 :             })
     600              :             .await?;
     601              : 
     602              :         let mut result = HashMap::new();
     603              :         for tsp in updated {
     604              :             let tenant_shard_id = TenantShardId {
     605              :                 tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
     606            0 :                     .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
     607              :                 shard_number: ShardNumber(tsp.shard_number as u8),
     608              :                 shard_count: ShardCount::new(tsp.shard_count as u8),
     609              :             };
     610              : 
     611              :             let Some(g) = tsp.generation else {
     612              :                 // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
     613              :                 // we only set generation_pageserver when setting generation.
     614              :                 return Err(DatabaseError::Logical(
     615              :                     "Generation should always be set after incrementing".to_string(),
     616              :                 ));
     617              :             };
     618              :             result.insert(tenant_shard_id, Generation::new(g as u32));
     619              :         }
     620              : 
     621              :         Ok(result)
     622              :     }
     623              : 
     624              :     /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
     625              :     /// advancing generation number.  We also store the NodeId for which the generation was issued, so that in
     626              :     /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
     627            0 :     pub(crate) async fn increment_generation(
     628            0 :         &self,
     629            0 :         tenant_shard_id: TenantShardId,
     630            0 :         node_id: NodeId,
     631            0 :     ) -> anyhow::Result<Generation> {
     632              :         use crate::schema::tenant_shards::dsl::*;
     633            0 :         let updated = self
     634            0 :             .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
     635            0 :                 Box::pin(async move {
     636            0 :                     let updated = diesel::update(tenant_shards)
     637            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     638            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     639            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     640            0 :                         .set((
     641            0 :                             generation.eq(generation + 1),
     642            0 :                             generation_pageserver.eq(node_id.0 as i64),
     643            0 :                         ))
     644            0 :                         // TODO: only returning() the generation column
     645            0 :                         .returning(TenantShardPersistence::as_returning())
     646            0 :                         .get_result(conn)
     647            0 :                         .await?;
     648              : 
     649            0 :                     Ok(updated)
     650            0 :                 })
     651            0 :             })
     652            0 :             .await?;
     653              : 
     654              :         // Generation is always non-null in the result: if the generation column had been NULL, then we
     655              :         // should have experienced an SQL conflict error while executing a query that tries to increment it.
     656            0 :         debug_assert!(updated.generation.is_some());
     657            0 :         let Some(g) = updated.generation else {
     658            0 :             return Err(DatabaseError::Logical(
     659            0 :                 "Generation should always be set after incrementing".to_string(),
     660            0 :             )
     661            0 :             .into());
     662              :         };
     663              : 
     664            0 :         Ok(Generation::new(g as u32))
     665            0 :     }
     666              : 
     667              :     /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
     668              :     /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
     669              :     /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
     670              :     /// latest generation)
     671              :     ///
     672              :     /// If the tenant doesn't exist, an empty vector is returned.
     673              :     ///
     674              :     /// Output is sorted by shard number
     675            0 :     pub(crate) async fn tenant_generations(
     676            0 :         &self,
     677            0 :         filter_tenant_id: TenantId,
     678            0 :     ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
     679              :         use crate::schema::tenant_shards::dsl::*;
     680            0 :         let rows = self
     681            0 :             .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
     682            0 :                 Box::pin(async move {
     683            0 :                     let result = tenant_shards
     684            0 :                         .filter(tenant_id.eq(filter_tenant_id.to_string()))
     685            0 :                         .select(TenantShardPersistence::as_select())
     686            0 :                         .order(shard_number)
     687            0 :                         .load(conn)
     688            0 :                         .await?;
     689            0 :                     Ok(result)
     690            0 :                 })
     691            0 :             })
     692            0 :             .await?;
     693              : 
     694            0 :         Ok(rows
     695            0 :             .into_iter()
     696            0 :             .map(|p| ShardGenerationState {
     697            0 :                 tenant_shard_id: p
     698            0 :                     .get_tenant_shard_id()
     699            0 :                     .expect("Corrupt tenant shard id in database"),
     700            0 :                 generation: p.generation.map(|g| Generation::new(g as u32)),
     701            0 :                 generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
     702            0 :             })
     703            0 :             .collect())
     704            0 :     }
     705              : 
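
The doc comment above describes a caller-side pattern: read shard generations before a timeline operation and re-check them afterwards to confirm the operation landed in the latest generation. A rough, hypothetical sketch of that pattern follows; `run_timeline_op` and the function name are placeholders, not APIs defined in this file.

async fn timeline_op_with_recheck(
    persistence: &Persistence,
    tenant_id: TenantId,
) -> anyhow::Result<()> {
    let before = persistence.tenant_generations(tenant_id).await?;
    run_timeline_op().await?; // placeholder for the pageserver timeline CRUD call
    let after = persistence.tenant_generations(tenant_id).await?;
    // Both vectors are sorted by shard number; if any shard's generation or attached
    // pageserver changed, the operation may not be durable in the latest generation.
    for (b, a) in before.iter().zip(after.iter()) {
        if b.generation != a.generation || b.generation_pageserver != a.generation_pageserver {
            anyhow::bail!("shard {} changed generation during the operation", b.tenant_shard_id);
        }
    }
    Ok(())
}
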
     706              :     /// Read the generation number of specific tenant shards
     707              :     ///
     708              :     /// Output is unsorted.  Output may not include values for all inputs, if they are missing in the database.
     709            0 :     pub(crate) async fn shard_generations(
     710            0 :         &self,
     711            0 :         mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
     712            0 :     ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
     713            0 :         let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);
     714              : 
     715              :         // We will chunk our input to avoid composing arbitrarily long `IN` clauses.  Typically we are
     716              :         // called with a single digit number of IDs, but in principle we could be called with tens
     717              :         // of thousands (all the shards on one pageserver) from the generation validation API.
     718            0 :         loop {
     719            0 :             // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
     720            0 :             // large query strings.
     721            0 :             let chunk_ids = tenant_shard_ids.by_ref().take(32);
     722            0 : 
     723            0 :             // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
     724            0 :             let in_clause = chunk_ids
     725            0 :                 .map(|tsid| {
     726            0 :                     format!(
     727            0 :                         "('{}', {}, {})",
     728            0 :                         tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
     729            0 :                     )
     730            0 :                 })
     731            0 :                 .join(",");
     732            0 : 
     733            0 :             // We are done when our iterator gives us nothing to filter on
     734            0 :             if in_clause.is_empty() {
     735            0 :                 break;
     736            0 :             }
     737            0 : 
     738            0 :             let in_clause = &in_clause;
     739            0 :             let chunk_rows = self
     740            0 :                 .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
     741            0 :                     Box::pin(async move {
     742              :                         // diesel doesn't support multi-column IN queries, so we compose raw SQL.  No escaping is required because
     743              :                         // the inputs are strongly typed and cannot carry any user-supplied raw string content.
     744            0 :                         let result : Vec<TenantShardPersistence> = diesel::sql_query(
     745            0 :                             format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
     746            0 :                         ).load(conn).await?;
     747              : 
     748            0 :                         Ok(result)
     749            0 :                     })
     750            0 :                 })
     751            0 :                 .await?;
     752            0 :             rows.extend(chunk_rows.into_iter())
     753              :         }
     754              : 
     755            0 :         Ok(rows
     756            0 :             .into_iter()
     757            0 :             .map(|tsp| {
     758            0 :                 (
     759            0 :                     tsp.get_tenant_shard_id()
     760            0 :                         .expect("Bad tenant ID in database"),
     761            0 :                     tsp.generation.map(|g| Generation::new(g as u32)),
     762            0 :                 )
     763            0 :             })
     764            0 :             .collect())
     765            0 :     }
     766              : 
     767              :     #[allow(non_local_definitions)]
     768              :     /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
     769              :     ///
     770              :     /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
     771              :     /// API: use [`Self::increment_generation`] instead.  Setting the generation via this route is a one-time thing
     772              :     /// that we only do the first time a tenant is set to an attached policy via /location_config.
     773            0 :     pub(crate) async fn update_tenant_shard(
     774            0 :         &self,
     775            0 :         tenant: TenantFilter,
     776            0 :         input_placement_policy: Option<PlacementPolicy>,
     777            0 :         input_config: Option<TenantConfig>,
     778            0 :         input_generation: Option<Generation>,
     779            0 :         input_scheduling_policy: Option<ShardSchedulingPolicy>,
     780            0 :     ) -> DatabaseResult<()> {
     781              :         use crate::schema::tenant_shards::dsl::*;
     782              : 
     783            0 :         let tenant = &tenant;
     784            0 :         let input_placement_policy = &input_placement_policy;
     785            0 :         let input_config = &input_config;
     786            0 :         let input_generation = &input_generation;
     787            0 :         let input_scheduling_policy = &input_scheduling_policy;
     788            0 :         self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
     789            0 :             Box::pin(async move {
     790            0 :                 let query = match tenant {
     791            0 :                     TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
     792            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     793            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     794            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     795            0 :                         .into_boxed(),
     796            0 :                     TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
     797            0 :                         .filter(tenant_id.eq(input_tenant_id.to_string()))
     798            0 :                         .into_boxed(),
     799              :                 };
     800              : 
     801              :                 // Clear generation_pageserver if we are moving into a state where we won't have
     802              :                 // any attached pageservers.
     803            0 :                 let input_generation_pageserver = match input_placement_policy {
     804            0 :                     None | Some(PlacementPolicy::Attached(_)) => None,
     805            0 :                     Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
     806              :                 };
     807              : 
     808            0 :                 #[derive(AsChangeset)]
     809              :                 #[diesel(table_name = crate::schema::tenant_shards)]
     810              :                 struct ShardUpdate {
     811              :                     generation: Option<i32>,
     812              :                     placement_policy: Option<String>,
     813              :                     config: Option<String>,
     814              :                     scheduling_policy: Option<String>,
     815              :                     generation_pageserver: Option<Option<i64>>,
     816              :                 }
     817              : 
     818            0 :                 let update = ShardUpdate {
     819            0 :                     generation: input_generation.map(|g| g.into().unwrap() as i32),
     820            0 :                     placement_policy: input_placement_policy
     821            0 :                         .as_ref()
     822            0 :                         .map(|p| serde_json::to_string(&p).unwrap()),
     823            0 :                     config: input_config
     824            0 :                         .as_ref()
     825            0 :                         .map(|c| serde_json::to_string(&c).unwrap()),
     826            0 :                     scheduling_policy: input_scheduling_policy
     827            0 :                         .map(|p| serde_json::to_string(&p).unwrap()),
     828            0 :                     generation_pageserver: input_generation_pageserver,
     829            0 :                 };
     830            0 : 
     831            0 :                 query.set(update).execute(conn).await?;
     832              : 
     833            0 :                 Ok(())
     834            0 :             })
     835            0 :         })
     836            0 :         .await?;
     837              : 
     838            0 :         Ok(())
     839            0 :     }
     840              : 
     841              :     /// Note that passing None for a shard clears the preferred AZ (rather than leaving it unmodified)
     842            0 :     pub(crate) async fn set_tenant_shard_preferred_azs(
     843            0 :         &self,
     844            0 :         preferred_azs: Vec<(TenantShardId, Option<AvailabilityZone>)>,
     845            0 :     ) -> DatabaseResult<Vec<(TenantShardId, Option<AvailabilityZone>)>> {
     846              :         use crate::schema::tenant_shards::dsl::*;
     847              : 
     848            0 :         let preferred_azs = preferred_azs.as_slice();
     849            0 :         self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
     850            0 :             Box::pin(async move {
     851            0 :                 let mut shards_updated = Vec::default();
     852              : 
     853            0 :                 for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
     854            0 :                     let updated = diesel::update(tenant_shards)
     855            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     856            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     857            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     858            0 :                         .set(preferred_az_id.eq(preferred_az.as_ref().map(|az| az.0.clone())))
     859            0 :                         .execute(conn)
     860            0 :                         .await?;
     861              : 
     862            0 :                     if updated == 1 {
     863            0 :                         shards_updated.push((*tenant_shard_id, preferred_az.clone()));
     864            0 :                     }
     865              :                 }
     866              : 
     867            0 :                 Ok(shards_updated)
     868            0 :             })
     869            0 :         })
     870            0 :         .await
     871            0 :     }
     872              : 
     873            0 :     pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
     874              :         use crate::schema::tenant_shards::dsl::*;
     875            0 :         self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
     876            0 :             Box::pin(async move {
     877            0 :                 let updated = diesel::update(tenant_shards)
     878            0 :                     .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     879            0 :                     .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     880            0 :                     .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     881            0 :                     .set((
     882            0 :                         generation_pageserver.eq(Option::<i64>::None),
     883            0 :                         placement_policy
     884            0 :                             .eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
     885            0 :                     ))
     886            0 :                     .execute(conn)
     887            0 :                     .await?;
     888              : 
     889            0 :                 Ok(updated)
     890            0 :             })
     891            0 :         })
     892            0 :         .await?;
     893              : 
     894            0 :         Ok(())
     895            0 :     }
     896              : 
     897              :     // When we start shard splitting, we must durably mark the tenant so that
     898              :     // on restart, we know that we must go through recovery.
     899              :     //
     900              :     // We create the child shards here, so that they will be available for increment_generation calls
     901              :     // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
     902            0 :     pub(crate) async fn begin_shard_split(
     903            0 :         &self,
     904            0 :         old_shard_count: ShardCount,
     905            0 :         split_tenant_id: TenantId,
     906            0 :         parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
     907            0 :     ) -> DatabaseResult<()> {
     908              :         use crate::schema::tenant_shards::dsl::*;
     909            0 :         let parent_to_children = parent_to_children.as_slice();
     910            0 :         self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| {
     911            0 :             Box::pin(async move {
     912              :             // Mark parent shards as splitting
     913              : 
     914            0 :             let updated = diesel::update(tenant_shards)
     915            0 :                 .filter(tenant_id.eq(split_tenant_id.to_string()))
     916            0 :                 .filter(shard_count.eq(old_shard_count.literal() as i32))
     917            0 :                 .set((splitting.eq(1),))
     918            0 :                 .execute(conn).await?;
     919            0 :             if u8::try_from(updated)
     920            0 :                 .map_err(|_| DatabaseError::Logical(
     921            0 :                     format!("Overflow existing shard count {} while splitting", updated))
     922            0 :                 )? != old_shard_count.count() {
     923              :                 // Perhaps a deletion or another split raced with this attempt to split, mutating
     924              :                 // the parent shards that we intend to split. In this case the split request should fail.
     925            0 :                 return Err(DatabaseError::Logical(
     926            0 :                     format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
     927            0 :                 ));
     928            0 :             }
     929            0 : 
     930            0 :             // FIXME: spurious clone to sidestep closure move rules
     931            0 :             let parent_to_children = parent_to_children.to_vec();
     932              : 
     933              :             // Insert child shards
     934            0 :             for (parent_shard_id, children) in parent_to_children {
     935            0 :                 let mut parent = crate::schema::tenant_shards::table
     936            0 :                     .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
     937            0 :                     .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
     938            0 :                     .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
     939            0 :                     .load::<TenantShardPersistence>(conn).await?;
     940            0 :                 let parent = if parent.len() != 1 {
     941            0 :                     return Err(DatabaseError::Logical(format!(
     942            0 :                         "Parent shard {parent_shard_id} not found"
     943            0 :                     )));
     944              :                 } else {
     945            0 :                     parent.pop().unwrap()
     946              :                 };
     947            0 :                 for mut shard in children {
     948              :                     // Carry the parent's generation into the child
     949            0 :                     shard.generation = parent.generation;
     950            0 : 
     951            0 :                     debug_assert!(shard.splitting == SplitState::Splitting);
     952            0 :                     diesel::insert_into(tenant_shards)
     953            0 :                         .values(shard)
     954            0 :                         .execute(conn).await?;
     955              :                 }
     956              :             }
     957              : 
     958            0 :             Ok(())
     959            0 :         })
     960            0 :         })
     961            0 :         .await
     962            0 :     }
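
The comment block above explains why child shards are created up front; two invariants in that function are easy to miss: the number of parent rows marked `splitting` must equal the pre-split shard count, and every child starts from its parent's generation. A hedged, std-only sketch of just those rules follows; the `Shard` struct and `begin_split` helper are hypothetical, not the crate's types.

    #[derive(Clone, Debug, PartialEq)]
    struct Shard {
        generation: Option<i32>,
        splitting: bool,
    }

    /// Mark `parents` as splitting and derive `children_per_parent` children from each,
    /// carrying the parent's generation into every child. Fails if the caller's idea of
    /// the pre-split shard count does not match what is actually present.
    fn begin_split(
        parents: &mut [Shard],
        expected_parent_count: usize,
        children_per_parent: usize,
    ) -> Result<Vec<Shard>, String> {
        if parents.len() != expected_parent_count {
            return Err(format!(
                "unexpected existing shard count {} (expected {expected_parent_count})",
                parents.len()
            ));
        }
        let mut children = Vec::new();
        for parent in parents.iter_mut() {
            parent.splitting = true;
            for _ in 0..children_per_parent {
                children.push(Shard {
                    generation: parent.generation, // children inherit the parent's generation
                    splitting: true,
                });
            }
        }
        Ok(children)
    }

    fn main() {
        let mut parents = vec![Shard { generation: Some(7), splitting: false }; 2];
        let children = begin_split(&mut parents, 2, 2).unwrap();
        assert_eq!(children.len(), 4);
        assert!(children.iter().all(|c| c.generation == Some(7) && c.splitting));
    }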
     963              : 
     964              :     // When we finish shard splitting, we must atomically clean up the old shards
     965              :     // and insert the new shards, and clear the splitting marker.
     966            0 :     pub(crate) async fn complete_shard_split(
     967            0 :         &self,
     968            0 :         split_tenant_id: TenantId,
     969            0 :         old_shard_count: ShardCount,
     970            0 :     ) -> DatabaseResult<()> {
     971              :         use crate::schema::tenant_shards::dsl::*;
     972            0 :         self.with_measured_conn(DatabaseOperation::CompleteShardSplit, move |conn| {
     973            0 :             Box::pin(async move {
     974            0 :                 // Drop parent shards
     975            0 :                 diesel::delete(tenant_shards)
     976            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     977            0 :                     .filter(shard_count.eq(old_shard_count.literal() as i32))
     978            0 :                     .execute(conn)
     979            0 :                     .await?;
     980              : 
      981              :                 // Clear splitting flag
     982            0 :                 let updated = diesel::update(tenant_shards)
     983            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
     984            0 :                     .set((splitting.eq(0),))
     985            0 :                     .execute(conn)
     986            0 :                     .await?;
     987            0 :                 debug_assert!(updated > 0);
     988              : 
     989            0 :                 Ok(())
     990            0 :             })
     991            0 :         })
     992            0 :         .await
     993            0 :     }
     994              : 
     995              :     /// Used when the remote part of a shard split failed: we will revert the database state to have only
     996              :     /// the parent shards, with SplitState::Idle.
     997            0 :     pub(crate) async fn abort_shard_split(
     998            0 :         &self,
     999            0 :         split_tenant_id: TenantId,
    1000            0 :         new_shard_count: ShardCount,
    1001            0 :     ) -> DatabaseResult<AbortShardSplitStatus> {
    1002              :         use crate::schema::tenant_shards::dsl::*;
    1003            0 :         self.with_measured_conn(DatabaseOperation::AbortShardSplit, move |conn| {
    1004            0 :             Box::pin(async move {
    1005              :                 // Clear the splitting state on parent shards
    1006            0 :                 let updated = diesel::update(tenant_shards)
    1007            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1008            0 :                     .filter(shard_count.ne(new_shard_count.literal() as i32))
    1009            0 :                     .set((splitting.eq(0),))
    1010            0 :                     .execute(conn)
    1011            0 :                     .await?;
    1012              : 
    1013              :                 // Parent shards are already gone: we cannot abort.
    1014            0 :                 if updated == 0 {
    1015            0 :                     return Ok(AbortShardSplitStatus::Complete);
    1016            0 :                 }
    1017            0 : 
    1018            0 :                 // Sanity check: if parent shards were present, their cardinality should
    1019            0 :                 // be less than the number of child shards.
    1020            0 :                 if updated >= new_shard_count.count() as usize {
    1021            0 :                     return Err(DatabaseError::Logical(format!(
    1022            0 :                         "Unexpected parent shard count {updated} while aborting split to \
    1023            0 :                             count {new_shard_count:?} on tenant {split_tenant_id}"
    1024            0 :                     )));
    1025            0 :                 }
    1026            0 : 
    1027            0 :                 // Erase child shards
    1028            0 :                 diesel::delete(tenant_shards)
    1029            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1030            0 :                     .filter(shard_count.eq(new_shard_count.literal() as i32))
    1031            0 :                     .execute(conn)
    1032            0 :                     .await?;
    1033              : 
    1034            0 :                 Ok(AbortShardSplitStatus::Aborted)
    1035            0 :             })
    1036            0 :         })
    1037            0 :         .await
    1038            0 :     }
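
The abort path above chooses its outcome purely from how many parent rows still carried the splitting flag. A small sketch of that decision, assuming a hypothetical `AbortOutcome` enum that mirrors `AbortShardSplitStatus`:

    #[derive(Debug, PartialEq)]
    enum AbortOutcome {
        /// Parents were already gone: the split had effectively completed.
        Complete,
        /// Parents were reverted to non-splitting state and the children can be erased.
        Aborted,
    }

    fn classify_abort(parents_updated: usize, new_shard_count: u8) -> Result<AbortOutcome, String> {
        if parents_updated == 0 {
            return Ok(AbortOutcome::Complete);
        }
        // Sanity check: a split always increases the shard count, so any surviving
        // parents must be strictly fewer than the intended child shard count.
        if parents_updated >= new_shard_count as usize {
            return Err(format!(
                "unexpected parent shard count {parents_updated} while aborting split to {new_shard_count}"
            ));
        }
        Ok(AbortOutcome::Aborted)
    }

    fn main() {
        assert_eq!(classify_abort(0, 4), Ok(AbortOutcome::Complete));
        assert_eq!(classify_abort(2, 4), Ok(AbortOutcome::Aborted));
        assert!(classify_abort(4, 4).is_err());
    }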
    1039              : 
    1040              :     /// Stores all the latest metadata health updates durably. Updates existing entry on conflict.
    1041              :     ///
    1042              :     /// **Correctness:** `metadata_health_updates` should all belong to the tenant shards managed by the storage controller.
    1043              :     #[allow(dead_code)]
    1044            0 :     pub(crate) async fn update_metadata_health_records(
    1045            0 :         &self,
    1046            0 :         healthy_records: Vec<MetadataHealthPersistence>,
    1047            0 :         unhealthy_records: Vec<MetadataHealthPersistence>,
    1048            0 :         now: chrono::DateTime<chrono::Utc>,
    1049            0 :     ) -> DatabaseResult<()> {
    1050              :         use crate::schema::metadata_health::dsl::*;
    1051              : 
    1052            0 :         let healthy_records = healthy_records.as_slice();
    1053            0 :         let unhealthy_records = unhealthy_records.as_slice();
    1054            0 :         self.with_measured_conn(DatabaseOperation::UpdateMetadataHealth, move |conn| {
    1055            0 :             Box::pin(async move {
    1056            0 :                 diesel::insert_into(metadata_health)
    1057            0 :                     .values(healthy_records)
    1058            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
    1059            0 :                     .do_update()
    1060            0 :                     .set((healthy.eq(true), last_scrubbed_at.eq(now)))
    1061            0 :                     .execute(conn)
    1062            0 :                     .await?;
    1063              : 
    1064            0 :                 diesel::insert_into(metadata_health)
    1065            0 :                     .values(unhealthy_records)
    1066            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
    1067            0 :                     .do_update()
    1068            0 :                     .set((healthy.eq(false), last_scrubbed_at.eq(now)))
    1069            0 :                     .execute(conn)
    1070            0 :                     .await?;
    1071            0 :                 Ok(())
    1072            0 :             })
    1073            0 :         })
    1074            0 :         .await
    1075            0 :     }
    1076              : 
    1077              :     /// Lists all the metadata health records.
    1078              :     #[allow(dead_code)]
    1079            0 :     pub(crate) async fn list_metadata_health_records(
    1080            0 :         &self,
    1081            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1082            0 :         self.with_measured_conn(DatabaseOperation::ListMetadataHealth, move |conn| {
    1083            0 :             Box::pin(async {
    1084            0 :                 Ok(crate::schema::metadata_health::table
    1085            0 :                     .load::<MetadataHealthPersistence>(conn)
    1086            0 :                     .await?)
    1087            0 :             })
    1088            0 :         })
    1089            0 :         .await
    1090            0 :     }
    1091              : 
    1092              :     /// Lists all the metadata health records that are unhealthy.
    1093              :     #[allow(dead_code)]
    1094            0 :     pub(crate) async fn list_unhealthy_metadata_health_records(
    1095            0 :         &self,
    1096            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1097              :         use crate::schema::metadata_health::dsl::*;
    1098            0 :         self.with_measured_conn(
    1099            0 :             DatabaseOperation::ListMetadataHealthUnhealthy,
    1100            0 :             move |conn| {
    1101            0 :                 Box::pin(async {
    1102            0 :                     DatabaseResult::Ok(
    1103            0 :                         crate::schema::metadata_health::table
    1104            0 :                             .filter(healthy.eq(false))
    1105            0 :                             .load::<MetadataHealthPersistence>(conn)
    1106            0 :                             .await?,
    1107              :                     )
    1108            0 :                 })
    1109            0 :             },
    1110            0 :         )
    1111            0 :         .await
    1112            0 :     }
    1113              : 
    1114              :     /// Lists all the metadata health records whose last scrub happened before the given `earlier` time.
    1115              :     #[allow(dead_code)]
    1116            0 :     pub(crate) async fn list_outdated_metadata_health_records(
    1117            0 :         &self,
    1118            0 :         earlier: chrono::DateTime<chrono::Utc>,
    1119            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1120              :         use crate::schema::metadata_health::dsl::*;
    1121              : 
    1122            0 :         self.with_measured_conn(DatabaseOperation::ListMetadataHealthOutdated, move |conn| {
    1123            0 :             Box::pin(async move {
    1124            0 :                 let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
    1125            0 :                 let res = query.load::<MetadataHealthPersistence>(conn).await?;
    1126              : 
    1127            0 :                 Ok(res)
    1128            0 :             })
    1129            0 :         })
    1130            0 :         .await
    1131            0 :     }
    1132              : 
    1133              :     /// Get the current entry from the `leader` table if one exists.
    1134              :     /// It is an error for the table to contain more than one entry.
    1135            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    1136            0 :         let mut leader: Vec<ControllerPersistence> = self
    1137            0 :             .with_measured_conn(DatabaseOperation::GetLeader, move |conn| {
    1138            0 :                 Box::pin(async move {
    1139            0 :                     Ok(crate::schema::controllers::table
    1140            0 :                         .load::<ControllerPersistence>(conn)
    1141            0 :                         .await?)
    1142            0 :                 })
    1143            0 :             })
    1144            0 :             .await?;
    1145              : 
    1146            0 :         if leader.len() > 1 {
    1147            0 :             return Err(DatabaseError::Logical(format!(
    1148            0 :                 "More than one entry present in the leader table: {leader:?}"
    1149            0 :             )));
    1150            0 :         }
    1151            0 : 
    1152            0 :         Ok(leader.pop())
    1153            0 :     }
    1154              : 
    1155              :     /// Update the leader entry with compare-exchange semantics. If `prev` does not
    1156              :     /// match the current leader entry, then the update is treated as a failure.
    1157              :     /// When `prev` is not specified, the update is forced.
    1158            0 :     pub(crate) async fn update_leader(
    1159            0 :         &self,
    1160            0 :         prev: Option<ControllerPersistence>,
    1161            0 :         new: ControllerPersistence,
    1162            0 :     ) -> DatabaseResult<()> {
    1163              :         use crate::schema::controllers::dsl::*;
    1164              : 
    1165            0 :         let updated = self
    1166            0 :             .with_measured_conn(DatabaseOperation::UpdateLeader, move |conn| {
    1167            0 :                 let prev = prev.clone();
    1168            0 :                 let new = new.clone();
    1169            0 :                 Box::pin(async move {
    1170            0 :                     let updated = match &prev {
    1171            0 :                         Some(prev) => {
    1172            0 :                             diesel::update(controllers)
    1173            0 :                                 .filter(address.eq(prev.address.clone()))
    1174            0 :                                 .filter(started_at.eq(prev.started_at))
    1175            0 :                                 .set((
    1176            0 :                                     address.eq(new.address.clone()),
    1177            0 :                                     started_at.eq(new.started_at),
    1178            0 :                                 ))
    1179            0 :                                 .execute(conn)
    1180            0 :                                 .await?
    1181              :                         }
    1182              :                         None => {
    1183            0 :                             diesel::insert_into(controllers)
    1184            0 :                                 .values(new.clone())
    1185            0 :                                 .execute(conn)
    1186            0 :                                 .await?
    1187              :                         }
    1188              :                     };
    1189              : 
    1190            0 :                     Ok(updated)
    1191            0 :                 })
    1192            0 :             })
    1193            0 :             .await?;
    1194              : 
    1195            0 :         if updated == 0 {
    1196            0 :             return Err(DatabaseError::Logical(
    1197            0 :                 "Leader table update failed".to_string(),
    1198            0 :             ));
    1199            0 :         }
    1200            0 : 
    1201            0 :         Ok(())
    1202            0 :     }
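
To make the compare-exchange contract above concrete: the update succeeds only when the stored leader matches `prev` exactly, and an absent `prev` installs `new` unconditionally. A minimal in-memory model follows, assuming a hypothetical `Leader` record; the real method expresses the same rule as a single SQL update or insert.

    #[derive(Clone, Debug, PartialEq)]
    struct Leader {
        address: String,
    }

    /// Compare-exchange on the leader slot: succeed only if the current value matches
    /// `prev`; `prev == None` unconditionally installs `new`.
    fn update_leader(slot: &mut Option<Leader>, prev: Option<Leader>, new: Leader) -> Result<(), String> {
        match prev {
            None => {
                *slot = Some(new);
                Ok(())
            }
            Some(prev) if slot.as_ref() == Some(&prev) => {
                *slot = Some(new);
                Ok(())
            }
            Some(_) => Err("leader table update failed".to_string()),
        }
    }

    fn main() {
        let mut slot = None;
        let a = Leader { address: "controller-a:1234".into() };
        let b = Leader { address: "controller-b:1234".into() };

        update_leader(&mut slot, None, a.clone()).unwrap(); // forced install
        update_leader(&mut slot, Some(a), b.clone()).unwrap(); // CAS succeeds
        let stale = Leader { address: "stale:1234".into() };
        assert!(update_leader(&mut slot, Some(stale), b).is_err()); // CAS fails, slot unchanged
    }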
    1203              : 
    1204              :     /// At startup, load the list of safekeepers on which timelines may be placed
    1205            0 :     pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
    1206            0 :         let safekeepers: Vec<SafekeeperPersistence> = self
    1207            0 :             .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
    1208            0 :                 Box::pin(async move {
    1209            0 :                     Ok(crate::schema::safekeepers::table
    1210            0 :                         .load::<SafekeeperPersistence>(conn)
    1211            0 :                         .await?)
    1212            0 :                 })
    1213            0 :             })
    1214            0 :             .await?;
    1215              : 
    1216            0 :         tracing::info!("list_safekeepers: loaded {} safekeepers", safekeepers.len());
    1217              : 
    1218            0 :         Ok(safekeepers)
    1219            0 :     }
    1220              : 
    1221            0 :     pub(crate) async fn safekeeper_upsert(
    1222            0 :         &self,
    1223            0 :         record: SafekeeperUpsert,
    1224            0 :     ) -> Result<(), DatabaseError> {
    1225              :         use crate::schema::safekeepers::dsl::*;
    1226              : 
    1227            0 :         self.with_conn(move |conn| {
    1228            0 :             let record = record.clone();
    1229            0 :             Box::pin(async move {
    1230            0 :                 let bind = record
    1231            0 :                     .as_insert_or_update()
    1232            0 :                     .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
    1233              : 
    1234            0 :                 let inserted_updated = diesel::insert_into(safekeepers)
    1235            0 :                     .values(&bind)
    1236            0 :                     .on_conflict(id)
    1237            0 :                     .do_update()
    1238            0 :                     .set(&bind)
    1239            0 :                     .execute(conn)
    1240            0 :                     .await?;
    1241              : 
    1242            0 :                 if inserted_updated != 1 {
    1243            0 :                     return Err(DatabaseError::Logical(format!(
    1244            0 :                         "unexpected number of rows ({})",
    1245            0 :                         inserted_updated
    1246            0 :                     )));
    1247            0 :                 }
    1248            0 : 
    1249            0 :                 Ok(())
    1250            0 :             })
    1251            0 :         })
    1252            0 :         .await
    1253            0 :     }
    1254              : 
    1255            0 :     pub(crate) async fn set_safekeeper_scheduling_policy(
    1256            0 :         &self,
    1257            0 :         id_: i64,
    1258            0 :         scheduling_policy_: SkSchedulingPolicy,
    1259            0 :     ) -> Result<(), DatabaseError> {
    1260              :         use crate::schema::safekeepers::dsl::*;
    1261              : 
    1262            0 :         self.with_conn(move |conn| {
    1263            0 :             Box::pin(async move {
    1264            0 :                 #[derive(Insertable, AsChangeset)]
    1265              :                 #[diesel(table_name = crate::schema::safekeepers)]
    1266              :                 struct UpdateSkSchedulingPolicy<'a> {
    1267              :                     id: i64,
    1268              :                     scheduling_policy: &'a str,
    1269              :                 }
    1270            0 :                 let scheduling_policy_ = String::from(scheduling_policy_);
    1271              : 
    1272            0 :                 let rows_affected = diesel::update(safekeepers.filter(id.eq(id_)))
    1273            0 :                     .set(scheduling_policy.eq(scheduling_policy_))
    1274            0 :                     .execute(conn)
    1275            0 :                     .await?;
    1276              : 
    1277            0 :                 if rows_affected != 1 {
    1278            0 :                     return Err(DatabaseError::Logical(format!(
    1279            0 :                         "unexpected number of rows ({rows_affected})",
    1280            0 :                     )));
    1281            0 :                 }
    1282            0 : 
    1283            0 :                 Ok(())
    1284            0 :             })
    1285            0 :         })
    1286            0 :         .await
    1287            0 :     }
    1288              : 
    1289              :     /// Persist timeline. Returns whether the timeline was newly inserted. If it wasn't, we haven't done any writes.
    1290            0 :     pub(crate) async fn insert_timeline(&self, entry: TimelinePersistence) -> DatabaseResult<bool> {
    1291              :         use crate::schema::timelines;
    1292              : 
    1293            0 :         let entry = &entry;
    1294            0 :         self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
    1295            0 :             Box::pin(async move {
    1296            0 :                 let inserted_updated = diesel::insert_into(timelines::table)
    1297            0 :                     .values(entry)
    1298            0 :                     .on_conflict((timelines::tenant_id, timelines::timeline_id))
    1299            0 :                     .do_nothing()
    1300            0 :                     .execute(conn)
    1301            0 :                     .await?;
    1302              : 
    1303            0 :                 match inserted_updated {
    1304            0 :                     0 => Ok(false),
    1305            0 :                     1 => Ok(true),
    1306            0 :                     _ => Err(DatabaseError::Logical(format!(
    1307            0 :                         "unexpected number of rows ({})",
    1308            0 :                         inserted_updated
    1309            0 :                     ))),
    1310              :                 }
    1311            0 :             })
    1312            0 :         })
    1313            0 :         .await
    1314            0 :     }
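
The `do_nothing()` conflict handling above makes the insert idempotent, so the boolean return only reports whether a row was actually written. The same contract, sketched against a std map with placeholder key and value types:

    use std::collections::HashMap;

    /// Insert `value` only if `key` is not already present; report whether we wrote anything.
    fn insert_if_absent(map: &mut HashMap<String, String>, key: &str, value: &str) -> bool {
        if map.contains_key(key) {
            false
        } else {
            map.insert(key.to_string(), value.to_string());
            true
        }
    }

    fn main() {
        let mut timelines = HashMap::new();
        assert!(insert_if_absent(&mut timelines, "tenant/timeline", "v1")); // newly inserted
        assert!(!insert_if_absent(&mut timelines, "tenant/timeline", "v2")); // conflict: no write
        assert_eq!(timelines["tenant/timeline"], "v1");
    }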
    1315              : 
    1316              :     /// Load timeline from db. Returns `None` if not present.
    1317            0 :     pub(crate) async fn get_timeline(
    1318            0 :         &self,
    1319            0 :         tenant_id: TenantId,
    1320            0 :         timeline_id: TimelineId,
    1321            0 :     ) -> DatabaseResult<Option<TimelinePersistence>> {
    1322              :         use crate::schema::timelines::dsl;
    1323              : 
    1324            0 :         let tenant_id = &tenant_id;
    1325            0 :         let timeline_id = &timeline_id;
    1326            0 :         let timeline_from_db = self
    1327            0 :             .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
    1328            0 :                 Box::pin(async move {
    1329            0 :                     let mut from_db: Vec<TimelineFromDb> = dsl::timelines
    1330            0 :                         .filter(
    1331            0 :                             dsl::tenant_id
    1332            0 :                                 .eq(&tenant_id.to_string())
    1333            0 :                                 .and(dsl::timeline_id.eq(&timeline_id.to_string())),
    1334            0 :                         )
    1335            0 :                         .load(conn)
    1336            0 :                         .await?;
    1337            0 :                     if from_db.is_empty() {
    1338            0 :                         return Ok(None);
    1339            0 :                     }
    1340            0 :                     if from_db.len() != 1 {
    1341            0 :                         return Err(DatabaseError::Logical(format!(
    1342            0 :                             "unexpected number of rows ({})",
    1343            0 :                             from_db.len()
    1344            0 :                         )));
    1345            0 :                     }
    1346            0 : 
    1347            0 :                     Ok(Some(from_db.pop().unwrap().into_persistence()))
    1348            0 :                 })
    1349            0 :             })
    1350            0 :             .await?;
    1351              : 
    1352            0 :         Ok(timeline_from_db)
    1353            0 :     }
    1354              :     /// Persist pending op. Returns whether it was newly inserted. If it wasn't, we haven't done any writes.
    1355            0 :     pub(crate) async fn insert_pending_op(
    1356            0 :         &self,
    1357            0 :         entry: TimelinePendingOpPersistence,
    1358            0 :     ) -> DatabaseResult<bool> {
    1359              :         use crate::schema::safekeeper_timeline_pending_ops as skpo;
    1360              :         // This overrides the `filter` fn used in other functions, so contain the mayhem via a function-local use
    1361              :         use diesel::query_dsl::methods::FilterDsl;
    1362              : 
    1363            0 :         let entry = &entry;
    1364            0 :         self.with_measured_conn(DatabaseOperation::InsertTimelineReconcile, move |conn| {
    1365            0 :             Box::pin(async move {
    1366              :                 // For simplicity it makes sense to keep only the last operation
    1367              :                 // per (tenant, timeline, sk) tuple: if we migrated a timeline
    1368              :                 // off a node and are now adding it back, it is not necessary to
    1369              :                 // remove data on it. Hence, generation is not part of the primary
    1370              :                 // key and we override any rows with lower generations here.
    1371            0 :                 let inserted_updated = diesel::insert_into(skpo::table)
    1372            0 :                     .values(entry)
    1373            0 :                     .on_conflict((skpo::tenant_id, skpo::timeline_id, skpo::sk_id))
    1374            0 :                     .do_update()
    1375            0 :                     .set(entry)
    1376            0 :                     .filter(skpo::generation.lt(entry.generation))
    1377            0 :                     .execute(conn)
    1378            0 :                     .await?;
    1379              : 
    1380            0 :                 match inserted_updated {
    1381            0 :                     0 => Ok(false),
    1382            0 :                     1 => Ok(true),
    1383            0 :                     _ => Err(DatabaseError::Logical(format!(
    1384            0 :                         "unexpected number of rows ({})",
    1385            0 :                         inserted_updated
    1386            0 :                     ))),
    1387              :                 }
    1388            0 :             })
    1389            0 :         })
    1390            0 :         .await
    1391            0 :     }
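
The comment above describes the upsert rule: at most one pending op per (tenant, timeline, safekeeper), and an existing row is replaced only by an op with a strictly higher generation. A std-only sketch of that rule, using a hypothetical `PendingOp` type rather than the diesel upsert itself:

    use std::collections::HashMap;

    #[derive(Debug)]
    struct PendingOp {
        generation: i32,
        op_kind: String,
    }

    type Key = (String, String, i64); // (tenant_id, timeline_id, sk_id)

    /// Keep at most one pending op per key, overriding only rows with a lower generation.
    /// Returns whether anything was written.
    fn upsert_pending_op(ops: &mut HashMap<Key, PendingOp>, key: Key, op: PendingOp) -> bool {
        match ops.get(&key) {
            Some(existing) if existing.generation >= op.generation => false, // stale: ignored
            _ => {
                ops.insert(key, op);
                true
            }
        }
    }

    fn main() {
        let mut ops = HashMap::new();
        let key = ("t".to_string(), "tl".to_string(), 3_i64);
        let include = PendingOp { generation: 2, op_kind: "include".into() };
        let stale_exclude = PendingOp { generation: 1, op_kind: "exclude".into() };
        assert!(upsert_pending_op(&mut ops, key.clone(), include));
        // A lower (or equal) generation never replaces what is already stored.
        assert!(!upsert_pending_op(&mut ops, key.clone(), stale_exclude));
        assert_eq!(ops[&key].generation, 2);
    }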
    1392              :     /// Remove persisted pending op.
    1393            0 :     pub(crate) async fn remove_pending_op(
    1394            0 :         &self,
    1395            0 :         tenant_id: TenantId,
    1396            0 :         timeline_id: TimelineId,
    1397            0 :         sk_id: NodeId,
    1398            0 :         generation: u32,
    1399            0 :     ) -> DatabaseResult<()> {
    1400              :         use crate::schema::safekeeper_timeline_pending_ops::dsl;
    1401              : 
    1402            0 :         let tenant_id = &tenant_id;
    1403            0 :         let timeline_id = &timeline_id;
    1404            0 :         self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
    1405            0 :             Box::pin(async move {
    1406            0 :                 diesel::delete(dsl::safekeeper_timeline_pending_ops)
    1407            0 :                     .filter(dsl::tenant_id.eq(tenant_id.to_string()))
    1408            0 :                     .filter(dsl::timeline_id.eq(timeline_id.to_string()))
    1409            0 :                     .filter(dsl::sk_id.eq(sk_id.0 as i64))
    1410            0 :                     .filter(dsl::generation.eq(generation as i32))
    1411            0 :                     .execute(conn)
    1412            0 :                     .await?;
    1413            0 :                 Ok(())
    1414            0 :             })
    1415            0 :         })
    1416            0 :         .await
    1417            0 :     }
    1418              : 
    1419              :     /// Load pending operations from db.
    1420            0 :     pub(crate) async fn list_pending_ops(
    1421            0 :         &self,
    1422            0 :         filter_for_sk: Option<NodeId>,
    1423            0 :     ) -> DatabaseResult<Vec<TimelinePendingOpPersistence>> {
    1424              :         use crate::schema::safekeeper_timeline_pending_ops::dsl;
    1425              : 
    1426              :         let filter_opt = filter_for_sk.map(|id| id.0 as i64);
    1427            0 :         let timeline_from_db = self
    1428            0 :             .with_measured_conn(DatabaseOperation::ListTimelineReconcile, move |conn| {
    1429            0 :                 Box::pin(async move {
    1430              :                     // Apply the safekeeper filter only when one was supplied;
    1431              :                     // `None` means "list pending ops for every safekeeper".
    1432            0 :                     let from_db: Vec<TimelinePendingOpPersistence> = match filter_opt {
    1433            0 :                         Some(sk_id_filter) => {
    1434            0 :                             dsl::safekeeper_timeline_pending_ops
    1435            0 :                                 .filter(dsl::sk_id.eq(sk_id_filter))
    1436            0 :                                 .load(conn)
    1437            0 :                                 .await?
    1438              :                         }
    1439            0 :                         None => dsl::safekeeper_timeline_pending_ops.load(conn).await?,
    1440            0 :                     };
    1441            0 :                     Ok(from_db)
    1442            0 :                 })
    1443            0 :             })
    1444            0 :             .await?;
    1445              : 
    1446            0 :         Ok(timeline_from_db)
    1447            0 :     }
    1448              : }
    1449              : 
    1450            0 : pub(crate) fn load_certs() -> anyhow::Result<Arc<rustls::RootCertStore>> {
    1451            0 :     let der_certs = rustls_native_certs::load_native_certs();
    1452            0 : 
    1453            0 :     if !der_certs.errors.is_empty() {
    1454            0 :         anyhow::bail!("could not parse certificates: {:?}", der_certs.errors);
    1455            0 :     }
    1456            0 : 
    1457            0 :     let mut store = rustls::RootCertStore::empty();
    1458            0 :     store.add_parsable_certificates(der_certs.certs);
    1459            0 :     Ok(Arc::new(store))
    1460            0 : }
    1461              : 
    1462              : #[derive(Debug)]
    1463              : /// A verifier that accepts all certificates (but still logs any validation error)
    1464              : struct AcceptAll(Arc<WebPkiServerVerifier>);
    1465              : impl ServerCertVerifier for AcceptAll {
    1466            0 :     fn verify_server_cert(
    1467            0 :         &self,
    1468            0 :         end_entity: &rustls::pki_types::CertificateDer<'_>,
    1469            0 :         intermediates: &[rustls::pki_types::CertificateDer<'_>],
    1470            0 :         server_name: &rustls::pki_types::ServerName<'_>,
    1471            0 :         ocsp_response: &[u8],
    1472            0 :         now: rustls::pki_types::UnixTime,
    1473            0 :     ) -> Result<ServerCertVerified, rustls::Error> {
    1474            0 :         let r =
    1475            0 :             self.0
    1476            0 :                 .verify_server_cert(end_entity, intermediates, server_name, ocsp_response, now);
    1477            0 :         if let Err(err) = r {
    1478            0 :             tracing::info!(
    1479              :                 ?server_name,
    1480            0 :                 "ignoring db connection TLS validation error: {err:?}"
    1481              :             );
    1482            0 :             return Ok(ServerCertVerified::assertion());
    1483            0 :         }
    1484            0 :         r
    1485            0 :     }
    1486            0 :     fn verify_tls12_signature(
    1487            0 :         &self,
    1488            0 :         message: &[u8],
    1489            0 :         cert: &rustls::pki_types::CertificateDer<'_>,
    1490            0 :         dss: &rustls::DigitallySignedStruct,
    1491            0 :     ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
    1492            0 :         self.0.verify_tls12_signature(message, cert, dss)
    1493            0 :     }
    1494            0 :     fn verify_tls13_signature(
    1495            0 :         &self,
    1496            0 :         message: &[u8],
    1497            0 :         cert: &rustls::pki_types::CertificateDer<'_>,
    1498            0 :         dss: &rustls::DigitallySignedStruct,
    1499            0 :     ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
    1500            0 :         self.0.verify_tls13_signature(message, cert, dss)
    1501            0 :     }
    1502            0 :     fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
    1503            0 :         self.0.supported_verify_schemes()
    1504            0 :     }
    1505              : }
    1506              : 
    1507              : /// Loads the root certificates and constructs a client config suitable for connecting.
    1508              : /// This function is blocking.
    1509            0 : fn client_config_with_root_certs() -> anyhow::Result<rustls::ClientConfig> {
    1510            0 :     let client_config =
    1511            0 :         rustls::ClientConfig::builder_with_provider(Arc::new(ring::default_provider()))
    1512            0 :             .with_safe_default_protocol_versions()
    1513            0 :             .expect("ring should support the default protocol versions");
    1514              :     static DO_CERT_CHECKS: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
    1515            0 :     let do_cert_checks =
    1516            0 :         DO_CERT_CHECKS.get_or_init(|| std::env::var("STORCON_DB_CERT_CHECKS").is_ok());
    1517            0 :     Ok(if *do_cert_checks {
    1518            0 :         client_config
    1519            0 :             .with_root_certificates(load_certs()?)
    1520            0 :             .with_no_client_auth()
    1521              :     } else {
    1522            0 :         let verifier = AcceptAll(
    1523              :             WebPkiServerVerifier::builder_with_provider(
    1524            0 :                 load_certs()?,
    1525            0 :                 Arc::new(ring::default_provider()),
    1526            0 :             )
    1527            0 :             .build()?,
    1528              :         );
    1529            0 :         client_config
    1530            0 :             .dangerous()
    1531            0 :             .with_custom_certificate_verifier(Arc::new(verifier))
    1532            0 :             .with_no_client_auth()
    1533              :     })
    1534            0 : }
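
The strict/lenient decision above is driven by the `STORCON_DB_CERT_CHECKS` environment variable, cached in a `OnceLock` so the lookup happens only once per process. The pattern in isolation (the `strict_tls` helper is illustrative, not part of the crate):

    use std::sync::OnceLock;

    /// Decide once, process-wide, whether strict TLS verification is enabled.
    /// STORCON_DB_CERT_CHECKS is the same switch consulted by the function above.
    fn strict_tls() -> bool {
        static DO_CERT_CHECKS: OnceLock<bool> = OnceLock::new();
        *DO_CERT_CHECKS.get_or_init(|| std::env::var("STORCON_DB_CERT_CHECKS").is_ok())
    }

    fn main() {
        println!("strict TLS verification: {}", strict_tls());
    }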
    1535              : 
    1536            0 : fn establish_connection_rustls(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
    1537            0 :     let fut = async {
    1538              :         // We first set up the way we want rustls to work.
    1539            0 :         let rustls_config = client_config_with_root_certs()
    1540            0 :             .map_err(|err| ConnectionError::BadConnection(format!("{err:?}")))?;
    1541            0 :         let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
    1542            0 :         let (client, conn) = tokio_postgres::connect(config, tls)
    1543            0 :             .await
    1544            0 :             .map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
    1545              : 
    1546            0 :         AsyncPgConnection::try_from_client_and_connection(client, conn).await
    1547            0 :     };
    1548            0 :     fut.boxed()
    1549            0 : }
    1550              : 
    1551              : #[cfg_attr(test, test)]
    1552            1 : fn test_config_debug_censors_password() {
    1553            1 :     let has_pw =
    1554            1 :         "host=/var/lib/postgresql,localhost port=1234 user=specialuser password='NOT ALLOWED TAG'";
    1555            1 :     let has_pw_cfg = has_pw.parse::<tokio_postgres::Config>().unwrap();
    1556            1 :     assert!(format!("{has_pw_cfg:?}").contains("specialuser"));
    1557              :     // Ensure that the password is not leaked by the debug impl
    1558            1 :     assert!(!format!("{has_pw_cfg:?}").contains("NOT ALLOWED TAG"));
    1559            1 : }
    1560              : 
    1561            0 : fn log_postgres_connstr_info(config_str: &str) -> anyhow::Result<()> {
    1562            0 :     let config = config_str
    1563            0 :         .parse::<tokio_postgres::Config>()
    1564            0 :         .map_err(|_e| anyhow::anyhow!("Couldn't parse config str"))?;
    1565              :     // We use debug formatting here, and use a unit test to ensure that we don't leak the password.
    1566              :     // To make extra sure the test gets ran, run it every time the function is called
    1567              :     // (this is rather cold code, we can afford it).
    1568              :     #[cfg(not(test))]
    1569            0 :     test_config_debug_censors_password();
    1570            0 :     tracing::info!("database connection config: {config:?}");
    1571            0 :     Ok(())
    1572            0 : }
    1573              : 
    1574              : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
    1575              : #[derive(
    1576            0 :     QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
    1577              : )]
    1578              : #[diesel(table_name = crate::schema::tenant_shards)]
    1579              : pub(crate) struct TenantShardPersistence {
    1580              :     #[serde(default)]
    1581              :     pub(crate) tenant_id: String,
    1582              :     #[serde(default)]
    1583              :     pub(crate) shard_number: i32,
    1584              :     #[serde(default)]
    1585              :     pub(crate) shard_count: i32,
    1586              :     #[serde(default)]
    1587              :     pub(crate) shard_stripe_size: i32,
    1588              : 
    1589              :     // Latest generation number: next time we attach, increment this
    1590              :     // and use the incremented number when attaching.
    1591              :     //
    1592              :     // Generation is only None when first onboarding a tenant, where it may
    1593              :     // be in PlacementPolicy::Secondary and therefore have no valid generation state.
    1594              :     pub(crate) generation: Option<i32>,
    1595              : 
    1596              :     // Currently attached pageserver
    1597              :     #[serde(rename = "pageserver")]
    1598              :     pub(crate) generation_pageserver: Option<i64>,
    1599              : 
    1600              :     #[serde(default)]
    1601              :     pub(crate) placement_policy: String,
    1602              :     #[serde(default)]
    1603              :     pub(crate) splitting: SplitState,
    1604              :     #[serde(default)]
    1605              :     pub(crate) config: String,
    1606              :     #[serde(default)]
    1607              :     pub(crate) scheduling_policy: String,
    1608              : 
    1609              :     // Hint that we should attempt to schedule this tenant shard in the given
    1610              :     // availability zone in order to minimise the chances of cross-AZ communication
    1611              :     // with compute.
    1612              :     pub(crate) preferred_az_id: Option<String>,
    1613              : }
    1614              : 
    1615              : impl TenantShardPersistence {
    1616            0 :     fn get_shard_count(&self) -> Result<ShardCount, ShardConfigError> {
    1617            0 :         self.shard_count
    1618            0 :             .try_into()
    1619            0 :             .map(ShardCount)
    1620            0 :             .map_err(|_| ShardConfigError::InvalidCount)
    1621            0 :     }
    1622              : 
    1623            0 :     fn get_shard_number(&self) -> Result<ShardNumber, ShardConfigError> {
    1624            0 :         self.shard_number
    1625            0 :             .try_into()
    1626            0 :             .map(ShardNumber)
    1627            0 :             .map_err(|_| ShardConfigError::InvalidNumber)
    1628            0 :     }
    1629              : 
    1630            0 :     fn get_stripe_size(&self) -> Result<ShardStripeSize, ShardConfigError> {
    1631            0 :         self.shard_stripe_size
    1632            0 :             .try_into()
    1633            0 :             .map(ShardStripeSize)
    1634            0 :             .map_err(|_| ShardConfigError::InvalidStripeSize)
    1635            0 :     }
    1636              : 
    1637            0 :     pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
    1638            0 :         if self.shard_count == 0 {
    1639              :             // NB: carry over the stripe size from the persisted record, to avoid consistency check
    1640              :             // failures if the persisted value differs from the default stripe size. The stripe size
    1641              :             // doesn't really matter for unsharded tenants anyway.
    1642              :             Ok(ShardIdentity::unsharded_with_stripe_size(
    1643            0 :                 self.get_stripe_size()?,
    1644              :             ))
    1645              :         } else {
    1646              :             Ok(ShardIdentity::new(
    1647            0 :                 self.get_shard_number()?,
    1648            0 :                 self.get_shard_count()?,
    1649            0 :                 self.get_stripe_size()?,
    1650            0 :             )?)
    1651              :         }
    1652            0 :     }
    1653              : 
    1654            0 :     pub(crate) fn get_tenant_shard_id(&self) -> anyhow::Result<TenantShardId> {
    1655            0 :         Ok(TenantShardId {
    1656            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1657            0 :             shard_number: self.get_shard_number()?,
    1658            0 :             shard_count: self.get_shard_count()?,
    1659              :         })
    1660            0 :     }
    1661              : }
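
The getters above funnel the raw `i32` columns through checked conversions so that corrupt rows surface as `ShardConfigError`s instead of panics, with `shard_count == 0` meaning "unsharded". A hedged sketch of that conversion pattern, using only std and a hypothetical error enum:

    #[derive(Debug, PartialEq)]
    enum ConfigError {
        InvalidCount,
        InvalidNumber,
    }

    /// Convert the raw database columns into validated u8 shard parameters.
    fn parse_shard(shard_number: i32, shard_count: i32) -> Result<(u8, u8), ConfigError> {
        let number = u8::try_from(shard_number).map_err(|_| ConfigError::InvalidNumber)?;
        let count = u8::try_from(shard_count).map_err(|_| ConfigError::InvalidCount)?;
        Ok((number, count))
    }

    fn main() {
        assert_eq!(parse_shard(0, 0), Ok((0, 0))); // count 0 denotes an unsharded tenant
        assert_eq!(parse_shard(3, 8), Ok((3, 8)));
        assert_eq!(parse_shard(-1, 8), Err(ConfigError::InvalidNumber));
        assert_eq!(parse_shard(0, 1000), Err(ConfigError::InvalidCount));
    }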
    1662              : 
    1663              : /// Parts of [`crate::node::Node`] that are stored durably
    1664            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
    1665              : #[diesel(table_name = crate::schema::nodes)]
    1666              : pub(crate) struct NodePersistence {
    1667              :     pub(crate) node_id: i64,
    1668              :     pub(crate) scheduling_policy: String,
    1669              :     pub(crate) listen_http_addr: String,
    1670              :     pub(crate) listen_http_port: i32,
    1671              :     pub(crate) listen_pg_addr: String,
    1672              :     pub(crate) listen_pg_port: i32,
    1673              :     pub(crate) availability_zone_id: String,
    1674              :     pub(crate) listen_https_port: Option<i32>,
    1675              : }
    1676              : 
    1677              : /// Tenant metadata health status that is stored durably.
    1678            0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
    1679              : #[diesel(table_name = crate::schema::metadata_health)]
    1680              : pub(crate) struct MetadataHealthPersistence {
    1681              :     #[serde(default)]
    1682              :     pub(crate) tenant_id: String,
    1683              :     #[serde(default)]
    1684              :     pub(crate) shard_number: i32,
    1685              :     #[serde(default)]
    1686              :     pub(crate) shard_count: i32,
    1687              : 
    1688              :     pub(crate) healthy: bool,
    1689              :     pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1690              : }
    1691              : 
    1692              : impl MetadataHealthPersistence {
    1693            0 :     pub fn new(
    1694            0 :         tenant_shard_id: TenantShardId,
    1695            0 :         healthy: bool,
    1696            0 :         last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    1697            0 :     ) -> Self {
    1698            0 :         let tenant_id = tenant_shard_id.tenant_id.to_string();
    1699            0 :         let shard_number = tenant_shard_id.shard_number.0 as i32;
    1700            0 :         let shard_count = tenant_shard_id.shard_count.literal() as i32;
    1701            0 : 
    1702            0 :         MetadataHealthPersistence {
    1703            0 :             tenant_id,
    1704            0 :             shard_number,
    1705            0 :             shard_count,
    1706            0 :             healthy,
    1707            0 :             last_scrubbed_at,
    1708            0 :         }
    1709            0 :     }
    1710              : 
    1711              :     #[allow(dead_code)]
    1712            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    1713            0 :         Ok(TenantShardId {
    1714            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    1715            0 :             shard_number: ShardNumber(self.shard_number as u8),
    1716            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    1717              :         })
    1718            0 :     }
    1719              : }
    1720              : 
    1721              : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
    1722            0 :     fn from(value: MetadataHealthPersistence) -> Self {
    1723            0 :         MetadataHealthRecord {
    1724            0 :             tenant_shard_id: value
    1725            0 :                 .get_tenant_shard_id()
    1726            0 :                 .expect("stored tenant id should be valid"),
    1727            0 :             healthy: value.healthy,
    1728            0 :             last_scrubbed_at: value.last_scrubbed_at,
    1729            0 :         }
    1730            0 :     }
    1731              : }
    1732              : 
    1733              : #[derive(
    1734            0 :     Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
    1735              : )]
    1736              : #[diesel(table_name = crate::schema::controllers)]
    1737              : pub(crate) struct ControllerPersistence {
    1738              :     pub(crate) address: String,
    1739              :     pub(crate) started_at: chrono::DateTime<chrono::Utc>,
    1740              : }
    1741              : 
    1742              : // What we store in the database
    1743            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
    1744              : #[diesel(table_name = crate::schema::safekeepers)]
    1745              : pub(crate) struct SafekeeperPersistence {
    1746              :     pub(crate) id: i64,
    1747              :     pub(crate) region_id: String,
    1748              :     /// 1 is special, it means just created (not currently posted to storcon).
    1749              :     /// Zero or negative is not really expected.
    1750              :     /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
    1751              :     pub(crate) version: i64,
    1752              :     pub(crate) host: String,
    1753              :     pub(crate) port: i32,
    1754              :     pub(crate) http_port: i32,
    1755              :     pub(crate) availability_zone_id: String,
    1756              :     pub(crate) scheduling_policy: SkSchedulingPolicyFromSql,
    1757              :     pub(crate) https_port: Option<i32>,
    1758              : }
    1759              : 
    1760              : /// Wrapper struct around [`SkSchedulingPolicy`] because both it and [`FromSql`] are from foreign crates,
    1761              : /// and we don't want to make [`safekeeper_api`] depend on [`diesel`].
    1762            0 : #[derive(Serialize, Deserialize, FromSqlRow, Eq, PartialEq, Debug, Copy, Clone)]
    1763              : pub(crate) struct SkSchedulingPolicyFromSql(pub(crate) SkSchedulingPolicy);
    1764              : 
    1765              : impl From<SkSchedulingPolicy> for SkSchedulingPolicyFromSql {
    1766            0 :     fn from(value: SkSchedulingPolicy) -> Self {
    1767            0 :         SkSchedulingPolicyFromSql(value)
    1768            0 :     }
    1769              : }
    1770              : 
    1771              : impl FromSql<diesel::sql_types::VarChar, Pg> for SkSchedulingPolicyFromSql {
    1772            0 :     fn from_sql(
    1773            0 :         bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
    1774            0 :     ) -> diesel::deserialize::Result<Self> {
    1775            0 :         let bytes = bytes.as_bytes();
    1776            0 :         match core::str::from_utf8(bytes) {
    1777            0 :             Ok(s) => match SkSchedulingPolicy::from_str(s) {
    1778            0 :                 Ok(policy) => Ok(SkSchedulingPolicyFromSql(policy)),
    1779            0 :                 Err(e) => Err(format!("can't parse: {e}").into()),
    1780              :             },
    1781            0 :             Err(e) => Err(format!("invalid UTF-8 for scheduling policy: {e}").into()),
    1782              :         }
    1783            0 :     }
    1784              : }
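// Illustrative sketch, not part of persistence.rs: Rust's orphan rule forbids
// `impl FromSql<VarChar, Pg> for SkSchedulingPolicy` directly (both the trait and
// the type are foreign), which is why the newtype above exists. Call sites wrap
// via the `From` impl and unwrap with plain field access; the helper below is
// hypothetical and only documents that direction.
#[cfg(test)]
mod scheduling_policy_newtype_sketch {
    use pageserver_api::controller_api::SkSchedulingPolicy;

    use super::SkSchedulingPolicyFromSql;

    #[allow(dead_code)]
    fn wrap_and_unwrap(policy: SkSchedulingPolicy) -> SkSchedulingPolicy {
        let wrapped: SkSchedulingPolicyFromSql = policy.into(); // via `From<SkSchedulingPolicy>`
        wrapped.0 // the inner policy is a public field, so `.0` unwraps it again
    }
}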
    1785              : 
    1786              : impl SafekeeperPersistence {
    1787            0 :     pub(crate) fn from_upsert(
    1788            0 :         upsert: SafekeeperUpsert,
    1789            0 :         scheduling_policy: SkSchedulingPolicy,
    1790            0 :     ) -> Self {
    1791            0 :         crate::persistence::SafekeeperPersistence {
    1792            0 :             id: upsert.id,
    1793            0 :             region_id: upsert.region_id,
    1794            0 :             version: upsert.version,
    1795            0 :             host: upsert.host,
    1796            0 :             port: upsert.port,
    1797            0 :             http_port: upsert.http_port,
    1798            0 :             https_port: upsert.https_port,
    1799            0 :             availability_zone_id: upsert.availability_zone_id,
    1800            0 :             scheduling_policy: SkSchedulingPolicyFromSql(scheduling_policy),
    1801            0 :         }
    1802            0 :     }
    1803            0 :     pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
    1804            0 :         Ok(SafekeeperDescribeResponse {
    1805            0 :             id: NodeId(self.id as u64),
    1806            0 :             region_id: self.region_id.clone(),
    1807            0 :             version: self.version,
    1808            0 :             host: self.host.clone(),
    1809            0 :             port: self.port,
    1810            0 :             http_port: self.http_port,
    1811            0 :             https_port: self.https_port,
    1812            0 :             availability_zone_id: self.availability_zone_id.clone(),
    1813            0 :             scheduling_policy: self.scheduling_policy.0,
    1814            0 :         })
    1815            0 :     }
    1816              : }
    1817              : 
    1818              : /// What we expect from the upsert HTTP API.
    1819            0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
    1820              : pub(crate) struct SafekeeperUpsert {
    1821              :     pub(crate) id: i64,
    1822              :     pub(crate) region_id: String,
    1823              :     /// 1 is special: it means the safekeeper was just created (not yet posted to storcon).
    1824              :     /// Zero or negative values are not expected.
    1825              :     /// Otherwise this is the number from the `release-$(number_of_commits_on_branch)` tag.
    1826              :     pub(crate) version: i64,
    1827              :     pub(crate) host: String,
    1828              :     pub(crate) port: i32,
    1829              :     /// The active flag will not be stored in the database and will be ignored.
    1830              :     pub(crate) active: Option<bool>,
    1831              :     pub(crate) http_port: i32,
    1832              :     pub(crate) https_port: Option<i32>,
    1833              :     pub(crate) availability_zone_id: String,
    1834              : }
    1835              : 
    1836              : impl SafekeeperUpsert {
    1837            0 :     fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
    1838            0 :         if self.version < 0 {
    1839            0 :             anyhow::bail!("negative version: {}", self.version);
    1840            0 :         }
    1841            0 :         Ok(InsertUpdateSafekeeper {
    1842            0 :             id: self.id,
    1843            0 :             region_id: &self.region_id,
    1844            0 :             version: self.version,
    1845            0 :             host: &self.host,
    1846            0 :             port: self.port,
    1847            0 :             http_port: self.http_port,
    1848            0 :             https_port: self.https_port,
    1849            0 :             availability_zone_id: &self.availability_zone_id,
    1850            0 :             // None means we don't want to update this column; it can be updated through other means.
    1851            0 :             scheduling_policy: None,
    1852            0 :         })
    1853            0 :     }
    1854              : }
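// Illustrative sketch, not part of persistence.rs: a hypothetical test showing the
// validation in `as_insert_or_update` above, i.e. a negative `version` is rejected
// before anything reaches the database. The field values are made up.
#[cfg(test)]
mod safekeeper_upsert_sketch {
    use super::SafekeeperUpsert;

    #[test]
    fn negative_version_is_rejected() {
        let upsert = SafekeeperUpsert {
            id: 1,
            region_id: "eu-central-1".to_string(),
            version: -1, // invalid on purpose
            host: "sk-1.local".to_string(),
            port: 5454,
            active: None, // ignored and never stored, per the field's doc comment
            http_port: 7676,
            https_port: None,
            availability_zone_id: "eu-central-1a".to_string(),
        };
        assert!(upsert.as_insert_or_update().is_err());
    }
}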
    1855              : 
    1856            0 : #[derive(Insertable, AsChangeset)]
    1857              : #[diesel(table_name = crate::schema::safekeepers)]
    1858              : struct InsertUpdateSafekeeper<'a> {
    1859              :     id: i64,
    1860              :     region_id: &'a str,
    1861              :     version: i64,
    1862              :     host: &'a str,
    1863              :     port: i32,
    1864              :     http_port: i32,
    1865              :     https_port: Option<i32>,
    1866              :     availability_zone_id: &'a str,
    1867              :     scheduling_policy: Option<&'a str>,
    1868              : }
    1869              : 
    1870            0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
    1871              : #[diesel(sql_type = crate::schema::sql_types::PgLsn)]
    1872              : pub(crate) struct LsnWrapper(pub(crate) Lsn);
    1873              : 
    1874              : impl From<Lsn> for LsnWrapper {
    1875            0 :     fn from(value: Lsn) -> Self {
    1876            0 :         LsnWrapper(value)
    1877            0 :     }
    1878              : }
    1879              : 
    1880              : impl FromSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
    1881            0 :     fn from_sql(
    1882            0 :         bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
    1883            0 :     ) -> diesel::deserialize::Result<Self> {
    1884            0 :         let byte_arr: diesel::deserialize::Result<[u8; 8]> = bytes
    1885            0 :             .as_bytes()
    1886            0 :             .try_into()
    1887            0 :             .map_err(|_| "Can't obtain lsn from sql".into());
    1888            0 :         Ok(LsnWrapper(Lsn(u64::from_be_bytes(byte_arr?))))
    1889            0 :     }
    1890              : }
    1891              : 
    1892              : impl ToSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
    1893            0 :     fn to_sql<'b>(
    1894            0 :         &'b self,
    1895            0 :         out: &mut diesel::serialize::Output<'b, '_, Pg>,
    1896            0 :     ) -> diesel::serialize::Result {
    1897            0 :         out.write_all(&u64::to_be_bytes(self.0.0))
    1898            0 :             .map(|_| IsNull::No)
    1899            0 :             .map_err(Into::into)
    1900            0 :     }
    1901              : }
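// Illustrative sketch, not part of persistence.rs: both impls above treat the
// Postgres `pg_lsn` wire value as an 8-byte big-endian unsigned integer. A
// hypothetical round-trip check over that raw encoding:
#[cfg(test)]
mod pg_lsn_wire_format_sketch {
    use utils::lsn::Lsn;

    #[test]
    fn lsn_survives_be_byte_round_trip() {
        let lsn = Lsn(0x0000_0001_6B37_2D28); // arbitrary example value
        let wire = u64::to_be_bytes(lsn.0); // what `to_sql` writes
        let decoded = Lsn(u64::from_be_bytes(wire)); // what `from_sql` reconstructs
        assert_eq!(lsn, decoded);
    }
}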
    1902              : 
    1903            0 : #[derive(Insertable, AsChangeset, Queryable, Selectable, Clone)]
    1904              : #[diesel(table_name = crate::schema::timelines)]
    1905              : pub(crate) struct TimelinePersistence {
    1906              :     pub(crate) tenant_id: String,
    1907              :     pub(crate) timeline_id: String,
    1908              :     pub(crate) start_lsn: LsnWrapper,
    1909              :     pub(crate) generation: i32,
    1910              :     pub(crate) sk_set: Vec<i64>,
    1911              :     pub(crate) new_sk_set: Option<Vec<i64>>,
    1912              :     pub(crate) cplane_notified_generation: i32,
    1913              :     pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
    1914              : }
    1915              : 
    1916              : /// This is separate from [TimelinePersistence] only because postgres allows NULLs
    1917              : /// in arrays and there is no way to forbid that at the schema level. Hence diesel
    1918              : /// wants `sk_set` to be `Vec<Option<i64>>` instead of `Vec<i64>` for
    1919              : /// Queryable/Selectable. It does, however, allow insertions without the redundant
    1920              : /// Option(s), so [TimelinePersistence] doesn't need them.
    1921            0 : #[derive(Queryable, Selectable)]
    1922              : #[diesel(table_name = crate::schema::timelines)]
    1923              : pub(crate) struct TimelineFromDb {
    1924              :     pub(crate) tenant_id: String,
    1925              :     pub(crate) timeline_id: String,
    1926              :     pub(crate) start_lsn: LsnWrapper,
    1927              :     pub(crate) generation: i32,
    1928              :     pub(crate) sk_set: Vec<Option<i64>>,
    1929              :     pub(crate) new_sk_set: Option<Vec<Option<i64>>>,
    1930              :     pub(crate) cplane_notified_generation: i32,
    1931              :     pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
    1932              : }
    1933              : 
    1934              : impl TimelineFromDb {
    1935            0 :     fn into_persistence(self) -> TimelinePersistence {
    1936            0 :         // We should never encounter NULL entries in the sets, but we still need to filter them out,
    1937            0 :         // because (to our knowledge) there is no way to forbid them in a schema that diesel recognizes.
    1938            0 :         let sk_set = self.sk_set.into_iter().flatten().collect::<Vec<_>>();
    1939            0 :         let new_sk_set = self
    1940            0 :             .new_sk_set
    1941            0 :             .map(|s| s.into_iter().flatten().collect::<Vec<_>>());
    1942            0 :         TimelinePersistence {
    1943            0 :             tenant_id: self.tenant_id,
    1944            0 :             timeline_id: self.timeline_id,
    1945            0 :             start_lsn: self.start_lsn,
    1946            0 :             generation: self.generation,
    1947            0 :             sk_set,
    1948            0 :             new_sk_set,
    1949            0 :             cplane_notified_generation: self.cplane_notified_generation,
    1950            0 :             deleted_at: self.deleted_at,
    1951            0 :         }
    1952            0 :     }
    1953              : }
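// Illustrative sketch, not part of persistence.rs: `into_persistence` above leans
// on `Iterator::flatten` to drop NULL (i.e. `None`) entries when converting the
// NULL-able array columns into plain `Vec<i64>`:
#[cfg(test)]
mod flatten_sketch {
    #[test]
    fn none_entries_are_dropped() {
        let from_db: Vec<Option<i64>> = vec![Some(1), None, Some(3)];
        let flattened: Vec<i64> = from_db.into_iter().flatten().collect();
        assert_eq!(flattened, vec![1, 3]);
    }
}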
    1954              : 
    1955            0 : #[derive(Insertable, AsChangeset, Queryable, Selectable, Clone)]
    1956              : #[diesel(table_name = crate::schema::safekeeper_timeline_pending_ops)]
    1957              : pub(crate) struct TimelinePendingOpPersistence {
    1958              :     pub(crate) sk_id: i64,
    1959              :     pub(crate) tenant_id: String,
    1960              :     pub(crate) timeline_id: String,
    1961              :     pub(crate) generation: i32,
    1962              :     pub(crate) op_kind: SafekeeperTimelineOpKind,
    1963              : }
    1964              : 
    1965            0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
    1966              : #[diesel(sql_type = diesel::sql_types::VarChar)]
    1967              : pub(crate) enum SafekeeperTimelineOpKind {
    1968              :     Pull,
    1969              :     Exclude,
    1970              :     Delete,
    1971              : }
    1972              : 
    1973              : impl FromSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
    1974            0 :     fn from_sql(
    1975            0 :         bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
    1976            0 :     ) -> diesel::deserialize::Result<Self> {
    1977            0 :         let bytes = bytes.as_bytes();
    1978            0 :         match core::str::from_utf8(bytes) {
    1979            0 :             Ok(s) => match s {
    1980            0 :                 "pull" => Ok(SafekeeperTimelineOpKind::Pull),
    1981            0 :                 "exclude" => Ok(SafekeeperTimelineOpKind::Exclude),
    1982            0 :                 "delete" => Ok(SafekeeperTimelineOpKind::Delete),
    1983            0 :                 _ => Err(format!("can't parse: {s}").into()),
    1984              :             },
    1985            0 :             Err(e) => Err(format!("invalid UTF-8 for op_kind: {e}").into()),
    1986              :         }
    1987            0 :     }
    1988              : }
    1989              : 
    1990              : impl ToSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
    1991            0 :     fn to_sql<'b>(
    1992            0 :         &'b self,
    1993            0 :         out: &mut diesel::serialize::Output<'b, '_, Pg>,
    1994            0 :     ) -> diesel::serialize::Result {
    1995            0 :         let kind_str = match self {
    1996            0 :             SafekeeperTimelineOpKind::Pull => "pull",
    1997            0 :             SafekeeperTimelineOpKind::Exclude => "exclude",
    1998            0 :             SafekeeperTimelineOpKind::Delete => "delete",
    1999              :         };
    2000            0 :         out.write_all(kind_str.as_bytes())
    2001            0 :             .map(|_| IsNull::No)
    2002            0 :             .map_err(Into::into)
    2003            0 :     }
    2004              : }
        

Generated by: LCOV version 2.1-beta