LCOV - code coverage report
Current view: top level - storage_controller/src - persistence.rs (source / functions)
Test:      950df2668cb713840d5f6df1a3b961d557e55aff.info
Test Date: 2025-07-30 16:37:54
Coverage:  Lines: 0.4 % (7 of 1826 hit)   Functions: 0.2 % (1 of 640 hit)

            Line data    Source code
       1              : pub(crate) mod split_state;
       2              : use std::collections::HashMap;
       3              : use std::io::Write;
       4              : use std::str::FromStr;
       5              : use std::sync::Arc;
       6              : use std::time::{Duration, Instant};
       7              : 
       8              : use diesel::deserialize::{FromSql, FromSqlRow};
       9              : use diesel::expression::AsExpression;
      10              : use diesel::pg::Pg;
      11              : use diesel::prelude::*;
      12              : use diesel::serialize::{IsNull, ToSql};
      13              : use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
      14              : use diesel_async::pooled_connection::bb8::Pool;
      15              : use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
      16              : use diesel_async::{AsyncPgConnection, RunQueryDsl};
      17              : use diesel_migrations::{EmbeddedMigrations, embed_migrations};
      18              : use futures::FutureExt;
      19              : use futures::future::BoxFuture;
      20              : use itertools::Itertools;
      21              : use pageserver_api::controller_api::{
      22              :     AvailabilityZone, MetadataHealthRecord, NodeLifecycle, NodeSchedulingPolicy, PlacementPolicy,
      23              :     SCSafekeeperTimelinesResponse, SafekeeperDescribeResponse, ShardSchedulingPolicy,
      24              :     SkSchedulingPolicy,
      25              : };
      26              : use pageserver_api::models::{ShardImportStatus, TenantConfig};
      27              : use pageserver_api::shard::{
      28              :     ShardConfigError, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
      29              : };
      30              : use rustls::client::WebPkiServerVerifier;
      31              : use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
      32              : use rustls::crypto::ring;
      33              : use safekeeper_api::membership::SafekeeperGeneration;
      34              : use scoped_futures::ScopedBoxFuture;
      35              : use serde::{Deserialize, Serialize};
      36              : use utils::generation::Generation;
      37              : use utils::id::{NodeId, TenantId, TimelineId};
      38              : use utils::lsn::Lsn;
      39              : 
      40              : use self::split_state::SplitState;
      41              : use crate::hadron_queries::HadronSafekeeperRow;
      42              : use crate::hadron_queries::PageserverAndSafekeeperConnectionInfo;
      43              : use crate::hadron_queries::delete_timeline_safekeepers;
      44              : use crate::hadron_queries::execute_safekeeper_list_timelines;
      45              : use crate::hadron_queries::execute_sk_upsert;
      46              : use crate::hadron_queries::get_pageserver_and_safekeeper_connection_info;
      47              : use crate::hadron_queries::idempotently_persist_or_get_existing_timeline_safekeepers;
      48              : use crate::hadron_queries::scan_safekeepers_and_scheduled_timelines;
      49              : use crate::metrics::{
      50              :     DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
      51              : };
      52              : use crate::node::Node;
      53              : use crate::sk_node::SafeKeeperNode;
      54              : use crate::timeline_import::{
      55              :     TimelineImport, TimelineImportUpdateError, TimelineImportUpdateFollowUp,
      56              : };
      57              : const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
      58              : 
      59              : /// ## What do we store?
      60              : ///
      61              : /// The storage controller service does not store most of its state durably.
      62              : ///
      63              : /// The essential things to store durably are:
      64              : /// - generation numbers, as these must always advance monotonically to ensure data safety.
      65              : /// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
      66              : /// - Node's scheduling policies, as the source of truth for these is something external.
      67              : ///
      68              : /// Other things we store durably as an implementation detail:
       69              : /// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
      70              : ///   but it is operationally simpler to make this service the authority for which nodes
      71              : ///   it talks to.
      72              : ///
      73              : /// ## Performance/efficiency
      74              : ///
      75              : /// The storage controller service does not go via the database for most things: there are
      76              : /// a couple of places where we must, and where efficiency matters:
      77              : /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
      78              : ///   before it can attach a tenant, so this acts as a bound on how fast things like
      79              : ///   failover can happen.
      80              : /// - Pageserver re-attach: we will increment many shards' generations when this happens,
      81              : ///   so it is important to avoid e.g. issuing O(N) queries.
      82              : ///
      83              : /// Database calls relating to nodes have low performance requirements, as they are very rarely
      84              : /// updated, and reads of nodes are always from memory, not the database.  We only require that
      85              : /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
      86              : pub struct Persistence {
      87              :     connection_pool: Pool<AsyncPgConnection>,
      88              : }
      89              : 
      90              : #[derive(Copy, Clone)]
      91              : pub struct PersistenceConfig {
      92              :     max_connections: u32,
      93              :     idle_connection_timeout: Duration,
      94              :     max_connection_lifetime: Duration,
      95              : }
      96              : 
      97              : impl PersistenceConfig {
      98              :     // If unspecified, use neon.com defaults
      99              :     //
     100              :     // The default postgres connection limit is 100.  We use up to 99, to leave one free for a human admin under
     101              :     // normal circumstances.  This assumes we have exclusive use of the database cluster to which we connect.
     102              :     pub const MAX_CONNECTIONS_DEFAULT: u32 = 99;
     103              :     // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
     104              :     pub const IDLE_CONNECTION_TIMEOUT_DEFAULT: Duration = Duration::from_secs(10);
     105              :     pub const MAX_CONNECTION_LIFETIME_DEFAULT: Duration = Duration::from_secs(60);
     106              : 
     107            0 :     pub fn new(
     108            0 :         max_connections: Option<u32>,
     109            0 :         idle_connection_timeout: Option<Duration>,
     110            0 :         max_connection_lifetime: Option<Duration>,
     111            0 :     ) -> Self {
     112            0 :         PersistenceConfig {
     113            0 :             max_connections: max_connections.unwrap_or(Self::MAX_CONNECTIONS_DEFAULT),
     114            0 :             idle_connection_timeout: idle_connection_timeout
     115            0 :                 .unwrap_or(Self::IDLE_CONNECTION_TIMEOUT_DEFAULT),
     116            0 :             max_connection_lifetime: max_connection_lifetime
     117            0 :                 .unwrap_or(Self::MAX_CONNECTION_LIFETIME_DEFAULT),
     118            0 :         }
     119            0 :     }
     120              : }
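
A minimal construction sketch (the call site is an assumption, not part of the measured source): passing None for every option selects the defaults documented above.

    // 99 connections, 10s idle timeout, 60s max connection lifetime.
    let config = PersistenceConfig::new(None, None, None);
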
     121              : 
     122              : /// Legacy format, for use in JSON compat objects in test environment
     123            0 : #[derive(Serialize, Deserialize)]
     124              : struct JsonPersistence {
     125              :     tenants: HashMap<TenantShardId, TenantShardPersistence>,
     126              : }
     127              : 
     128              : #[derive(thiserror::Error, Debug)]
     129              : pub(crate) enum DatabaseError {
     130              :     #[error(transparent)]
     131              :     Query(#[from] diesel::result::Error),
     132              :     #[error(transparent)]
     133              :     Connection(#[from] diesel::result::ConnectionError),
     134              :     #[error(transparent)]
     135              :     ConnectionPool(#[from] diesel_async::pooled_connection::bb8::RunError),
     136              :     #[error("Logical error: {0}")]
     137              :     Logical(String),
     138              :     #[error("Migration error: {0}")]
     139              :     Migration(String),
     140              :     #[error("CAS error: {0}")]
     141              :     Cas(String),
     142              : }
     143              : 
     144              : #[derive(measured::FixedCardinalityLabel, Copy, Clone)]
     145              : pub(crate) enum DatabaseOperation {
     146              :     InsertNode,
     147              :     UpdateNode,
     148              :     DeleteNode,
     149              :     ListNodes,
     150              :     ListTombstones,
     151              :     BeginShardSplit,
     152              :     CompleteShardSplit,
     153              :     AbortShardSplit,
     154              :     Detach,
     155              :     ReAttach,
     156              :     IncrementGeneration,
     157              :     TenantGenerations,
     158              :     ShardGenerations,
     159              :     ListTenantShards,
     160              :     LoadTenant,
     161              :     InsertTenantShards,
     162              :     UpdateTenantShard,
     163              :     DeleteTenant,
     164              :     UpdateTenantConfig,
     165              :     UpdateMetadataHealth,
     166              :     ListMetadataHealth,
     167              :     ListMetadataHealthUnhealthy,
     168              :     ListMetadataHealthOutdated,
     169              :     ListSafekeepers,
     170              :     GetLeader,
     171              :     UpdateLeader,
     172              :     SetPreferredAzs,
     173              :     InsertTimeline,
     174              :     UpdateTimeline,
     175              :     UpdateTimelineMembership,
     176              :     UpdateCplaneNotifiedGeneration,
     177              :     UpdateSkSetNotifiedGeneration,
     178              :     GetTimeline,
     179              :     InsertTimelineReconcile,
     180              :     RemoveTimelineReconcile,
     181              :     ListTimelineReconcile,
     182              :     ListTimelineReconcileStartup,
     183              :     InsertTimelineImport,
     184              :     UpdateTimelineImport,
     185              :     DeleteTimelineImport,
     186              :     ListTimelineImports,
     187              :     IsTenantImportingTimeline,
     188              :     // Brickstore Hadron
     189              :     UpsertSafeKeeperNode,
     190              :     LoadSafeKeepersAndEndpoints,
     191              :     EnsureHadronEndpointTransaction,
     192              :     DeleteHadronEndpoint,
     193              :     GetHadronEndpointInfo,
     194              :     FetchComputeSpec,
     195              :     GetTenandIdByEndpointId,
     196              :     GetTenantShardsByEndpointId,
     197              :     GetComputeNamesByTenantId,
     198              :     GetOrCreateHadronTimelineSafekeeper,
     199              :     FetchPageServerAndSafeKeeperConnections,
     200              :     DeleteHadronTimeline,
     201              :     ListSafekeeperTimelines,
     202              : }
     203              : 
     204              : #[must_use]
     205              : pub(crate) enum AbortShardSplitStatus {
     206              :     /// We aborted the split in the database by reverting to the parent shards
     207              :     Aborted,
     208              :     /// The split had already been persisted.
     209              :     Complete,
     210              : }
     211              : 
     212              : pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
     213              : 
     214              : /// Some methods can operate on either a whole tenant or a single shard
     215              : #[derive(Clone)]
     216              : pub(crate) enum TenantFilter {
     217              :     Tenant(TenantId),
     218              :     Shard(TenantShardId),
     219              : }
     220              : 
     221              : /// Represents the results of looking up generation+pageserver for the shards of a tenant
     222              : pub(crate) struct ShardGenerationState {
     223              :     pub(crate) tenant_shard_id: TenantShardId,
     224              :     pub(crate) generation: Option<Generation>,
     225              :     pub(crate) generation_pageserver: Option<NodeId>,
     226              : }
     227              : 
     228              : // A generous allowance for how many times we may retry serializable transactions
     229              : // before giving up.  This is not expected to be hit: it is a defensive measure in case we
     230              : // somehow engineer a situation where duelling transactions might otherwise live-lock.
     231              : const MAX_RETRIES: usize = 128;
     232              : 
     233              : impl Persistence {
     234              :     // The default postgres connection limit is 100.  We use up to 99, to leave one free for a human admin under
     235              :     // normal circumstances.  This assumes we have exclusive use of the database cluster to which we connect.
     236              :     pub const MAX_CONNECTIONS: u32 = 99;
     237              : 
     238            0 :     pub async fn new(database_url: String, config: PersistenceConfig) -> Self {
     239            0 :         let mut mgr_config = ManagerConfig::default();
     240            0 :         mgr_config.custom_setup = Box::new(establish_connection_rustls);
     241              : 
     242            0 :         let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
     243            0 :             database_url,
     244            0 :             mgr_config,
     245              :         );
     246              : 
     247              :         // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
     248              :         // to execute queries (database queries are not generally on latency-sensitive paths).
     249            0 :         let connection_pool = Pool::builder()
     250            0 :             .max_size(config.max_connections)
     251            0 :             .max_lifetime(Some(config.max_connection_lifetime))
     252            0 :             .idle_timeout(Some(config.idle_connection_timeout))
     253            0 :             // Always keep at least one connection ready to go
     254            0 :             .min_idle(Some(1))
     255            0 :             .test_on_check_out(true)
     256            0 :             .build(manager)
     257            0 :             .await
     258            0 :             .expect("Could not build connection pool");
     259              : 
     260            0 :         Self { connection_pool }
     261            0 :     }
     262              : 
     263              :     /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
      264            0 :     /// database and the storage controller, so the database might not be available right away
     265            0 :     pub async fn await_connection(
     266            0 :         database_url: &str,
     267            0 :         timeout: Duration,
     268            0 :     ) -> Result<(), diesel::ConnectionError> {
     269            0 :         let started_at = Instant::now();
     270            0 :         log_postgres_connstr_info(database_url)
     271            0 :             .map_err(|e| diesel::ConnectionError::InvalidConnectionUrl(e.to_string()))?;
     272              :         loop {
     273            0 :             match establish_connection_rustls(database_url).await {
     274              :                 Ok(_) => {
     275            0 :                     tracing::info!("Connected to database.");
     276            0 :                     return Ok(());
     277              :                 }
     278            0 :                 Err(e) => {
     279            0 :                     if started_at.elapsed() > timeout {
     280            0 :                         return Err(e);
     281              :                     } else {
     282            0 :                         tracing::info!("Database not yet available, waiting... ({e})");
     283            0 :                         tokio::time::sleep(Duration::from_millis(100)).await;
     284              :                     }
     285              :                 }
     286              :             }
     287              :         }
     288            0 :     }
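
A hedged usage sketch for startup (the 30-second budget and surrounding context are assumptions, not taken from this file): tolerate the database restarting concurrently with the storage controller before proceeding.

    // Wait for the database to accept connections, up to an assumed 30s budget.
    Persistence::await_connection(database_url.as_str(), Duration::from_secs(30)).await?;
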
     289              : 
     290              :     /// Execute the diesel migrations that are built into this binary
     291            0 :     pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
     292              :         use diesel_migrations::{HarnessWithOutput, MigrationHarness};
     293              : 
     294              :         // Can't use self.with_conn here as we do spawn_blocking which requires static.
     295            0 :         let conn = self
     296            0 :             .connection_pool
     297            0 :             .dedicated_connection()
     298            0 :             .await
     299            0 :             .map_err(|e| DatabaseError::Migration(e.to_string()))?;
     300            0 :         let mut async_wrapper: AsyncConnectionWrapper<AsyncPgConnection> =
     301            0 :             AsyncConnectionWrapper::from(conn);
     302            0 :         tokio::task::spawn_blocking(move || {
     303            0 :             let mut retry_count = 0;
     304              :             loop {
     305            0 :                 let result = HarnessWithOutput::write_to_stdout(&mut async_wrapper)
     306            0 :                     .run_pending_migrations(MIGRATIONS)
     307            0 :                     .map(|_| ())
     308            0 :                     .map_err(|e| DatabaseError::Migration(e.to_string()));
     309            0 :                 match result {
     310            0 :                     Ok(r) => break Ok(r),
     311              :                     Err(
     312            0 :                         err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
     313              :                             diesel::result::DatabaseErrorKind::SerializationFailure,
     314              :                             _,
     315              :                         )),
     316              :                     ) => {
     317            0 :                         retry_count += 1;
     318            0 :                         if retry_count > MAX_RETRIES {
     319            0 :                             tracing::error!(
     320            0 :                                 "Exceeded max retries on SerializationFailure errors: {err:?}"
     321              :                             );
     322            0 :                             break Err(err);
     323              :                         } else {
     324              :                             // Retry on serialization errors: these are expected, because even though our
     325              :                             // transactions don't fight for the same rows, they will occasionally collide
     326              :                             // on index pages (e.g. increment_generation for unrelated shards can collide)
     327            0 :                             tracing::debug!(
     328            0 :                                 "Retrying transaction on serialization failure {err:?}"
     329              :                             );
     330            0 :                             continue;
     331              :                         }
     332              :                     }
     333            0 :                     Err(e) => break Err(e),
     334              :                 }
     335              :             }
     336            0 :         })
     337            0 :         .await
     338            0 :         .map_err(|e| DatabaseError::Migration(e.to_string()))??;
     339            0 :         Ok(())
     340            0 :     }
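
A hedged startup-order sketch (the surrounding binary and variable names are assumptions): build the connection pool, then apply any pending embedded migrations before serving requests.

    // `new` builds the bb8 pool; `migration_run` then applies ./migrations on a
    // dedicated connection, retrying on serialization failures.
    let persistence = Persistence::new(database_url, persistence_config).await;
    persistence.migration_run().await?;
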
     341              : 
     342              :     /// Wraps `with_conn` in order to collect latency and error metrics
     343            0 :     async fn with_measured_conn<'a, 'b, F, R>(
     344            0 :         &self,
     345            0 :         op: DatabaseOperation,
     346            0 :         func: F,
     347            0 :     ) -> DatabaseResult<R>
     348            0 :     where
     349            0 :         F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
     350            0 :             + Send
     351            0 :             + std::marker::Sync
     352            0 :             + 'a,
     353            0 :         R: Send + 'b,
     354            0 :     {
     355            0 :         let latency = &METRICS_REGISTRY
     356            0 :             .metrics_group
     357            0 :             .storage_controller_database_query_latency;
     358            0 :         let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });
     359              : 
     360            0 :         let res = self.with_conn(func).await;
     361              : 
     362            0 :         if let Err(err) = &res {
     363            0 :             let error_counter = &METRICS_REGISTRY
     364            0 :                 .metrics_group
     365            0 :                 .storage_controller_database_query_error;
     366            0 :             error_counter.inc(DatabaseQueryErrorLabelGroup {
     367            0 :                 error_type: err.error_label(),
     368            0 :                 operation: op,
     369            0 :             })
     370            0 :         }
     371              : 
     372            0 :         res
     373            0 :     }
     374              : 
     375              :     /// Call the provided function with a Diesel database connection in a retry loop
     376            0 :     async fn with_conn<'a, 'b, F, R>(&self, func: F) -> DatabaseResult<R>
     377            0 :     where
     378            0 :         F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
     379            0 :             + Send
     380            0 :             + std::marker::Sync
     381            0 :             + 'a,
     382            0 :         R: Send + 'b,
     383            0 :     {
     384            0 :         let mut retry_count = 0;
     385              :         loop {
     386            0 :             let mut conn = self.connection_pool.get().await?;
     387            0 :             match conn
     388            0 :                 .build_transaction()
     389            0 :                 .serializable()
     390            0 :                 .run(|c| func(c))
     391            0 :                 .await
     392              :             {
     393            0 :                 Ok(r) => break Ok(r),
     394              :                 Err(
     395            0 :                     err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
     396              :                         diesel::result::DatabaseErrorKind::SerializationFailure,
     397              :                         _,
     398              :                     )),
     399              :                 ) => {
     400            0 :                     retry_count += 1;
     401            0 :                     if retry_count > MAX_RETRIES {
     402            0 :                         tracing::error!(
     403            0 :                             "Exceeded max retries on SerializationFailure errors: {err:?}"
     404              :                         );
     405            0 :                         break Err(err);
     406              :                     } else {
     407              :                         // Retry on serialization errors: these are expected, because even though our
     408              :                         // transactions don't fight for the same rows, they will occasionally collide
     409              :                         // on index pages (e.g. increment_generation for unrelated shards can collide)
     410            0 :                         tracing::debug!("Retrying transaction on serialization failure {err:?}");
     411            0 :                         continue;
     412              :                     }
     413              :                 }
     414            0 :                 Err(e) => break Err(e),
     415              :             }
     416              :         }
     417            0 :     }
     418              : 
     419              :     /// When a node is first registered, persist it before using it for anything
     420              :     /// If the provided node_id already exists, it will be error.
     421              :     /// The common case is when a node marked for deletion wants to register.
     422            0 :     pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
     423            0 :         let np = &node.to_persistent();
     424            0 :         self.with_measured_conn(DatabaseOperation::InsertNode, move |conn| {
     425            0 :             Box::pin(async move {
     426            0 :                 diesel::insert_into(crate::schema::nodes::table)
     427            0 :                     .values(np)
     428            0 :                     .execute(conn)
     429            0 :                     .await?;
     430            0 :                 Ok(())
     431            0 :             })
     432            0 :         })
     433            0 :         .await
     434            0 :     }
     435              : 
     436              :     /// At startup, populate the list of nodes which our shards may be placed on
     437            0 :     pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
     438              :         use crate::schema::nodes::dsl::*;
     439              : 
     440            0 :         let result: Vec<NodePersistence> = self
     441            0 :             .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
     442            0 :                 Box::pin(async move {
     443            0 :                     Ok(crate::schema::nodes::table
     444            0 :                         .filter(lifecycle.ne(String::from(NodeLifecycle::Deleted)))
     445            0 :                         .load::<NodePersistence>(conn)
     446            0 :                         .await?)
     447            0 :                 })
     448            0 :             })
     449            0 :             .await?;
     450              : 
     451            0 :         tracing::info!("list_nodes: loaded {} nodes", result.len());
     452              : 
     453            0 :         Ok(result)
     454            0 :     }
     455              : 
     456            0 :     pub(crate) async fn list_tombstones(&self) -> DatabaseResult<Vec<NodePersistence>> {
     457              :         use crate::schema::nodes::dsl::*;
     458              : 
     459            0 :         let result: Vec<NodePersistence> = self
     460            0 :             .with_measured_conn(DatabaseOperation::ListTombstones, move |conn| {
     461            0 :                 Box::pin(async move {
     462            0 :                     Ok(crate::schema::nodes::table
     463            0 :                         .filter(lifecycle.eq(String::from(NodeLifecycle::Deleted)))
     464            0 :                         .load::<NodePersistence>(conn)
     465            0 :                         .await?)
     466            0 :                 })
     467            0 :             })
     468            0 :             .await?;
     469              : 
     470            0 :         tracing::info!("list_tombstones: loaded {} nodes", result.len());
     471              : 
     472            0 :         Ok(result)
     473            0 :     }
     474              : 
     475            0 :     pub(crate) async fn update_node<V>(
     476            0 :         &self,
     477            0 :         input_node_id: NodeId,
     478            0 :         values: V,
     479            0 :     ) -> DatabaseResult<()>
     480            0 :     where
     481            0 :         V: diesel::AsChangeset<Target = crate::schema::nodes::table> + Clone + Send + Sync,
     482            0 :         V::Changeset: diesel::query_builder::QueryFragment<diesel::pg::Pg> + Send, // valid Postgres SQL
     483            0 :     {
     484              :         use crate::schema::nodes::dsl::*;
     485            0 :         let updated = self
     486            0 :             .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
     487            0 :                 let values = values.clone();
     488            0 :                 Box::pin(async move {
     489            0 :                     let updated = diesel::update(nodes)
     490            0 :                         .filter(node_id.eq(input_node_id.0 as i64))
     491            0 :                         .filter(lifecycle.ne(String::from(NodeLifecycle::Deleted)))
     492            0 :                         .set(values)
     493            0 :                         .execute(conn)
     494            0 :                         .await?;
     495            0 :                     Ok(updated)
     496            0 :                 })
     497            0 :             })
     498            0 :             .await?;
     499              : 
     500            0 :         if updated != 1 {
     501            0 :             Err(DatabaseError::Logical(format!(
     502            0 :                 "Node {node_id:?} not found for update",
     503            0 :             )))
     504              :         } else {
     505            0 :             Ok(())
     506              :         }
     507            0 :     }
     508              : 
     509            0 :     pub(crate) async fn update_node_scheduling_policy(
     510            0 :         &self,
     511            0 :         input_node_id: NodeId,
     512            0 :         input_scheduling: NodeSchedulingPolicy,
     513            0 :     ) -> DatabaseResult<()> {
     514              :         use crate::schema::nodes::dsl::*;
     515            0 :         self.update_node(
     516            0 :             input_node_id,
     517            0 :             scheduling_policy.eq(String::from(input_scheduling)),
     518            0 :         )
     519            0 :         .await
     520            0 :     }
     521              : 
     522            0 :     pub(crate) async fn update_node_on_registration(
     523            0 :         &self,
     524            0 :         input_node_id: NodeId,
     525            0 :         input_https_port: Option<u16>,
     526            0 :     ) -> DatabaseResult<()> {
     527              :         use crate::schema::nodes::dsl::*;
     528            0 :         self.update_node(
     529            0 :             input_node_id,
     530            0 :             listen_https_port.eq(input_https_port.map(|x| x as i32)),
     531              :         )
     532            0 :         .await
     533            0 :     }
     534              : 
      535              :     /// A tombstone is a special state in which the node is not deleted from the database,
      536              :     /// but is no longer available for use.
      537              :     /// Its main purpose is to prevent a flaky node from re-registering.
     538            0 :     pub(crate) async fn set_tombstone(&self, del_node_id: NodeId) -> DatabaseResult<()> {
     539              :         use crate::schema::nodes::dsl::*;
     540            0 :         self.update_node(
     541            0 :             del_node_id,
     542            0 :             lifecycle.eq(String::from(NodeLifecycle::Deleted)),
     543            0 :         )
     544            0 :         .await
     545            0 :     }
     546              : 
     547            0 :     pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
     548              :         use crate::schema::nodes::dsl::*;
     549            0 :         self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| {
     550            0 :             Box::pin(async move {
     551              :                 // You can hard delete a node only if it has a tombstone.
     552              :                 // So we need to check if the node has lifecycle set to deleted.
     553            0 :                 let node_to_delete = nodes
     554            0 :                     .filter(node_id.eq(del_node_id.0 as i64))
     555            0 :                     .first::<NodePersistence>(conn)
     556            0 :                     .await
     557            0 :                     .optional()?;
     558              : 
     559            0 :                 if let Some(np) = node_to_delete {
     560            0 :                     let lc = NodeLifecycle::from_str(&np.lifecycle).map_err(|e| {
     561            0 :                         DatabaseError::Logical(format!(
     562            0 :                             "Node {del_node_id} has invalid lifecycle: {e}"
     563            0 :                         ))
     564            0 :                     })?;
     565              : 
     566            0 :                     if lc != NodeLifecycle::Deleted {
     567            0 :                         return Err(DatabaseError::Logical(format!(
     568            0 :                             "Node {del_node_id} was not soft deleted before, cannot hard delete it"
     569            0 :                         )));
     570            0 :                     }
     571              : 
     572            0 :                     diesel::delete(nodes)
     573            0 :                         .filter(node_id.eq(del_node_id.0 as i64))
     574            0 :                         .execute(conn)
     575            0 :                         .await?;
     576            0 :                 }
     577              : 
     578            0 :                 Ok(())
     579            0 :             })
     580            0 :         })
     581            0 :         .await
     582            0 :     }
     583              : 
     584              :     /// At startup, load the high level state for shards, such as their config + policy.  This will
     585              :     /// be enriched at runtime with state discovered on pageservers.
     586              :     ///
     587              :     /// We exclude shards configured to be detached.  During startup, if we see any attached locations
     588              :     /// for such shards, they will automatically be detached as 'orphans'.
     589            0 :     pub(crate) async fn load_active_tenant_shards(
     590            0 :         &self,
     591            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     592              :         use crate::schema::tenant_shards::dsl::*;
     593            0 :         self.with_measured_conn(DatabaseOperation::ListTenantShards, move |conn| {
     594            0 :             Box::pin(async move {
     595            0 :                 let query = tenant_shards.filter(
     596            0 :                     placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
     597              :                 );
     598            0 :                 let result = query.load::<TenantShardPersistence>(conn).await?;
     599              : 
     600            0 :                 Ok(result)
     601            0 :             })
     602            0 :         })
     603            0 :         .await
     604            0 :     }
     605              : 
     606              :     /// When restoring a previously detached tenant into memory, load it from the database
     607            0 :     pub(crate) async fn load_tenant(
     608            0 :         &self,
     609            0 :         filter_tenant_id: TenantId,
     610            0 :     ) -> DatabaseResult<Vec<TenantShardPersistence>> {
     611              :         use crate::schema::tenant_shards::dsl::*;
     612            0 :         self.with_measured_conn(DatabaseOperation::LoadTenant, move |conn| {
     613            0 :             Box::pin(async move {
     614            0 :                 let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
     615            0 :                 let result = query.load::<TenantShardPersistence>(conn).await?;
     616              : 
     617            0 :                 Ok(result)
     618            0 :             })
     619            0 :         })
     620            0 :         .await
     621            0 :     }
     622              : 
     623              :     /// Tenants must be persisted before we schedule them for the first time.  This enables us
     624              :     /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
     625            0 :     pub(crate) async fn insert_tenant_shards(
     626            0 :         &self,
     627            0 :         shards: Vec<TenantShardPersistence>,
     628            0 :     ) -> DatabaseResult<()> {
     629              :         use crate::schema::{metadata_health, tenant_shards};
     630              : 
     631            0 :         let now = chrono::Utc::now();
     632              : 
     633            0 :         let metadata_health_records = shards
     634            0 :             .iter()
     635            0 :             .map(|t| MetadataHealthPersistence {
     636            0 :                 tenant_id: t.tenant_id.clone(),
     637            0 :                 shard_number: t.shard_number,
     638            0 :                 shard_count: t.shard_count,
     639              :                 healthy: true,
     640            0 :                 last_scrubbed_at: now,
     641            0 :             })
     642            0 :             .collect::<Vec<_>>();
     643              : 
     644            0 :         let shards = &shards;
     645            0 :         let metadata_health_records = &metadata_health_records;
     646            0 :         self.with_measured_conn(DatabaseOperation::InsertTenantShards, move |conn| {
     647            0 :             Box::pin(async move {
     648            0 :                 diesel::insert_into(tenant_shards::table)
     649            0 :                     .values(shards)
     650            0 :                     .execute(conn)
     651            0 :                     .await?;
     652              : 
     653            0 :                 diesel::insert_into(metadata_health::table)
     654            0 :                     .values(metadata_health_records)
     655            0 :                     .execute(conn)
     656            0 :                     .await?;
     657            0 :                 Ok(())
     658            0 :             })
     659            0 :         })
     660            0 :         .await
     661            0 :     }
     662              : 
     663              :     /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
     664              :     /// the tenant from memory on this server.
     665            0 :     pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
     666              :         use crate::schema::tenant_shards::dsl::*;
     667            0 :         self.with_measured_conn(DatabaseOperation::DeleteTenant, move |conn| {
     668            0 :             Box::pin(async move {
     669              :                 // `metadata_health` status (if exists) is also deleted based on the cascade behavior.
     670            0 :                 diesel::delete(tenant_shards)
     671            0 :                     .filter(tenant_id.eq(del_tenant_id.to_string()))
     672            0 :                     .execute(conn)
     673            0 :                     .await?;
     674            0 :                 Ok(())
     675            0 :             })
     676            0 :         })
     677            0 :         .await
     678            0 :     }
     679              : 
      680              :     /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
     681              :     /// batched increment of the generations of all tenants whose generation_pageserver is equal to
     682              :     /// the node that called /re-attach.
     683              :     #[tracing::instrument(skip_all, fields(node_id))]
     684              :     pub(crate) async fn re_attach(
     685              :         &self,
     686              :         input_node_id: NodeId,
     687              :     ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
     688              :         use crate::schema::nodes::dsl::{scheduling_policy, *};
     689              :         use crate::schema::tenant_shards::dsl::*;
     690              :         let updated = self
     691            0 :             .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
     692            0 :                 Box::pin(async move {
     693            0 :                     let node: Option<NodePersistence> = nodes
     694            0 :                         .filter(node_id.eq(input_node_id.0 as i64))
     695            0 :                         .first::<NodePersistence>(conn)
     696            0 :                         .await
     697            0 :                         .optional()?;
     698              : 
     699              :                     // Check if the node is not marked as deleted
     700            0 :                     match node {
     701            0 :                         Some(node) if matches!(NodeLifecycle::from_str(&node.lifecycle), Ok(NodeLifecycle::Deleted)) => {
     702            0 :                             return Err(DatabaseError::Logical(format!(
     703            0 :                                 "Node {input_node_id} is marked as deleted, re-attach is not allowed"
     704            0 :                             )));
     705              :                         }
     706            0 :                         _ => {
     707            0 :                             // go through
     708            0 :                         }
     709              :                     };
     710              : 
     711            0 :                     let rows_updated = diesel::update(tenant_shards)
     712            0 :                         .filter(generation_pageserver.eq(input_node_id.0 as i64))
     713            0 :                         .set(generation.eq(generation + 1))
     714            0 :                         .execute(conn)
     715            0 :                         .await?;
     716              : 
     717            0 :                     tracing::info!("Incremented {} tenants' generations", rows_updated);
     718              : 
     719              :                     // TODO: UPDATE+SELECT in one query
     720              : 
     721            0 :                     let updated = tenant_shards
     722            0 :                         .filter(generation_pageserver.eq(input_node_id.0 as i64))
     723            0 :                         .select(TenantShardPersistence::as_select())
     724            0 :                         .load(conn)
     725            0 :                         .await?;
     726              : 
     727            0 :                     if let Some(node) = node {
     728            0 :                         let old_scheduling_policy =
     729            0 :                             NodeSchedulingPolicy::from_str(&node.scheduling_policy).unwrap();
     730            0 :                         let new_scheduling_policy = match old_scheduling_policy {
     731            0 :                             NodeSchedulingPolicy::Active => NodeSchedulingPolicy::Active,
     732            0 :                             NodeSchedulingPolicy::PauseForRestart => NodeSchedulingPolicy::Active,
     733            0 :                             NodeSchedulingPolicy::Draining => NodeSchedulingPolicy::Active,
     734            0 :                             NodeSchedulingPolicy::Filling => NodeSchedulingPolicy::Active,
     735            0 :                             NodeSchedulingPolicy::Pause => NodeSchedulingPolicy::Pause,
     736            0 :                             NodeSchedulingPolicy::Deleting => NodeSchedulingPolicy::Pause,
     737              :                         };
     738            0 :                         diesel::update(nodes)
     739            0 :                             .filter(node_id.eq(input_node_id.0 as i64))
     740            0 :                             .set(scheduling_policy.eq(String::from(new_scheduling_policy)))
     741            0 :                             .execute(conn)
     742            0 :                             .await?;
     743            0 :                     }
     744              : 
     745            0 :                     Ok(updated)
     746            0 :                 })
     747            0 :             })
     748              :             .await?;
     749              : 
     750              :         let mut result = HashMap::new();
     751              :         for tsp in updated {
     752              :             let tenant_shard_id = TenantShardId {
     753              :                 tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
     754            0 :                     .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
     755              :                 shard_number: ShardNumber(tsp.shard_number as u8),
     756              :                 shard_count: ShardCount::new(tsp.shard_count as u8),
     757              :             };
     758              : 
     759              :             let Some(g) = tsp.generation else {
     760              :                 // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
     761              :                 // we only set generation_pageserver when setting generation.
     762              :                 return Err(DatabaseError::Logical(
     763              :                     "Generation should always be set after incrementing".to_string(),
     764              :                 ));
     765              :             };
     766              :             result.insert(tenant_shard_id, Generation::new(g as u32));
     767              :         }
     768              : 
     769              :         Ok(result)
     770              :     }
     771              : 
     772              :     /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
     773              :     /// advancing generation number.  We also store the NodeId for which the generation was issued, so that in
     774              :     /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
     775            0 :     pub(crate) async fn increment_generation(
     776            0 :         &self,
     777            0 :         tenant_shard_id: TenantShardId,
     778            0 :         node_id: NodeId,
     779            0 :     ) -> anyhow::Result<Generation> {
     780              :         use crate::schema::tenant_shards::dsl::*;
     781            0 :         let updated = self
     782            0 :             .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
     783            0 :                 Box::pin(async move {
     784            0 :                     let updated = diesel::update(tenant_shards)
     785            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     786            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     787            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     788            0 :                         .set((
     789            0 :                             generation.eq(generation + 1),
     790            0 :                             generation_pageserver.eq(node_id.0 as i64),
     791            0 :                         ))
     792            0 :                         // TODO: only returning() the generation column
     793            0 :                         .returning(TenantShardPersistence::as_returning())
     794            0 :                         .get_result(conn)
     795            0 :                         .await?;
     796              : 
     797            0 :                     Ok(updated)
     798            0 :                 })
     799            0 :             })
     800            0 :             .await?;
     801              : 
      802              :         // Generation is always non-null in the result: if the generation column had been NULL, then we
      803              :         // should have experienced an SQL conflict error while executing a query that tries to increment it.
     804            0 :         debug_assert!(updated.generation.is_some());
     805            0 :         let Some(g) = updated.generation else {
     806            0 :             return Err(DatabaseError::Logical(
     807            0 :                 "Generation should always be set after incrementing".to_string(),
     808            0 :             )
     809            0 :             .into());
     810              :         };
     811              : 
     812            0 :         Ok(Generation::new(g as u32))
     813            0 :     }
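
A hedged sketch of the Reconciler-side flow described above; only `increment_generation` comes from this file, and the attach step is left as a hypothetical comment.

    // Acquire a new, monotonically advancing generation before attaching the
    // shard to its target pageserver.
    let new_generation = persistence
        .increment_generation(tenant_shard_id, target_node_id)
        .await?;
    // attach_tenant_shard(target_node_id, tenant_shard_id, new_generation).await?;
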
     814              : 
     815              :     /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
     816              :     /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
     817              :     /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
     818              :     /// latest generation)
     819              :     ///
     820              :     /// If the tenant doesn't exist, an empty vector is returned.
     821              :     ///
     822              :     /// Output is sorted by shard number
     823            0 :     pub(crate) async fn tenant_generations(
     824            0 :         &self,
     825            0 :         filter_tenant_id: TenantId,
     826            0 :     ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
     827              :         use crate::schema::tenant_shards::dsl::*;
     828            0 :         let rows = self
     829            0 :             .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
     830            0 :                 Box::pin(async move {
     831            0 :                     let result = tenant_shards
     832            0 :                         .filter(tenant_id.eq(filter_tenant_id.to_string()))
     833            0 :                         .select(TenantShardPersistence::as_select())
     834            0 :                         .order(shard_number)
     835            0 :                         .load(conn)
     836            0 :                         .await?;
     837            0 :                     Ok(result)
     838            0 :                 })
     839            0 :             })
     840            0 :             .await?;
     841              : 
     842            0 :         Ok(rows
     843            0 :             .into_iter()
     844            0 :             .map(|p| ShardGenerationState {
     845            0 :                 tenant_shard_id: p
     846            0 :                     .get_tenant_shard_id()
     847            0 :                     .expect("Corrupt tenant shard id in database"),
     848            0 :                 generation: p.generation.map(|g| Generation::new(g as u32)),
     849            0 :                 generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
     850            0 :             })
     851            0 :             .collect())
     852            0 :     }
     853              : 
     854              :     /// Read the generation number of specific tenant shards
     855              :     ///
     856              :     /// Output is unsorted.  Output may not include values for all inputs, if they are missing in the database.
     857            0 :     pub(crate) async fn shard_generations(
     858            0 :         &self,
     859            0 :         mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
     860            0 :     ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
     861            0 :         let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);
     862              : 
     863              :         // We will chunk our input to avoid composing arbitrarily long `IN` clauses.  Typically we are
     864              :         // called with a single digit number of IDs, but in principle we could be called with tens
     865              :         // of thousands (all the shards on one pageserver) from the generation validation API.
     866              :         loop {
     867              :             // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
     868              :             // large query strings.
     869            0 :             let chunk_ids = tenant_shard_ids.by_ref().take(32);
     870              : 
     871              :             // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
     872            0 :             let in_clause = chunk_ids
     873            0 :                 .map(|tsid| {
     874            0 :                     format!(
     875            0 :                         "('{}', {}, {})",
     876              :                         tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
     877              :                     )
     878            0 :                 })
     879            0 :                 .join(",");
     880              : 
     881              :             // We are done when our iterator gives us nothing to filter on
     882            0 :             if in_clause.is_empty() {
     883            0 :                 break;
     884            0 :             }
     885              : 
     886            0 :             let in_clause = &in_clause;
     887            0 :             let chunk_rows = self
     888            0 :                 .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
     889            0 :                     Box::pin(async move {
     890              :                         // diesel doesn't support multi-column IN queries, so we compose raw SQL.  No escaping is required because
     891              :                         // the inputs are strongly typed and cannot carry any user-supplied raw string content.
     892            0 :                         let result : Vec<TenantShardPersistence> = diesel::sql_query(
     893            0 :                             format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
     894            0 :                         ).load(conn).await?;
     895              : 
     896            0 :                         Ok(result)
     897            0 :                     })
     898            0 :                 })
     899            0 :                 .await?;
     900            0 :             rows.extend(chunk_rows.into_iter())
     901              :         }
     902              : 
     903            0 :         Ok(rows
     904            0 :             .into_iter()
     905            0 :             .map(|tsp| {
     906              :                 (
     907            0 :                     tsp.get_tenant_shard_id()
     908            0 :                         .expect("Bad tenant ID in database"),
     909            0 :                     tsp.generation.map(|g| Generation::new(g as u32)),
     910              :                 )
     911            0 :             })
     912            0 :             .collect())
     913            0 :     }
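
// A minimal, standalone sketch of the chunking pattern used in shard_generations
// above, shown with plain tuples instead of TenantShardId and without diesel.
// The helper name and the 32-element chunk size mirror the code above but are an
// illustration, not part of this crate.
fn chunked_in_clauses(mut ids: impl Iterator<Item = (String, u8, u8)>) -> Vec<String> {
    let mut clauses = Vec::new();
    loop {
        // Drain at most 32 ids per pass so no single IN clause grows unbounded.
        let chunk: Vec<String> = ids
            .by_ref()
            .take(32)
            .map(|(tenant, number, count)| format!("('{tenant}', {number}, {count})"))
            .collect();
        // An empty chunk means the iterator is exhausted and we are done.
        if chunk.is_empty() {
            break;
        }
        clauses.push(chunk.join(","));
    }
    clauses
}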
     914              : 
     915              :     #[allow(non_local_definitions)]
     916              :     /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
     917              :     ///
     918              :     /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
     919              :     /// API: use [`Self::increment_generation`] instead.  Setting the generation via this route is a one-time thing
     920              :     /// that we only do the first time a tenant is set to an attached policy via /location_config.
     921            0 :     pub(crate) async fn update_tenant_shard(
     922            0 :         &self,
     923            0 :         tenant: TenantFilter,
     924            0 :         input_placement_policy: Option<PlacementPolicy>,
     925            0 :         input_config: Option<TenantConfig>,
     926            0 :         input_generation: Option<Generation>,
     927            0 :         input_scheduling_policy: Option<ShardSchedulingPolicy>,
     928            0 :     ) -> DatabaseResult<()> {
     929              :         use crate::schema::tenant_shards::dsl::*;
     930              : 
     931            0 :         let tenant = &tenant;
     932            0 :         let input_placement_policy = &input_placement_policy;
     933            0 :         let input_config = &input_config;
     934            0 :         let input_generation = &input_generation;
     935            0 :         let input_scheduling_policy = &input_scheduling_policy;
     936            0 :         self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
     937            0 :             Box::pin(async move {
     938            0 :                 let query = match tenant {
     939            0 :                     TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
     940            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
     941            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
     942            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
     943            0 :                         .into_boxed(),
     944            0 :                     TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
     945            0 :                         .filter(tenant_id.eq(input_tenant_id.to_string()))
     946            0 :                         .into_boxed(),
     947              :                 };
     948              : 
     949              :                 // Clear generation_pageserver if we are moving into a state where we won't have
     950              :                 // any attached pageservers.
     951            0 :                 let input_generation_pageserver = match input_placement_policy {
     952            0 :                     None | Some(PlacementPolicy::Attached(_)) => None,
     953            0 :                     Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
     954              :                 };
     955              : 
     956              :                 #[derive(AsChangeset)]
     957              :                 #[diesel(table_name = crate::schema::tenant_shards)]
     958              :                 struct ShardUpdate {
     959              :                     generation: Option<i32>,
     960              :                     placement_policy: Option<String>,
     961              :                     config: Option<String>,
     962              :                     scheduling_policy: Option<String>,
     963              :                     generation_pageserver: Option<Option<i64>>,
     964              :                 }
     965              : 
     966            0 :                 let update = ShardUpdate {
     967            0 :                     generation: input_generation.map(|g| g.into().unwrap() as i32),
     968            0 :                     placement_policy: input_placement_policy
     969            0 :                         .as_ref()
     970            0 :                         .map(|p| serde_json::to_string(&p).unwrap()),
     971            0 :                     config: input_config
     972            0 :                         .as_ref()
     973            0 :                         .map(|c| serde_json::to_string(&c).unwrap()),
     974            0 :                     scheduling_policy: input_scheduling_policy
     975            0 :                         .map(|p| serde_json::to_string(&p).unwrap()),
     976            0 :                     generation_pageserver: input_generation_pageserver,
     977              :                 };
     978              : 
     979            0 :                 query.set(update).execute(conn).await?;
     980              : 
     981            0 :                 Ok(())
     982            0 :             })
     983            0 :         })
     984            0 :         .await?;
     985              : 
     986            0 :         Ok(())
     987            0 :     }
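
// A standalone sketch of the tri-state used for `generation_pageserver` above,
// modelled without diesel. With the AsChangeset derive, `None` normally means
// "leave the column unchanged" while `Some(None)` writes SQL NULL into a nullable
// column; `Policy` below is an illustrative stand-in for PlacementPolicy.
#[derive(Debug, PartialEq)]
enum Policy {
    Attached(usize),
    Secondary,
    Detached,
}

fn generation_pageserver_change(policy: Option<&Policy>) -> Option<Option<i64>> {
    match policy {
        // No policy change requested, or still attached: keep the current value.
        None | Some(Policy::Attached(_)) => None,
        // Moving to a state with no attached pageserver: clear the column.
        Some(Policy::Secondary | Policy::Detached) => Some(None),
    }
}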
     988              : 
     989              :     /// Note that passing None for a shard clears the preferred AZ (rather than leaving it unmodified)
     990            0 :     pub(crate) async fn set_tenant_shard_preferred_azs(
     991            0 :         &self,
     992            0 :         preferred_azs: Vec<(TenantShardId, Option<AvailabilityZone>)>,
     993            0 :     ) -> DatabaseResult<Vec<(TenantShardId, Option<AvailabilityZone>)>> {
     994              :         use crate::schema::tenant_shards::dsl::*;
     995              : 
     996            0 :         let preferred_azs = preferred_azs.as_slice();
     997            0 :         self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
     998            0 :             Box::pin(async move {
     999            0 :                 let mut shards_updated = Vec::default();
    1000              : 
    1001            0 :                 for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
    1002            0 :                     let updated = diesel::update(tenant_shards)
    1003            0 :                         .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
    1004            0 :                         .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
    1005            0 :                         .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
    1006            0 :                         .set(preferred_az_id.eq(preferred_az.as_ref().map(|az| az.0.clone())))
    1007            0 :                         .execute(conn)
    1008            0 :                         .await?;
    1009              : 
    1010            0 :                     if updated == 1 {
    1011            0 :                         shards_updated.push((*tenant_shard_id, preferred_az.clone()));
    1012            0 :                     }
    1013              :                 }
    1014              : 
    1015            0 :                 Ok(shards_updated)
    1016            0 :             })
    1017            0 :         })
    1018            0 :         .await
    1019            0 :     }
    1020              : 
    1021            0 :     pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
    1022              :         use crate::schema::tenant_shards::dsl::*;
    1023            0 :         self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
    1024            0 :             Box::pin(async move {
    1025            0 :                 let updated = diesel::update(tenant_shards)
    1026            0 :                     .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
    1027            0 :                     .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
    1028            0 :                     .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
    1029            0 :                     .set((
    1030            0 :                         generation_pageserver.eq(Option::<i64>::None),
    1031            0 :                         placement_policy
    1032            0 :                             .eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
    1033            0 :                     ))
    1034            0 :                     .execute(conn)
    1035            0 :                     .await?;
    1036              : 
    1037            0 :                 Ok(updated)
    1038            0 :             })
    1039            0 :         })
    1040            0 :         .await?;
    1041              : 
    1042            0 :         Ok(())
    1043            0 :     }
    1044              : 
    1045              :     // When we start shard splitting, we must durably mark the tenant so that
    1046              :     // on restart, we know that we must go through recovery.
    1047              :     //
    1048              :     // We create the child shards here, so that they will be available for increment_generation calls
    1049              :     // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
    1050            0 :     pub(crate) async fn begin_shard_split(
    1051            0 :         &self,
    1052            0 :         old_shard_count: ShardCount,
    1053            0 :         split_tenant_id: TenantId,
    1054            0 :         parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
    1055            0 :     ) -> DatabaseResult<()> {
    1056              :         use crate::schema::tenant_shards::dsl::*;
    1057            0 :         let parent_to_children = parent_to_children.as_slice();
    1058            0 :         self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| {
    1059            0 :             Box::pin(async move {
    1060              :             // Mark parent shards as splitting
    1061              : 
    1062            0 :             let updated = diesel::update(tenant_shards)
    1063            0 :                 .filter(tenant_id.eq(split_tenant_id.to_string()))
    1064            0 :                 .filter(shard_count.eq(old_shard_count.literal() as i32))
    1065            0 :                 .set((splitting.eq(1),))
    1066            0 :                 .execute(conn).await?;
    1067            0 :             if u8::try_from(updated)
    1068            0 :                 .map_err(|_| DatabaseError::Logical(
    1069            0 :                     format!("Overflow existing shard count {updated} while splitting"))
    1070            0 :                 )? != old_shard_count.count() {
    1071              :                 // Perhaps a deletion or another split raced with this attempt to split, mutating
    1072              :                 // the parent shards that we intend to split. In this case the split request should fail.
    1073            0 :                 return Err(DatabaseError::Logical(
    1074            0 :                     format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
    1075            0 :                 ));
    1076            0 :             }
    1077              : 
    1078              :             // FIXME: spurious clone to sidestep closure move rules
    1079            0 :             let parent_to_children = parent_to_children.to_vec();
    1080              : 
    1081              :             // Insert child shards
    1082            0 :             for (parent_shard_id, children) in parent_to_children {
    1083            0 :                 let mut parent = crate::schema::tenant_shards::table
    1084            0 :                     .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
    1085            0 :                     .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
    1086            0 :                     .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
    1087            0 :                     .load::<TenantShardPersistence>(conn).await?;
    1088            0 :                 let parent = if parent.len() != 1 {
    1089            0 :                     return Err(DatabaseError::Logical(format!(
    1090            0 :                         "Parent shard {parent_shard_id} not found"
    1091            0 :                     )));
    1092              :                 } else {
    1093            0 :                     parent.pop().unwrap()
    1094              :                 };
    1095            0 :                 for mut shard in children {
    1096              :                     // Carry the parent's generation into the child
    1097            0 :                     shard.generation = parent.generation;
    1098              : 
    1099            0 :                     debug_assert!(shard.splitting == SplitState::Splitting);
    1100            0 :                     diesel::insert_into(tenant_shards)
    1101            0 :                         .values(shard)
    1102            0 :                         .execute(conn).await?;
    1103              :                 }
    1104              :             }
    1105              : 
    1106            0 :             Ok(())
    1107            0 :         })
    1108            0 :         })
    1109            0 :         .await
    1110            0 :     }
    1111              : 
    1112              :     // When we finish shard splitting, we must atomically clean up the old (parent) shards
    1113              :     // and clear the splitting marker on the new (child) shards.
    1114            0 :     pub(crate) async fn complete_shard_split(
    1115            0 :         &self,
    1116            0 :         split_tenant_id: TenantId,
    1117            0 :         old_shard_count: ShardCount,
    1118            0 :         new_shard_count: ShardCount,
    1119            0 :     ) -> DatabaseResult<()> {
    1120              :         use crate::schema::tenant_shards::dsl::*;
    1121            0 :         self.with_measured_conn(DatabaseOperation::CompleteShardSplit, move |conn| {
    1122            0 :             Box::pin(async move {
    1123              :                 // Sanity: child shards must still exist, as we're deleting parent shards
    1124            0 :                 let child_shards_query = tenant_shards
    1125            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1126            0 :                     .filter(shard_count.eq(new_shard_count.literal() as i32));
    1127            0 :                 let child_shards = child_shards_query
    1128            0 :                     .load::<TenantShardPersistence>(conn)
    1129            0 :                     .await?;
    1130            0 :                 if child_shards.len() != new_shard_count.count() as usize {
    1131            0 :                     return Err(DatabaseError::Logical(format!(
    1132            0 :                         "Unexpected child shard count {} while completing split to \
    1133            0 :                             count {new_shard_count:?} on tenant {split_tenant_id}",
    1134            0 :                         child_shards.len()
    1135            0 :                     )));
    1136            0 :                 }
    1137              : 
    1138              :                 // Drop parent shards
    1139            0 :                 diesel::delete(tenant_shards)
    1140            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1141            0 :                     .filter(shard_count.eq(old_shard_count.literal() as i32))
    1142            0 :                     .execute(conn)
    1143            0 :                     .await?;
    1144              : 
    1145              :                 // Clear splitting flag
    1146            0 :                 let updated = diesel::update(tenant_shards)
    1147            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1148            0 :                     .filter(shard_count.eq(new_shard_count.literal() as i32))
    1149            0 :                     .set((splitting.eq(0),))
    1150            0 :                     .execute(conn)
    1151            0 :                     .await?;
    1152            0 :                 assert!(updated == new_shard_count.count() as usize);
    1153              : 
    1154            0 :                 Ok(())
    1155            0 :             })
    1156            0 :         })
    1157            0 :         .await
    1158            0 :     }
    1159              : 
    1160              :     /// Used when the remote part of a shard split failed: we will revert the database state to have only
    1161              :     /// the parent shards, with SplitState::Idle.
    1162            0 :     pub(crate) async fn abort_shard_split(
    1163            0 :         &self,
    1164            0 :         split_tenant_id: TenantId,
    1165            0 :         new_shard_count: ShardCount,
    1166            0 :     ) -> DatabaseResult<AbortShardSplitStatus> {
    1167              :         use crate::schema::tenant_shards::dsl::*;
    1168            0 :         self.with_measured_conn(DatabaseOperation::AbortShardSplit, move |conn| {
    1169            0 :             Box::pin(async move {
    1170              :                 // Clear the splitting state on parent shards
    1171            0 :                 let updated = diesel::update(tenant_shards)
    1172            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1173            0 :                     .filter(shard_count.ne(new_shard_count.literal() as i32))
    1174            0 :                     .set((splitting.eq(0),))
    1175            0 :                     .execute(conn)
    1176            0 :                     .await?;
    1177              : 
    1178              :                 // Parent shards are already gone: we cannot abort.
    1179            0 :                 if updated == 0 {
    1180            0 :                     return Ok(AbortShardSplitStatus::Complete);
    1181            0 :                 }
    1182              : 
    1183              :                 // Sanity check: if parent shards were present, their cardinality should
    1184              :                 // be less than the number of child shards.
    1185            0 :                 if updated >= new_shard_count.count() as usize {
    1186            0 :                     return Err(DatabaseError::Logical(format!(
    1187            0 :                         "Unexpected parent shard count {updated} while aborting split to \
    1188            0 :                             count {new_shard_count:?} on tenant {split_tenant_id}"
    1189            0 :                     )));
    1190            0 :                 }
    1191              : 
    1192              :                 // Erase child shards
    1193            0 :                 diesel::delete(tenant_shards)
    1194            0 :                     .filter(tenant_id.eq(split_tenant_id.to_string()))
    1195            0 :                     .filter(shard_count.eq(new_shard_count.literal() as i32))
    1196            0 :                     .execute(conn)
    1197            0 :                     .await?;
    1198              : 
    1199            0 :                 Ok(AbortShardSplitStatus::Aborted)
    1200            0 :             })
    1201            0 :         })
    1202            0 :         .await
    1203            0 :     }
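
// A hedged sketch of how the three split entry points above are meant to
// compose: begin the split durably, attempt the remote work, then either
// complete or abort. The `ShardSplitStore` trait and `do_remote_split` closure
// are illustrative stand-ins, not APIs from this crate.
trait ShardSplitStore {
    type Error;
    fn begin(&self) -> Result<(), Self::Error>;
    fn complete(&self) -> Result<(), Self::Error>;
    fn abort(&self) -> Result<(), Self::Error>;
}

fn run_split<S: ShardSplitStore>(
    store: &S,
    do_remote_split: impl FnOnce() -> bool,
) -> Result<bool, S::Error> {
    // Durably mark the parents as splitting and insert the child shards.
    store.begin()?;
    if do_remote_split() {
        // Remote work succeeded: drop the parents and clear the splitting marker.
        store.complete()?;
        Ok(true)
    } else {
        // Remote work failed: delete the children and revert the parents to idle.
        store.abort()?;
        Ok(false)
    }
}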
    1204              : 
    1205              :     /// Stores all the latest metadata health updates durably. Updates existing entry on conflict.
    1206              :     ///
    1207              :     /// **Correctness:** `metadata_health_updates` should all belong to the tenant shards managed by the storage controller.
    1208              :     #[allow(dead_code)]
    1209            0 :     pub(crate) async fn update_metadata_health_records(
    1210            0 :         &self,
    1211            0 :         healthy_records: Vec<MetadataHealthPersistence>,
    1212            0 :         unhealthy_records: Vec<MetadataHealthPersistence>,
    1213            0 :         now: chrono::DateTime<chrono::Utc>,
    1214            0 :     ) -> DatabaseResult<()> {
    1215              :         use crate::schema::metadata_health::dsl::*;
    1216              : 
    1217            0 :         let healthy_records = healthy_records.as_slice();
    1218            0 :         let unhealthy_records = unhealthy_records.as_slice();
    1219            0 :         self.with_measured_conn(DatabaseOperation::UpdateMetadataHealth, move |conn| {
    1220            0 :             Box::pin(async move {
    1221            0 :                 diesel::insert_into(metadata_health)
    1222            0 :                     .values(healthy_records)
    1223            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
    1224            0 :                     .do_update()
    1225            0 :                     .set((healthy.eq(true), last_scrubbed_at.eq(now)))
    1226            0 :                     .execute(conn)
    1227            0 :                     .await?;
    1228              : 
    1229            0 :                 diesel::insert_into(metadata_health)
    1230            0 :                     .values(unhealthy_records)
    1231            0 :                     .on_conflict((tenant_id, shard_number, shard_count))
    1232            0 :                     .do_update()
    1233            0 :                     .set((healthy.eq(false), last_scrubbed_at.eq(now)))
    1234            0 :                     .execute(conn)
    1235            0 :                     .await?;
    1236            0 :                 Ok(())
    1237            0 :             })
    1238            0 :         })
    1239            0 :         .await
    1240            0 :     }
    1241              : 
    1242              :     /// Lists all the metadata health records.
    1243              :     #[allow(dead_code)]
    1244            0 :     pub(crate) async fn list_metadata_health_records(
    1245            0 :         &self,
    1246            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1247            0 :         self.with_measured_conn(DatabaseOperation::ListMetadataHealth, move |conn| {
    1248            0 :             Box::pin(async {
    1249            0 :                 Ok(crate::schema::metadata_health::table
    1250            0 :                     .load::<MetadataHealthPersistence>(conn)
    1251            0 :                     .await?)
    1252            0 :             })
    1253            0 :         })
    1254            0 :         .await
    1255            0 :     }
    1256              : 
    1257              :     /// Lists all the metadata health records that are unhealthy.
    1258              :     #[allow(dead_code)]
    1259            0 :     pub(crate) async fn list_unhealthy_metadata_health_records(
    1260            0 :         &self,
    1261            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1262              :         use crate::schema::metadata_health::dsl::*;
    1263            0 :         self.with_measured_conn(
    1264            0 :             DatabaseOperation::ListMetadataHealthUnhealthy,
    1265            0 :             move |conn| {
    1266            0 :                 Box::pin(async {
    1267              :                     DatabaseResult::Ok(
    1268            0 :                         crate::schema::metadata_health::table
    1269            0 :                             .filter(healthy.eq(false))
    1270            0 :                             .load::<MetadataHealthPersistence>(conn)
    1271            0 :                             .await?,
    1272              :                     )
    1273            0 :                 })
    1274            0 :             },
    1275              :         )
    1276            0 :         .await
    1277            0 :     }
    1278              : 
    1279              :     /// Lists all the metadata health records that have not been updated since an `earlier` time.
    1280              :     #[allow(dead_code)]
    1281            0 :     pub(crate) async fn list_outdated_metadata_health_records(
    1282            0 :         &self,
    1283            0 :         earlier: chrono::DateTime<chrono::Utc>,
    1284            0 :     ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
    1285              :         use crate::schema::metadata_health::dsl::*;
    1286              : 
    1287            0 :         self.with_measured_conn(DatabaseOperation::ListMetadataHealthOutdated, move |conn| {
    1288            0 :             Box::pin(async move {
    1289            0 :                 let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
    1290            0 :                 let res = query.load::<MetadataHealthPersistence>(conn).await?;
    1291              : 
    1292            0 :                 Ok(res)
    1293            0 :             })
    1294            0 :         })
    1295            0 :         .await
    1296            0 :     }
    1297              : 
    1298              :     /// Get the current entry from the `leader` table if one exists.
    1299              :     /// It is an error for the table to contain more than one entry.
    1300            0 :     pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
    1301            0 :         let mut leader: Vec<ControllerPersistence> = self
    1302            0 :             .with_measured_conn(DatabaseOperation::GetLeader, move |conn| {
    1303            0 :                 Box::pin(async move {
    1304            0 :                     Ok(crate::schema::controllers::table
    1305            0 :                         .load::<ControllerPersistence>(conn)
    1306            0 :                         .await?)
    1307            0 :                 })
    1308            0 :             })
    1309            0 :             .await?;
    1310              : 
    1311            0 :         if leader.len() > 1 {
    1312            0 :             return Err(DatabaseError::Logical(format!(
    1313            0 :                 "More than one entry present in the leader table: {leader:?}"
    1314            0 :             )));
    1315            0 :         }
    1316              : 
    1317            0 :         Ok(leader.pop())
    1318            0 :     }
    1319              : 
    1320              :     /// Update the leader entry with compare-exchange semantics. If `prev` does not
    1321              :     /// match the current leader entry, then the update is treated as a failure.
    1322              :     /// When `prev` is not specified, the update is forced.
    1323            0 :     pub(crate) async fn update_leader(
    1324            0 :         &self,
    1325            0 :         prev: Option<ControllerPersistence>,
    1326            0 :         new: ControllerPersistence,
    1327            0 :     ) -> DatabaseResult<()> {
    1328              :         use crate::schema::controllers::dsl::*;
    1329              : 
    1330            0 :         let updated = self
    1331            0 :             .with_measured_conn(DatabaseOperation::UpdateLeader, move |conn| {
    1332            0 :                 let prev = prev.clone();
    1333            0 :                 let new = new.clone();
    1334            0 :                 Box::pin(async move {
    1335            0 :                     let updated = match &prev {
    1336            0 :                         Some(prev) => {
    1337            0 :                             diesel::update(controllers)
    1338            0 :                                 .filter(address.eq(prev.address.clone()))
    1339            0 :                                 .filter(started_at.eq(prev.started_at))
    1340            0 :                                 .set((
    1341            0 :                                     address.eq(new.address.clone()),
    1342            0 :                                     started_at.eq(new.started_at),
    1343            0 :                                 ))
    1344            0 :                                 .execute(conn)
    1345            0 :                                 .await?
    1346              :                         }
    1347              :                         None => {
    1348            0 :                             diesel::insert_into(controllers)
    1349            0 :                                 .values(new.clone())
    1350            0 :                                 .execute(conn)
    1351            0 :                                 .await?
    1352              :                         }
    1353              :                     };
    1354              : 
    1355            0 :                     Ok(updated)
    1356            0 :                 })
    1357            0 :             })
    1358            0 :             .await?;
    1359              : 
    1360            0 :         if updated == 0 {
    1361            0 :             return Err(DatabaseError::Logical(
    1362            0 :                 "Leader table update failed".to_string(),
    1363            0 :             ));
    1364            0 :         }
    1365              : 
    1366            0 :         Ok(())
    1367            0 :     }
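
// A standalone sketch of the compare-exchange rule implemented above, applied
// to an in-memory Option rather than the `controllers` table. `Leader` is an
// illustrative stand-in for ControllerPersistence, with a simplified timestamp.
#[derive(Clone, PartialEq, Debug)]
struct Leader {
    address: String,
    started_at: i64,
}

fn update_leader_cas(
    current: &mut Option<Leader>,
    prev: Option<Leader>,
    new: Leader,
) -> Result<(), String> {
    match prev {
        // Compare-exchange: replace the entry only if it matches the expected one.
        Some(expected) if current.as_ref() == Some(&expected) => {
            *current = Some(new);
            Ok(())
        }
        Some(_) => Err("Leader table update failed".to_string()),
        // No expected value supplied: force the update.
        None => {
            *current = Some(new);
            Ok(())
        }
    }
}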
    1368              : 
    1369              :     /// At startup, populate the list of safekeepers which our timelines may be placed on
    1370            0 :     pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
    1371            0 :         let safekeepers: Vec<SafekeeperPersistence> = self
    1372            0 :             .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
    1373            0 :                 Box::pin(async move {
    1374            0 :                     Ok(crate::schema::safekeepers::table
    1375            0 :                         .load::<SafekeeperPersistence>(conn)
    1376            0 :                         .await?)
    1377            0 :                 })
    1378            0 :             })
    1379            0 :             .await?;
    1380              : 
    1381            0 :         tracing::info!("list_safekeepers: loaded {} nodes", safekeepers.len());
    1382              : 
    1383            0 :         Ok(safekeepers)
    1384            0 :     }
    1385              : 
    1386            0 :     pub(crate) async fn safekeeper_upsert(
    1387            0 :         &self,
    1388            0 :         record: SafekeeperUpsert,
    1389            0 :     ) -> Result<(), DatabaseError> {
    1390              :         use crate::schema::safekeepers::dsl::*;
    1391              : 
    1392            0 :         self.with_conn(move |conn| {
    1393            0 :             let record = record.clone();
    1394            0 :             Box::pin(async move {
    1395            0 :                 let bind = record
    1396            0 :                     .as_insert_or_update()
    1397            0 :                     .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
    1398              : 
    1399            0 :                 let inserted_updated = diesel::insert_into(safekeepers)
    1400            0 :                     .values(&bind)
    1401            0 :                     .on_conflict(id)
    1402            0 :                     .do_update()
    1403            0 :                     .set(&bind)
    1404            0 :                     .execute(conn)
    1405            0 :                     .await?;
    1406              : 
    1407            0 :                 if inserted_updated != 1 {
    1408            0 :                     return Err(DatabaseError::Logical(format!(
    1409            0 :                         "unexpected number of rows ({inserted_updated})"
    1410            0 :                     )));
    1411            0 :                 }
    1412              : 
    1413            0 :                 Ok(())
    1414            0 :             })
    1415            0 :         })
    1416            0 :         .await
    1417            0 :     }
    1418              : 
    1419            0 :     pub(crate) async fn set_safekeeper_scheduling_policy(
    1420            0 :         &self,
    1421            0 :         id_: i64,
    1422            0 :         scheduling_policy_: SkSchedulingPolicy,
    1423            0 :     ) -> Result<(), DatabaseError> {
    1424              :         use crate::schema::safekeepers::dsl::*;
    1425              : 
    1426            0 :         self.with_conn(move |conn| {
    1427            0 :             Box::pin(async move {
    1428              :                 #[derive(Insertable, AsChangeset)]
    1429              :                 #[diesel(table_name = crate::schema::safekeepers)]
    1430              :                 struct UpdateSkSchedulingPolicy<'a> {
    1431              :                     id: i64,
    1432              :                     scheduling_policy: &'a str,
    1433              :                 }
    1434            0 :                 let scheduling_policy_ = String::from(scheduling_policy_);
    1435              : 
    1436            0 :                 let rows_affected = diesel::update(safekeepers.filter(id.eq(id_)))
    1437            0 :                     .set(scheduling_policy.eq(scheduling_policy_))
    1438            0 :                     .execute(conn)
    1439            0 :                     .await?;
    1440              : 
    1441            0 :                 if rows_affected != 1 {
    1442            0 :                     return Err(DatabaseError::Logical(format!(
    1443            0 :                         "unexpected number of rows ({rows_affected})",
    1444            0 :                     )));
    1445            0 :                 }
    1446              : 
    1447            0 :                 Ok(())
    1448            0 :             })
    1449            0 :         })
    1450            0 :         .await
    1451            0 :     }
    1452              : 
    1453              :     /// Activate the given safekeeper, ensuring that there is no TOCTOU.
    1454              :     /// Returns `Some` if the safekeeper was indeed in the activating state (or already active). Other states return `None`.
    1455            0 :     pub(crate) async fn activate_safekeeper(&self, id_: i64) -> Result<Option<()>, DatabaseError> {
    1456              :         use crate::schema::safekeepers::dsl::*;
    1457              : 
    1458            0 :         self.with_conn(move |conn| {
    1459            0 :             Box::pin(async move {
    1460              :                 #[derive(Insertable, AsChangeset)]
    1461              :                 #[diesel(table_name = crate::schema::safekeepers)]
    1462              :                 struct UpdateSkSchedulingPolicy<'a> {
    1463              :                     id: i64,
    1464              :                     scheduling_policy: &'a str,
    1465              :                 }
    1466            0 :                 let scheduling_policy_active = String::from(SkSchedulingPolicy::Active);
    1467            0 :                 let scheduling_policy_activating = String::from(SkSchedulingPolicy::Activating);
    1468              : 
    1469            0 :                 let rows_affected = diesel::update(
    1470            0 :                     safekeepers.filter(id.eq(id_)).filter(
    1471            0 :                         scheduling_policy
    1472            0 :                             .eq(scheduling_policy_activating)
    1473            0 :                             .or(scheduling_policy.eq(&scheduling_policy_active)),
    1474            0 :                     ),
    1475            0 :                 )
    1476            0 :                 .set(scheduling_policy.eq(&scheduling_policy_active))
    1477            0 :                 .execute(conn)
    1478            0 :                 .await?;
    1479              : 
    1480            0 :                 if rows_affected == 0 {
    1481            0 :                     return Ok(Some(()));
    1482            0 :                 }
    1483            0 :                 if rows_affected != 1 {
    1484            0 :                     return Err(DatabaseError::Logical(format!(
    1485            0 :                         "unexpected number of rows ({rows_affected})",
    1486            0 :                     )));
    1487            0 :                 }
    1488              : 
    1489            0 :                 Ok(Some(()))
    1490            0 :             })
    1491            0 :         })
    1492            0 :         .await
    1493            0 :     }
    1494              : 
    1495              :     /// Persist a timeline. Returns whether the timeline was newly inserted; if it wasn't, no writes were performed.
    1496            0 :     pub(crate) async fn insert_timeline(&self, entry: TimelinePersistence) -> DatabaseResult<bool> {
    1497              :         use crate::schema::timelines;
    1498              : 
    1499            0 :         let entry = &entry;
    1500            0 :         self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
    1501            0 :             Box::pin(async move {
    1502            0 :                 let inserted_updated = diesel::insert_into(timelines::table)
    1503            0 :                     .values(entry)
    1504            0 :                     .on_conflict((timelines::tenant_id, timelines::timeline_id))
    1505            0 :                     .do_nothing()
    1506            0 :                     .execute(conn)
    1507            0 :                     .await?;
    1508              : 
    1509            0 :                 match inserted_updated {
    1510            0 :                     0 => Ok(false),
    1511            0 :                     1 => Ok(true),
    1512            0 :                     _ => Err(DatabaseError::Logical(format!(
    1513            0 :                         "unexpected number of rows ({inserted_updated})"
    1514            0 :                     ))),
    1515              :                 }
    1516            0 :             })
    1517            0 :         })
    1518            0 :         .await
    1519            0 :     }
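
// A standalone sketch of the return-value contract above: with ON CONFLICT DO
// NOTHING, the affected-row count distinguishes "already present" from "newly
// inserted", and anything else is treated as a logical error.
fn newly_inserted(rows_affected: usize) -> Result<bool, String> {
    match rows_affected {
        0 => Ok(false), // the (tenant_id, timeline_id) row already existed
        1 => Ok(true),  // freshly inserted
        n => Err(format!("unexpected number of rows ({n})")),
    }
}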
    1520              : 
    1521              :     /// Update an already present timeline.
    1522              :     /// VERY UNSAFE FUNCTION: this overrides in-progress migrations. Don't use this unless necessary.
    1523            0 :     pub(crate) async fn update_timeline_unsafe(
    1524            0 :         &self,
    1525            0 :         entry: TimelineUpdate,
    1526            0 :     ) -> DatabaseResult<bool> {
    1527              :         use crate::schema::timelines;
    1528              : 
    1529            0 :         let entry = &entry;
    1530            0 :         self.with_measured_conn(DatabaseOperation::UpdateTimeline, move |conn| {
    1531            0 :             Box::pin(async move {
    1532            0 :                 let inserted_updated = diesel::update(timelines::table)
    1533            0 :                     .filter(timelines::tenant_id.eq(&entry.tenant_id))
    1534            0 :                     .filter(timelines::timeline_id.eq(&entry.timeline_id))
    1535            0 :                     .set(entry)
    1536            0 :                     .execute(conn)
    1537            0 :                     .await?;
    1538              : 
    1539            0 :                 match inserted_updated {
    1540            0 :                     0 => Ok(false),
    1541            0 :                     1 => Ok(true),
    1542            0 :                     _ => Err(DatabaseError::Logical(format!(
    1543            0 :                         "unexpected number of rows ({inserted_updated})"
    1544            0 :                     ))),
    1545              :                 }
    1546            0 :             })
    1547            0 :         })
    1548            0 :         .await
    1549            0 :     }
    1550              : 
    1551              :     /// Update timeline membership configuration in the database.
    1552              :     /// Perform a compare-and-swap (CAS) operation on the timeline's generation.
    1553              :     /// The `new_generation` must be the next (+1) generation after the one in the database.
    1554              :     /// Also inserts `reconcile_requests` into the `safekeeper_timeline_pending_ops` table in the same
    1555              :     /// transaction.
    1556            0 :     pub(crate) async fn update_timeline_membership(
    1557            0 :         &self,
    1558            0 :         tenant_id: TenantId,
    1559            0 :         timeline_id: TimelineId,
    1560            0 :         new_generation: SafekeeperGeneration,
    1561            0 :         sk_set: &[NodeId],
    1562            0 :         new_sk_set: Option<&[NodeId]>,
    1563            0 :         reconcile_requests: &[TimelinePendingOpPersistence],
    1564            0 :     ) -> DatabaseResult<()> {
    1565              :         use crate::schema::safekeeper_timeline_pending_ops as stpo;
    1566              :         use crate::schema::timelines;
    1567              :         use diesel::query_dsl::methods::FilterDsl;
    1568              : 
    1569            0 :         let prev_generation = new_generation.previous().unwrap();
    1570              : 
    1571            0 :         let tenant_id = &tenant_id;
    1572            0 :         let timeline_id = &timeline_id;
    1573            0 :         self.with_measured_conn(DatabaseOperation::UpdateTimelineMembership, move |conn| {
    1574            0 :             Box::pin(async move {
    1575            0 :                 let updated = diesel::update(timelines::table)
    1576            0 :                     .filter(timelines::tenant_id.eq(&tenant_id.to_string()))
    1577            0 :                     .filter(timelines::timeline_id.eq(&timeline_id.to_string()))
    1578            0 :                     .filter(timelines::generation.eq(prev_generation.into_inner() as i32))
    1579            0 :                     .set((
    1580            0 :                         timelines::generation.eq(new_generation.into_inner() as i32),
    1581            0 :                         timelines::sk_set
    1582            0 :                             .eq(sk_set.iter().map(|id| id.0 as i64).collect::<Vec<_>>()),
    1583            0 :                         timelines::new_sk_set.eq(new_sk_set
    1584            0 :                             .map(|set| set.iter().map(|id| id.0 as i64).collect::<Vec<_>>())),
    1585              :                     ))
    1586            0 :                     .execute(conn)
    1587            0 :                     .await?;
    1588              : 
    1589            0 :                 match updated {
    1590              :                     0 => {
    1591              :                         // TODO(diko): It makes sense to select the current generation
    1592              :                         // and include it in the error message for better debuggability.
    1593            0 :                         return Err(DatabaseError::Cas(
    1594            0 :                             "Failed to update membership configuration".to_string(),
    1595            0 :                         ));
    1596              :                     }
    1597            0 :                     1 => {}
    1598              :                     _ => {
    1599            0 :                         return Err(DatabaseError::Logical(format!(
    1600            0 :                             "unexpected number of rows ({updated})"
    1601            0 :                         )));
    1602              :                     }
    1603              :                 };
    1604              : 
    1605            0 :                 for req in reconcile_requests {
    1606            0 :                     let inserted_updated = diesel::insert_into(stpo::table)
    1607            0 :                         .values(req)
    1608            0 :                         .on_conflict((stpo::tenant_id, stpo::timeline_id, stpo::sk_id))
    1609            0 :                         .do_update()
    1610            0 :                         .set(req)
    1611            0 :                         .filter(stpo::generation.lt(req.generation))
    1612            0 :                         .execute(conn)
    1613            0 :                         .await?;
    1614              : 
    1615            0 :                     if inserted_updated > 1 {
    1616            0 :                         return Err(DatabaseError::Logical(format!(
    1617            0 :                             "unexpected number of rows ({inserted_updated})"
    1618            0 :                         )));
    1619            0 :                     }
    1620              :                 }
    1621              : 
    1622            0 :                 Ok(())
    1623            0 :             })
    1624            0 :         })
    1625            0 :         .await
    1626            0 :     }
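
// A standalone sketch of the generation compare-and-swap above, applied to an
// in-memory row instead of the `timelines` table. The update only lands when the
// stored generation is exactly one less than `new_generation`; the struct is an
// illustrative stand-in, not a type from this crate.
struct MembershipRow {
    generation: u32,
    sk_set: Vec<u64>,
    new_sk_set: Option<Vec<u64>>,
}

fn cas_membership(
    row: &mut MembershipRow,
    new_generation: u32,
    sk_set: Vec<u64>,
    new_sk_set: Option<Vec<u64>>,
) -> Result<(), String> {
    // Mirrors the `generation == new_generation - 1` filter in the UPDATE above.
    if row.generation + 1 != new_generation {
        return Err("Failed to update membership configuration".to_string());
    }
    row.generation = new_generation;
    row.sk_set = sk_set;
    row.new_sk_set = new_sk_set;
    Ok(())
}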
    1627              : 
    1628              :     /// Update the cplane notified generation for a timeline.
    1629              :     /// Perform a compare-and-swap (CAS) operation on the timeline's cplane notified generation.
    1630              :     /// The update will fail if the specified generation is less than the cplane notified generation
    1631              :     /// in the database.
    1632            0 :     pub(crate) async fn update_cplane_notified_generation(
    1633            0 :         &self,
    1634            0 :         tenant_id: TenantId,
    1635            0 :         timeline_id: TimelineId,
    1636            0 :         generation: SafekeeperGeneration,
    1637            0 :     ) -> DatabaseResult<()> {
    1638              :         use crate::schema::timelines::dsl;
    1639              : 
    1640            0 :         let tenant_id = &tenant_id;
    1641            0 :         let timeline_id = &timeline_id;
    1642            0 :         self.with_measured_conn(
    1643            0 :             DatabaseOperation::UpdateCplaneNotifiedGeneration,
    1644            0 :             move |conn| {
    1645            0 :                 Box::pin(async move {
    1646            0 :                     let updated = diesel::update(dsl::timelines)
    1647            0 :                         .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
    1648            0 :                         .filter(dsl::timeline_id.eq(&timeline_id.to_string()))
    1649            0 :                         .filter(dsl::cplane_notified_generation.le(generation.into_inner() as i32))
    1650            0 :                         .set(dsl::cplane_notified_generation.eq(generation.into_inner() as i32))
    1651            0 :                         .execute(conn)
    1652            0 :                         .await?;
    1653              : 
    1654            0 :                     match updated {
    1655            0 :                         0 => Err(DatabaseError::Cas(
    1656            0 :                             "Failed to update cplane notified generation".to_string(),
    1657            0 :                         )),
    1658            0 :                         1 => Ok(()),
    1659            0 :                         _ => Err(DatabaseError::Logical(format!(
    1660            0 :                             "unexpected number of rows ({updated})"
    1661            0 :                         ))),
    1662              :                     }
    1663            0 :                 })
    1664            0 :             },
    1665              :         )
    1666            0 :         .await
    1667            0 :     }
    1668              : 
    1669              :     /// Update the sk set notified generation for a timeline.
    1670              :     /// Perform a compare-and-swap (CAS) operation on the timeline's sk set notified generation.
    1671              :     /// The update will fail if the specified generation is less than the sk set notified generation
    1672              :     /// in the database.
    1673            0 :     pub(crate) async fn update_sk_set_notified_generation(
    1674            0 :         &self,
    1675            0 :         tenant_id: TenantId,
    1676            0 :         timeline_id: TimelineId,
    1677            0 :         generation: SafekeeperGeneration,
    1678            0 :     ) -> DatabaseResult<()> {
    1679              :         use crate::schema::timelines::dsl;
    1680              : 
    1681            0 :         let tenant_id = &tenant_id;
    1682            0 :         let timeline_id = &timeline_id;
    1683            0 :         self.with_measured_conn(
    1684            0 :             DatabaseOperation::UpdateSkSetNotifiedGeneration,
    1685            0 :             move |conn| {
    1686            0 :                 Box::pin(async move {
    1687            0 :                     let updated = diesel::update(dsl::timelines)
    1688            0 :                         .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
    1689            0 :                         .filter(dsl::timeline_id.eq(&timeline_id.to_string()))
    1690            0 :                         .filter(dsl::sk_set_notified_generation.le(generation.into_inner() as i32))
    1691            0 :                         .set(dsl::sk_set_notified_generation.eq(generation.into_inner() as i32))
    1692            0 :                         .execute(conn)
    1693            0 :                         .await?;
    1694              : 
    1695            0 :                     match updated {
    1696            0 :                         0 => Err(DatabaseError::Cas(
    1697            0 :                             "Failed to update sk set notified generation".to_string(),
    1698            0 :                         )),
    1699            0 :                         1 => Ok(()),
    1700            0 :                         _ => Err(DatabaseError::Logical(format!(
    1701            0 :                             "unexpected number of rows ({updated})"
    1702            0 :                         ))),
    1703              :                     }
    1704            0 :                 })
    1705            0 :             },
    1706              :         )
    1707            0 :         .await
    1708            0 :     }
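
// A standalone sketch of the monotonic rule shared by the two notified-generation
// setters above: the write succeeds only if it does not move the stored value
// backwards, matching the `.le(generation)` filter in both UPDATE statements.
fn bump_notified_generation(stored: &mut u32, generation: u32) -> Result<(), String> {
    if *stored > generation {
        // A newer notification was already recorded; refuse the stale write.
        return Err("Failed to update notified generation".to_string());
    }
    *stored = generation;
    Ok(())
}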
    1709              : 
    1710              :     /// Load timeline from db. Returns `None` if not present.
    1711            0 :     pub(crate) async fn get_timeline(
    1712            0 :         &self,
    1713            0 :         tenant_id: TenantId,
    1714            0 :         timeline_id: TimelineId,
    1715            0 :     ) -> DatabaseResult<Option<TimelinePersistence>> {
    1716              :         use crate::schema::timelines::dsl;
    1717              : 
    1718            0 :         let tenant_id = &tenant_id;
    1719            0 :         let timeline_id = &timeline_id;
    1720            0 :         let timeline_from_db = self
    1721            0 :             .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
    1722            0 :                 Box::pin(async move {
    1723            0 :                     let mut from_db: Vec<TimelineFromDb> = dsl::timelines
    1724            0 :                         .filter(
    1725            0 :                             dsl::tenant_id
    1726            0 :                                 .eq(&tenant_id.to_string())
    1727            0 :                                 .and(dsl::timeline_id.eq(&timeline_id.to_string())),
    1728            0 :                         )
    1729            0 :                         .load(conn)
    1730            0 :                         .await?;
    1731            0 :                     if from_db.is_empty() {
    1732            0 :                         return Ok(None);
    1733            0 :                     }
    1734            0 :                     if from_db.len() != 1 {
    1735            0 :                         return Err(DatabaseError::Logical(format!(
    1736            0 :                             "unexpected number of rows ({})",
    1737            0 :                             from_db.len()
    1738            0 :                         )));
    1739            0 :                     }
    1740              : 
    1741            0 :                     Ok(Some(from_db.pop().unwrap().into_persistence()))
    1742            0 :                 })
    1743            0 :             })
    1744            0 :             .await?;
    1745              : 
    1746            0 :         Ok(timeline_from_db)
    1747            0 :     }
    1748              : 
    1749              :     /// Set `deleted_at` for the given timeline
    1750            0 :     pub(crate) async fn timeline_set_deleted_at(
    1751            0 :         &self,
    1752            0 :         tenant_id: TenantId,
    1753            0 :         timeline_id: TimelineId,
    1754            0 :     ) -> DatabaseResult<()> {
    1755              :         use crate::schema::timelines;
    1756              : 
    1757            0 :         let deletion_time = chrono::Local::now().to_utc();
    1758            0 :         self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
    1759            0 :             Box::pin(async move {
    1760            0 :                 let updated = diesel::update(timelines::table)
    1761            0 :                     .filter(timelines::tenant_id.eq(tenant_id.to_string()))
    1762            0 :                     .filter(timelines::timeline_id.eq(timeline_id.to_string()))
    1763            0 :                     .set(timelines::deleted_at.eq(Some(deletion_time)))
    1764            0 :                     .execute(conn)
    1765            0 :                     .await?;
    1766              : 
    1767            0 :                 match updated {
    1768            0 :                     0 => Ok(()),
    1769            0 :                     1 => Ok(()),
    1770            0 :                     _ => Err(DatabaseError::Logical(format!(
    1771            0 :                         "unexpected number of rows ({updated})"
    1772            0 :                     ))),
    1773              :                 }
    1774            0 :             })
    1775            0 :         })
    1776            0 :         .await
    1777            0 :     }
    1778              : 
    1779              :     /// Delete a timeline from the db.
    1780              :     ///
    1781              :     /// Only works if `deleted_at` is set, so you should call [`Self::timeline_set_deleted_at`] before.
    1782            0 :     pub(crate) async fn delete_timeline(
    1783            0 :         &self,
    1784            0 :         tenant_id: TenantId,
    1785            0 :         timeline_id: TimelineId,
    1786            0 :     ) -> DatabaseResult<()> {
    1787              :         use crate::schema::timelines::dsl;
    1788              : 
    1789            0 :         let tenant_id = &tenant_id;
    1790            0 :         let timeline_id = &timeline_id;
    1791            0 :         self.with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
    1792            0 :             Box::pin(async move {
    1793            0 :                 diesel::delete(dsl::timelines)
    1794            0 :                     .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
    1795            0 :                     .filter(dsl::timeline_id.eq(&timeline_id.to_string()))
    1796            0 :                     .filter(dsl::deleted_at.is_not_null())
    1797            0 :                     .execute(conn)
    1798            0 :                     .await?;
    1799            0 :                 Ok(())
    1800            0 :             })
    1801            0 :         })
    1802            0 :         .await?;
    1803              : 
    1804            0 :         Ok(())
    1805            0 :     }
    1806              : 
    1807              :     /// Loads a list of all timelines from database.
    1808            0 :     pub(crate) async fn list_timelines_for_tenant(
    1809            0 :         &self,
    1810            0 :         tenant_id: TenantId,
    1811            0 :     ) -> DatabaseResult<Vec<TimelinePersistence>> {
    1812              :         use crate::schema::timelines::dsl;
    1813              : 
    1814            0 :         let tenant_id = &tenant_id;
    1815            0 :         let timelines = self
    1816            0 :             .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
    1817            0 :                 Box::pin(async move {
    1818            0 :                     let timelines: Vec<TimelineFromDb> = dsl::timelines
    1819            0 :                         .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
    1820            0 :                         .load(conn)
    1821            0 :                         .await?;
    1822            0 :                     Ok(timelines)
    1823            0 :                 })
    1824            0 :             })
    1825            0 :             .await?;
    1826              : 
    1827            0 :         let timelines = timelines
    1828            0 :             .into_iter()
    1829            0 :             .map(TimelineFromDb::into_persistence)
    1830            0 :             .collect();
    1831            0 :         Ok(timelines)
    1832            0 :     }
    1833              : 
    1834              :     /// Persist a pending op. Returns whether it was newly inserted. If it wasn't, no writes were performed.
    1835            0 :     pub(crate) async fn insert_pending_op(
    1836            0 :         &self,
    1837            0 :         entry: TimelinePendingOpPersistence,
    1838            0 :     ) -> DatabaseResult<bool> {
    1839              :         use crate::schema::safekeeper_timeline_pending_ops as skpo;
    1840              :         // This overrides the `filter` fn used in other functions, so contain the mayhem via a function-local use
    1841              :         use diesel::query_dsl::methods::FilterDsl;
    1842              : 
    1843            0 :         let entry = &entry;
    1844            0 :         self.with_measured_conn(DatabaseOperation::InsertTimelineReconcile, move |conn| {
    1845            0 :             Box::pin(async move {
    1846              :                 // For simplicity it makes sense to keep only the last operation
    1847              :                 // per (tenant, timeline, sk) tuple: if we migrated a timeline
    1848              :                 // off a node and are adding it back, it is not necessary to remove
    1849              :                 // data from it. Hence, generation is not part of the primary key
    1850              :                 // and we overwrite any rows with lower generations here.
    1851            0 :                 let inserted_updated = diesel::insert_into(skpo::table)
    1852            0 :                     .values(entry)
    1853            0 :                     .on_conflict((skpo::tenant_id, skpo::timeline_id, skpo::sk_id))
    1854            0 :                     .do_update()
    1855            0 :                     .set(entry)
    1856            0 :                     .filter(skpo::generation.lt(entry.generation))
    1857            0 :                     .execute(conn)
    1858            0 :                     .await?;
    1859              : 
    1860            0 :                 match inserted_updated {
    1861            0 :                     0 => Ok(false),
    1862            0 :                     1 => Ok(true),
    1863            0 :                     _ => Err(DatabaseError::Logical(format!(
    1864            0 :                         "unexpected number of rows ({inserted_updated})"
    1865            0 :                     ))),
    1866              :                 }
    1867            0 :             })
    1868            0 :         })
    1869            0 :         .await
    1870            0 :     }
    1871              :     /// Remove persisted pending op.
    1872            0 :     pub(crate) async fn remove_pending_op(
    1873            0 :         &self,
    1874            0 :         tenant_id: TenantId,
    1875            0 :         timeline_id: Option<TimelineId>,
    1876            0 :         sk_id: NodeId,
    1877            0 :         generation: u32,
    1878            0 :     ) -> DatabaseResult<()> {
    1879              :         use crate::schema::safekeeper_timeline_pending_ops::dsl;
    1880              : 
    1881            0 :         let tenant_id = &tenant_id;
    1882            0 :         let timeline_id = &timeline_id;
    1883            0 :         self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
    1884            0 :             let timeline_id_str = timeline_id.map(|tid| tid.to_string()).unwrap_or_default();
    1885            0 :             Box::pin(async move {
    1886            0 :                 diesel::delete(dsl::safekeeper_timeline_pending_ops)
    1887            0 :                     .filter(dsl::tenant_id.eq(tenant_id.to_string()))
    1888            0 :                     .filter(dsl::timeline_id.eq(timeline_id_str))
    1889            0 :                     .filter(dsl::sk_id.eq(sk_id.0 as i64))
    1890            0 :                     .filter(dsl::generation.eq(generation as i32))
    1891            0 :                     .execute(conn)
    1892            0 :                     .await?;
    1893            0 :                 Ok(())
    1894            0 :             })
    1895            0 :         })
    1896            0 :         .await
    1897            0 :     }
    1898              : 
    1899              :     /// Load pending operations from db, joined together with timeline data.
    1900            0 :     pub(crate) async fn list_pending_ops_with_timelines(
    1901            0 :         &self,
    1902            0 :     ) -> DatabaseResult<Vec<(TimelinePendingOpPersistence, Option<TimelinePersistence>)>> {
    1903              :         use crate::schema::safekeeper_timeline_pending_ops::dsl;
    1904              :         use crate::schema::timelines;
    1905              : 
    1906            0 :         let timeline_from_db = self
    1907            0 :             .with_measured_conn(
    1908            0 :                 DatabaseOperation::ListTimelineReconcileStartup,
    1909            0 :                 move |conn| {
    1910            0 :                     Box::pin(async move {
    1911            0 :                         let from_db: Vec<(TimelinePendingOpPersistence, Option<TimelineFromDb>)> =
    1912            0 :                             dsl::safekeeper_timeline_pending_ops
    1913            0 :                                 .left_join(
    1914            0 :                                     timelines::table.on(timelines::tenant_id
    1915            0 :                                         .eq(dsl::tenant_id)
    1916            0 :                                         .and(timelines::timeline_id.eq(dsl::timeline_id))),
    1917            0 :                                 )
    1918            0 :                                 .select((
    1919            0 :                                     TimelinePendingOpPersistence::as_select(),
    1920            0 :                                     Option::<TimelineFromDb>::as_select(),
    1921            0 :                                 ))
    1922            0 :                                 .load(conn)
    1923            0 :                                 .await?;
    1924            0 :                         Ok(from_db)
    1925            0 :                     })
    1926            0 :                 },
    1927              :             )
    1928            0 :             .await?;
    1929              : 
    1930            0 :         Ok(timeline_from_db
    1931            0 :             .into_iter()
    1932            0 :             .map(|(op, tl_opt)| (op, tl_opt.map(|tl_opt| tl_opt.into_persistence())))
    1933            0 :             .collect())
    1934            0 :     }
    1935              :     /// List pending operations for a given timeline (including tenant-global ones)
    1936            0 :     pub(crate) async fn list_pending_ops_for_timeline(
    1937            0 :         &self,
    1938            0 :         tenant_id: TenantId,
    1939            0 :         timeline_id: TimelineId,
    1940            0 :     ) -> DatabaseResult<Vec<TimelinePendingOpPersistence>> {
    1941              :         use crate::schema::safekeeper_timeline_pending_ops::dsl;
    1942              : 
    1943            0 :         let timelines_from_db = self
    1944            0 :             .with_measured_conn(DatabaseOperation::ListTimelineReconcile, move |conn| {
    1945            0 :                 Box::pin(async move {
    1946            0 :                     let from_db: Vec<TimelinePendingOpPersistence> =
    1947            0 :                         dsl::safekeeper_timeline_pending_ops
    1948            0 :                             .filter(dsl::tenant_id.eq(tenant_id.to_string()))
    1949            0 :                             .filter(
    1950            0 :                                 dsl::timeline_id
    1951            0 :                                     .eq(timeline_id.to_string())
    1952            0 :                                     .or(dsl::timeline_id.eq("")),
    1953            0 :                             )
    1954            0 :                             .load(conn)
    1955            0 :                             .await?;
    1956            0 :                     Ok(from_db)
    1957            0 :                 })
    1958            0 :             })
    1959            0 :             .await?;
    1960              : 
    1961            0 :         Ok(timelines_from_db)
    1962            0 :     }
    1963              : 
    1964              :     /// Delete all pending ops for the given timeline.
    1965              :     ///
    1966              :     /// Use this only at timeline deletion; otherwise, use the generation-based APIs.
    1967            0 :     pub(crate) async fn remove_pending_ops_for_timeline(
    1968            0 :         &self,
    1969            0 :         tenant_id: TenantId,
    1970            0 :         timeline_id: Option<TimelineId>,
    1971            0 :     ) -> DatabaseResult<()> {
    1972              :         use crate::schema::safekeeper_timeline_pending_ops::dsl;
    1973              : 
    1974            0 :         let tenant_id = &tenant_id;
    1975            0 :         let timeline_id = &timeline_id;
    1976            0 :         self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
    1977            0 :             let timeline_id_str = timeline_id.map(|tid| tid.to_string()).unwrap_or_default();
    1978            0 :             Box::pin(async move {
    1979            0 :                 diesel::delete(dsl::safekeeper_timeline_pending_ops)
    1980            0 :                     .filter(dsl::tenant_id.eq(tenant_id.to_string()))
    1981            0 :                     .filter(dsl::timeline_id.eq(timeline_id_str))
    1982            0 :                     .execute(conn)
    1983            0 :                     .await?;
    1984            0 :                 Ok(())
    1985            0 :             })
    1986            0 :         })
    1987            0 :         .await?;
    1988              : 
    1989            0 :         Ok(())
    1990            0 :     }
    1991              : 
    1992            0 :     pub(crate) async fn insert_timeline_import(
    1993            0 :         &self,
    1994            0 :         import: TimelineImportPersistence,
    1995            0 :     ) -> DatabaseResult<bool> {
    1996            0 :         self.with_measured_conn(DatabaseOperation::InsertTimelineImport, move |conn| {
    1997            0 :             Box::pin({
    1998            0 :                 let import = import.clone();
    1999            0 :                 async move {
    2000            0 :                     let inserted = diesel::insert_into(crate::schema::timeline_imports::table)
    2001            0 :                         .values(import)
    2002            0 :                         .execute(conn)
    2003            0 :                         .await?;
    2004            0 :                     Ok(inserted == 1)
    2005            0 :                 }
    2006              :             })
    2007            0 :         })
    2008            0 :         .await
    2009            0 :     }
    2010              : 
    2011            0 :     pub(crate) async fn list_timeline_imports(&self) -> DatabaseResult<Vec<TimelineImport>> {
    2012              :         use crate::schema::timeline_imports::dsl;
    2013            0 :         let persistent = self
    2014            0 :             .with_measured_conn(DatabaseOperation::ListTimelineImports, move |conn| {
    2015            0 :                 Box::pin(async move {
    2016            0 :                     let from_db: Vec<TimelineImportPersistence> =
    2017            0 :                         dsl::timeline_imports.load(conn).await?;
    2018            0 :                     Ok(from_db)
    2019            0 :                 })
    2020            0 :             })
    2021            0 :             .await?;
    2022              : 
    2023            0 :         let imports: Result<Vec<TimelineImport>, _> = persistent
    2024            0 :             .into_iter()
    2025            0 :             .map(TimelineImport::from_persistent)
    2026            0 :             .collect();
    2027            0 :         match imports {
    2028            0 :             Ok(ok) => Ok(ok.into_iter().collect()),
    2029            0 :             Err(err) => Err(DatabaseError::Logical(format!(
    2030            0 :                 "failed to deserialize import: {err}"
    2031            0 :             ))),
    2032              :         }
    2033            0 :     }
    2034              : 
    2035            0 :     pub(crate) async fn get_timeline_import(
    2036            0 :         &self,
    2037            0 :         tenant_id: TenantId,
    2038            0 :         timeline_id: TimelineId,
    2039            0 :     ) -> DatabaseResult<Option<TimelineImport>> {
    2040              :         use crate::schema::timeline_imports::dsl;
    2041            0 :         let persistent_import = self
    2042            0 :             .with_measured_conn(DatabaseOperation::ListTimelineImports, move |conn| {
    2043            0 :                 Box::pin(async move {
    2044            0 :                     let mut from_db: Vec<TimelineImportPersistence> = dsl::timeline_imports
    2045            0 :                         .filter(dsl::tenant_id.eq(tenant_id.to_string()))
    2046            0 :                         .filter(dsl::timeline_id.eq(timeline_id.to_string()))
    2047            0 :                         .load(conn)
    2048            0 :                         .await?;
    2049              : 
    2050            0 :                     if from_db.len() > 1 {
    2051            0 :                         return Err(DatabaseError::Logical(format!(
    2052            0 :                             "unexpected number of rows ({})",
    2053            0 :                             from_db.len()
    2054            0 :                         )));
    2055            0 :                     }
    2056              : 
    2057            0 :                     Ok(from_db.pop())
    2058            0 :                 })
    2059            0 :             })
    2060            0 :             .await?;
    2061              : 
    2062            0 :         persistent_import
    2063            0 :             .map(TimelineImport::from_persistent)
    2064            0 :             .transpose()
    2065            0 :             .map_err(|err| DatabaseError::Logical(format!("failed to deserialize import: {err}")))
    2066            0 :     }
    2067              : 
    2068            0 :     pub(crate) async fn delete_timeline_import(
    2069            0 :         &self,
    2070            0 :         tenant_id: TenantId,
    2071            0 :         timeline_id: TimelineId,
    2072            0 :     ) -> DatabaseResult<()> {
    2073              :         use crate::schema::timeline_imports::dsl;
    2074              : 
    2075            0 :         self.with_measured_conn(DatabaseOperation::DeleteTimelineImport, move |conn| {
    2076            0 :             Box::pin(async move {
    2077            0 :                 diesel::delete(crate::schema::timeline_imports::table)
    2078            0 :                     .filter(
    2079            0 :                         dsl::tenant_id
    2080            0 :                             .eq(tenant_id.to_string())
    2081            0 :                             .and(dsl::timeline_id.eq(timeline_id.to_string())),
    2082            0 :                     )
    2083            0 :                     .execute(conn)
    2084            0 :                     .await?;
    2085              : 
    2086            0 :                 Ok(())
    2087            0 :             })
    2088            0 :         })
    2089            0 :         .await
    2090            0 :     }
    2091              : 
    2092              :     /// Idempotently update the status of one shard for an ongoing timeline import
    2093              :     ///
    2094              :     /// If the update was persisted to the database, then the current state of the
    2095              :     /// import is returned to the caller. In case of logical errors a bespoke
    2096              :     /// [`TimelineImportUpdateError`] instance is returned. Other database errors
    2097              :     /// are covered by the outer [`DatabaseError`].
    2098            0 :     pub(crate) async fn update_timeline_import(
    2099            0 :         &self,
    2100            0 :         tenant_shard_id: TenantShardId,
    2101            0 :         timeline_id: TimelineId,
    2102            0 :         shard_status: ShardImportStatus,
    2103            0 :     ) -> DatabaseResult<Result<Option<TimelineImport>, TimelineImportUpdateError>> {
    2104              :         use crate::schema::timeline_imports::dsl;
    2105              : 
    2106            0 :         self.with_measured_conn(DatabaseOperation::UpdateTimelineImport, move |conn| {
    2107            0 :             Box::pin({
    2108            0 :                 let shard_status = shard_status.clone();
    2109            0 :                 async move {
    2110              :                     // Load the current state from the database
    2111            0 :                     let mut from_db: Vec<TimelineImportPersistence> = dsl::timeline_imports
    2112            0 :                         .filter(
    2113            0 :                             dsl::tenant_id
    2114            0 :                                 .eq(tenant_shard_id.tenant_id.to_string())
    2115            0 :                                 .and(dsl::timeline_id.eq(timeline_id.to_string())),
    2116            0 :                         )
    2117            0 :                         .load(conn)
    2118            0 :                         .await?;
    2119              : 
    2120            0 :                     assert!(from_db.len() <= 1);
    2121              : 
    2122            0 :                     let mut status = match from_db.pop() {
    2123            0 :                         Some(some) => TimelineImport::from_persistent(some).unwrap(),
    2124              :                         None => {
    2125            0 :                             return Ok(Err(TimelineImportUpdateError::ImportNotFound {
    2126            0 :                                 tenant_id: tenant_shard_id.tenant_id,
    2127            0 :                                 timeline_id,
    2128            0 :                             }));
    2129              :                         }
    2130              :                     };
    2131              : 
    2132              :                     // Perform the update in-memory
    2133            0 :                     let follow_up = match status.update(tenant_shard_id.to_index(), shard_status) {
    2134            0 :                         Ok(ok) => ok,
    2135            0 :                         Err(err) => {
    2136            0 :                             return Ok(Err(err));
    2137              :                         }
    2138              :                     };
    2139              : 
    2140            0 :                     let new_persistent = status.to_persistent();
    2141              : 
    2142              :                     // Write back if required (in the same transaction)
    2143            0 :                     match follow_up {
    2144              :                         TimelineImportUpdateFollowUp::Persist => {
    2145            0 :                             let updated = diesel::update(dsl::timeline_imports)
    2146            0 :                                 .filter(
    2147            0 :                                     dsl::tenant_id
    2148            0 :                                         .eq(tenant_shard_id.tenant_id.to_string())
    2149            0 :                                         .and(dsl::timeline_id.eq(timeline_id.to_string())),
    2150            0 :                                 )
    2151            0 :                                 .set(dsl::shard_statuses.eq(new_persistent.shard_statuses))
    2152            0 :                                 .execute(conn)
    2153            0 :                                 .await?;
    2154              : 
    2155            0 :                             if updated != 1 {
    2156            0 :                                 return Ok(Err(TimelineImportUpdateError::ImportNotFound {
    2157            0 :                                     tenant_id: tenant_shard_id.tenant_id,
    2158            0 :                                     timeline_id,
    2159            0 :                                 }));
    2160            0 :                             }
    2161              : 
    2162            0 :                             Ok(Ok(Some(status)))
    2163              :                         }
    2164            0 :                         TimelineImportUpdateFollowUp::None => Ok(Ok(None)),
    2165              :                     }
    2166            0 :                 }
    2167              :             })
    2168            0 :         })
    2169            0 :         .await
    2170            0 :     }
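                      :     // A caller-side sketch of consuming the nested result above, assuming
                      :     // hypothetical `persistence`, `tenant_shard_id`, `timeline_id` and
                      :     // `shard_status` bindings; the outer `?` handles the DatabaseError layer:
                      :     //
                      :     //   match persistence
                      :     //       .update_timeline_import(tenant_shard_id, timeline_id, shard_status)
                      :     //       .await?
                      :     //   {
                      :     //       Ok(Some(import)) => { /* update persisted; `import` is the new state */ }
                      :     //       Ok(None) => { /* idempotent no-op: nothing new to persist */ }
                      :     //       Err(TimelineImportUpdateError::ImportNotFound { .. }) => { /* import missing */ }
                      :     //       Err(_other) => { /* other logical update errors */ }
                      :     //   }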
    2171              : 
    2172            0 :     pub(crate) async fn is_tenant_importing_timeline(
    2173            0 :         &self,
    2174            0 :         tenant_id: TenantId,
    2175            0 :     ) -> DatabaseResult<bool> {
    2176              :         use crate::schema::timeline_imports::dsl;
    2177            0 :         self.with_measured_conn(DatabaseOperation::IsTenantImportingTimeline, move |conn| {
    2178            0 :             Box::pin(async move {
    2179            0 :                 let imports: i64 = dsl::timeline_imports
    2180            0 :                     .filter(dsl::tenant_id.eq(tenant_id.to_string()))
    2181            0 :                     .count()
    2182            0 :                     .get_result(conn)
    2183            0 :                     .await?;
    2184              : 
    2185            0 :                 Ok(imports > 0)
    2186            0 :             })
    2187            0 :         })
    2188            0 :         .await
    2189            0 :     }
    2190              : 
    2191              :     ////////////////////////////////////////////////////////////////
    2192              :     //////////////////////// Hadron methods ////////////////////////
    2193              :     //////////////////////// (Brickstore) //////////////////////////
    2194              :     ////////////////////////////////////////////////////////////////
    2195              : 
    2196              :     /// Upsert a SafeKeeper node.
    2197              :     #[allow(unused)]
    2198            0 :     pub(crate) async fn upsert_sk_node(&self, sk_node: &SafeKeeperNode) -> DatabaseResult<()> {
    2199            0 :         let sk_row = sk_node.to_database_row();
    2200            0 :         self.with_measured_conn(DatabaseOperation::UpsertSafeKeeperNode, move |conn| {
    2201              :             // Incantation to make the borrow checker happy
    2202            0 :             let sk_row_clone = sk_row.clone();
    2203            0 :             Box::pin(async move { execute_sk_upsert(conn, sk_row_clone).await })
    2204            0 :         })
    2205            0 :         .await
    2206            0 :     }
    2207              : 
    2208              :     /// Load all Safe Keeper nodes and their scheduled endpoints from the database. This method is called at startup to
    2209              :     /// populate the SafeKeeperScheduler.
    2210              :     #[allow(unused)]
    2211            0 :     pub(crate) async fn load_safekeeper_scheduling_data(
    2212            0 :         &self,
    2213            0 :     ) -> DatabaseResult<HashMap<NodeId, SafeKeeperNode>> {
    2214            0 :         let sk_nodes: HashMap<NodeId, SafeKeeperNode> = self
    2215            0 :             .with_measured_conn(
    2216            0 :                 DatabaseOperation::LoadSafeKeepersAndEndpoints,
    2217            0 :                 move |conn| {
    2218              :                     // Retrieve all Safe Keeper nodes from the hadron_safekeepers table, and all timelines (grouped by
    2219              :                     // safe keeper IDs) from the hadron_timeline_safekeepers table.
    2220            0 :                     Box::pin(async move { scan_safekeepers_and_scheduled_timelines(conn).await })
    2221            0 :                 },
    2222              :             )
    2223            0 :             .await?;
    2224              : 
    2225            0 :         tracing::info!(
    2226            0 :             "load_safekeepers_and_endpoints: loaded {} safekeepers",
    2227            0 :             sk_nodes.len()
    2228              :         );
    2229              : 
    2230            0 :         Ok(sk_nodes)
    2231            0 :     }
    2232              : 
    2233              :     #[allow(unused)]
    2234            0 :     pub(crate) async fn get_or_assign_safekeepers_to_timeline(
    2235            0 :         &self,
    2236            0 :         timeline_id: TimelineId,
    2237            0 :         safekeepers: Vec<NodeId>,
    2238            0 :     ) -> DatabaseResult<Vec<NodeId>> {
    2239            0 :         self.with_measured_conn(
    2240            0 :             DatabaseOperation::GetOrCreateHadronTimelineSafekeeper,
    2241            0 :             move |conn| {
    2242            0 :                 let safekeepers_clone = safekeepers.clone();
    2243            0 :                 Box::pin(async move {
    2244            0 :                     idempotently_persist_or_get_existing_timeline_safekeepers(
    2245            0 :                         conn,
    2246            0 :                         timeline_id,
    2247            0 :                         &safekeepers_clone,
    2248            0 :                     )
    2249            0 :                     .await
    2250            0 :                 })
    2251            0 :             },
    2252              :         )
    2253            0 :         .await
    2254            0 :     }
    2255              : 
    2256              :     #[allow(unused)]
    2257            0 :     pub(crate) async fn delete_hadron_timeline_safekeepers(
    2258            0 :         &self,
    2259            0 :         timeline_id: TimelineId,
    2260            0 :     ) -> DatabaseResult<()> {
    2261            0 :         self.with_measured_conn(DatabaseOperation::DeleteHadronTimeline, move |conn| {
    2262            0 :             Box::pin(async move {
    2263            0 :                 delete_timeline_safekeepers(conn, timeline_id).await?;
    2264            0 :                 Ok(())
    2265            0 :             })
    2266            0 :         })
    2267            0 :         .await
    2268            0 :     }
    2269              : 
    2270              :     #[allow(unused)]
    2271            0 :     pub(crate) async fn get_pageserver_and_safekeepers(
    2272            0 :         &self,
    2273            0 :         tenant_id: TenantId,
    2274            0 :         timeline_id: TimelineId,
    2275            0 :     ) -> DatabaseResult<PageserverAndSafekeeperConnectionInfo> {
    2276            0 :         self.with_measured_conn(
    2277            0 :             DatabaseOperation::FetchPageServerAndSafeKeeperConnections,
    2278            0 :             move |conn| {
    2279            0 :                 Box::pin(async move {
    2280            0 :                     get_pageserver_and_safekeeper_connection_info(conn, tenant_id, timeline_id)
    2281            0 :                         .await
    2282            0 :                 })
    2283            0 :             },
    2284              :         )
    2285            0 :         .await
    2286            0 :     }
    2287              : 
    2288              :     #[allow(unused)]
    2289            0 :     pub(crate) async fn list_hadron_safekeepers(&self) -> DatabaseResult<Vec<HadronSafekeeperRow>> {
    2290            0 :         let safekeepers: Vec<HadronSafekeeperRow> = self
    2291            0 :             .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
    2292            0 :                 Box::pin(async move {
    2293            0 :                     Ok(crate::schema::hadron_safekeepers::table
    2294            0 :                         .load::<HadronSafekeeperRow>(conn)
    2295            0 :                         .await?)
    2296            0 :                 })
    2297            0 :             })
    2298            0 :             .await?;
    2299              : 
    2300            0 :         tracing::info!(
    2301            0 :             "list_hadron_safekeepers: loaded {} nodes",
    2302            0 :             safekeepers.len()
    2303              :         );
    2304              : 
    2305            0 :         Ok(safekeepers)
    2306            0 :     }
    2307              : 
    2308              :     #[allow(unused)]
    2309            0 :     pub(crate) async fn safekeeper_list_timelines(
    2310            0 :         &self,
    2311            0 :         id: i64,
    2312            0 :     ) -> DatabaseResult<SCSafekeeperTimelinesResponse> {
    2313            0 :         self.with_measured_conn(DatabaseOperation::ListSafekeeperTimelines, move |conn| {
    2314            0 :             Box::pin(async move { execute_safekeeper_list_timelines(conn, id).await })
    2315            0 :         })
    2316            0 :         .await
    2317            0 :     }
    2318              : }
    2319              : 
    2320            0 : pub(crate) fn load_certs() -> anyhow::Result<Arc<rustls::RootCertStore>> {
    2321            0 :     let der_certs = rustls_native_certs::load_native_certs();
    2322              : 
    2323            0 :     if !der_certs.errors.is_empty() {
    2324            0 :         anyhow::bail!("could not parse certificates: {:?}", der_certs.errors);
    2325            0 :     }
    2326              : 
    2327            0 :     let mut store = rustls::RootCertStore::empty();
    2328            0 :     store.add_parsable_certificates(der_certs.certs);
    2329            0 :     Ok(Arc::new(store))
    2330            0 : }
    2331              : 
    2332              : #[derive(Debug)]
    2333              : /// A verifier that accepts all certificates (but still logs the validation error)
    2334              : struct AcceptAll(Arc<WebPkiServerVerifier>);
    2335              : impl ServerCertVerifier for AcceptAll {
    2336            0 :     fn verify_server_cert(
    2337            0 :         &self,
    2338            0 :         end_entity: &rustls::pki_types::CertificateDer<'_>,
    2339            0 :         intermediates: &[rustls::pki_types::CertificateDer<'_>],
    2340            0 :         server_name: &rustls::pki_types::ServerName<'_>,
    2341            0 :         ocsp_response: &[u8],
    2342            0 :         now: rustls::pki_types::UnixTime,
    2343            0 :     ) -> Result<ServerCertVerified, rustls::Error> {
    2344            0 :         let r =
    2345            0 :             self.0
    2346            0 :                 .verify_server_cert(end_entity, intermediates, server_name, ocsp_response, now);
    2347            0 :         if let Err(err) = r {
    2348            0 :             tracing::info!(
    2349              :                 ?server_name,
    2350            0 :                 "ignoring db connection TLS validation error: {err:?}"
    2351              :             );
    2352            0 :             return Ok(ServerCertVerified::assertion());
    2353            0 :         }
    2354            0 :         r
    2355            0 :     }
    2356            0 :     fn verify_tls12_signature(
    2357            0 :         &self,
    2358            0 :         message: &[u8],
    2359            0 :         cert: &rustls::pki_types::CertificateDer<'_>,
    2360            0 :         dss: &rustls::DigitallySignedStruct,
    2361            0 :     ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
    2362            0 :         self.0.verify_tls12_signature(message, cert, dss)
    2363            0 :     }
    2364            0 :     fn verify_tls13_signature(
    2365            0 :         &self,
    2366            0 :         message: &[u8],
    2367            0 :         cert: &rustls::pki_types::CertificateDer<'_>,
    2368            0 :         dss: &rustls::DigitallySignedStruct,
    2369            0 :     ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
    2370            0 :         self.0.verify_tls13_signature(message, cert, dss)
    2371            0 :     }
    2372            0 :     fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
    2373            0 :         self.0.supported_verify_schemes()
    2374            0 :     }
    2375              : }
    2376              : 
    2377              : /// Loads the root certificates and constructs a client config suitable for connecting.
    2378              : /// This function is blocking.
    2379            0 : fn client_config_with_root_certs() -> anyhow::Result<rustls::ClientConfig> {
    2380            0 :     let client_config =
    2381            0 :         rustls::ClientConfig::builder_with_provider(Arc::new(ring::default_provider()))
    2382            0 :             .with_safe_default_protocol_versions()
    2383            0 :             .expect("ring should support the default protocol versions");
    2384              :     static DO_CERT_CHECKS: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
    2385            0 :     let do_cert_checks =
    2386            0 :         DO_CERT_CHECKS.get_or_init(|| std::env::var("STORCON_DB_CERT_CHECKS").is_ok());
    2387            0 :     Ok(if *do_cert_checks {
    2388            0 :         client_config
    2389            0 :             .with_root_certificates(load_certs()?)
    2390            0 :             .with_no_client_auth()
    2391              :     } else {
    2392            0 :         let verifier = AcceptAll(
    2393            0 :             WebPkiServerVerifier::builder_with_provider(
    2394            0 :                 load_certs()?,
    2395            0 :                 Arc::new(ring::default_provider()),
    2396              :             )
    2397            0 :             .build()?,
    2398              :         );
    2399            0 :         client_config
    2400            0 :             .dangerous()
    2401            0 :             .with_custom_certificate_verifier(Arc::new(verifier))
    2402            0 :             .with_no_client_auth()
    2403              :     })
    2404            0 : }
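                      : // Note: the strict verification path above is taken only when the STORCON_DB_CERT_CHECKS
                      : // environment variable is set (to any value); otherwise the AcceptAll verifier is installed
                      : // and TLS validation failures are only logged, not enforced.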
    2405              : 
    2406              : // Hadron's implementation of establish_connection_rustls, which avoids hogging the tokio executor thread during
    2407              : // CPU-intensive operations in postgres connection and session establishment.
    2408              : // Compared to the original implementation, this function offloads the following tasks to spawn_blocking so that
    2409              : // they do not block the tokio executor thread:
    2410              : // 1. Parsing and decoding root certificates during rustls client config setup.
    2411              : // 2. The tokio_postgres::connect() call, which performs the TLS handshake and the postgres password authentication.
    2412            0 : fn establish_connection_rustls(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
    2413            0 :     let fut = async move {
    2414              :         // We first set up the way we want rustls to work.
    2415            0 :         let rustls_config = tokio::task::spawn_blocking(client_config_with_root_certs)
    2416            0 :             .await
    2417            0 :             .map_err(|e| {
    2418            0 :                 ConnectionError::BadConnection(format!(
    2419            0 :                     "Error in spawn_blocking client_config_with_root_certs: {e}"
    2420            0 :                 ))
    2421            0 :             })
    2422            0 :             .and_then(|r| {
    2423            0 :                 r.map_err(|e| {
    2424            0 :                     ConnectionError::BadConnection(format!(
    2425            0 :                         "Error in client_config_with_root_certs: {e}"
    2426            0 :                     ))
    2427            0 :                 })
    2428            0 :             })?;
    2429              : 
    2430            0 :         let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
    2431              : 
    2432              :         // Perform the expensive TLS handshake and SCRAM SHA calculations in a blocking task
    2433            0 :         let task_owned_config = config.to_owned();
    2434            0 :         let (client, conn) = tokio::task::spawn_blocking(move || {
    2435            0 :             tokio::runtime::Handle::current()
    2436            0 :                 .block_on(async { tokio_postgres::connect(&task_owned_config, tls).await })
    2437            0 :         })
    2438            0 :         .await
    2439            0 :         .map_err(|e| {
    2440            0 :             ConnectionError::BadConnection(format!(
    2441            0 :                 "Error in spawn_blocking tokio_postgres::connect: {e}"
    2442            0 :             ))
    2443            0 :         })
    2444            0 :         .and_then(|r| r.map_err(|e| ConnectionError::BadConnection(e.to_string())))?;
    2445              : 
    2446            0 :         AsyncPgConnection::try_from_client_and_connection(client, conn).await
    2447            0 :     };
    2448            0 :     fut.boxed()
    2449            0 : }
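                      : // A minimal sketch, assuming a `database_url` binding, of how a custom setup callback
                      : // like the one above can typically be plugged into a diesel-async bb8 pool:
                      : //
                      : //   let mut manager_config = ManagerConfig::default();
                      : //   manager_config.custom_setup = Box::new(establish_connection_rustls);
                      : //   let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
                      : //       database_url,
                      : //       manager_config,
                      : //   );
                      : //   let pool = Pool::builder().build(manager).await?;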
    2450              : 
    2451              : #[cfg_attr(test, test)]
    2452            1 : fn test_config_debug_censors_password() {
    2453            1 :     let has_pw =
    2454            1 :         "host=/var/lib/postgresql,localhost port=1234 user=specialuser password='NOT ALLOWED TAG'";
    2455            1 :     let has_pw_cfg = has_pw.parse::<tokio_postgres::Config>().unwrap();
    2456            1 :     assert!(format!("{has_pw_cfg:?}").contains("specialuser"));
    2457              :     // Ensure that the password is not leaked by the debug impl
    2458            1 :     assert!(!format!("{has_pw_cfg:?}").contains("NOT ALLOWED TAG"));
    2459            1 : }
    2460              : 
    2461            0 : fn log_postgres_connstr_info(config_str: &str) -> anyhow::Result<()> {
    2462            0 :     let config = config_str
    2463            0 :         .parse::<tokio_postgres::Config>()
    2464            0 :         .map_err(|_e| anyhow::anyhow!("Couldn't parse config str"))?;
    2465              :     // We use debug formatting here, and use a unit test to ensure that we don't leak the password.
    2466              :     // To make extra sure the test gets run, run it every time the function is called
    2467              :     // (this is rather cold code, we can afford it).
    2468              :     #[cfg(not(test))]
    2469            0 :     test_config_debug_censors_password();
    2470            0 :     tracing::info!("database connection config: {config:?}");
    2471            0 :     Ok(())
    2472            0 : }
    2473              : 
    2474              : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
    2475              : #[derive(
    2476            0 :     QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
    2477              : )]
    2478              : #[diesel(table_name = crate::schema::tenant_shards)]
    2479              : pub(crate) struct TenantShardPersistence {
    2480              :     #[serde(default)]
    2481              :     pub(crate) tenant_id: String,
    2482              :     #[serde(default)]
    2483              :     pub(crate) shard_number: i32,
    2484              :     #[serde(default)]
    2485              :     pub(crate) shard_count: i32,
    2486              :     #[serde(default)]
    2487              :     pub(crate) shard_stripe_size: i32,
    2488              : 
    2489              :     // Latest generation number: next time we attach, increment this
    2490              :     // and use the incremented number when attaching.
    2491              :     //
    2492              :     // Generation is only None when first onboarding a tenant, where it may
    2493              :     // be in PlacementPolicy::Secondary and therefore have no valid generation state.
    2494              :     pub(crate) generation: Option<i32>,
    2495              : 
    2496              :     // Currently attached pageserver
    2497              :     #[serde(rename = "pageserver")]
    2498              :     pub(crate) generation_pageserver: Option<i64>,
    2499              : 
    2500              :     #[serde(default)]
    2501              :     pub(crate) placement_policy: String,
    2502              :     #[serde(default)]
    2503              :     pub(crate) splitting: SplitState,
    2504              :     #[serde(default)]
    2505              :     pub(crate) config: String,
    2506              :     #[serde(default)]
    2507              :     pub(crate) scheduling_policy: String,
    2508              : 
    2509              :     // Hint that we should attempt to schedule this tenant shard in the given
    2510              :     // availability zone in order to minimise the chances of cross-AZ communication
    2511              :     // with compute.
    2512              :     pub(crate) preferred_az_id: Option<String>,
    2513              : }
    2514              : 
    2515              : impl TenantShardPersistence {
    2516            0 :     fn get_shard_count(&self) -> Result<ShardCount, ShardConfigError> {
    2517            0 :         self.shard_count
    2518            0 :             .try_into()
    2519            0 :             .map(ShardCount)
    2520            0 :             .map_err(|_| ShardConfigError::InvalidCount)
    2521            0 :     }
    2522              : 
    2523            0 :     fn get_shard_number(&self) -> Result<ShardNumber, ShardConfigError> {
    2524            0 :         self.shard_number
    2525            0 :             .try_into()
    2526            0 :             .map(ShardNumber)
    2527            0 :             .map_err(|_| ShardConfigError::InvalidNumber)
    2528            0 :     }
    2529              : 
    2530            0 :     fn get_stripe_size(&self) -> Result<ShardStripeSize, ShardConfigError> {
    2531            0 :         self.shard_stripe_size
    2532            0 :             .try_into()
    2533            0 :             .map(ShardStripeSize)
    2534            0 :             .map_err(|_| ShardConfigError::InvalidStripeSize)
    2535            0 :     }
    2536              : 
    2537            0 :     pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
    2538            0 :         if self.shard_count == 0 {
    2539              :             // NB: carry over the stripe size from the persisted record, to avoid consistency check
    2540              :             // failures if the persisted value differs from the default stripe size. The stripe size
    2541              :             // doesn't really matter for unsharded tenants anyway.
    2542            0 :             Ok(ShardIdentity::unsharded_with_stripe_size(
    2543            0 :                 self.get_stripe_size()?,
    2544              :             ))
    2545              :         } else {
    2546            0 :             Ok(ShardIdentity::new(
    2547            0 :                 self.get_shard_number()?,
    2548            0 :                 self.get_shard_count()?,
    2549            0 :                 self.get_stripe_size()?,
    2550            0 :             )?)
    2551              :         }
    2552            0 :     }
    2553              : 
    2554            0 :     pub(crate) fn get_tenant_shard_id(&self) -> anyhow::Result<TenantShardId> {
    2555              :         Ok(TenantShardId {
    2556            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    2557            0 :             shard_number: self.get_shard_number()?,
    2558            0 :             shard_count: self.get_shard_count()?,
    2559              :         })
    2560            0 :     }
    2561              : }
    2562              : 
    2563              : /// Parts of [`crate::node::Node`] that are stored durably
    2564            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
    2565              : #[diesel(table_name = crate::schema::nodes)]
    2566              : pub(crate) struct NodePersistence {
    2567              :     pub(crate) node_id: i64,
    2568              :     pub(crate) scheduling_policy: String,
    2569              :     pub(crate) listen_http_addr: String,
    2570              :     pub(crate) listen_http_port: i32,
    2571              :     pub(crate) listen_pg_addr: String,
    2572              :     pub(crate) listen_pg_port: i32,
    2573              :     pub(crate) availability_zone_id: String,
    2574              :     pub(crate) listen_https_port: Option<i32>,
    2575              :     pub(crate) lifecycle: String,
    2576              :     pub(crate) listen_grpc_addr: Option<String>,
    2577              :     pub(crate) listen_grpc_port: Option<i32>,
    2578              : }
    2579              : 
    2580              : /// Tenant metadata health status that is stored durably.
    2581            0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
    2582              : #[diesel(table_name = crate::schema::metadata_health)]
    2583              : pub(crate) struct MetadataHealthPersistence {
    2584              :     #[serde(default)]
    2585              :     pub(crate) tenant_id: String,
    2586              :     #[serde(default)]
    2587              :     pub(crate) shard_number: i32,
    2588              :     #[serde(default)]
    2589              :     pub(crate) shard_count: i32,
    2590              : 
    2591              :     pub(crate) healthy: bool,
    2592              :     pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    2593              : }
    2594              : 
    2595              : impl MetadataHealthPersistence {
    2596            0 :     pub fn new(
    2597            0 :         tenant_shard_id: TenantShardId,
    2598            0 :         healthy: bool,
    2599            0 :         last_scrubbed_at: chrono::DateTime<chrono::Utc>,
    2600            0 :     ) -> Self {
    2601            0 :         let tenant_id = tenant_shard_id.tenant_id.to_string();
    2602            0 :         let shard_number = tenant_shard_id.shard_number.0 as i32;
    2603            0 :         let shard_count = tenant_shard_id.shard_count.literal() as i32;
    2604              : 
    2605            0 :         MetadataHealthPersistence {
    2606            0 :             tenant_id,
    2607            0 :             shard_number,
    2608            0 :             shard_count,
    2609            0 :             healthy,
    2610            0 :             last_scrubbed_at,
    2611            0 :         }
    2612            0 :     }
    2613              : 
    2614              :     #[allow(dead_code)]
    2615            0 :     pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
    2616              :         Ok(TenantShardId {
    2617            0 :             tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
    2618            0 :             shard_number: ShardNumber(self.shard_number as u8),
    2619            0 :             shard_count: ShardCount::new(self.shard_count as u8),
    2620              :         })
    2621            0 :     }
    2622              : }
    2623              : 
    2624              : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
    2625            0 :     fn from(value: MetadataHealthPersistence) -> Self {
    2626            0 :         MetadataHealthRecord {
    2627            0 :             tenant_shard_id: value
    2628            0 :                 .get_tenant_shard_id()
    2629            0 :                 .expect("stored tenant id should be valid"),
    2630            0 :             healthy: value.healthy,
    2631            0 :             last_scrubbed_at: value.last_scrubbed_at,
    2632            0 :         }
    2633            0 :     }
    2634              : }
    2635              : 
    2636              : #[derive(
    2637            0 :     Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
    2638              : )]
    2639              : #[diesel(table_name = crate::schema::controllers)]
    2640              : pub(crate) struct ControllerPersistence {
    2641              :     pub(crate) address: String,
    2642              :     pub(crate) started_at: chrono::DateTime<chrono::Utc>,
    2643              : }
    2644              : 
    2645              : // What we store in the database
    2646            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
    2647              : #[diesel(table_name = crate::schema::safekeepers)]
    2648              : pub(crate) struct SafekeeperPersistence {
    2649              :     pub(crate) id: i64,
    2650              :     pub(crate) region_id: String,
    2651              :     /// 1 is special: it means just created (not currently posted to storcon).
    2652              :     /// Zero or negative is not really expected.
    2653              :     /// Otherwise it is the number from the `release-$(number_of_commits_on_branch)` tag.
    2654              :     pub(crate) version: i64,
    2655              :     pub(crate) host: String,
    2656              :     pub(crate) port: i32,
    2657              :     pub(crate) http_port: i32,
    2658              :     pub(crate) availability_zone_id: String,
    2659              :     pub(crate) scheduling_policy: SkSchedulingPolicyFromSql,
    2660              :     pub(crate) https_port: Option<i32>,
    2661              : }
    2662              : 
    2663              : /// Wrapper struct around [`SkSchedulingPolicy`]: both it and [`FromSql`] live in foreign crates, so the orphan
    2664              : /// rule forbids implementing the trait directly, and we don't want to make [`safekeeper_api`] depend on [`diesel`].
    2665            0 : #[derive(Serialize, Deserialize, FromSqlRow, Eq, PartialEq, Debug, Copy, Clone)]
    2666              : pub(crate) struct SkSchedulingPolicyFromSql(pub(crate) SkSchedulingPolicy);
    2667              : 
    2668              : impl From<SkSchedulingPolicy> for SkSchedulingPolicyFromSql {
    2669            0 :     fn from(value: SkSchedulingPolicy) -> Self {
    2670            0 :         SkSchedulingPolicyFromSql(value)
    2671            0 :     }
    2672              : }
    2673              : 
    2674              : impl FromSql<diesel::sql_types::VarChar, Pg> for SkSchedulingPolicyFromSql {
    2675            0 :     fn from_sql(
    2676            0 :         bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
    2677            0 :     ) -> diesel::deserialize::Result<Self> {
    2678            0 :         let bytes = bytes.as_bytes();
    2679            0 :         match core::str::from_utf8(bytes) {
    2680            0 :             Ok(s) => match SkSchedulingPolicy::from_str(s) {
    2681            0 :                 Ok(policy) => Ok(SkSchedulingPolicyFromSql(policy)),
    2682            0 :                 Err(e) => Err(format!("can't parse: {e}").into()),
    2683              :             },
    2684            0 :             Err(e) => Err(format!("invalid UTF-8 for scheduling policy: {e}").into()),
    2685              :         }
    2686            0 :     }
    2687              : }
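// Minimal sketch of how the newtype wrapper is used at the edges (hypothetical helper,
// not part of the file): parse into the foreign enum, wrap it for diesel, and read it
// back out through the public `.0` field.
#[allow(dead_code)]
fn wrap_scheduling_policy(s: &str) -> Result<SkSchedulingPolicyFromSql, String> {
    SkSchedulingPolicy::from_str(s)
        .map(SkSchedulingPolicyFromSql::from)
        .map_err(|e| format!("can't parse scheduling policy: {e}"))
}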
    2688              : 
    2689              : impl SafekeeperPersistence {
    2690            0 :     pub(crate) fn from_upsert(
    2691            0 :         upsert: SafekeeperUpsert,
    2692            0 :         scheduling_policy: SkSchedulingPolicy,
    2693            0 :     ) -> Self {
    2694            0 :         crate::persistence::SafekeeperPersistence {
    2695            0 :             id: upsert.id,
    2696            0 :             region_id: upsert.region_id,
    2697            0 :             version: upsert.version,
    2698            0 :             host: upsert.host,
    2699            0 :             port: upsert.port,
    2700            0 :             http_port: upsert.http_port,
    2701            0 :             https_port: upsert.https_port,
    2702            0 :             availability_zone_id: upsert.availability_zone_id,
    2703            0 :             scheduling_policy: SkSchedulingPolicyFromSql(scheduling_policy),
    2704            0 :         }
    2705            0 :     }
    2706            0 :     pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
    2707            0 :         Ok(SafekeeperDescribeResponse {
    2708            0 :             id: NodeId(self.id as u64),
    2709            0 :             region_id: self.region_id.clone(),
    2710            0 :             version: self.version,
    2711            0 :             host: self.host.clone(),
    2712            0 :             port: self.port,
    2713            0 :             http_port: self.http_port,
    2714            0 :             https_port: self.https_port,
    2715            0 :             availability_zone_id: self.availability_zone_id.clone(),
    2716            0 :             scheduling_policy: self.scheduling_policy.0,
    2717            0 :         })
    2718            0 :     }
    2719              : }
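// Usage sketch tying the two constructors together (hypothetical helper, not part of
// the file): an upsert arriving over HTTP plus a scheduling policy becomes the row we
// store, which can then be rendered back as a describe response.
// `SkSchedulingPolicy::Active` is assumed to be a variant of the foreign enum.
#[allow(dead_code)]
fn describe_from_upsert(
    upsert: SafekeeperUpsert,
) -> Result<SafekeeperDescribeResponse, DatabaseError> {
    let row = SafekeeperPersistence::from_upsert(upsert, SkSchedulingPolicy::Active);
    row.as_describe_response()
}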
    2720              : 
    2721              : /// What we expect from the upsert http api
    2722            0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
    2723              : pub(crate) struct SafekeeperUpsert {
    2724              :     pub(crate) id: i64,
    2725              :     pub(crate) region_id: String,
    2726              :     /// 1 is special: it means the record was just created (the safekeeper has not yet posted its version to storcon).
    2727              :     /// Zero or negative values are not really expected.
    2728              :     /// Otherwise it is the number from the `release-$(number_of_commits_on_branch)` tag.
    2729              :     pub(crate) version: i64,
    2730              :     pub(crate) host: String,
    2731              :     pub(crate) port: i32,
    2732              :     /// The active flag will not be stored in the database and will be ignored.
    2733              :     pub(crate) active: Option<bool>,
    2734              :     pub(crate) http_port: i32,
    2735              :     pub(crate) https_port: Option<i32>,
    2736              :     pub(crate) availability_zone_id: String,
    2737              : }
    2738              : 
    2739              : impl SafekeeperUpsert {
    2740            0 :     fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
    2741            0 :         if self.version < 0 {
    2742            0 :             anyhow::bail!("negative version: {}", self.version);
    2743            0 :         }
    2744            0 :         Ok(InsertUpdateSafekeeper {
    2745            0 :             id: self.id,
    2746            0 :             region_id: &self.region_id,
    2747            0 :             version: self.version,
    2748            0 :             host: &self.host,
    2749            0 :             port: self.port,
    2750            0 :             http_port: self.http_port,
    2751            0 :             https_port: self.https_port,
    2752            0 :             availability_zone_id: &self.availability_zone_id,
    2753            0 :             // `None` means this column should be left untouched; it is updated via other code paths.
    2754            0 :             scheduling_policy: None,
    2755            0 :         })
    2756            0 :     }
    2757              : }
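// Sketch of the validation above with hypothetical values: a negative `version` is
// rejected before anything is written, and `active` plays no role on this path (as
// the field's doc comment notes).
#[allow(dead_code)]
fn rejects_negative_version_example() {
    let upsert = SafekeeperUpsert {
        id: 1,
        region_id: "region-a".to_string(),
        version: -1,
        host: "sk-1.local".to_string(),
        port: 5454,
        active: None,
        http_port: 7676,
        https_port: None,
        availability_zone_id: "az-1".to_string(),
    };
    assert!(upsert.as_insert_or_update().is_err());
}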
    2758              : 
    2759              : #[derive(Insertable, AsChangeset)]
    2760              : #[diesel(table_name = crate::schema::safekeepers)]
    2761              : struct InsertUpdateSafekeeper<'a> {
    2762              :     id: i64,
    2763              :     region_id: &'a str,
    2764              :     version: i64,
    2765              :     host: &'a str,
    2766              :     port: i32,
    2767              :     http_port: i32,
    2768              :     https_port: Option<i32>,
    2769              :     availability_zone_id: &'a str,
    2770              :     scheduling_policy: Option<&'a str>,
    2771              : }
    2772              : 
    2773            0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
    2774              : #[diesel(sql_type = crate::schema::sql_types::PgLsn)]
    2775              : pub(crate) struct LsnWrapper(pub(crate) Lsn);
    2776              : 
    2777              : impl From<Lsn> for LsnWrapper {
    2778            0 :     fn from(value: Lsn) -> Self {
    2779            0 :         LsnWrapper(value)
    2780            0 :     }
    2781              : }
    2782              : 
    2783              : impl FromSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
    2784            0 :     fn from_sql(
    2785            0 :         bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
    2786            0 :     ) -> diesel::deserialize::Result<Self> {
    2787            0 :         let byte_arr: diesel::deserialize::Result<[u8; 8]> = bytes
    2788            0 :             .as_bytes()
    2789            0 :             .try_into()
    2790            0 :             .map_err(|_| "Can't obtain lsn from sql".into());
    2791            0 :         Ok(LsnWrapper(Lsn(u64::from_be_bytes(byte_arr?))))
    2792            0 :     }
    2793              : }
    2794              : 
    2795              : impl ToSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
    2796            0 :     fn to_sql<'b>(
    2797            0 :         &'b self,
    2798            0 :         out: &mut diesel::serialize::Output<'b, '_, Pg>,
    2799            0 :     ) -> diesel::serialize::Result {
    2800            0 :         out.write_all(&u64::to_be_bytes(self.0.0))
    2801            0 :             .map(|_| IsNull::No)
    2802            0 :             .map_err(Into::into)
    2803            0 :     }
    2804              : }
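// Sanity sketch (hypothetical test, not part of the file) for the pg_lsn wire format
// used by the two impls above: postgres transmits pg_lsn as an 8-byte big-endian
// unsigned integer, so `to_be_bytes`/`from_be_bytes` must round-trip exactly.
#[cfg(test)]
mod lsn_wrapper_encoding_sketch {
    use super::*;

    #[test]
    fn pg_lsn_bytes_roundtrip() {
        let lsn = Lsn(0x0000_0001_6B37_3E28);
        let bytes = u64::to_be_bytes(lsn.0);
        assert_eq!(Lsn(u64::from_be_bytes(bytes)), lsn);
    }
}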
    2805              : 
    2806              : #[derive(Insertable, AsChangeset, Clone)]
    2807              : #[diesel(table_name = crate::schema::timelines)]
    2808              : pub(crate) struct TimelinePersistence {
    2809              :     pub(crate) tenant_id: String,
    2810              :     pub(crate) timeline_id: String,
    2811              :     pub(crate) start_lsn: LsnWrapper,
    2812              :     pub(crate) generation: i32,
    2813              :     pub(crate) sk_set: Vec<i64>,
    2814              :     pub(crate) new_sk_set: Option<Vec<i64>>,
    2815              :     pub(crate) cplane_notified_generation: i32,
    2816              :     pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
    2817              :     pub(crate) sk_set_notified_generation: i32,
    2818              : }
    2819              : 
    2820              : /// This is separate from [`TimelinePersistence`] only because postgres allows NULLs
    2821              : /// in arrays and there is no way to forbid that at the schema level. Hence diesel
    2822              : /// wants `sk_set` to be `Vec<Option<i64>>` instead of `Vec<i64>` for
    2823              : /// Queryable/Selectable. It does, however, allow insertions without the redundant
    2824              : /// `Option`s, so [`TimelinePersistence`] doesn't have them.
    2825            0 : #[derive(Queryable, Selectable)]
    2826              : #[diesel(table_name = crate::schema::timelines)]
    2827              : pub(crate) struct TimelineFromDb {
    2828              :     pub(crate) tenant_id: String,
    2829              :     pub(crate) timeline_id: String,
    2830              :     pub(crate) start_lsn: LsnWrapper,
    2831              :     pub(crate) generation: i32,
    2832              :     pub(crate) sk_set: Vec<Option<i64>>,
    2833              :     pub(crate) new_sk_set: Option<Vec<Option<i64>>>,
    2834              :     pub(crate) cplane_notified_generation: i32,
    2835              :     pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
    2836              :     pub(crate) sk_set_notified_generation: i32,
    2837              : }
    2838              : 
    2839              : impl TimelineFromDb {
    2840            0 :     fn into_persistence(self) -> TimelinePersistence {
    2841              :         // We should never encounter null entries in the sets, but we need to filter them out.
    2842              :         // There is no way to forbid this in the schema that diesel recognizes (to our knowledge).
    2843            0 :         let sk_set = self.sk_set.into_iter().flatten().collect::<Vec<_>>();
    2844            0 :         let new_sk_set = self
    2845            0 :             .new_sk_set
    2846            0 :             .map(|s| s.into_iter().flatten().collect::<Vec<_>>());
    2847            0 :         TimelinePersistence {
    2848            0 :             tenant_id: self.tenant_id,
    2849            0 :             timeline_id: self.timeline_id,
    2850            0 :             start_lsn: self.start_lsn,
    2851            0 :             generation: self.generation,
    2852            0 :             sk_set,
    2853            0 :             new_sk_set,
    2854            0 :             cplane_notified_generation: self.cplane_notified_generation,
    2855            0 :             deleted_at: self.deleted_at,
    2856            0 :             sk_set_notified_generation: self.sk_set_notified_generation,
    2857            0 :         }
    2858            0 :     }
    2859              : }
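// Illustration of the NULL-filtering described above (hypothetical helper): `flatten`
// silently drops any `None` entries postgres allowed into the arrays, e.g.
// `[Some(1), None, Some(3)]` becomes `[1, 3]`.
#[allow(dead_code)]
fn flatten_sk_set_example(from_db: Vec<Option<i64>>) -> Vec<i64> {
    from_db.into_iter().flatten().collect()
}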
    2860              : 
    2861              : // This is separate from TimelinePersistence because we don't want to touch generation and deleted_at values for the update.
    2862              : #[derive(AsChangeset)]
    2863              : #[diesel(table_name = crate::schema::timelines)]
    2864              : #[diesel(treat_none_as_null = true)]
    2865              : pub(crate) struct TimelineUpdate {
    2866              :     pub(crate) tenant_id: String,
    2867              :     pub(crate) timeline_id: String,
    2868              :     pub(crate) start_lsn: LsnWrapper,
    2869              :     pub(crate) sk_set: Vec<i64>,
    2870              :     pub(crate) new_sk_set: Option<Vec<i64>>,
    2871              : }
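// Note on `treat_none_as_null` above: with it, an `Option` field set to `None` actively
// writes NULL instead of skipping the column, which is diesel's default AsChangeset
// behaviour. Hedged sketch (hypothetical helper; the interpretation that this clears a
// finished set change is an assumption):
#[allow(dead_code)]
fn clear_new_sk_set(mut update: TimelineUpdate) -> TimelineUpdate {
    update.new_sk_set = None;
    update
}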
    2872              : 
    2873            0 : #[derive(Insertable, AsChangeset, Queryable, Selectable, Clone)]
    2874              : #[diesel(table_name = crate::schema::safekeeper_timeline_pending_ops)]
    2875              : pub(crate) struct TimelinePendingOpPersistence {
    2876              :     pub(crate) sk_id: i64,
    2877              :     pub(crate) tenant_id: String,
    2878              :     pub(crate) timeline_id: String,
    2879              :     pub(crate) generation: i32,
    2880              :     pub(crate) op_kind: SafekeeperTimelineOpKind,
    2881              : }
    2882              : 
    2883            0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
    2884              : #[diesel(sql_type = diesel::sql_types::VarChar)]
    2885              : pub(crate) enum SafekeeperTimelineOpKind {
    2886              :     Pull,
    2887              :     Exclude,
    2888              :     Delete,
    2889              : }
    2890              : 
    2891              : impl FromSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
    2892            0 :     fn from_sql(
    2893            0 :         bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
    2894            0 :     ) -> diesel::deserialize::Result<Self> {
    2895            0 :         let bytes = bytes.as_bytes();
    2896            0 :         match core::str::from_utf8(bytes) {
    2897            0 :             Ok(s) => match s {
    2898            0 :                 "pull" => Ok(SafekeeperTimelineOpKind::Pull),
    2899            0 :                 "exclude" => Ok(SafekeeperTimelineOpKind::Exclude),
    2900            0 :                 "delete" => Ok(SafekeeperTimelineOpKind::Delete),
    2901            0 :                 _ => Err(format!("can't parse: {s}").into()),
    2902              :             },
    2903            0 :             Err(e) => Err(format!("invalid UTF-8 for op_kind: {e}").into()),
    2904              :         }
    2905            0 :     }
    2906              : }
    2907              : 
    2908              : impl ToSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
    2909            0 :     fn to_sql<'b>(
    2910            0 :         &'b self,
    2911            0 :         out: &mut diesel::serialize::Output<'b, '_, Pg>,
    2912            0 :     ) -> diesel::serialize::Result {
    2913            0 :         let kind_str = match self {
    2914            0 :             SafekeeperTimelineOpKind::Pull => "pull",
    2915            0 :             SafekeeperTimelineOpKind::Exclude => "exclude",
    2916            0 :             SafekeeperTimelineOpKind::Delete => "delete",
    2917              :         };
    2918            0 :         out.write_all(kind_str.as_bytes())
    2919            0 :             .map(|_| IsNull::No)
    2920            0 :             .map_err(Into::into)
    2921            0 :     }
    2922              : }
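// The string forms in `to_sql` must stay in sync with the ones accepted by `from_sql`
// above, or rows become unreadable after they are written. A reminder of the mapping as
// plain data (it does not exercise diesel itself):
#[allow(dead_code)]
const OP_KIND_STRINGS: [(SafekeeperTimelineOpKind, &str); 3] = [
    (SafekeeperTimelineOpKind::Pull, "pull"),
    (SafekeeperTimelineOpKind::Exclude, "exclude"),
    (SafekeeperTimelineOpKind::Delete, "delete"),
];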
    2923              : 
    2924            0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Clone)]
    2925              : #[diesel(table_name = crate::schema::timeline_imports)]
    2926              : pub(crate) struct TimelineImportPersistence {
    2927              :     pub(crate) tenant_id: String,
    2928              :     pub(crate) timeline_id: String,
    2929              :     pub(crate) shard_statuses: serde_json::Value,
    2930              : }
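// Hedged sketch of how an import row might be assembled (hypothetical helper; the real
// JSON shape of `shard_statuses` is defined by the code that writes and reads this
// column, not by this example).
#[allow(dead_code)]
fn example_import_row(tenant_id: TenantId, timeline_id: TimelineId) -> TimelineImportPersistence {
    TimelineImportPersistence {
        tenant_id: tenant_id.to_string(),
        timeline_id: timeline_id.to_string(),
        shard_statuses: serde_json::json!({}),
    }
}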
        

Generated by: LCOV version 2.1-beta