Line data Source code
1 : pub(crate) mod split_state;
2 : use std::collections::HashMap;
3 : use std::io::Write;
4 : use std::str::FromStr;
5 : use std::sync::Arc;
6 : use std::time::{Duration, Instant};
7 :
8 : use diesel::deserialize::{FromSql, FromSqlRow};
9 : use diesel::expression::AsExpression;
10 : use diesel::pg::Pg;
11 : use diesel::prelude::*;
12 : use diesel::serialize::{IsNull, ToSql};
13 : use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
14 : use diesel_async::pooled_connection::bb8::Pool;
15 : use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
16 : use diesel_async::{AsyncPgConnection, RunQueryDsl};
17 : use diesel_migrations::{EmbeddedMigrations, embed_migrations};
18 : use futures::FutureExt;
19 : use futures::future::BoxFuture;
20 : use itertools::Itertools;
21 : use pageserver_api::controller_api::{
22 : AvailabilityZone, MetadataHealthRecord, NodeLifecycle, NodeSchedulingPolicy, PlacementPolicy,
23 : SafekeeperDescribeResponse, ShardSchedulingPolicy, SkSchedulingPolicy,
24 : };
25 : use pageserver_api::models::{ShardImportStatus, TenantConfig};
26 : use pageserver_api::shard::{
27 : ShardConfigError, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
28 : };
29 : use rustls::client::WebPkiServerVerifier;
30 : use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
31 : use rustls::crypto::ring;
32 : use safekeeper_api::membership::SafekeeperGeneration;
33 : use scoped_futures::ScopedBoxFuture;
34 : use serde::{Deserialize, Serialize};
35 : use utils::generation::Generation;
36 : use utils::id::{NodeId, TenantId, TimelineId};
37 : use utils::lsn::Lsn;
38 :
39 : use self::split_state::SplitState;
40 : use crate::metrics::{
41 : DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
42 : };
43 : use crate::node::Node;
44 : use crate::timeline_import::{
45 : TimelineImport, TimelineImportUpdateError, TimelineImportUpdateFollowUp,
46 : };
47 : const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
48 :
49 : /// ## What do we store?
50 : ///
51 : /// The storage controller service does not store most of its state durably.
52 : ///
53 : /// The essential things to store durably are:
54 : /// - generation numbers, as these must always advance monotonically to ensure data safety.
55 : /// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
56 : /// - Node's scheduling policies, as the source of truth for these is something external.
57 : ///
58 : /// Other things we store durably as an implementation detail:
59 : /// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
60 : /// but it is operationally simpler to make this service the authority for which nodes
61 : /// it talks to.
62 : ///
63 : /// ## Performance/efficiency
64 : ///
65 : /// The storage controller service does not go via the database for most things: there are
66 : /// a couple of places where we must, and where efficiency matters:
67 : /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
68 : /// before it can attach a tenant, so this acts as a bound on how fast things like
69 : /// failover can happen.
70 : /// - Pageserver re-attach: we will increment many shards' generations when this happens,
71 : /// so it is important to avoid e.g. issuing O(N) queries.
72 : ///
73 : /// Database calls relating to nodes have low performance requirements, as they are very rarely
74 : /// updated, and reads of nodes are always from memory, not the database. We only require that
75 : /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
76 : pub struct Persistence {
77 : connection_pool: Pool<AsyncPgConnection>,
78 : }
79 :
80 : /// Legacy format, for use in JSON compat objects in the test environment
81 0 : #[derive(Serialize, Deserialize)]
82 : struct JsonPersistence {
83 : tenants: HashMap<TenantShardId, TenantShardPersistence>,
84 : }
85 :
86 : #[derive(thiserror::Error, Debug)]
87 : pub(crate) enum DatabaseError {
88 : #[error(transparent)]
89 : Query(#[from] diesel::result::Error),
90 : #[error(transparent)]
91 : Connection(#[from] diesel::result::ConnectionError),
92 : #[error(transparent)]
93 : ConnectionPool(#[from] diesel_async::pooled_connection::bb8::RunError),
94 : #[error("Logical error: {0}")]
95 : Logical(String),
96 : #[error("Migration error: {0}")]
97 : Migration(String),
98 : #[error("CAS error: {0}")]
99 : Cas(String),
100 : }
101 :
102 : #[derive(measured::FixedCardinalityLabel, Copy, Clone)]
103 : pub(crate) enum DatabaseOperation {
104 : InsertNode,
105 : UpdateNode,
106 : DeleteNode,
107 : ListNodes,
108 : ListTombstones,
109 : BeginShardSplit,
110 : CompleteShardSplit,
111 : AbortShardSplit,
112 : Detach,
113 : ReAttach,
114 : IncrementGeneration,
115 : TenantGenerations,
116 : ShardGenerations,
117 : ListTenantShards,
118 : LoadTenant,
119 : InsertTenantShards,
120 : UpdateTenantShard,
121 : DeleteTenant,
122 : UpdateTenantConfig,
123 : UpdateMetadataHealth,
124 : ListMetadataHealth,
125 : ListMetadataHealthUnhealthy,
126 : ListMetadataHealthOutdated,
127 : ListSafekeepers,
128 : GetLeader,
129 : UpdateLeader,
130 : SetPreferredAzs,
131 : InsertTimeline,
132 : UpdateTimelineMembership,
133 : GetTimeline,
134 : InsertTimelineReconcile,
135 : RemoveTimelineReconcile,
136 : ListTimelineReconcile,
137 : ListTimelineReconcileStartup,
138 : InsertTimelineImport,
139 : UpdateTimelineImport,
140 : DeleteTimelineImport,
141 : ListTimelineImports,
142 : IsTenantImportingTimeline,
143 : }
144 :
145 : #[must_use]
146 : pub(crate) enum AbortShardSplitStatus {
147 : /// We aborted the split in the database by reverting to the parent shards
148 : Aborted,
149 : /// The split had already been persisted.
150 : Complete,
151 : }
152 :
153 : pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
154 :
155 : /// Some methods can operate on either a whole tenant or a single shard
156 : #[derive(Clone)]
157 : pub(crate) enum TenantFilter {
158 : Tenant(TenantId),
159 : Shard(TenantShardId),
160 : }
161 :
162 : /// Represents the results of looking up generation+pageserver for the shards of a tenant
163 : pub(crate) struct ShardGenerationState {
164 : pub(crate) tenant_shard_id: TenantShardId,
165 : pub(crate) generation: Option<Generation>,
166 : pub(crate) generation_pageserver: Option<NodeId>,
167 : }
168 :
169 : // A generous allowance for how many times we may retry serializable transactions
170 : // before giving up. This is not expected to be hit: it is a defensive measure in case we
171 : // somehow engineer a situation where duelling transactions might otherwise live-lock.
172 : const MAX_RETRIES: usize = 128;
173 :
174 : impl Persistence {
175 : // The default postgres connection limit is 100. We use up to 99, to leave one free for a human admin under
176 : // normal circumstances. This assumes we have exclusive use of the database cluster to which we connect.
177 : pub const MAX_CONNECTIONS: u32 = 99;
178 :
179 : // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
180 : const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
181 : const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
182 :
183 0 : pub async fn new(database_url: String) -> Self {
184 0 : let mut mgr_config = ManagerConfig::default();
185 0 : mgr_config.custom_setup = Box::new(establish_connection_rustls);
186 :
187 0 : let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
188 0 : database_url,
189 0 : mgr_config,
190 : );
191 :
192 : // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
193 : // to execute queries (database queries are not generally on latency-sensitive paths).
194 0 : let connection_pool = Pool::builder()
195 0 : .max_size(Self::MAX_CONNECTIONS)
196 0 : .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
197 0 : .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
198 0 : // Always keep at least one connection ready to go
199 0 : .min_idle(Some(1))
200 0 : .test_on_check_out(true)
201 0 : .build(manager)
202 0 : .await
203 0 : .expect("Could not build connection pool");
204 :
205 0 : Self { connection_pool }
206 0 : }
207 :
208 : /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
209 : /// database and the storage controller, so the database might not be available right away.
210 0 : pub async fn await_connection(
211 0 : database_url: &str,
212 0 : timeout: Duration,
213 0 : ) -> Result<(), diesel::ConnectionError> {
214 0 : let started_at = Instant::now();
215 0 : log_postgres_connstr_info(database_url)
216 0 : .map_err(|e| diesel::ConnectionError::InvalidConnectionUrl(e.to_string()))?;
217 : loop {
218 0 : match establish_connection_rustls(database_url).await {
219 : Ok(_) => {
220 0 : tracing::info!("Connected to database.");
221 0 : return Ok(());
222 : }
223 0 : Err(e) => {
224 0 : if started_at.elapsed() > timeout {
225 0 : return Err(e);
226 : } else {
227 0 : tracing::info!("Database not yet available, waiting... ({e})");
228 0 : tokio::time::sleep(Duration::from_millis(100)).await;
229 : }
230 : }
231 : }
232 : }
233 0 : }
234 :
235 : /// Execute the diesel migrations that are built into this binary
236 0 : pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
237 : use diesel_migrations::{HarnessWithOutput, MigrationHarness};
238 :
239 : // Can't use self.with_conn here as we do spawn_blocking which requires static.
240 0 : let conn = self
241 0 : .connection_pool
242 0 : .dedicated_connection()
243 0 : .await
244 0 : .map_err(|e| DatabaseError::Migration(e.to_string()))?;
245 0 : let mut async_wrapper: AsyncConnectionWrapper<AsyncPgConnection> =
246 0 : AsyncConnectionWrapper::from(conn);
247 0 : tokio::task::spawn_blocking(move || {
248 0 : let mut retry_count = 0;
249 : loop {
250 0 : let result = HarnessWithOutput::write_to_stdout(&mut async_wrapper)
251 0 : .run_pending_migrations(MIGRATIONS)
252 0 : .map(|_| ())
253 0 : .map_err(|e| DatabaseError::Migration(e.to_string()));
254 0 : match result {
255 0 : Ok(r) => break Ok(r),
256 : Err(
257 0 : err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
258 : diesel::result::DatabaseErrorKind::SerializationFailure,
259 : _,
260 : )),
261 : ) => {
262 0 : retry_count += 1;
263 0 : if retry_count > MAX_RETRIES {
264 0 : tracing::error!(
265 0 : "Exceeded max retries on SerializationFailure errors: {err:?}"
266 : );
267 0 : break Err(err);
268 : } else {
269 : // Retry on serialization errors: these are expected, because even though our
270 : // transactions don't fight for the same rows, they will occasionally collide
271 : // on index pages (e.g. increment_generation for unrelated shards can collide)
272 0 : tracing::debug!(
273 0 : "Retrying transaction on serialization failure {err:?}"
274 : );
275 0 : continue;
276 : }
277 : }
278 0 : Err(e) => break Err(e),
279 : }
280 : }
281 0 : })
282 0 : .await
283 0 : .map_err(|e| DatabaseError::Migration(e.to_string()))??;
284 0 : Ok(())
285 0 : }
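    /// A minimal illustrative sketch of the startup sequence the functions above imply:
    /// wait for the database, build the pool, then run migrations before serving any
    /// requests. The helper name and the 60-second timeout are assumptions for
    /// illustration, not something defined elsewhere in this file.
    #[allow(dead_code)]
    async fn example_startup(database_url: String) -> DatabaseResult<Self> {
        // Tolerate the database coming up concurrently with us.
        Self::await_connection(&database_url, Duration::from_secs(60)).await?;
        // Build the connection pool.
        let persistence = Self::new(database_url).await;
        // Bring the schema up to date with the migrations embedded in this binary.
        persistence.migration_run().await?;
        Ok(persistence)
    }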
286 :
287 : /// Wraps `with_conn` in order to collect latency and error metrics
288 0 : async fn with_measured_conn<'a, 'b, F, R>(
289 0 : &self,
290 0 : op: DatabaseOperation,
291 0 : func: F,
292 0 : ) -> DatabaseResult<R>
293 0 : where
294 0 : F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
295 0 : + Send
296 0 : + std::marker::Sync
297 0 : + 'a,
298 0 : R: Send + 'b,
299 0 : {
300 0 : let latency = &METRICS_REGISTRY
301 0 : .metrics_group
302 0 : .storage_controller_database_query_latency;
303 0 : let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });
304 :
305 0 : let res = self.with_conn(func).await;
306 :
307 0 : if let Err(err) = &res {
308 0 : let error_counter = &METRICS_REGISTRY
309 0 : .metrics_group
310 0 : .storage_controller_database_query_error;
311 0 : error_counter.inc(DatabaseQueryErrorLabelGroup {
312 0 : error_type: err.error_label(),
313 0 : operation: op,
314 0 : })
315 0 : }
316 :
317 0 : res
318 0 : }
319 :
320 : /// Call the provided function with a Diesel database connection in a retry loop
321 0 : async fn with_conn<'a, 'b, F, R>(&self, func: F) -> DatabaseResult<R>
322 0 : where
323 0 : F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
324 0 : + Send
325 0 : + std::marker::Sync
326 0 : + 'a,
327 0 : R: Send + 'b,
328 0 : {
329 0 : let mut retry_count = 0;
330 : loop {
331 0 : let mut conn = self.connection_pool.get().await?;
332 0 : match conn
333 0 : .build_transaction()
334 0 : .serializable()
335 0 : .run(|c| func(c))
336 0 : .await
337 : {
338 0 : Ok(r) => break Ok(r),
339 : Err(
340 0 : err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
341 : diesel::result::DatabaseErrorKind::SerializationFailure,
342 : _,
343 : )),
344 : ) => {
345 0 : retry_count += 1;
346 0 : if retry_count > MAX_RETRIES {
347 0 : tracing::error!(
348 0 : "Exceeded max retries on SerializationFailure errors: {err:?}"
349 : );
350 0 : break Err(err);
351 : } else {
352 : // Retry on serialization errors: these are expected, because even though our
353 : // transactions don't fight for the same rows, they will occasionally collide
354 : // on index pages (e.g. increment_generation for unrelated shards can collide)
355 0 : tracing::debug!("Retrying transaction on serialization failure {err:?}");
356 0 : continue;
357 : }
358 : }
359 0 : Err(e) => break Err(e),
360 : }
361 : }
362 0 : }
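    /// A minimal illustrative sketch of calling `with_conn` directly; most call sites
    /// below go through `with_measured_conn`. The closure runs inside a serializable
    /// transaction and may be re-executed on SerializationFailure, so it should only
    /// capture references and stay idempotent. The helper name and the query are
    /// assumptions for illustration.
    #[allow(dead_code)]
    async fn example_count_nodes(&self) -> DatabaseResult<i64> {
        self.with_conn(move |conn| {
            Box::pin(async move {
                // Any diesel_async query can run on `conn`; its errors convert into DatabaseError.
                let count = crate::schema::nodes::table.count().get_result(conn).await?;
                Ok(count)
            })
        })
        .await
    }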
363 :
364 : /// When a node is first registered, persist it before using it for anything.
365 : /// If the provided node_id already exists, this is an error.
366 : /// The common error case is a node marked for deletion attempting to register again.
367 0 : pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
368 0 : let np = &node.to_persistent();
369 0 : self.with_measured_conn(DatabaseOperation::InsertNode, move |conn| {
370 0 : Box::pin(async move {
371 0 : diesel::insert_into(crate::schema::nodes::table)
372 0 : .values(np)
373 0 : .execute(conn)
374 0 : .await?;
375 0 : Ok(())
376 0 : })
377 0 : })
378 0 : .await
379 0 : }
380 :
381 : /// At startup, populate the list of nodes which our shards may be placed on
382 0 : pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
383 : use crate::schema::nodes::dsl::*;
384 :
385 0 : let result: Vec<NodePersistence> = self
386 0 : .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
387 0 : Box::pin(async move {
388 0 : Ok(crate::schema::nodes::table
389 0 : .filter(lifecycle.ne(String::from(NodeLifecycle::Deleted)))
390 0 : .load::<NodePersistence>(conn)
391 0 : .await?)
392 0 : })
393 0 : })
394 0 : .await?;
395 :
396 0 : tracing::info!("list_nodes: loaded {} nodes", result.len());
397 :
398 0 : Ok(result)
399 0 : }
400 :
401 0 : pub(crate) async fn list_tombstones(&self) -> DatabaseResult<Vec<NodePersistence>> {
402 : use crate::schema::nodes::dsl::*;
403 :
404 0 : let result: Vec<NodePersistence> = self
405 0 : .with_measured_conn(DatabaseOperation::ListTombstones, move |conn| {
406 0 : Box::pin(async move {
407 0 : Ok(crate::schema::nodes::table
408 0 : .filter(lifecycle.eq(String::from(NodeLifecycle::Deleted)))
409 0 : .load::<NodePersistence>(conn)
410 0 : .await?)
411 0 : })
412 0 : })
413 0 : .await?;
414 :
415 0 : tracing::info!("list_tombstones: loaded {} nodes", result.len());
416 :
417 0 : Ok(result)
418 0 : }
419 :
420 0 : pub(crate) async fn update_node<V>(
421 0 : &self,
422 0 : input_node_id: NodeId,
423 0 : values: V,
424 0 : ) -> DatabaseResult<()>
425 0 : where
426 0 : V: diesel::AsChangeset<Target = crate::schema::nodes::table> + Clone + Send + Sync,
427 0 : V::Changeset: diesel::query_builder::QueryFragment<diesel::pg::Pg> + Send, // valid Postgres SQL
428 0 : {
429 : use crate::schema::nodes::dsl::*;
430 0 : let updated = self
431 0 : .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
432 0 : let values = values.clone();
433 0 : Box::pin(async move {
434 0 : let updated = diesel::update(nodes)
435 0 : .filter(node_id.eq(input_node_id.0 as i64))
436 0 : .filter(lifecycle.ne(String::from(NodeLifecycle::Deleted)))
437 0 : .set(values)
438 0 : .execute(conn)
439 0 : .await?;
440 0 : Ok(updated)
441 0 : })
442 0 : })
443 0 : .await?;
444 :
445 0 : if updated != 1 {
446 0 : Err(DatabaseError::Logical(format!(
447 0 : "Node {node_id:?} not found for update",
448 0 : )))
449 : } else {
450 0 : Ok(())
451 : }
452 0 : }
453 :
454 0 : pub(crate) async fn update_node_scheduling_policy(
455 0 : &self,
456 0 : input_node_id: NodeId,
457 0 : input_scheduling: NodeSchedulingPolicy,
458 0 : ) -> DatabaseResult<()> {
459 : use crate::schema::nodes::dsl::*;
460 0 : self.update_node(
461 0 : input_node_id,
462 0 : scheduling_policy.eq(String::from(input_scheduling)),
463 0 : )
464 0 : .await
465 0 : }
466 :
467 0 : pub(crate) async fn update_node_on_registration(
468 0 : &self,
469 0 : input_node_id: NodeId,
470 0 : input_https_port: Option<u16>,
471 0 : ) -> DatabaseResult<()> {
472 : use crate::schema::nodes::dsl::*;
473 0 : self.update_node(
474 0 : input_node_id,
475 0 : listen_https_port.eq(input_https_port.map(|x| x as i32)),
476 : )
477 0 : .await
478 0 : }
479 :
480 : /// Tombstone is a special state where the node is not deleted from the database,
481 : /// but is not available for use.
482 : /// The main reason for it is to prevent a flaky node from re-registering.
483 0 : pub(crate) async fn set_tombstone(&self, del_node_id: NodeId) -> DatabaseResult<()> {
484 : use crate::schema::nodes::dsl::*;
485 0 : self.update_node(
486 0 : del_node_id,
487 0 : lifecycle.eq(String::from(NodeLifecycle::Deleted)),
488 0 : )
489 0 : .await
490 0 : }
491 :
492 0 : pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
493 : use crate::schema::nodes::dsl::*;
494 0 : self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| {
495 0 : Box::pin(async move {
496 : // You can hard delete a node only if it has a tombstone.
497 : // So we need to check if the node has lifecycle set to deleted.
498 0 : let node_to_delete = nodes
499 0 : .filter(node_id.eq(del_node_id.0 as i64))
500 0 : .first::<NodePersistence>(conn)
501 0 : .await
502 0 : .optional()?;
503 :
504 0 : if let Some(np) = node_to_delete {
505 0 : let lc = NodeLifecycle::from_str(&np.lifecycle).map_err(|e| {
506 0 : DatabaseError::Logical(format!(
507 0 : "Node {del_node_id} has invalid lifecycle: {e}"
508 0 : ))
509 0 : })?;
510 :
511 0 : if lc != NodeLifecycle::Deleted {
512 0 : return Err(DatabaseError::Logical(format!(
513 0 : "Node {del_node_id} was not soft deleted before, cannot hard delete it"
514 0 : )));
515 0 : }
516 :
517 0 : diesel::delete(nodes)
518 0 : .filter(node_id.eq(del_node_id.0 as i64))
519 0 : .execute(conn)
520 0 : .await?;
521 0 : }
522 :
523 0 : Ok(())
524 0 : })
525 0 : })
526 0 : .await
527 0 : }
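    /// An illustrative sketch of the two-step removal flow implied by `set_tombstone` and
    /// `delete_node`: soft-delete first so the node can no longer register or re-attach,
    /// then hard-delete once it is safe to forget it entirely. The helper name is an
    /// assumption for illustration.
    #[allow(dead_code)]
    async fn example_decommission_node(&self, node_id: NodeId) -> DatabaseResult<()> {
        // Step 1: mark the lifecycle as deleted; the row stays in the table as a tombstone.
        self.set_tombstone(node_id).await?;
        // Step 2: hard delete; this errors unless the tombstone from step 1 is present.
        self.delete_node(node_id).await?;
        Ok(())
    }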
528 :
529 : /// At startup, load the high level state for shards, such as their config + policy. This will
530 : /// be enriched at runtime with state discovered on pageservers.
531 : ///
532 : /// We exclude shards configured to be detached. During startup, if we see any attached locations
533 : /// for such shards, they will automatically be detached as 'orphans'.
534 0 : pub(crate) async fn load_active_tenant_shards(
535 0 : &self,
536 0 : ) -> DatabaseResult<Vec<TenantShardPersistence>> {
537 : use crate::schema::tenant_shards::dsl::*;
538 0 : self.with_measured_conn(DatabaseOperation::ListTenantShards, move |conn| {
539 0 : Box::pin(async move {
540 0 : let query = tenant_shards.filter(
541 0 : placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
542 : );
543 0 : let result = query.load::<TenantShardPersistence>(conn).await?;
544 :
545 0 : Ok(result)
546 0 : })
547 0 : })
548 0 : .await
549 0 : }
550 :
551 : /// When restoring a previously detached tenant into memory, load it from the database
552 0 : pub(crate) async fn load_tenant(
553 0 : &self,
554 0 : filter_tenant_id: TenantId,
555 0 : ) -> DatabaseResult<Vec<TenantShardPersistence>> {
556 : use crate::schema::tenant_shards::dsl::*;
557 0 : self.with_measured_conn(DatabaseOperation::LoadTenant, move |conn| {
558 0 : Box::pin(async move {
559 0 : let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
560 0 : let result = query.load::<TenantShardPersistence>(conn).await?;
561 :
562 0 : Ok(result)
563 0 : })
564 0 : })
565 0 : .await
566 0 : }
567 :
568 : /// Tenants must be persisted before we schedule them for the first time. This enables us
569 : /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
570 0 : pub(crate) async fn insert_tenant_shards(
571 0 : &self,
572 0 : shards: Vec<TenantShardPersistence>,
573 0 : ) -> DatabaseResult<()> {
574 : use crate::schema::{metadata_health, tenant_shards};
575 :
576 0 : let now = chrono::Utc::now();
577 :
578 0 : let metadata_health_records = shards
579 0 : .iter()
580 0 : .map(|t| MetadataHealthPersistence {
581 0 : tenant_id: t.tenant_id.clone(),
582 0 : shard_number: t.shard_number,
583 0 : shard_count: t.shard_count,
584 : healthy: true,
585 0 : last_scrubbed_at: now,
586 0 : })
587 0 : .collect::<Vec<_>>();
588 :
589 0 : let shards = &shards;
590 0 : let metadata_health_records = &metadata_health_records;
591 0 : self.with_measured_conn(DatabaseOperation::InsertTenantShards, move |conn| {
592 0 : Box::pin(async move {
593 0 : diesel::insert_into(tenant_shards::table)
594 0 : .values(shards)
595 0 : .execute(conn)
596 0 : .await?;
597 :
598 0 : diesel::insert_into(metadata_health::table)
599 0 : .values(metadata_health_records)
600 0 : .execute(conn)
601 0 : .await?;
602 0 : Ok(())
603 0 : })
604 0 : })
605 0 : .await
606 0 : }
607 :
608 : /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
609 : /// the tenant from memory on this server.
610 0 : pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
611 : use crate::schema::tenant_shards::dsl::*;
612 0 : self.with_measured_conn(DatabaseOperation::DeleteTenant, move |conn| {
613 0 : Box::pin(async move {
614 : // `metadata_health` status (if exists) is also deleted based on the cascade behavior.
615 0 : diesel::delete(tenant_shards)
616 0 : .filter(tenant_id.eq(del_tenant_id.to_string()))
617 0 : .execute(conn)
618 0 : .await?;
619 0 : Ok(())
620 0 : })
621 0 : })
622 0 : .await
623 0 : }
624 :
625 : /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
626 : /// batched increment of the generations of all tenants whose generation_pageserver is equal to
627 : /// the node that called /re-attach.
628 : #[tracing::instrument(skip_all, fields(node_id))]
629 : pub(crate) async fn re_attach(
630 : &self,
631 : input_node_id: NodeId,
632 : ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
633 : use crate::schema::nodes::dsl::{scheduling_policy, *};
634 : use crate::schema::tenant_shards::dsl::*;
635 : let updated = self
636 0 : .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
637 0 : Box::pin(async move {
638 0 : let node: Option<NodePersistence> = nodes
639 0 : .filter(node_id.eq(input_node_id.0 as i64))
640 0 : .first::<NodePersistence>(conn)
641 0 : .await
642 0 : .optional()?;
643 :
644 : // Check if the node is not marked as deleted
645 0 : match node {
646 0 : Some(node) if matches!(NodeLifecycle::from_str(&node.lifecycle), Ok(NodeLifecycle::Deleted)) => {
647 0 : return Err(DatabaseError::Logical(format!(
648 0 : "Node {input_node_id} is marked as deleted, re-attach is not allowed"
649 0 : )));
650 : }
651 0 : _ => {
652 0 : // go through
653 0 : }
654 : };
655 :
656 0 : let rows_updated = diesel::update(tenant_shards)
657 0 : .filter(generation_pageserver.eq(input_node_id.0 as i64))
658 0 : .set(generation.eq(generation + 1))
659 0 : .execute(conn)
660 0 : .await?;
661 :
662 0 : tracing::info!("Incremented {} tenants' generations", rows_updated);
663 :
664 : // TODO: UPDATE+SELECT in one query
665 :
666 0 : let updated = tenant_shards
667 0 : .filter(generation_pageserver.eq(input_node_id.0 as i64))
668 0 : .select(TenantShardPersistence::as_select())
669 0 : .load(conn)
670 0 : .await?;
671 :
672 0 : if let Some(node) = node {
673 0 : let old_scheduling_policy =
674 0 : NodeSchedulingPolicy::from_str(&node.scheduling_policy).unwrap();
675 0 : let new_scheduling_policy = match old_scheduling_policy {
676 0 : NodeSchedulingPolicy::Active => NodeSchedulingPolicy::Active,
677 0 : NodeSchedulingPolicy::PauseForRestart => NodeSchedulingPolicy::Active,
678 0 : NodeSchedulingPolicy::Draining => NodeSchedulingPolicy::Active,
679 0 : NodeSchedulingPolicy::Filling => NodeSchedulingPolicy::Active,
680 0 : NodeSchedulingPolicy::Pause => NodeSchedulingPolicy::Pause,
681 0 : NodeSchedulingPolicy::Deleting => NodeSchedulingPolicy::Pause,
682 : };
683 0 : diesel::update(nodes)
684 0 : .filter(node_id.eq(input_node_id.0 as i64))
685 0 : .set(scheduling_policy.eq(String::from(new_scheduling_policy)))
686 0 : .execute(conn)
687 0 : .await?;
688 0 : }
689 :
690 0 : Ok(updated)
691 0 : })
692 0 : })
693 : .await?;
694 :
695 : let mut result = HashMap::new();
696 : for tsp in updated {
697 : let tenant_shard_id = TenantShardId {
698 : tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
699 0 : .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
700 : shard_number: ShardNumber(tsp.shard_number as u8),
701 : shard_count: ShardCount::new(tsp.shard_count as u8),
702 : };
703 :
704 : let Some(g) = tsp.generation else {
705 : // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
706 : // we only set generation_pageserver when setting generation.
707 : return Err(DatabaseError::Logical(
708 : "Generation should always be set after incrementing".to_string(),
709 : ));
710 : };
711 : result.insert(tenant_shard_id, Generation::new(g as u32));
712 : }
713 :
714 : Ok(result)
715 : }
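    /// A sketch of the single-statement alternative hinted at by the `UPDATE+SELECT in one
    /// query` TODO above: diesel's `returning` clause (already used by
    /// [`Self::increment_generation`]) could bump and read back the generations in one round
    /// trip. Illustrative only; it omits the lifecycle check and scheduling-policy update
    /// that `re_attach` performs.
    #[allow(dead_code)]
    async fn example_bump_and_read_generations(
        &self,
        input_node_id: NodeId,
    ) -> DatabaseResult<Vec<TenantShardPersistence>> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_conn(move |conn| {
            Box::pin(async move {
                let rows = diesel::update(tenant_shards)
                    .filter(generation_pageserver.eq(input_node_id.0 as i64))
                    .set(generation.eq(generation + 1))
                    .returning(TenantShardPersistence::as_returning())
                    .get_results(conn)
                    .await?;
                Ok(rows)
            })
        })
        .await
    }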
716 :
717 : /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
718 : /// advancing generation number. We also store the NodeId for which the generation was issued, so that in
719 : /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
720 0 : pub(crate) async fn increment_generation(
721 0 : &self,
722 0 : tenant_shard_id: TenantShardId,
723 0 : node_id: NodeId,
724 0 : ) -> anyhow::Result<Generation> {
725 : use crate::schema::tenant_shards::dsl::*;
726 0 : let updated = self
727 0 : .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
728 0 : Box::pin(async move {
729 0 : let updated = diesel::update(tenant_shards)
730 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
731 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
732 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
733 0 : .set((
734 0 : generation.eq(generation + 1),
735 0 : generation_pageserver.eq(node_id.0 as i64),
736 0 : ))
737 0 : // TODO: only returning() the generation column
738 0 : .returning(TenantShardPersistence::as_returning())
739 0 : .get_result(conn)
740 0 : .await?;
741 :
742 0 : Ok(updated)
743 0 : })
744 0 : })
745 0 : .await?;
746 :
747 : // Generation is always non-null in the result: if the generation column had been NULL, then we
748 : // should have experienced an SQL Conflict error while executing a query that tries to increment it.
749 0 : debug_assert!(updated.generation.is_some());
750 0 : let Some(g) = updated.generation else {
751 0 : return Err(DatabaseError::Logical(
752 0 : "Generation should always be set after incrementing".to_string(),
753 0 : )
754 0 : .into());
755 : };
756 :
757 0 : Ok(Generation::new(g as u32))
758 0 : }
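    /// A usage sketch of the ordering described above: the reconciler must durably advance
    /// the generation *before* issuing the attach request. The `attach` closure stands in
    /// for the pageserver call and, like the helper name, is an assumption for illustration.
    #[allow(dead_code)]
    async fn example_attach_with_generation<F, Fut>(
        &self,
        tenant_shard_id: TenantShardId,
        node_id: NodeId,
        attach: F,
    ) -> anyhow::Result<()>
    where
        F: FnOnce(Generation) -> Fut,
        Fut: std::future::Future<Output = anyhow::Result<()>>,
    {
        // Persist the new generation (and the node it was issued to) first.
        let generation = self.increment_generation(tenant_shard_id, node_id).await?;
        // Only then hand the freshly issued generation to the attach request.
        attach(generation).await
    }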
759 :
760 : /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
761 : /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
762 : /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
763 : /// latest generation)
764 : ///
765 : /// If the tenant doesn't exist, an empty vector is returned.
766 : ///
767 : /// Output is sorted by shard number
768 0 : pub(crate) async fn tenant_generations(
769 0 : &self,
770 0 : filter_tenant_id: TenantId,
771 0 : ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
772 : use crate::schema::tenant_shards::dsl::*;
773 0 : let rows = self
774 0 : .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
775 0 : Box::pin(async move {
776 0 : let result = tenant_shards
777 0 : .filter(tenant_id.eq(filter_tenant_id.to_string()))
778 0 : .select(TenantShardPersistence::as_select())
779 0 : .order(shard_number)
780 0 : .load(conn)
781 0 : .await?;
782 0 : Ok(result)
783 0 : })
784 0 : })
785 0 : .await?;
786 :
787 0 : Ok(rows
788 0 : .into_iter()
789 0 : .map(|p| ShardGenerationState {
790 0 : tenant_shard_id: p
791 0 : .get_tenant_shard_id()
792 0 : .expect("Corrupt tenant shard id in database"),
793 0 : generation: p.generation.map(|g| Generation::new(g as u32)),
794 0 : generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
795 0 : })
796 0 : .collect())
797 0 : }
798 :
799 : /// Read the generation number of specific tenant shards
800 : ///
801 : /// Output is unsorted. Output may not include values for all inputs, if they are missing in the database.
802 0 : pub(crate) async fn shard_generations(
803 0 : &self,
804 0 : mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
805 0 : ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
806 0 : let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);
807 :
808 : // We will chunk our input to avoid composing arbitrarily long `IN` clauses. Typically we are
809 : // called with a single digit number of IDs, but in principle we could be called with tens
810 : // of thousands (all the shards on one pageserver) from the generation validation API.
811 : loop {
812 : // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
813 : // large query strings.
814 0 : let chunk_ids = tenant_shard_ids.by_ref().take(32);
815 :
816 : // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
817 0 : let in_clause = chunk_ids
818 0 : .map(|tsid| {
819 0 : format!(
820 0 : "('{}', {}, {})",
821 : tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
822 : )
823 0 : })
824 0 : .join(",");
825 :
826 : // We are done when our iterator gives us nothing to filter on
827 0 : if in_clause.is_empty() {
828 0 : break;
829 0 : }
830 :
831 0 : let in_clause = &in_clause;
832 0 : let chunk_rows = self
833 0 : .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
834 0 : Box::pin(async move {
835 : // diesel doesn't support multi-column IN queries, so we compose raw SQL. No escaping is required because
836 : // the inputs are strongly typed and cannot carry any user-supplied raw string content.
837 0 : let result : Vec<TenantShardPersistence> = diesel::sql_query(
838 0 : format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
839 0 : ).load(conn).await?;
840 :
841 0 : Ok(result)
842 0 : })
843 0 : })
844 0 : .await?;
845 0 : rows.extend(chunk_rows.into_iter())
846 : }
847 :
848 0 : Ok(rows
849 0 : .into_iter()
850 0 : .map(|tsp| {
851 : (
852 0 : tsp.get_tenant_shard_id()
853 0 : .expect("Bad tenant ID in database"),
854 0 : tsp.generation.map(|g| Generation::new(g as u32)),
855 : )
856 0 : })
857 0 : .collect())
858 0 : }
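    /// A usage sketch for the chunked lookup above, in the spirit of the generation
    /// validation path: compare a caller-supplied view of shard generations against what is
    /// persisted. The `claimed` map and the plain equality check are simplifying assumptions
    /// for illustration.
    #[allow(dead_code)]
    async fn example_validate_generations(
        &self,
        claimed: HashMap<TenantShardId, Generation>,
    ) -> DatabaseResult<Vec<(TenantShardId, bool)>> {
        let persisted: HashMap<_, _> = self
            .shard_generations(claimed.keys())
            .await?
            .into_iter()
            .collect();
        Ok(claimed
            .into_iter()
            .map(|(tenant_shard_id, claimed_generation)| {
                // Shards missing from the database, or whose generation is NULL or has moved
                // on, fail validation.
                let valid = persisted.get(&tenant_shard_id).copied().flatten()
                    == Some(claimed_generation);
                (tenant_shard_id, valid)
            })
            .collect())
    }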
859 :
860 : #[allow(non_local_definitions)]
861 : /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
862 : ///
863 : /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
864 : /// API: use [`Self::increment_generation`] instead. Setting the generation via this route is a one-time thing
865 : /// that we only do the first time a tenant is set to an attached policy via /location_config.
866 0 : pub(crate) async fn update_tenant_shard(
867 0 : &self,
868 0 : tenant: TenantFilter,
869 0 : input_placement_policy: Option<PlacementPolicy>,
870 0 : input_config: Option<TenantConfig>,
871 0 : input_generation: Option<Generation>,
872 0 : input_scheduling_policy: Option<ShardSchedulingPolicy>,
873 0 : ) -> DatabaseResult<()> {
874 : use crate::schema::tenant_shards::dsl::*;
875 :
876 0 : let tenant = &tenant;
877 0 : let input_placement_policy = &input_placement_policy;
878 0 : let input_config = &input_config;
879 0 : let input_generation = &input_generation;
880 0 : let input_scheduling_policy = &input_scheduling_policy;
881 0 : self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
882 0 : Box::pin(async move {
883 0 : let query = match tenant {
884 0 : TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
885 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
886 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
887 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
888 0 : .into_boxed(),
889 0 : TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
890 0 : .filter(tenant_id.eq(input_tenant_id.to_string()))
891 0 : .into_boxed(),
892 : };
893 :
894 : // Clear generation_pageserver if we are moving into a state where we won't have
895 : // any attached pageservers.
896 0 : let input_generation_pageserver = match input_placement_policy {
897 0 : None | Some(PlacementPolicy::Attached(_)) => None,
898 0 : Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
899 : };
900 :
901 : #[derive(AsChangeset)]
902 : #[diesel(table_name = crate::schema::tenant_shards)]
903 : struct ShardUpdate {
904 : generation: Option<i32>,
905 : placement_policy: Option<String>,
906 : config: Option<String>,
907 : scheduling_policy: Option<String>,
908 : generation_pageserver: Option<Option<i64>>,
909 : }
910 :
911 0 : let update = ShardUpdate {
912 0 : generation: input_generation.map(|g| g.into().unwrap() as i32),
913 0 : placement_policy: input_placement_policy
914 0 : .as_ref()
915 0 : .map(|p| serde_json::to_string(&p).unwrap()),
916 0 : config: input_config
917 0 : .as_ref()
918 0 : .map(|c| serde_json::to_string(&c).unwrap()),
919 0 : scheduling_policy: input_scheduling_policy
920 0 : .map(|p| serde_json::to_string(&p).unwrap()),
921 0 : generation_pageserver: input_generation_pageserver,
922 : };
923 :
924 0 : query.set(update).execute(conn).await?;
925 :
926 0 : Ok(())
927 0 : })
928 0 : })
929 0 : .await?;
930 :
931 0 : Ok(())
932 0 : }
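    /// A minimal usage sketch: update only the persisted TenantConfig for every shard of a
    /// tenant. `None` fields in the changeset above are simply not written, so generation,
    /// placement and scheduling stay untouched. The helper name is an assumption for
    /// illustration.
    #[allow(dead_code)]
    async fn example_update_config_only(
        &self,
        tenant_id: TenantId,
        config: TenantConfig,
    ) -> DatabaseResult<()> {
        self.update_tenant_shard(
            TenantFilter::Tenant(tenant_id),
            None,         // placement_policy: unchanged
            Some(config), // config: overwritten
            None,         // generation: unchanged
            None,         // scheduling policy: unchanged
        )
        .await
    }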
933 :
934 : /// Note that passing None for a shard clears the preferred AZ (rather than leaving it unmodified)
935 0 : pub(crate) async fn set_tenant_shard_preferred_azs(
936 0 : &self,
937 0 : preferred_azs: Vec<(TenantShardId, Option<AvailabilityZone>)>,
938 0 : ) -> DatabaseResult<Vec<(TenantShardId, Option<AvailabilityZone>)>> {
939 : use crate::schema::tenant_shards::dsl::*;
940 :
941 0 : let preferred_azs = preferred_azs.as_slice();
942 0 : self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
943 0 : Box::pin(async move {
944 0 : let mut shards_updated = Vec::default();
945 :
946 0 : for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
947 0 : let updated = diesel::update(tenant_shards)
948 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
949 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
950 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
951 0 : .set(preferred_az_id.eq(preferred_az.as_ref().map(|az| az.0.clone())))
952 0 : .execute(conn)
953 0 : .await?;
954 :
955 0 : if updated == 1 {
956 0 : shards_updated.push((*tenant_shard_id, preferred_az.clone()));
957 0 : }
958 : }
959 :
960 0 : Ok(shards_updated)
961 0 : })
962 0 : })
963 0 : .await
964 0 : }
965 :
966 0 : pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
967 : use crate::schema::tenant_shards::dsl::*;
968 0 : self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
969 0 : Box::pin(async move {
970 0 : let updated = diesel::update(tenant_shards)
971 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
972 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
973 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
974 0 : .set((
975 0 : generation_pageserver.eq(Option::<i64>::None),
976 0 : placement_policy
977 0 : .eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
978 0 : ))
979 0 : .execute(conn)
980 0 : .await?;
981 :
982 0 : Ok(updated)
983 0 : })
984 0 : })
985 0 : .await?;
986 :
987 0 : Ok(())
988 0 : }
989 :
990 : // When we start shard splitting, we must durably mark the tenant so that
991 : // on restart, we know that we must go through recovery.
992 : //
993 : // We create the child shards here, so that they will be available for increment_generation calls
994 : // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
995 0 : pub(crate) async fn begin_shard_split(
996 0 : &self,
997 0 : old_shard_count: ShardCount,
998 0 : split_tenant_id: TenantId,
999 0 : parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
1000 0 : ) -> DatabaseResult<()> {
1001 : use crate::schema::tenant_shards::dsl::*;
1002 0 : let parent_to_children = parent_to_children.as_slice();
1003 0 : self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| {
1004 0 : Box::pin(async move {
1005 : // Mark parent shards as splitting
1006 :
1007 0 : let updated = diesel::update(tenant_shards)
1008 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1009 0 : .filter(shard_count.eq(old_shard_count.literal() as i32))
1010 0 : .set((splitting.eq(1),))
1011 0 : .execute(conn).await?;
1012 0 : if u8::try_from(updated)
1013 0 : .map_err(|_| DatabaseError::Logical(
1014 0 : format!("Overflow existing shard count {updated} while splitting"))
1015 0 : )? != old_shard_count.count() {
1016 : // Perhaps a deletion or another split raced with this attempt to split, mutating
1017 : // the parent shards that we intend to split. In this case the split request should fail.
1018 0 : return Err(DatabaseError::Logical(
1019 0 : format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
1020 0 : ));
1021 0 : }
1022 :
1023 : // FIXME: spurious clone to sidestep closure move rules
1024 0 : let parent_to_children = parent_to_children.to_vec();
1025 :
1026 : // Insert child shards
1027 0 : for (parent_shard_id, children) in parent_to_children {
1028 0 : let mut parent = crate::schema::tenant_shards::table
1029 0 : .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
1030 0 : .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
1031 0 : .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
1032 0 : .load::<TenantShardPersistence>(conn).await?;
1033 0 : let parent = if parent.len() != 1 {
1034 0 : return Err(DatabaseError::Logical(format!(
1035 0 : "Parent shard {parent_shard_id} not found"
1036 0 : )));
1037 : } else {
1038 0 : parent.pop().unwrap()
1039 : };
1040 0 : for mut shard in children {
1041 : // Carry the parent's generation into the child
1042 0 : shard.generation = parent.generation;
1043 :
1044 0 : debug_assert!(shard.splitting == SplitState::Splitting);
1045 0 : diesel::insert_into(tenant_shards)
1046 0 : .values(shard)
1047 0 : .execute(conn).await?;
1048 : }
1049 : }
1050 :
1051 0 : Ok(())
1052 0 : })
1053 0 : })
1054 0 : .await
1055 0 : }
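    /// A high-level sketch of how the three split methods fit together. The `remote_split`
    /// closure stands in for the pageserver-side work and is an assumption for illustration;
    /// on restart, the persisted SplitState tells recovery which of the two completion paths
    /// still needs to run.
    #[allow(dead_code)]
    async fn example_shard_split_flow<F, Fut>(
        &self,
        split_tenant_id: TenantId,
        old_shard_count: ShardCount,
        new_shard_count: ShardCount,
        parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
        remote_split: F,
    ) -> DatabaseResult<()>
    where
        F: FnOnce() -> Fut,
        Fut: std::future::Future<Output = Result<(), ()>>,
    {
        // 1. Durably mark the parents as splitting and insert the child shards.
        self.begin_shard_split(old_shard_count, split_tenant_id, parent_to_children)
            .await?;
        match remote_split().await {
            // 2a. Success: drop the parents and clear the splitting flag on the children.
            Ok(()) => {
                self.complete_shard_split(split_tenant_id, old_shard_count, new_shard_count)
                    .await
            }
            // 2b. Failure: erase the children and revert the parents to SplitState::Idle.
            Err(()) => {
                let _status = self.abort_shard_split(split_tenant_id, new_shard_count).await?;
                Ok(())
            }
        }
    }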
1056 :
1057 : // When we finish shard splitting, we must atomically clean up the old shards
1058 : // and insert the new shards, and clear the splitting marker.
1059 0 : pub(crate) async fn complete_shard_split(
1060 0 : &self,
1061 0 : split_tenant_id: TenantId,
1062 0 : old_shard_count: ShardCount,
1063 0 : new_shard_count: ShardCount,
1064 0 : ) -> DatabaseResult<()> {
1065 : use crate::schema::tenant_shards::dsl::*;
1066 0 : self.with_measured_conn(DatabaseOperation::CompleteShardSplit, move |conn| {
1067 0 : Box::pin(async move {
1068 : // Sanity: child shards must still exist, as we're deleting parent shards
1069 0 : let child_shards_query = tenant_shards
1070 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1071 0 : .filter(shard_count.eq(new_shard_count.literal() as i32));
1072 0 : let child_shards = child_shards_query
1073 0 : .load::<TenantShardPersistence>(conn)
1074 0 : .await?;
1075 0 : if child_shards.len() != new_shard_count.count() as usize {
1076 0 : return Err(DatabaseError::Logical(format!(
1077 0 : "Unexpected child shard count {} while completing split to \
1078 0 : count {new_shard_count:?} on tenant {split_tenant_id}",
1079 0 : child_shards.len()
1080 0 : )));
1081 0 : }
1082 :
1083 : // Drop parent shards
1084 0 : diesel::delete(tenant_shards)
1085 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1086 0 : .filter(shard_count.eq(old_shard_count.literal() as i32))
1087 0 : .execute(conn)
1088 0 : .await?;
1089 :
1090 : // Clear splitting flag
1091 0 : let updated = diesel::update(tenant_shards)
1092 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1093 0 : .filter(shard_count.eq(new_shard_count.literal() as i32))
1094 0 : .set((splitting.eq(0),))
1095 0 : .execute(conn)
1096 0 : .await?;
1097 0 : assert!(updated == new_shard_count.count() as usize);
1098 :
1099 0 : Ok(())
1100 0 : })
1101 0 : })
1102 0 : .await
1103 0 : }
1104 :
1105 : /// Used when the remote part of a shard split failed: we will revert the database state to have only
1106 : /// the parent shards, with SplitState::Idle.
1107 0 : pub(crate) async fn abort_shard_split(
1108 0 : &self,
1109 0 : split_tenant_id: TenantId,
1110 0 : new_shard_count: ShardCount,
1111 0 : ) -> DatabaseResult<AbortShardSplitStatus> {
1112 : use crate::schema::tenant_shards::dsl::*;
1113 0 : self.with_measured_conn(DatabaseOperation::AbortShardSplit, move |conn| {
1114 0 : Box::pin(async move {
1115 : // Clear the splitting state on parent shards
1116 0 : let updated = diesel::update(tenant_shards)
1117 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1118 0 : .filter(shard_count.ne(new_shard_count.literal() as i32))
1119 0 : .set((splitting.eq(0),))
1120 0 : .execute(conn)
1121 0 : .await?;
1122 :
1123 : // Parent shards are already gone: we cannot abort.
1124 0 : if updated == 0 {
1125 0 : return Ok(AbortShardSplitStatus::Complete);
1126 0 : }
1127 :
1128 : // Sanity check: if parent shards were present, their cardinality should
1129 : // be less than the number of child shards.
1130 0 : if updated >= new_shard_count.count() as usize {
1131 0 : return Err(DatabaseError::Logical(format!(
1132 0 : "Unexpected parent shard count {updated} while aborting split to \
1133 0 : count {new_shard_count:?} on tenant {split_tenant_id}"
1134 0 : )));
1135 0 : }
1136 :
1137 : // Erase child shards
1138 0 : diesel::delete(tenant_shards)
1139 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1140 0 : .filter(shard_count.eq(new_shard_count.literal() as i32))
1141 0 : .execute(conn)
1142 0 : .await?;
1143 :
1144 0 : Ok(AbortShardSplitStatus::Aborted)
1145 0 : })
1146 0 : })
1147 0 : .await
1148 0 : }
1149 :
1150 : /// Stores all the latest metadata health updates durably. Updates existing entry on conflict.
1151 : ///
1152 : /// **Correctness:** `metadata_health_updates` should all belong to the tenant shards managed by the storage controller.
1153 : #[allow(dead_code)]
1154 0 : pub(crate) async fn update_metadata_health_records(
1155 0 : &self,
1156 0 : healthy_records: Vec<MetadataHealthPersistence>,
1157 0 : unhealthy_records: Vec<MetadataHealthPersistence>,
1158 0 : now: chrono::DateTime<chrono::Utc>,
1159 0 : ) -> DatabaseResult<()> {
1160 : use crate::schema::metadata_health::dsl::*;
1161 :
1162 0 : let healthy_records = healthy_records.as_slice();
1163 0 : let unhealthy_records = unhealthy_records.as_slice();
1164 0 : self.with_measured_conn(DatabaseOperation::UpdateMetadataHealth, move |conn| {
1165 0 : Box::pin(async move {
1166 0 : diesel::insert_into(metadata_health)
1167 0 : .values(healthy_records)
1168 0 : .on_conflict((tenant_id, shard_number, shard_count))
1169 0 : .do_update()
1170 0 : .set((healthy.eq(true), last_scrubbed_at.eq(now)))
1171 0 : .execute(conn)
1172 0 : .await?;
1173 :
1174 0 : diesel::insert_into(metadata_health)
1175 0 : .values(unhealthy_records)
1176 0 : .on_conflict((tenant_id, shard_number, shard_count))
1177 0 : .do_update()
1178 0 : .set((healthy.eq(false), last_scrubbed_at.eq(now)))
1179 0 : .execute(conn)
1180 0 : .await?;
1181 0 : Ok(())
1182 0 : })
1183 0 : })
1184 0 : .await
1185 0 : }
1186 :
1187 : /// Lists all the metadata health records.
1188 : #[allow(dead_code)]
1189 0 : pub(crate) async fn list_metadata_health_records(
1190 0 : &self,
1191 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1192 0 : self.with_measured_conn(DatabaseOperation::ListMetadataHealth, move |conn| {
1193 0 : Box::pin(async {
1194 0 : Ok(crate::schema::metadata_health::table
1195 0 : .load::<MetadataHealthPersistence>(conn)
1196 0 : .await?)
1197 0 : })
1198 0 : })
1199 0 : .await
1200 0 : }
1201 :
1202 : /// Lists all the metadata health records that are unhealthy.
1203 : #[allow(dead_code)]
1204 0 : pub(crate) async fn list_unhealthy_metadata_health_records(
1205 0 : &self,
1206 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1207 : use crate::schema::metadata_health::dsl::*;
1208 0 : self.with_measured_conn(
1209 0 : DatabaseOperation::ListMetadataHealthUnhealthy,
1210 0 : move |conn| {
1211 0 : Box::pin(async {
1212 : DatabaseResult::Ok(
1213 0 : crate::schema::metadata_health::table
1214 0 : .filter(healthy.eq(false))
1215 0 : .load::<MetadataHealthPersistence>(conn)
1216 0 : .await?,
1217 : )
1218 0 : })
1219 0 : },
1220 : )
1221 0 : .await
1222 0 : }
1223 :
1224 : /// Lists all the metadata health records that have not been updated since an `earlier` time.
1225 : #[allow(dead_code)]
1226 0 : pub(crate) async fn list_outdated_metadata_health_records(
1227 0 : &self,
1228 0 : earlier: chrono::DateTime<chrono::Utc>,
1229 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1230 : use crate::schema::metadata_health::dsl::*;
1231 :
1232 0 : self.with_measured_conn(DatabaseOperation::ListMetadataHealthOutdated, move |conn| {
1233 0 : Box::pin(async move {
1234 0 : let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
1235 0 : let res = query.load::<MetadataHealthPersistence>(conn).await?;
1236 :
1237 0 : Ok(res)
1238 0 : })
1239 0 : })
1240 0 : .await
1241 0 : }
1242 :
1243 : /// Get the current entry from the `leader` table if one exists.
1244 : /// It is an error for the table to contain more than one entry.
1245 0 : pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
1246 0 : let mut leader: Vec<ControllerPersistence> = self
1247 0 : .with_measured_conn(DatabaseOperation::GetLeader, move |conn| {
1248 0 : Box::pin(async move {
1249 0 : Ok(crate::schema::controllers::table
1250 0 : .load::<ControllerPersistence>(conn)
1251 0 : .await?)
1252 0 : })
1253 0 : })
1254 0 : .await?;
1255 :
1256 0 : if leader.len() > 1 {
1257 0 : return Err(DatabaseError::Logical(format!(
1258 0 : "More than one entry present in the leader table: {leader:?}"
1259 0 : )));
1260 0 : }
1261 :
1262 0 : Ok(leader.pop())
1263 0 : }
1264 :
1265 : /// Update the new leader with compare-exchange semantics. If `prev` does not
1266 : /// match the current leader entry, then the update is treated as a failure.
1267 : /// When `prev` is not specified, the update is forced.
1268 0 : pub(crate) async fn update_leader(
1269 0 : &self,
1270 0 : prev: Option<ControllerPersistence>,
1271 0 : new: ControllerPersistence,
1272 0 : ) -> DatabaseResult<()> {
1273 : use crate::schema::controllers::dsl::*;
1274 :
1275 0 : let updated = self
1276 0 : .with_measured_conn(DatabaseOperation::UpdateLeader, move |conn| {
1277 0 : let prev = prev.clone();
1278 0 : let new = new.clone();
1279 0 : Box::pin(async move {
1280 0 : let updated = match &prev {
1281 0 : Some(prev) => {
1282 0 : diesel::update(controllers)
1283 0 : .filter(address.eq(prev.address.clone()))
1284 0 : .filter(started_at.eq(prev.started_at))
1285 0 : .set((
1286 0 : address.eq(new.address.clone()),
1287 0 : started_at.eq(new.started_at),
1288 0 : ))
1289 0 : .execute(conn)
1290 0 : .await?
1291 : }
1292 : None => {
1293 0 : diesel::insert_into(controllers)
1294 0 : .values(new.clone())
1295 0 : .execute(conn)
1296 0 : .await?
1297 : }
1298 : };
1299 :
1300 0 : Ok(updated)
1301 0 : })
1302 0 : })
1303 0 : .await?;
1304 :
1305 0 : if updated == 0 {
1306 0 : return Err(DatabaseError::Logical(
1307 0 : "Leader table update failed".to_string(),
1308 0 : ));
1309 0 : }
1310 :
1311 0 : Ok(())
1312 0 : }
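    /// A usage sketch of the compare-exchange step-up flow: read the current leader entry,
    /// then try to install `new` with that entry as the expected previous value. The helper
    /// name and the boolean return convention are assumptions for illustration.
    #[allow(dead_code)]
    async fn example_step_up(&self, new: ControllerPersistence) -> DatabaseResult<bool> {
        let prev = self.get_leader().await?;
        match self.update_leader(prev, new).await {
            Ok(()) => Ok(true),
            // Zero rows updated surfaces as a logical error: another controller changed the
            // entry under us, so we did not become leader.
            Err(DatabaseError::Logical(_)) => Ok(false),
            Err(e) => Err(e),
        }
    }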
1313 :
1314 : /// At startup, populate the list of safekeepers known to the storage controller
1315 0 : pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
1316 0 : let safekeepers: Vec<SafekeeperPersistence> = self
1317 0 : .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
1318 0 : Box::pin(async move {
1319 0 : Ok(crate::schema::safekeepers::table
1320 0 : .load::<SafekeeperPersistence>(conn)
1321 0 : .await?)
1322 0 : })
1323 0 : })
1324 0 : .await?;
1325 :
1326 0 : tracing::info!("list_safekeepers: loaded {} nodes", safekeepers.len());
1327 :
1328 0 : Ok(safekeepers)
1329 0 : }
1330 :
1331 0 : pub(crate) async fn safekeeper_upsert(
1332 0 : &self,
1333 0 : record: SafekeeperUpsert,
1334 0 : ) -> Result<(), DatabaseError> {
1335 : use crate::schema::safekeepers::dsl::*;
1336 :
1337 0 : self.with_conn(move |conn| {
1338 0 : let record = record.clone();
1339 0 : Box::pin(async move {
1340 0 : let bind = record
1341 0 : .as_insert_or_update()
1342 0 : .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
1343 :
1344 0 : let inserted_updated = diesel::insert_into(safekeepers)
1345 0 : .values(&bind)
1346 0 : .on_conflict(id)
1347 0 : .do_update()
1348 0 : .set(&bind)
1349 0 : .execute(conn)
1350 0 : .await?;
1351 :
1352 0 : if inserted_updated != 1 {
1353 0 : return Err(DatabaseError::Logical(format!(
1354 0 : "unexpected number of rows ({inserted_updated})"
1355 0 : )));
1356 0 : }
1357 :
1358 0 : Ok(())
1359 0 : })
1360 0 : })
1361 0 : .await
1362 0 : }
1363 :
1364 0 : pub(crate) async fn set_safekeeper_scheduling_policy(
1365 0 : &self,
1366 0 : id_: i64,
1367 0 : scheduling_policy_: SkSchedulingPolicy,
1368 0 : ) -> Result<(), DatabaseError> {
1369 : use crate::schema::safekeepers::dsl::*;
1370 :
1371 0 : self.with_conn(move |conn| {
1372 0 : Box::pin(async move {
1373 : #[derive(Insertable, AsChangeset)]
1374 : #[diesel(table_name = crate::schema::safekeepers)]
1375 : struct UpdateSkSchedulingPolicy<'a> {
1376 : id: i64,
1377 : scheduling_policy: &'a str,
1378 : }
1379 0 : let scheduling_policy_ = String::from(scheduling_policy_);
1380 :
1381 0 : let rows_affected = diesel::update(safekeepers.filter(id.eq(id_)))
1382 0 : .set(scheduling_policy.eq(scheduling_policy_))
1383 0 : .execute(conn)
1384 0 : .await?;
1385 :
1386 0 : if rows_affected != 1 {
1387 0 : return Err(DatabaseError::Logical(format!(
1388 0 : "unexpected number of rows ({rows_affected})",
1389 0 : )));
1390 0 : }
1391 :
1392 0 : Ok(())
1393 0 : })
1394 0 : })
1395 0 : .await
1396 0 : }
1397 :
1398 : /// Activate the given safekeeper, ensuring that there is no TOCTOU.
1399 : /// Returns `Some` if the safekeeper was indeed activating (or already active); other states return `None`.
1400 0 : pub(crate) async fn activate_safekeeper(&self, id_: i64) -> Result<Option<()>, DatabaseError> {
1401 : use crate::schema::safekeepers::dsl::*;
1402 :
1403 0 : self.with_conn(move |conn| {
1404 0 : Box::pin(async move {
1405 : #[derive(Insertable, AsChangeset)]
1406 : #[diesel(table_name = crate::schema::safekeepers)]
1407 : struct UpdateSkSchedulingPolicy<'a> {
1408 : id: i64,
1409 : scheduling_policy: &'a str,
1410 : }
1411 0 : let scheduling_policy_active = String::from(SkSchedulingPolicy::Active);
1412 0 : let scheduling_policy_activating = String::from(SkSchedulingPolicy::Activating);
1413 :
1414 0 : let rows_affected = diesel::update(
1415 0 : safekeepers.filter(id.eq(id_)).filter(
1416 0 : scheduling_policy
1417 0 : .eq(scheduling_policy_activating)
1418 0 : .or(scheduling_policy.eq(&scheduling_policy_active)),
1419 0 : ),
1420 0 : )
1421 0 : .set(scheduling_policy.eq(&scheduling_policy_active))
1422 0 : .execute(conn)
1423 0 : .await?;
1424 :
1425 0 : if rows_affected == 0 {
1426 0 : return Ok(None);
1427 0 : }
1428 0 : if rows_affected != 1 {
1429 0 : return Err(DatabaseError::Logical(format!(
1430 0 : "unexpected number of rows ({rows_affected})",
1431 0 : )));
1432 0 : }
1433 :
1434 0 : Ok(Some(()))
1435 0 : })
1436 0 : })
1437 0 : .await
1438 0 : }
1439 :
1440 : /// Persist timeline. Returns whether the timeline was newly inserted. If it wasn't, we haven't done any writes.
1441 0 : pub(crate) async fn insert_timeline(&self, entry: TimelinePersistence) -> DatabaseResult<bool> {
1442 : use crate::schema::timelines;
1443 :
1444 0 : let entry = &entry;
1445 0 : self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
1446 0 : Box::pin(async move {
1447 0 : let inserted_updated = diesel::insert_into(timelines::table)
1448 0 : .values(entry)
1449 0 : .on_conflict((timelines::tenant_id, timelines::timeline_id))
1450 0 : .do_nothing()
1451 0 : .execute(conn)
1452 0 : .await?;
1453 :
1454 0 : match inserted_updated {
1455 0 : 0 => Ok(false),
1456 0 : 1 => Ok(true),
1457 0 : _ => Err(DatabaseError::Logical(format!(
1458 0 : "unexpected number of rows ({inserted_updated})"
1459 0 : ))),
1460 : }
1461 0 : })
1462 0 : })
1463 0 : .await
1464 0 : }
1465 :
1466 : /// Update timeline membership configuration in the database.
1467 : /// Perform a compare-and-swap (CAS) operation on the timeline's generation.
1468 : /// The `new_generation` must be the next (+1) generation after the one in the database.
1469 0 : pub(crate) async fn update_timeline_membership(
1470 0 : &self,
1471 0 : tenant_id: TenantId,
1472 0 : timeline_id: TimelineId,
1473 0 : new_generation: SafekeeperGeneration,
1474 0 : sk_set: &[NodeId],
1475 0 : new_sk_set: Option<&[NodeId]>,
1476 0 : ) -> DatabaseResult<()> {
1477 : use crate::schema::timelines::dsl;
1478 :
1479 0 : let prev_generation = new_generation.previous().unwrap();
1480 :
1481 0 : let tenant_id = &tenant_id;
1482 0 : let timeline_id = &timeline_id;
1483 0 : self.with_measured_conn(DatabaseOperation::UpdateTimelineMembership, move |conn| {
1484 0 : Box::pin(async move {
1485 0 : let updated = diesel::update(dsl::timelines)
1486 0 : .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
1487 0 : .filter(dsl::timeline_id.eq(&timeline_id.to_string()))
1488 0 : .filter(dsl::generation.eq(prev_generation.into_inner() as i32))
1489 0 : .set((
1490 0 : dsl::generation.eq(new_generation.into_inner() as i32),
1491 0 : dsl::sk_set.eq(sk_set.iter().map(|id| id.0 as i64).collect::<Vec<_>>()),
1492 0 : dsl::new_sk_set.eq(new_sk_set
1493 0 : .map(|set| set.iter().map(|id| id.0 as i64).collect::<Vec<_>>())),
1494 : ))
1495 0 : .execute(conn)
1496 0 : .await?;
1497 :
1498 0 : match updated {
1499 : 0 => {
1500 : // TODO(diko): It makes sense to select the current generation
1501 : // and include it in the error message for better debuggability.
1502 0 : Err(DatabaseError::Cas(
1503 0 : "Failed to update membership configuration".to_string(),
1504 0 : ))
1505 : }
1506 0 : 1 => Ok(()),
1507 0 : _ => Err(DatabaseError::Logical(format!(
1508 0 : "unexpected number of rows ({updated})"
1509 0 : ))),
1510 : }
1511 0 : })
1512 0 : })
1513 0 : .await
1514 0 : }
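    /// A usage sketch of the CAS contract above: the caller derives `new_generation` as
    /// previous-plus-one from its own view of the timeline and treats `DatabaseError::Cas`
    /// as "lost the race, re-read and retry". The helper name and the boolean return
    /// convention are assumptions for illustration.
    #[allow(dead_code)]
    async fn example_try_update_membership(
        &self,
        tenant_id: TenantId,
        timeline_id: TimelineId,
        new_generation: SafekeeperGeneration,
        sk_set: &[NodeId],
        new_sk_set: Option<&[NodeId]>,
    ) -> DatabaseResult<bool> {
        match self
            .update_timeline_membership(tenant_id, timeline_id, new_generation, sk_set, new_sk_set)
            .await
        {
            Ok(()) => Ok(true),
            // The stored generation was not `new_generation - 1`: someone else won the CAS.
            Err(DatabaseError::Cas(_)) => Ok(false),
            Err(e) => Err(e),
        }
    }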
1515 :
1516 : /// Load timeline from db. Returns `None` if not present.
1517 0 : pub(crate) async fn get_timeline(
1518 0 : &self,
1519 0 : tenant_id: TenantId,
1520 0 : timeline_id: TimelineId,
1521 0 : ) -> DatabaseResult<Option<TimelinePersistence>> {
1522 : use crate::schema::timelines::dsl;
1523 :
1524 0 : let tenant_id = &tenant_id;
1525 0 : let timeline_id = &timeline_id;
1526 0 : let timeline_from_db = self
1527 0 : .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
1528 0 : Box::pin(async move {
1529 0 : let mut from_db: Vec<TimelineFromDb> = dsl::timelines
1530 0 : .filter(
1531 0 : dsl::tenant_id
1532 0 : .eq(&tenant_id.to_string())
1533 0 : .and(dsl::timeline_id.eq(&timeline_id.to_string())),
1534 0 : )
1535 0 : .load(conn)
1536 0 : .await?;
1537 0 : if from_db.is_empty() {
1538 0 : return Ok(None);
1539 0 : }
1540 0 : if from_db.len() != 1 {
1541 0 : return Err(DatabaseError::Logical(format!(
1542 0 : "unexpected number of rows ({})",
1543 0 : from_db.len()
1544 0 : )));
1545 0 : }
1546 :
1547 0 : Ok(Some(from_db.pop().unwrap().into_persistence()))
1548 0 : })
1549 0 : })
1550 0 : .await?;
1551 :
1552 0 : Ok(timeline_from_db)
1553 0 : }
1554 :
1555 : /// Set `deleted_at` for the given timeline
1556 0 : pub(crate) async fn timeline_set_deleted_at(
1557 0 : &self,
1558 0 : tenant_id: TenantId,
1559 0 : timeline_id: TimelineId,
1560 0 : ) -> DatabaseResult<()> {
1561 : use crate::schema::timelines;
1562 :
1563 0 : let deletion_time = chrono::Local::now().to_utc();
1564 0 : self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
1565 0 : Box::pin(async move {
1566 0 : let updated = diesel::update(timelines::table)
1567 0 : .filter(timelines::tenant_id.eq(tenant_id.to_string()))
1568 0 : .filter(timelines::timeline_id.eq(timeline_id.to_string()))
1569 0 : .set(timelines::deleted_at.eq(Some(deletion_time)))
1570 0 : .execute(conn)
1571 0 : .await?;
1572 :
1573 0 : match updated {
1574 0 : 0 | 1 => Ok(()),
1576 0 : _ => Err(DatabaseError::Logical(format!(
1577 0 : "unexpected number of rows ({updated})"
1578 0 : ))),
1579 : }
1580 0 : })
1581 0 : })
1582 0 : .await
1583 0 : }
1584 :
1585 : /// Delete the timeline from the database.
1586 : ///
1587 : /// Only deletes rows whose `deleted_at` is set, so call [`Self::timeline_set_deleted_at`] beforehand.
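    ///
    /// # Example
    ///
    /// Sketch of the two-step deletion (not compiled as a doctest); `persistence`, `tenant_id`
    /// and `timeline_id` are assumed to be in scope:
    ///
    /// ```ignore
    /// persistence.timeline_set_deleted_at(tenant_id, timeline_id).await?;
    /// persistence.delete_timeline(tenant_id, timeline_id).await?;
    /// ```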
1588 0 : pub(crate) async fn delete_timeline(
1589 0 : &self,
1590 0 : tenant_id: TenantId,
1591 0 : timeline_id: TimelineId,
1592 0 : ) -> DatabaseResult<()> {
1593 : use crate::schema::timelines::dsl;
1594 :
1595 0 : let tenant_id = &tenant_id;
1596 0 : let timeline_id = &timeline_id;
1597 0 : self.with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
1598 0 : Box::pin(async move {
1599 0 : diesel::delete(dsl::timelines)
1600 0 : .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
1601 0 : .filter(dsl::timeline_id.eq(&timeline_id.to_string()))
1602 0 : .filter(dsl::deleted_at.is_not_null())
1603 0 : .execute(conn)
1604 0 : .await?;
1605 0 : Ok(())
1606 0 : })
1607 0 : })
1608 0 : .await?;
1609 :
1610 0 : Ok(())
1611 0 : }
1612 :
1613 : /// Loads all timelines for the given tenant from the database.
1614 0 : pub(crate) async fn list_timelines_for_tenant(
1615 0 : &self,
1616 0 : tenant_id: TenantId,
1617 0 : ) -> DatabaseResult<Vec<TimelinePersistence>> {
1618 : use crate::schema::timelines::dsl;
1619 :
1620 0 : let tenant_id = &tenant_id;
1621 0 : let timelines = self
1622 0 : .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
1623 0 : Box::pin(async move {
1624 0 : let timelines: Vec<TimelineFromDb> = dsl::timelines
1625 0 : .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
1626 0 : .load(conn)
1627 0 : .await?;
1628 0 : Ok(timelines)
1629 0 : })
1630 0 : })
1631 0 : .await?;
1632 :
1633 0 : let timelines = timelines
1634 0 : .into_iter()
1635 0 : .map(TimelineFromDb::into_persistence)
1636 0 : .collect();
1637 0 : Ok(timelines)
1638 0 : }
1639 :
1640 : /// Persist a pending op. Returns whether the row was written. If it wasn't, no writes were performed.
1641 0 : pub(crate) async fn insert_pending_op(
1642 0 : &self,
1643 0 : entry: TimelinePendingOpPersistence,
1644 0 : ) -> DatabaseResult<bool> {
1645 : use crate::schema::safekeeper_timeline_pending_ops as skpo;
1646 : // This overrides the `filter` fn used in other functions, so contain the mayhem via a function-local use
1647 : use diesel::query_dsl::methods::FilterDsl;
1648 :
1649 0 : let entry = &entry;
1650 0 : self.with_measured_conn(DatabaseOperation::InsertTimelineReconcile, move |conn| {
1651 0 : Box::pin(async move {
1652 : // For simplicity we keep only the latest operation per
1653 : // (tenant, timeline, sk) tuple: if we migrated a timeline away
1654 : // from a node and are adding it back, there is no need to remove
1655 : // the data on it first. Hence, the generation is not part of the
1656 : // primary key and we override any rows with lower generations here.
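                // The statement below compiles to roughly this SQL (illustrative only;
                // the column lists are elided and the generation bound comes from `entry`):
                //
                //   INSERT INTO safekeeper_timeline_pending_ops (...)
                //   VALUES (...)
                //   ON CONFLICT (tenant_id, timeline_id, sk_id) DO UPDATE
                //   SET ...
                //   WHERE safekeeper_timeline_pending_ops.generation < $entry_generation
                //
                // A conflicting row is therefore only overwritten when the new op has a
                // strictly higher generation; otherwise zero rows are affected and we
                // return `false` below.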
1657 0 : let inserted_updated = diesel::insert_into(skpo::table)
1658 0 : .values(entry)
1659 0 : .on_conflict((skpo::tenant_id, skpo::timeline_id, skpo::sk_id))
1660 0 : .do_update()
1661 0 : .set(entry)
1662 0 : .filter(skpo::generation.lt(entry.generation))
1663 0 : .execute(conn)
1664 0 : .await?;
1665 :
1666 0 : match inserted_updated {
1667 0 : 0 => Ok(false),
1668 0 : 1 => Ok(true),
1669 0 : _ => Err(DatabaseError::Logical(format!(
1670 0 : "unexpected number of rows ({inserted_updated})"
1671 0 : ))),
1672 : }
1673 0 : })
1674 0 : })
1675 0 : .await
1676 0 : }
1677 : /// Remove persisted pending op.
1678 0 : pub(crate) async fn remove_pending_op(
1679 0 : &self,
1680 0 : tenant_id: TenantId,
1681 0 : timeline_id: Option<TimelineId>,
1682 0 : sk_id: NodeId,
1683 0 : generation: u32,
1684 0 : ) -> DatabaseResult<()> {
1685 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1686 :
1687 0 : let tenant_id = &tenant_id;
1688 0 : let timeline_id = &timeline_id;
1689 0 : self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
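            // A `None` timeline id is stored as an empty string; such rows represent
            // tenant-global pending ops rather than ops for one specific timeline.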
1690 0 : let timeline_id_str = timeline_id.map(|tid| tid.to_string()).unwrap_or_default();
1691 0 : Box::pin(async move {
1692 0 : diesel::delete(dsl::safekeeper_timeline_pending_ops)
1693 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1694 0 : .filter(dsl::timeline_id.eq(timeline_id_str))
1695 0 : .filter(dsl::sk_id.eq(sk_id.0 as i64))
1696 0 : .filter(dsl::generation.eq(generation as i32))
1697 0 : .execute(conn)
1698 0 : .await?;
1699 0 : Ok(())
1700 0 : })
1701 0 : })
1702 0 : .await
1703 0 : }
1704 :
1705 : /// Load pending operations from db, joined together with timeline data.
1706 0 : pub(crate) async fn list_pending_ops_with_timelines(
1707 0 : &self,
1708 0 : ) -> DatabaseResult<Vec<(TimelinePendingOpPersistence, Option<TimelinePersistence>)>> {
1709 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1710 : use crate::schema::timelines;
1711 :
1712 0 : let timeline_from_db = self
1713 0 : .with_measured_conn(
1714 0 : DatabaseOperation::ListTimelineReconcileStartup,
1715 0 : move |conn| {
1716 0 : Box::pin(async move {
1717 0 : let from_db: Vec<(TimelinePendingOpPersistence, Option<TimelineFromDb>)> =
1718 0 : dsl::safekeeper_timeline_pending_ops
1719 0 : .left_join(
1720 0 : timelines::table.on(timelines::tenant_id
1721 0 : .eq(dsl::tenant_id)
1722 0 : .and(timelines::timeline_id.eq(dsl::timeline_id))),
1723 0 : )
1724 0 : .select((
1725 0 : TimelinePendingOpPersistence::as_select(),
1726 0 : Option::<TimelineFromDb>::as_select(),
1727 0 : ))
1728 0 : .load(conn)
1729 0 : .await?;
1730 0 : Ok(from_db)
1731 0 : })
1732 0 : },
1733 : )
1734 0 : .await?;
1735 :
1736 0 : Ok(timeline_from_db
1737 0 : .into_iter()
1738 0 : .map(|(op, tl_opt)| (op, tl_opt.map(|tl_opt| tl_opt.into_persistence())))
1739 0 : .collect())
1740 0 : }
1741 : /// List pending operations for a given timeline (including tenant-global ones)
1742 0 : pub(crate) async fn list_pending_ops_for_timeline(
1743 0 : &self,
1744 0 : tenant_id: TenantId,
1745 0 : timeline_id: TimelineId,
1746 0 : ) -> DatabaseResult<Vec<TimelinePendingOpPersistence>> {
1747 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1748 :
1749 0 : let timelines_from_db = self
1750 0 : .with_measured_conn(DatabaseOperation::ListTimelineReconcile, move |conn| {
1751 0 : Box::pin(async move {
1752 0 : let from_db: Vec<TimelinePendingOpPersistence> =
1753 0 : dsl::safekeeper_timeline_pending_ops
1754 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1755 0 : .filter(
1756 0 : dsl::timeline_id
1757 0 : .eq(timeline_id.to_string())
1758 0 : .or(dsl::timeline_id.eq("")),
1759 0 : )
1760 0 : .load(conn)
1761 0 : .await?;
1762 0 : Ok(from_db)
1763 0 : })
1764 0 : })
1765 0 : .await?;
1766 :
1767 0 : Ok(timelines_from_db)
1768 0 : }
1769 :
1770 : /// Delete all pending ops for the given timeline.
1771 : ///
1772 : /// Use this only at timeline deletion; otherwise use the generation-based APIs.
1773 0 : pub(crate) async fn remove_pending_ops_for_timeline(
1774 0 : &self,
1775 0 : tenant_id: TenantId,
1776 0 : timeline_id: Option<TimelineId>,
1777 0 : ) -> DatabaseResult<()> {
1778 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1779 :
1780 0 : let tenant_id = &tenant_id;
1781 0 : let timeline_id = &timeline_id;
1782 0 : self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
1783 0 : let timeline_id_str = timeline_id.map(|tid| tid.to_string()).unwrap_or_default();
1784 0 : Box::pin(async move {
1785 0 : diesel::delete(dsl::safekeeper_timeline_pending_ops)
1786 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1787 0 : .filter(dsl::timeline_id.eq(timeline_id_str))
1788 0 : .execute(conn)
1789 0 : .await?;
1790 0 : Ok(())
1791 0 : })
1792 0 : })
1793 0 : .await?;
1794 :
1795 0 : Ok(())
1796 0 : }
1797 :
1798 0 : pub(crate) async fn insert_timeline_import(
1799 0 : &self,
1800 0 : import: TimelineImportPersistence,
1801 0 : ) -> DatabaseResult<bool> {
1802 0 : self.with_measured_conn(DatabaseOperation::InsertTimelineImport, move |conn| {
1803 0 : Box::pin({
1804 0 : let import = import.clone();
1805 0 : async move {
1806 0 : let inserted = diesel::insert_into(crate::schema::timeline_imports::table)
1807 0 : .values(import)
1808 0 : .execute(conn)
1809 0 : .await?;
1810 0 : Ok(inserted == 1)
1811 0 : }
1812 : })
1813 0 : })
1814 0 : .await
1815 0 : }
1816 :
1817 0 : pub(crate) async fn list_timeline_imports(&self) -> DatabaseResult<Vec<TimelineImport>> {
1818 : use crate::schema::timeline_imports::dsl;
1819 0 : let persistent = self
1820 0 : .with_measured_conn(DatabaseOperation::ListTimelineImports, move |conn| {
1821 0 : Box::pin(async move {
1822 0 : let from_db: Vec<TimelineImportPersistence> =
1823 0 : dsl::timeline_imports.load(conn).await?;
1824 0 : Ok(from_db)
1825 0 : })
1826 0 : })
1827 0 : .await?;
1828 :
1829 0 : let imports: Result<Vec<TimelineImport>, _> = persistent
1830 0 : .into_iter()
1831 0 : .map(TimelineImport::from_persistent)
1832 0 : .collect();
1833 0 : match imports {
1834 0 : Ok(ok) => Ok(ok.into_iter().collect()),
1835 0 : Err(err) => Err(DatabaseError::Logical(format!(
1836 0 : "failed to deserialize import: {err}"
1837 0 : ))),
1838 : }
1839 0 : }
1840 :
1841 0 : pub(crate) async fn get_timeline_import(
1842 0 : &self,
1843 0 : tenant_id: TenantId,
1844 0 : timeline_id: TimelineId,
1845 0 : ) -> DatabaseResult<Option<TimelineImport>> {
1846 : use crate::schema::timeline_imports::dsl;
1847 0 : let persistent_import = self
1848 0 : .with_measured_conn(DatabaseOperation::ListTimelineImports, move |conn| {
1849 0 : Box::pin(async move {
1850 0 : let mut from_db: Vec<TimelineImportPersistence> = dsl::timeline_imports
1851 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1852 0 : .filter(dsl::timeline_id.eq(timeline_id.to_string()))
1853 0 : .load(conn)
1854 0 : .await?;
1855 :
1856 0 : if from_db.len() > 1 {
1857 0 : return Err(DatabaseError::Logical(format!(
1858 0 : "unexpected number of rows ({})",
1859 0 : from_db.len()
1860 0 : )));
1861 0 : }
1862 :
1863 0 : Ok(from_db.pop())
1864 0 : })
1865 0 : })
1866 0 : .await?;
1867 :
1868 0 : persistent_import
1869 0 : .map(TimelineImport::from_persistent)
1870 0 : .transpose()
1871 0 : .map_err(|err| DatabaseError::Logical(format!("failed to deserialize import: {err}")))
1872 0 : }
1873 :
1874 0 : pub(crate) async fn delete_timeline_import(
1875 0 : &self,
1876 0 : tenant_id: TenantId,
1877 0 : timeline_id: TimelineId,
1878 0 : ) -> DatabaseResult<()> {
1879 : use crate::schema::timeline_imports::dsl;
1880 :
1881 0 : self.with_measured_conn(DatabaseOperation::DeleteTimelineImport, move |conn| {
1882 0 : Box::pin(async move {
1883 0 : diesel::delete(crate::schema::timeline_imports::table)
1884 0 : .filter(
1885 0 : dsl::tenant_id
1886 0 : .eq(tenant_id.to_string())
1887 0 : .and(dsl::timeline_id.eq(timeline_id.to_string())),
1888 0 : )
1889 0 : .execute(conn)
1890 0 : .await?;
1891 :
1892 0 : Ok(())
1893 0 : })
1894 0 : })
1895 0 : .await
1896 0 : }
1897 :
1898 : /// Idempotently update the status of one shard for an ongoing timeline import
1899 : ///
1900 : /// If the update was persisted to the database, then the current state of the
1901 : /// import is returned to the caller. In case of logical errors a bespoke
1902 : /// [`TimelineImportUpdateError`] instance is returned. Other database errors
1903 : /// are covered by the outer [`DatabaseError`].
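    ///
    /// # Example
    ///
    /// A sketch of how a caller might interpret the nested result (not compiled as a doctest);
    /// `persistence`, `tenant_shard_id`, `timeline_id` and `shard_status` are assumed to be in scope:
    ///
    /// ```ignore
    /// match persistence
    ///     .update_timeline_import(tenant_shard_id, timeline_id, shard_status)
    ///     .await?
    /// {
    ///     Ok(Some(import)) => { /* the update was persisted; `import` is the current state */ }
    ///     Ok(None) => { /* nothing needed persisting, e.g. the status was already recorded */ }
    ///     Err(TimelineImportUpdateError::ImportNotFound { .. }) => { /* no such import in the db */ }
    ///     Err(err) => { /* other logical error reported by the in-memory update */ }
    /// }
    /// ```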
1904 0 : pub(crate) async fn update_timeline_import(
1905 0 : &self,
1906 0 : tenant_shard_id: TenantShardId,
1907 0 : timeline_id: TimelineId,
1908 0 : shard_status: ShardImportStatus,
1909 0 : ) -> DatabaseResult<Result<Option<TimelineImport>, TimelineImportUpdateError>> {
1910 : use crate::schema::timeline_imports::dsl;
1911 :
1912 0 : self.with_measured_conn(DatabaseOperation::UpdateTimelineImport, move |conn| {
1913 0 : Box::pin({
1914 0 : let shard_status = shard_status.clone();
1915 0 : async move {
1916 : // Load the current state from the database
1917 0 : let mut from_db: Vec<TimelineImportPersistence> = dsl::timeline_imports
1918 0 : .filter(
1919 0 : dsl::tenant_id
1920 0 : .eq(tenant_shard_id.tenant_id.to_string())
1921 0 : .and(dsl::timeline_id.eq(timeline_id.to_string())),
1922 0 : )
1923 0 : .load(conn)
1924 0 : .await?;
1925 :
1926 0 : assert!(from_db.len() <= 1);
1927 :
1928 0 : let mut status = match from_db.pop() {
1929 0 : Some(some) => TimelineImport::from_persistent(some).unwrap(),
1930 : None => {
1931 0 : return Ok(Err(TimelineImportUpdateError::ImportNotFound {
1932 0 : tenant_id: tenant_shard_id.tenant_id,
1933 0 : timeline_id,
1934 0 : }));
1935 : }
1936 : };
1937 :
1938 : // Perform the update in-memory
1939 0 : let follow_up = match status.update(tenant_shard_id.to_index(), shard_status) {
1940 0 : Ok(ok) => ok,
1941 0 : Err(err) => {
1942 0 : return Ok(Err(err));
1943 : }
1944 : };
1945 :
1946 0 : let new_persistent = status.to_persistent();
1947 :
1948 : // Write back if required (in the same transaction)
1949 0 : match follow_up {
1950 : TimelineImportUpdateFollowUp::Persist => {
1951 0 : let updated = diesel::update(dsl::timeline_imports)
1952 0 : .filter(
1953 0 : dsl::tenant_id
1954 0 : .eq(tenant_shard_id.tenant_id.to_string())
1955 0 : .and(dsl::timeline_id.eq(timeline_id.to_string())),
1956 0 : )
1957 0 : .set(dsl::shard_statuses.eq(new_persistent.shard_statuses))
1958 0 : .execute(conn)
1959 0 : .await?;
1960 :
1961 0 : if updated != 1 {
1962 0 : return Ok(Err(TimelineImportUpdateError::ImportNotFound {
1963 0 : tenant_id: tenant_shard_id.tenant_id,
1964 0 : timeline_id,
1965 0 : }));
1966 0 : }
1967 :
1968 0 : Ok(Ok(Some(status)))
1969 : }
1970 0 : TimelineImportUpdateFollowUp::None => Ok(Ok(None)),
1971 : }
1972 0 : }
1973 : })
1974 0 : })
1975 0 : .await
1976 0 : }
1977 :
1978 0 : pub(crate) async fn is_tenant_importing_timeline(
1979 0 : &self,
1980 0 : tenant_id: TenantId,
1981 0 : ) -> DatabaseResult<bool> {
1982 : use crate::schema::timeline_imports::dsl;
1983 0 : self.with_measured_conn(DatabaseOperation::IsTenantImportingTimeline, move |conn| {
1984 0 : Box::pin(async move {
1985 0 : let imports: i64 = dsl::timeline_imports
1986 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1987 0 : .count()
1988 0 : .get_result(conn)
1989 0 : .await?;
1990 :
1991 0 : Ok(imports > 0)
1992 0 : })
1993 0 : })
1994 0 : .await
1995 0 : }
1996 : }
1997 :
1998 0 : pub(crate) fn load_certs() -> anyhow::Result<Arc<rustls::RootCertStore>> {
1999 0 : let der_certs = rustls_native_certs::load_native_certs();
2000 :
2001 0 : if !der_certs.errors.is_empty() {
2002 0 : anyhow::bail!("could not parse certificates: {:?}", der_certs.errors);
2003 0 : }
2004 :
2005 0 : let mut store = rustls::RootCertStore::empty();
2006 0 : store.add_parsable_certificates(der_certs.certs);
2007 0 : Ok(Arc::new(store))
2008 0 : }
2009 :
2010 : /// A verifier that accepts all server certificates, but still logs the validation error
2011 : #[derive(Debug)]
2012 : struct AcceptAll(Arc<WebPkiServerVerifier>);
2013 : impl ServerCertVerifier for AcceptAll {
2014 0 : fn verify_server_cert(
2015 0 : &self,
2016 0 : end_entity: &rustls::pki_types::CertificateDer<'_>,
2017 0 : intermediates: &[rustls::pki_types::CertificateDer<'_>],
2018 0 : server_name: &rustls::pki_types::ServerName<'_>,
2019 0 : ocsp_response: &[u8],
2020 0 : now: rustls::pki_types::UnixTime,
2021 0 : ) -> Result<ServerCertVerified, rustls::Error> {
2022 0 : let r =
2023 0 : self.0
2024 0 : .verify_server_cert(end_entity, intermediates, server_name, ocsp_response, now);
2025 0 : if let Err(err) = r {
2026 0 : tracing::info!(
2027 : ?server_name,
2028 0 : "ignoring db connection TLS validation error: {err:?}"
2029 : );
2030 0 : return Ok(ServerCertVerified::assertion());
2031 0 : }
2032 0 : r
2033 0 : }
2034 0 : fn verify_tls12_signature(
2035 0 : &self,
2036 0 : message: &[u8],
2037 0 : cert: &rustls::pki_types::CertificateDer<'_>,
2038 0 : dss: &rustls::DigitallySignedStruct,
2039 0 : ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
2040 0 : self.0.verify_tls12_signature(message, cert, dss)
2041 0 : }
2042 0 : fn verify_tls13_signature(
2043 0 : &self,
2044 0 : message: &[u8],
2045 0 : cert: &rustls::pki_types::CertificateDer<'_>,
2046 0 : dss: &rustls::DigitallySignedStruct,
2047 0 : ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
2048 0 : self.0.verify_tls13_signature(message, cert, dss)
2049 0 : }
2050 0 : fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
2051 0 : self.0.supported_verify_schemes()
2052 0 : }
2053 : }
2054 :
2055 : /// Loads the root certificates and constructs a client config suitable for connecting.
2056 : /// This function is blocking.
2057 0 : fn client_config_with_root_certs() -> anyhow::Result<rustls::ClientConfig> {
2058 0 : let client_config =
2059 0 : rustls::ClientConfig::builder_with_provider(Arc::new(ring::default_provider()))
2060 0 : .with_safe_default_protocol_versions()
2061 0 : .expect("ring should support the default protocol versions");
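    // Certificate verification is opt-in: setting the STORCON_DB_CERT_CHECKS environment
    // variable (to any value) enables full verification; otherwise validation errors are
    // logged and accepted via `AcceptAll`. The decision is cached for the process lifetime.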
2062 : static DO_CERT_CHECKS: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
2063 0 : let do_cert_checks =
2064 0 : DO_CERT_CHECKS.get_or_init(|| std::env::var("STORCON_DB_CERT_CHECKS").is_ok());
2065 0 : Ok(if *do_cert_checks {
2066 0 : client_config
2067 0 : .with_root_certificates(load_certs()?)
2068 0 : .with_no_client_auth()
2069 : } else {
2070 0 : let verifier = AcceptAll(
2071 0 : WebPkiServerVerifier::builder_with_provider(
2072 0 : load_certs()?,
2073 0 : Arc::new(ring::default_provider()),
2074 : )
2075 0 : .build()?,
2076 : );
2077 0 : client_config
2078 0 : .dangerous()
2079 0 : .with_custom_certificate_verifier(Arc::new(verifier))
2080 0 : .with_no_client_auth()
2081 : })
2082 0 : }
2083 :
2084 0 : fn establish_connection_rustls(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
2085 0 : let fut = async {
2086 : // We first set up the way we want rustls to work.
2087 0 : let rustls_config = client_config_with_root_certs()
2088 0 : .map_err(|err| ConnectionError::BadConnection(format!("{err:?}")))?;
2089 0 : let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
2090 0 : let (client, conn) = tokio_postgres::connect(config, tls)
2091 0 : .await
2092 0 : .map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
2093 :
2094 0 : AsyncPgConnection::try_from_client_and_connection(client, conn).await
2095 0 : };
2096 0 : fut.boxed()
2097 0 : }
2098 :
2099 : #[cfg_attr(test, test)]
2100 1 : fn test_config_debug_censors_password() {
2101 1 : let has_pw =
2102 1 : "host=/var/lib/postgresql,localhost port=1234 user=specialuser password='NOT ALLOWED TAG'";
2103 1 : let has_pw_cfg = has_pw.parse::<tokio_postgres::Config>().unwrap();
2104 1 : assert!(format!("{has_pw_cfg:?}").contains("specialuser"));
2105 : // Ensure that the password is not leaked by the debug impl
2106 1 : assert!(!format!("{has_pw_cfg:?}").contains("NOT ALLOWED TAG"));
2107 1 : }
2108 :
2109 0 : fn log_postgres_connstr_info(config_str: &str) -> anyhow::Result<()> {
2110 0 : let config = config_str
2111 0 : .parse::<tokio_postgres::Config>()
2112 0 : .map_err(|_e| anyhow::anyhow!("Couldn't parse config str"))?;
2113 : // We use debug formatting here, and use a unit test to ensure that we don't leak the password.
2114 : // To make extra sure the test gets run, run it every time the function is called
2115 : // (this is rather cold code, we can afford it).
2116 : #[cfg(not(test))]
2117 0 : test_config_debug_censors_password();
2118 0 : tracing::info!("database connection config: {config:?}");
2119 0 : Ok(())
2120 0 : }
2121 :
2122 : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
2123 : #[derive(
2124 0 : QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
2125 : )]
2126 : #[diesel(table_name = crate::schema::tenant_shards)]
2127 : pub(crate) struct TenantShardPersistence {
2128 : #[serde(default)]
2129 : pub(crate) tenant_id: String,
2130 : #[serde(default)]
2131 : pub(crate) shard_number: i32,
2132 : #[serde(default)]
2133 : pub(crate) shard_count: i32,
2134 : #[serde(default)]
2135 : pub(crate) shard_stripe_size: i32,
2136 :
2137 : // Latest generation number: next time we attach, increment this
2138 : // and use the incremented number when attaching.
2139 : //
2140 : // Generation is only None when first onboarding a tenant, where it may
2141 : // be in PlacementPolicy::Secondary and therefore have no valid generation state.
2142 : pub(crate) generation: Option<i32>,
2143 :
2144 : // Currently attached pageserver
2145 : #[serde(rename = "pageserver")]
2146 : pub(crate) generation_pageserver: Option<i64>,
2147 :
2148 : #[serde(default)]
2149 : pub(crate) placement_policy: String,
2150 : #[serde(default)]
2151 : pub(crate) splitting: SplitState,
2152 : #[serde(default)]
2153 : pub(crate) config: String,
2154 : #[serde(default)]
2155 : pub(crate) scheduling_policy: String,
2156 :
2157 : // Hint that we should attempt to schedule this tenant shard in the given
2158 : // availability zone in order to minimise the chances of cross-AZ communication
2159 : // with compute.
2160 : pub(crate) preferred_az_id: Option<String>,
2161 : }
2162 :
2163 : impl TenantShardPersistence {
2164 0 : fn get_shard_count(&self) -> Result<ShardCount, ShardConfigError> {
2165 0 : self.shard_count
2166 0 : .try_into()
2167 0 : .map(ShardCount)
2168 0 : .map_err(|_| ShardConfigError::InvalidCount)
2169 0 : }
2170 :
2171 0 : fn get_shard_number(&self) -> Result<ShardNumber, ShardConfigError> {
2172 0 : self.shard_number
2173 0 : .try_into()
2174 0 : .map(ShardNumber)
2175 0 : .map_err(|_| ShardConfigError::InvalidNumber)
2176 0 : }
2177 :
2178 0 : fn get_stripe_size(&self) -> Result<ShardStripeSize, ShardConfigError> {
2179 0 : self.shard_stripe_size
2180 0 : .try_into()
2181 0 : .map(ShardStripeSize)
2182 0 : .map_err(|_| ShardConfigError::InvalidStripeSize)
2183 0 : }
2184 :
2185 0 : pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
2186 0 : if self.shard_count == 0 {
2187 : // NB: carry over the stripe size from the persisted record, to avoid consistency check
2188 : // failures if the persisted value differs from the default stripe size. The stripe size
2189 : // doesn't really matter for unsharded tenants anyway.
2190 0 : Ok(ShardIdentity::unsharded_with_stripe_size(
2191 0 : self.get_stripe_size()?,
2192 : ))
2193 : } else {
2194 0 : Ok(ShardIdentity::new(
2195 0 : self.get_shard_number()?,
2196 0 : self.get_shard_count()?,
2197 0 : self.get_stripe_size()?,
2198 0 : )?)
2199 : }
2200 0 : }
2201 :
2202 0 : pub(crate) fn get_tenant_shard_id(&self) -> anyhow::Result<TenantShardId> {
2203 : Ok(TenantShardId {
2204 0 : tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
2205 0 : shard_number: self.get_shard_number()?,
2206 0 : shard_count: self.get_shard_count()?,
2207 : })
2208 0 : }
2209 : }
2210 :
2211 : /// Parts of [`crate::node::Node`] that are stored durably
2212 0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
2213 : #[diesel(table_name = crate::schema::nodes)]
2214 : pub(crate) struct NodePersistence {
2215 : pub(crate) node_id: i64,
2216 : pub(crate) scheduling_policy: String,
2217 : pub(crate) listen_http_addr: String,
2218 : pub(crate) listen_http_port: i32,
2219 : pub(crate) listen_pg_addr: String,
2220 : pub(crate) listen_pg_port: i32,
2221 : pub(crate) availability_zone_id: String,
2222 : pub(crate) listen_https_port: Option<i32>,
2223 : pub(crate) lifecycle: String,
2224 : pub(crate) listen_grpc_addr: Option<String>,
2225 : pub(crate) listen_grpc_port: Option<i32>,
2226 : }
2227 :
2228 : /// Tenant metadata health status that is stored durably.
2229 0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
2230 : #[diesel(table_name = crate::schema::metadata_health)]
2231 : pub(crate) struct MetadataHealthPersistence {
2232 : #[serde(default)]
2233 : pub(crate) tenant_id: String,
2234 : #[serde(default)]
2235 : pub(crate) shard_number: i32,
2236 : #[serde(default)]
2237 : pub(crate) shard_count: i32,
2238 :
2239 : pub(crate) healthy: bool,
2240 : pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
2241 : }
2242 :
2243 : impl MetadataHealthPersistence {
2244 0 : pub fn new(
2245 0 : tenant_shard_id: TenantShardId,
2246 0 : healthy: bool,
2247 0 : last_scrubbed_at: chrono::DateTime<chrono::Utc>,
2248 0 : ) -> Self {
2249 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
2250 0 : let shard_number = tenant_shard_id.shard_number.0 as i32;
2251 0 : let shard_count = tenant_shard_id.shard_count.literal() as i32;
2252 :
2253 0 : MetadataHealthPersistence {
2254 0 : tenant_id,
2255 0 : shard_number,
2256 0 : shard_count,
2257 0 : healthy,
2258 0 : last_scrubbed_at,
2259 0 : }
2260 0 : }
2261 :
2262 : #[allow(dead_code)]
2263 0 : pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
2264 : Ok(TenantShardId {
2265 0 : tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
2266 0 : shard_number: ShardNumber(self.shard_number as u8),
2267 0 : shard_count: ShardCount::new(self.shard_count as u8),
2268 : })
2269 0 : }
2270 : }
2271 :
2272 : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
2273 0 : fn from(value: MetadataHealthPersistence) -> Self {
2274 0 : MetadataHealthRecord {
2275 0 : tenant_shard_id: value
2276 0 : .get_tenant_shard_id()
2277 0 : .expect("stored tenant id should be valid"),
2278 0 : healthy: value.healthy,
2279 0 : last_scrubbed_at: value.last_scrubbed_at,
2280 0 : }
2281 0 : }
2282 : }
2283 :
2284 : #[derive(
2285 0 : Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
2286 : )]
2287 : #[diesel(table_name = crate::schema::controllers)]
2288 : pub(crate) struct ControllerPersistence {
2289 : pub(crate) address: String,
2290 : pub(crate) started_at: chrono::DateTime<chrono::Utc>,
2291 : }
2292 :
2293 : // What we store in the database
2294 0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
2295 : #[diesel(table_name = crate::schema::safekeepers)]
2296 : pub(crate) struct SafekeeperPersistence {
2297 : pub(crate) id: i64,
2298 : pub(crate) region_id: String,
2299 : /// 1 is special: it means just created (not currently posted to storcon).
2300 : /// Zero or negative is not really expected.
2301 : /// Otherwise, the number from the `release-$(number_of_commits_on_branch)` tag.
2302 : pub(crate) version: i64,
2303 : pub(crate) host: String,
2304 : pub(crate) port: i32,
2305 : pub(crate) http_port: i32,
2306 : pub(crate) availability_zone_id: String,
2307 : pub(crate) scheduling_policy: SkSchedulingPolicyFromSql,
2308 : pub(crate) https_port: Option<i32>,
2309 : }
2310 :
2311 : /// Wrapper struct around [`SkSchedulingPolicy`]: both it and [`FromSql`] are from foreign crates (so the
2312 : /// orphan rule forbids implementing the trait directly), and we don't want to make [`safekeeper_api`] depend on [`diesel`].
2313 0 : #[derive(Serialize, Deserialize, FromSqlRow, Eq, PartialEq, Debug, Copy, Clone)]
2314 : pub(crate) struct SkSchedulingPolicyFromSql(pub(crate) SkSchedulingPolicy);
2315 :
2316 : impl From<SkSchedulingPolicy> for SkSchedulingPolicyFromSql {
2317 0 : fn from(value: SkSchedulingPolicy) -> Self {
2318 0 : SkSchedulingPolicyFromSql(value)
2319 0 : }
2320 : }
2321 :
2322 : impl FromSql<diesel::sql_types::VarChar, Pg> for SkSchedulingPolicyFromSql {
2323 0 : fn from_sql(
2324 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
2325 0 : ) -> diesel::deserialize::Result<Self> {
2326 0 : let bytes = bytes.as_bytes();
2327 0 : match core::str::from_utf8(bytes) {
2328 0 : Ok(s) => match SkSchedulingPolicy::from_str(s) {
2329 0 : Ok(policy) => Ok(SkSchedulingPolicyFromSql(policy)),
2330 0 : Err(e) => Err(format!("can't parse: {e}").into()),
2331 : },
2332 0 : Err(e) => Err(format!("invalid UTF-8 for scheduling policy: {e}").into()),
2333 : }
2334 0 : }
2335 : }
2336 :
2337 : impl SafekeeperPersistence {
2338 0 : pub(crate) fn from_upsert(
2339 0 : upsert: SafekeeperUpsert,
2340 0 : scheduling_policy: SkSchedulingPolicy,
2341 0 : ) -> Self {
2342 0 : crate::persistence::SafekeeperPersistence {
2343 0 : id: upsert.id,
2344 0 : region_id: upsert.region_id,
2345 0 : version: upsert.version,
2346 0 : host: upsert.host,
2347 0 : port: upsert.port,
2348 0 : http_port: upsert.http_port,
2349 0 : https_port: upsert.https_port,
2350 0 : availability_zone_id: upsert.availability_zone_id,
2351 0 : scheduling_policy: SkSchedulingPolicyFromSql(scheduling_policy),
2352 0 : }
2353 0 : }
2354 0 : pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
2355 0 : Ok(SafekeeperDescribeResponse {
2356 0 : id: NodeId(self.id as u64),
2357 0 : region_id: self.region_id.clone(),
2358 0 : version: self.version,
2359 0 : host: self.host.clone(),
2360 0 : port: self.port,
2361 0 : http_port: self.http_port,
2362 0 : https_port: self.https_port,
2363 0 : availability_zone_id: self.availability_zone_id.clone(),
2364 0 : scheduling_policy: self.scheduling_policy.0,
2365 0 : })
2366 0 : }
2367 : }
2368 :
2369 : /// What we expect from the upsert HTTP API
2370 0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
2371 : pub(crate) struct SafekeeperUpsert {
2372 : pub(crate) id: i64,
2373 : pub(crate) region_id: String,
2374 : /// 1 is special: it means just created (not currently posted to storcon).
2375 : /// Zero or negative is not really expected.
2376 : /// Otherwise, the number from the `release-$(number_of_commits_on_branch)` tag.
2377 : pub(crate) version: i64,
2378 : pub(crate) host: String,
2379 : pub(crate) port: i32,
2380 : /// The active flag will not be stored in the database and will be ignored.
2381 : pub(crate) active: Option<bool>,
2382 : pub(crate) http_port: i32,
2383 : pub(crate) https_port: Option<i32>,
2384 : pub(crate) availability_zone_id: String,
2385 : }
2386 :
2387 : impl SafekeeperUpsert {
2388 0 : fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
2389 0 : if self.version < 0 {
2390 0 : anyhow::bail!("negative version: {}", self.version);
2391 0 : }
2392 0 : Ok(InsertUpdateSafekeeper {
2393 0 : id: self.id,
2394 0 : region_id: &self.region_id,
2395 0 : version: self.version,
2396 0 : host: &self.host,
2397 0 : port: self.port,
2398 0 : http_port: self.http_port,
2399 0 : https_port: self.https_port,
2400 0 : availability_zone_id: &self.availability_zone_id,
2401 0 : // None means we don't want to update this column here; it can be updated via other means.
2402 0 : scheduling_policy: None,
2403 0 : })
2404 0 : }
2405 : }
2406 :
2407 : #[derive(Insertable, AsChangeset)]
2408 : #[diesel(table_name = crate::schema::safekeepers)]
2409 : struct InsertUpdateSafekeeper<'a> {
2410 : id: i64,
2411 : region_id: &'a str,
2412 : version: i64,
2413 : host: &'a str,
2414 : port: i32,
2415 : http_port: i32,
2416 : https_port: Option<i32>,
2417 : availability_zone_id: &'a str,
2418 : scheduling_policy: Option<&'a str>,
2419 : }
2420 :
2421 0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
2422 : #[diesel(sql_type = crate::schema::sql_types::PgLsn)]
2423 : pub(crate) struct LsnWrapper(pub(crate) Lsn);
2424 :
2425 : impl From<Lsn> for LsnWrapper {
2426 0 : fn from(value: Lsn) -> Self {
2427 0 : LsnWrapper(value)
2428 0 : }
2429 : }
2430 :
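// In binary mode postgres transfers `pg_lsn` values as an 8-byte big-endian integer,
// hence the conversions below read and write a big-endian u64.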
2431 : impl FromSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
2432 0 : fn from_sql(
2433 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
2434 0 : ) -> diesel::deserialize::Result<Self> {
2435 0 : let byte_arr: diesel::deserialize::Result<[u8; 8]> = bytes
2436 0 : .as_bytes()
2437 0 : .try_into()
2438 0 : .map_err(|_| "Can't obtain lsn from sql".into());
2439 0 : Ok(LsnWrapper(Lsn(u64::from_be_bytes(byte_arr?))))
2440 0 : }
2441 : }
2442 :
2443 : impl ToSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
2444 0 : fn to_sql<'b>(
2445 0 : &'b self,
2446 0 : out: &mut diesel::serialize::Output<'b, '_, Pg>,
2447 0 : ) -> diesel::serialize::Result {
2448 0 : out.write_all(&u64::to_be_bytes(self.0.0))
2449 0 : .map(|_| IsNull::No)
2450 0 : .map_err(Into::into)
2451 0 : }
2452 : }
2453 :
2454 : #[derive(Insertable, AsChangeset, Clone)]
2455 : #[diesel(table_name = crate::schema::timelines)]
2456 : pub(crate) struct TimelinePersistence {
2457 : pub(crate) tenant_id: String,
2458 : pub(crate) timeline_id: String,
2459 : pub(crate) start_lsn: LsnWrapper,
2460 : pub(crate) generation: i32,
2461 : pub(crate) sk_set: Vec<i64>,
2462 : pub(crate) new_sk_set: Option<Vec<i64>>,
2463 : pub(crate) cplane_notified_generation: i32,
2464 : pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
2465 : }
2466 :
2467 : /// This is separate from [`TimelinePersistence`] only because postgres allows NULLs
2468 : /// in arrays and there is no way to forbid that at the schema level. Hence diesel
2469 : /// wants `sk_set` to be `Vec<Option<i64>>` instead of `Vec<i64>` for
2470 : /// Queryable/Selectable. It does, however, allow insertions without the redundant
2471 : /// Option(s), so [`TimelinePersistence`] doesn't have them.
2472 0 : #[derive(Queryable, Selectable)]
2473 : #[diesel(table_name = crate::schema::timelines)]
2474 : pub(crate) struct TimelineFromDb {
2475 : pub(crate) tenant_id: String,
2476 : pub(crate) timeline_id: String,
2477 : pub(crate) start_lsn: LsnWrapper,
2478 : pub(crate) generation: i32,
2479 : pub(crate) sk_set: Vec<Option<i64>>,
2480 : pub(crate) new_sk_set: Option<Vec<Option<i64>>>,
2481 : pub(crate) cplane_notified_generation: i32,
2482 : pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
2483 : }
2484 :
2485 : impl TimelineFromDb {
2486 0 : fn into_persistence(self) -> TimelinePersistence {
2487 : // We should never encounter null entries in the sets, but we need to filter them out.
2488 : // There is no way to forbid this in the schema that diesel recognizes (to our knowledge).
2489 0 : let sk_set = self.sk_set.into_iter().flatten().collect::<Vec<_>>();
2490 0 : let new_sk_set = self
2491 0 : .new_sk_set
2492 0 : .map(|s| s.into_iter().flatten().collect::<Vec<_>>());
2493 0 : TimelinePersistence {
2494 0 : tenant_id: self.tenant_id,
2495 0 : timeline_id: self.timeline_id,
2496 0 : start_lsn: self.start_lsn,
2497 0 : generation: self.generation,
2498 0 : sk_set,
2499 0 : new_sk_set,
2500 0 : cplane_notified_generation: self.cplane_notified_generation,
2501 0 : deleted_at: self.deleted_at,
2502 0 : }
2503 0 : }
2504 : }
2505 :
2506 0 : #[derive(Insertable, AsChangeset, Queryable, Selectable, Clone)]
2507 : #[diesel(table_name = crate::schema::safekeeper_timeline_pending_ops)]
2508 : pub(crate) struct TimelinePendingOpPersistence {
2509 : pub(crate) sk_id: i64,
2510 : pub(crate) tenant_id: String,
2511 : pub(crate) timeline_id: String,
2512 : pub(crate) generation: i32,
2513 : pub(crate) op_kind: SafekeeperTimelineOpKind,
2514 : }
2515 :
2516 0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
2517 : #[diesel(sql_type = diesel::sql_types::VarChar)]
2518 : pub(crate) enum SafekeeperTimelineOpKind {
2519 : Pull,
2520 : Exclude,
2521 : Delete,
2522 : }
2523 :
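// The string values below are what gets stored in the `op_kind` varchar column;
// the `from_sql` and `to_sql` implementations must stay in sync with each other.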
2524 : impl FromSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
2525 0 : fn from_sql(
2526 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
2527 0 : ) -> diesel::deserialize::Result<Self> {
2528 0 : let bytes = bytes.as_bytes();
2529 0 : match core::str::from_utf8(bytes) {
2530 0 : Ok(s) => match s {
2531 0 : "pull" => Ok(SafekeeperTimelineOpKind::Pull),
2532 0 : "exclude" => Ok(SafekeeperTimelineOpKind::Exclude),
2533 0 : "delete" => Ok(SafekeeperTimelineOpKind::Delete),
2534 0 : _ => Err(format!("can't parse: {s}").into()),
2535 : },
2536 0 : Err(e) => Err(format!("invalid UTF-8 for op_kind: {e}").into()),
2537 : }
2538 0 : }
2539 : }
2540 :
2541 : impl ToSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
2542 0 : fn to_sql<'b>(
2543 0 : &'b self,
2544 0 : out: &mut diesel::serialize::Output<'b, '_, Pg>,
2545 0 : ) -> diesel::serialize::Result {
2546 0 : let kind_str = match self {
2547 0 : SafekeeperTimelineOpKind::Pull => "pull",
2548 0 : SafekeeperTimelineOpKind::Exclude => "exclude",
2549 0 : SafekeeperTimelineOpKind::Delete => "delete",
2550 : };
2551 0 : out.write_all(kind_str.as_bytes())
2552 0 : .map(|_| IsNull::No)
2553 0 : .map_err(Into::into)
2554 0 : }
2555 : }
2556 :
2557 0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Clone)]
2558 : #[diesel(table_name = crate::schema::timeline_imports)]
2559 : pub(crate) struct TimelineImportPersistence {
2560 : pub(crate) tenant_id: String,
2561 : pub(crate) timeline_id: String,
2562 : pub(crate) shard_statuses: serde_json::Value,
2563 : }