Line data Source code
1 : pub(crate) mod split_state;
2 : use std::collections::HashMap;
3 : use std::io::Write;
4 : use std::str::FromStr;
5 : use std::sync::Arc;
6 : use std::time::{Duration, Instant};
7 :
8 : use diesel::deserialize::{FromSql, FromSqlRow};
9 : use diesel::expression::AsExpression;
10 : use diesel::pg::Pg;
11 : use diesel::prelude::*;
12 : use diesel::serialize::{IsNull, ToSql};
13 : use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
14 : use diesel_async::pooled_connection::bb8::Pool;
15 : use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
16 : use diesel_async::{AsyncPgConnection, RunQueryDsl};
17 : use diesel_migrations::{EmbeddedMigrations, embed_migrations};
18 : use futures::FutureExt;
19 : use futures::future::BoxFuture;
20 : use itertools::Itertools;
21 : use pageserver_api::controller_api::{
22 : AvailabilityZone, MetadataHealthRecord, NodeSchedulingPolicy, PlacementPolicy,
23 : SafekeeperDescribeResponse, ShardSchedulingPolicy, SkSchedulingPolicy,
24 : };
25 : use pageserver_api::models::TenantConfig;
26 : use pageserver_api::shard::{
27 : ShardConfigError, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
28 : };
29 : use rustls::client::WebPkiServerVerifier;
30 : use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
31 : use rustls::crypto::ring;
32 : use scoped_futures::ScopedBoxFuture;
33 : use serde::{Deserialize, Serialize};
34 : use utils::generation::Generation;
35 : use utils::id::{NodeId, TenantId, TimelineId};
36 : use utils::lsn::Lsn;
37 :
38 : use self::split_state::SplitState;
39 : use crate::metrics::{
40 : DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
41 : };
42 : use crate::node::Node;
43 : const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
44 :
45 : /// ## What do we store?
46 : ///
47 : /// The storage controller service does not store most of its state durably.
48 : ///
49 : /// The essential things to store durably are:
50 : /// - generation numbers, as these must always advance monotonically to ensure data safety.
51 : /// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
52 : /// - Node's scheduling policies, as the source of truth for these is something external.
53 : ///
54 : /// Other things we store durably as an implementation detail:
55 : /// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
56 : /// but it is operationally simpler to make this service the authority for which nodes
57 : /// it talks to.
58 : ///
59 : /// ## Performance/efficiency
60 : ///
61 : /// The storage controller service does not go via the database for most things: there are
62 : /// a couple of places where we must, and where efficiency matters:
63 : /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
64 : /// before it can attach a tenant, so this acts as a bound on how fast things like
65 : /// failover can happen.
66 : /// - Pageserver re-attach: we will increment many shards' generations when this happens,
67 : /// so it is important to avoid e.g. issuing O(N) queries.
68 : ///
69 : /// Database calls relating to nodes have low performance requirements, as they are very rarely
70 : /// updated, and reads of nodes are always from memory, not the database. We only require that
71 : /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
72 : pub struct Persistence {
73 : connection_pool: Pool<AsyncPgConnection>,
74 : }
75 :
76 : /// Legacy format, for use in JSON compat objects in test environment
77 0 : #[derive(Serialize, Deserialize)]
78 : struct JsonPersistence {
79 : tenants: HashMap<TenantShardId, TenantShardPersistence>,
80 : }
81 :
82 : #[derive(thiserror::Error, Debug)]
83 : pub(crate) enum DatabaseError {
84 : #[error(transparent)]
85 : Query(#[from] diesel::result::Error),
86 : #[error(transparent)]
87 : Connection(#[from] diesel::result::ConnectionError),
88 : #[error(transparent)]
89 : ConnectionPool(#[from] diesel_async::pooled_connection::bb8::RunError),
90 : #[error("Logical error: {0}")]
91 : Logical(String),
92 : #[error("Migration error: {0}")]
93 : Migration(String),
94 : }
95 :
96 : #[derive(measured::FixedCardinalityLabel, Copy, Clone)]
97 : pub(crate) enum DatabaseOperation {
98 : InsertNode,
99 : UpdateNode,
100 : DeleteNode,
101 : ListNodes,
102 : BeginShardSplit,
103 : CompleteShardSplit,
104 : AbortShardSplit,
105 : Detach,
106 : ReAttach,
107 : IncrementGeneration,
108 : TenantGenerations,
109 : ShardGenerations,
110 : ListTenantShards,
111 : LoadTenant,
112 : InsertTenantShards,
113 : UpdateTenantShard,
114 : DeleteTenant,
115 : UpdateTenantConfig,
116 : UpdateMetadataHealth,
117 : ListMetadataHealth,
118 : ListMetadataHealthUnhealthy,
119 : ListMetadataHealthOutdated,
120 : ListSafekeepers,
121 : GetLeader,
122 : UpdateLeader,
123 : SetPreferredAzs,
124 : InsertTimeline,
125 : GetTimeline,
126 : InsertTimelineReconcile,
127 : RemoveTimelineReconcile,
128 : ListTimelineReconcile,
129 : }
130 :
131 : #[must_use]
132 : pub(crate) enum AbortShardSplitStatus {
133 : /// We aborted the split in the database by reverting to the parent shards
134 : Aborted,
135 : /// The split had already been persisted.
136 : Complete,
137 : }
138 :
139 : pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
140 :
141 : /// Some methods can operate on either a whole tenant or a single shard
142 : #[derive(Clone)]
143 : pub(crate) enum TenantFilter {
144 : Tenant(TenantId),
145 : Shard(TenantShardId),
146 : }
147 :
148 : /// Represents the results of looking up generation+pageserver for the shards of a tenant
149 : pub(crate) struct ShardGenerationState {
150 : pub(crate) tenant_shard_id: TenantShardId,
151 : pub(crate) generation: Option<Generation>,
152 : pub(crate) generation_pageserver: Option<NodeId>,
153 : }
154 :
155 : // A generous allowance for how many times we may retry serializable transactions
156 : // before giving up. This is not expected to be hit: it is a defensive measure in case we
157 : // somehow engineer a situation where duelling transactions might otherwise live-lock.
158 : const MAX_RETRIES: usize = 128;
159 :
160 : impl Persistence {
161 : // The default postgres connection limit is 100. We use up to 99, to leave one free for a human admin under
162 : // normal circumstances. This assumes we have exclusive use of the database cluster to which we connect.
163 : pub const MAX_CONNECTIONS: u32 = 99;
164 :
165 : // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
166 : const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
167 : const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
168 :
169 0 : pub async fn new(database_url: String) -> Self {
170 0 : let mut mgr_config = ManagerConfig::default();
171 0 : mgr_config.custom_setup = Box::new(establish_connection_rustls);
172 0 :
173 0 : let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
174 0 : database_url,
175 0 : mgr_config,
176 0 : );
177 :
178 : // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
179 : // to execute queries (database queries are not generally on latency-sensitive paths).
180 0 : let connection_pool = Pool::builder()
181 0 : .max_size(Self::MAX_CONNECTIONS)
182 0 : .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
183 0 : .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
184 0 : // Always keep at least one connection ready to go
185 0 : .min_idle(Some(1))
186 0 : .test_on_check_out(true)
187 0 : .build(manager)
188 0 : .await
189 0 : .expect("Could not build connection pool");
190 0 :
191 0 : Self { connection_pool }
192 0 : }
193 :
194 : /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
195 : /// database and the storage controller; the database might therefore not be available right away
196 0 : pub async fn await_connection(
197 0 : database_url: &str,
198 0 : timeout: Duration,
199 0 : ) -> Result<(), diesel::ConnectionError> {
200 0 : let started_at = Instant::now();
201 0 : log_postgres_connstr_info(database_url)
202 0 : .map_err(|e| diesel::ConnectionError::InvalidConnectionUrl(e.to_string()))?;
203 : loop {
204 0 : match establish_connection_rustls(database_url).await {
205 : Ok(_) => {
206 0 : tracing::info!("Connected to database.");
207 0 : return Ok(());
208 : }
209 0 : Err(e) => {
210 0 : if started_at.elapsed() > timeout {
211 0 : return Err(e);
212 : } else {
213 0 : tracing::info!("Database not yet available, waiting... ({e})");
214 0 : tokio::time::sleep(Duration::from_millis(100)).await;
215 : }
216 : }
217 : }
218 : }
219 0 : }
220 :
221 : /// Execute the diesel migrations that are built into this binary
222 0 : pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
223 : use diesel_migrations::{HarnessWithOutput, MigrationHarness};
224 :
225 : // Can't use self.with_conn here as we do spawn_blocking which requires static.
226 0 : let conn = self
227 0 : .connection_pool
228 0 : .dedicated_connection()
229 0 : .await
230 0 : .map_err(|e| DatabaseError::Migration(e.to_string()))?;
231 0 : let mut async_wrapper: AsyncConnectionWrapper<AsyncPgConnection> =
232 0 : AsyncConnectionWrapper::from(conn);
233 0 : tokio::task::spawn_blocking(move || {
234 0 : let mut retry_count = 0;
235 0 : loop {
236 0 : let result = HarnessWithOutput::write_to_stdout(&mut async_wrapper)
237 0 : .run_pending_migrations(MIGRATIONS)
238 0 : .map(|_| ())
239 0 : .map_err(|e| DatabaseError::Migration(e.to_string()));
240 0 : match result {
241 0 : Ok(r) => break Ok(r),
242 : Err(
243 0 : err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
244 0 : diesel::result::DatabaseErrorKind::SerializationFailure,
245 0 : _,
246 0 : )),
247 0 : ) => {
248 0 : retry_count += 1;
249 0 : if retry_count > MAX_RETRIES {
250 0 : tracing::error!(
251 0 : "Exceeded max retries on SerializationFailure errors: {err:?}"
252 : );
253 0 : break Err(err);
254 : } else {
255 : // Retry on serialization errors: these are expected, because even though our
256 : // transactions don't fight for the same rows, they will occasionally collide
257 : // on index pages (e.g. increment_generation for unrelated shards can collide)
258 0 : tracing::debug!(
259 0 : "Retrying transaction on serialization failure {err:?}"
260 : );
261 0 : continue;
262 : }
263 : }
264 0 : Err(e) => break Err(e),
265 : }
266 : }
267 0 : })
268 0 : .await
269 0 : .map_err(|e| DatabaseError::Migration(e.to_string()))??;
270 0 : Ok(())
271 0 : }
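The three startup-related methods above are typically used together. A minimal sketch, assuming a caller with a tokio runtime and a connection string; the function name and the 30-second timeout are illustrative, not part of this module:

```rust
// Illustrative sketch only: wires together await_connection, new and migration_run
// in the order a storage controller startup path would use them.
async fn example_bring_up_persistence(database_url: String) -> anyhow::Result<Persistence> {
    // Tolerate the database restarting concurrently with us (hypothetical 30s budget).
    Persistence::await_connection(&database_url, std::time::Duration::from_secs(30)).await?;

    // Build the bb8 connection pool (capped at MAX_CONNECTIONS).
    let persistence = Persistence::new(database_url).await;

    // Apply the embedded diesel migrations before serving any requests.
    persistence.migration_run().await?;

    Ok(persistence)
}
```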
272 :
273 : /// Wraps `with_conn` in order to collect latency and error metrics
274 0 : async fn with_measured_conn<'a, 'b, F, R>(
275 0 : &self,
276 0 : op: DatabaseOperation,
277 0 : func: F,
278 0 : ) -> DatabaseResult<R>
279 0 : where
280 0 : F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
281 0 : + Send
282 0 : + std::marker::Sync
283 0 : + 'a,
284 0 : R: Send + 'b,
285 0 : {
286 0 : let latency = &METRICS_REGISTRY
287 0 : .metrics_group
288 0 : .storage_controller_database_query_latency;
289 0 : let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });
290 :
291 0 : let res = self.with_conn(func).await;
292 :
293 0 : if let Err(err) = &res {
294 0 : let error_counter = &METRICS_REGISTRY
295 0 : .metrics_group
296 0 : .storage_controller_database_query_error;
297 0 : error_counter.inc(DatabaseQueryErrorLabelGroup {
298 0 : error_type: err.error_label(),
299 0 : operation: op,
300 0 : })
301 0 : }
302 :
303 0 : res
304 0 : }
305 :
306 : /// Call the provided function with a Diesel database connection in a retry loop
307 0 : async fn with_conn<'a, 'b, F, R>(&self, func: F) -> DatabaseResult<R>
308 0 : where
309 0 : F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
310 0 : + Send
311 0 : + std::marker::Sync
312 0 : + 'a,
313 0 : R: Send + 'b,
314 0 : {
315 0 : let mut retry_count = 0;
316 : loop {
317 0 : let mut conn = self.connection_pool.get().await?;
318 0 : match conn
319 0 : .build_transaction()
320 0 : .serializable()
321 0 : .run(|c| func(c))
322 0 : .await
323 : {
324 0 : Ok(r) => break Ok(r),
325 : Err(
326 0 : err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
327 0 : diesel::result::DatabaseErrorKind::SerializationFailure,
328 0 : _,
329 0 : )),
330 0 : ) => {
331 0 : retry_count += 1;
332 0 : if retry_count > MAX_RETRIES {
333 0 : tracing::error!(
334 0 : "Exceeded max retries on SerializationFailure errors: {err:?}"
335 : );
336 0 : break Err(err);
337 : } else {
338 : // Retry on serialization errors: these are expected, because even though our
339 : // transactions don't fight for the same rows, they will occasionally collide
340 : // on index pages (e.g. increment_generation for unrelated shards can collide)
341 0 : tracing::debug!("Retrying transaction on serialization failure {err:?}");
342 0 : continue;
343 : }
344 : }
345 0 : Err(e) => break Err(e),
346 : }
347 : }
348 0 : }
349 :
350 : /// When a node is first registered, persist it before using it for anything
351 0 : pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
352 0 : let np = &node.to_persistent();
353 0 : self.with_measured_conn(DatabaseOperation::InsertNode, move |conn| {
354 0 : Box::pin(async move {
355 0 : diesel::insert_into(crate::schema::nodes::table)
356 0 : .values(np)
357 0 : .execute(conn)
358 0 : .await?;
359 0 : Ok(())
360 0 : })
361 0 : })
362 0 : .await
363 0 : }
364 :
365 : /// At startup, populate the list of nodes which our shards may be placed on
366 0 : pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
367 0 : let nodes: Vec<NodePersistence> = self
368 0 : .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
369 0 : Box::pin(async move {
370 0 : Ok(crate::schema::nodes::table
371 0 : .load::<NodePersistence>(conn)
372 0 : .await?)
373 0 : })
374 0 : })
375 0 : .await?;
376 :
377 0 : tracing::info!("list_nodes: loaded {} nodes", nodes.len());
378 :
379 0 : Ok(nodes)
380 0 : }
381 :
382 0 : pub(crate) async fn update_node<V>(
383 0 : &self,
384 0 : input_node_id: NodeId,
385 0 : values: V,
386 0 : ) -> DatabaseResult<()>
387 0 : where
388 0 : V: diesel::AsChangeset<Target = crate::schema::nodes::table> + Clone + Send + Sync,
389 0 : V::Changeset: diesel::query_builder::QueryFragment<diesel::pg::Pg> + Send, // valid Postgres SQL
390 0 : {
391 : use crate::schema::nodes::dsl::*;
392 0 : let updated = self
393 0 : .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
394 0 : let values = values.clone();
395 0 : Box::pin(async move {
396 0 : let updated = diesel::update(nodes)
397 0 : .filter(node_id.eq(input_node_id.0 as i64))
398 0 : .set(values)
399 0 : .execute(conn)
400 0 : .await?;
401 0 : Ok(updated)
402 0 : })
403 0 : })
404 0 : .await?;
405 :
406 0 : if updated != 1 {
407 0 : Err(DatabaseError::Logical(format!(
408 0 : "Node {node_id:?} not found for update",
409 0 : )))
410 : } else {
411 0 : Ok(())
412 : }
413 0 : }
414 :
415 0 : pub(crate) async fn update_node_scheduling_policy(
416 0 : &self,
417 0 : input_node_id: NodeId,
418 0 : input_scheduling: NodeSchedulingPolicy,
419 0 : ) -> DatabaseResult<()> {
420 : use crate::schema::nodes::dsl::*;
421 0 : self.update_node(
422 0 : input_node_id,
423 0 : scheduling_policy.eq(String::from(input_scheduling)),
424 0 : )
425 0 : .await
426 0 : }
427 :
428 0 : pub(crate) async fn update_node_on_registration(
429 0 : &self,
430 0 : input_node_id: NodeId,
431 0 : input_https_port: Option<u16>,
432 0 : ) -> DatabaseResult<()> {
433 : use crate::schema::nodes::dsl::*;
434 0 : self.update_node(
435 0 : input_node_id,
436 0 : listen_https_port.eq(input_https_port.map(|x| x as i32)),
437 0 : )
438 0 : .await
439 0 : }
440 :
441 : /// At startup, load the high level state for shards, such as their config + policy. This will
442 : /// be enriched at runtime with state discovered on pageservers.
443 : ///
444 : /// We exclude shards configured to be detached. During startup, if we see any attached locations
445 : /// for such shards, they will automatically be detached as 'orphans'.
446 0 : pub(crate) async fn load_active_tenant_shards(
447 0 : &self,
448 0 : ) -> DatabaseResult<Vec<TenantShardPersistence>> {
449 : use crate::schema::tenant_shards::dsl::*;
450 0 : self.with_measured_conn(DatabaseOperation::ListTenantShards, move |conn| {
451 0 : Box::pin(async move {
452 0 : let query = tenant_shards.filter(
453 0 : placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
454 0 : );
455 0 : let result = query.load::<TenantShardPersistence>(conn).await?;
456 :
457 0 : Ok(result)
458 0 : })
459 0 : })
460 0 : .await
461 0 : }
462 :
463 : /// When restoring a previously detached tenant into memory, load it from the database
464 0 : pub(crate) async fn load_tenant(
465 0 : &self,
466 0 : filter_tenant_id: TenantId,
467 0 : ) -> DatabaseResult<Vec<TenantShardPersistence>> {
468 : use crate::schema::tenant_shards::dsl::*;
469 0 : self.with_measured_conn(DatabaseOperation::LoadTenant, move |conn| {
470 0 : Box::pin(async move {
471 0 : let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
472 0 : let result = query.load::<TenantShardPersistence>(conn).await?;
473 :
474 0 : Ok(result)
475 0 : })
476 0 : })
477 0 : .await
478 0 : }
479 :
480 : /// Tenants must be persisted before we schedule them for the first time. This enables us
481 : /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
482 0 : pub(crate) async fn insert_tenant_shards(
483 0 : &self,
484 0 : shards: Vec<TenantShardPersistence>,
485 0 : ) -> DatabaseResult<()> {
486 : use crate::schema::{metadata_health, tenant_shards};
487 :
488 0 : let now = chrono::Utc::now();
489 0 :
490 0 : let metadata_health_records = shards
491 0 : .iter()
492 0 : .map(|t| MetadataHealthPersistence {
493 0 : tenant_id: t.tenant_id.clone(),
494 0 : shard_number: t.shard_number,
495 0 : shard_count: t.shard_count,
496 0 : healthy: true,
497 0 : last_scrubbed_at: now,
498 0 : })
499 0 : .collect::<Vec<_>>();
500 0 :
501 0 : let shards = &shards;
502 0 : let metadata_health_records = &metadata_health_records;
503 0 : self.with_measured_conn(DatabaseOperation::InsertTenantShards, move |conn| {
504 0 : Box::pin(async move {
505 0 : diesel::insert_into(tenant_shards::table)
506 0 : .values(shards)
507 0 : .execute(conn)
508 0 : .await?;
509 :
510 0 : diesel::insert_into(metadata_health::table)
511 0 : .values(metadata_health_records)
512 0 : .execute(conn)
513 0 : .await?;
514 0 : Ok(())
515 0 : })
516 0 : })
517 0 : .await
518 0 : }
519 :
520 : /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
521 : /// the tenant from memory on this server.
522 0 : pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
523 : use crate::schema::tenant_shards::dsl::*;
524 0 : self.with_measured_conn(DatabaseOperation::DeleteTenant, move |conn| {
525 0 : Box::pin(async move {
526 0 : // `metadata_health` status (if exists) is also deleted based on the cascade behavior.
527 0 : diesel::delete(tenant_shards)
528 0 : .filter(tenant_id.eq(del_tenant_id.to_string()))
529 0 : .execute(conn)
530 0 : .await?;
531 0 : Ok(())
532 0 : })
533 0 : })
534 0 : .await
535 0 : }
536 :
537 0 : pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
538 : use crate::schema::nodes::dsl::*;
539 0 : self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| {
540 0 : Box::pin(async move {
541 0 : diesel::delete(nodes)
542 0 : .filter(node_id.eq(del_node_id.0 as i64))
543 0 : .execute(conn)
544 0 : .await?;
545 :
546 0 : Ok(())
547 0 : })
548 0 : })
549 0 : .await
550 0 : }
551 :
552 : /// When a tenant invokes the /re-attach API, this function is responsible for doing an efficient
553 : /// batched increment of the generations of all tenants whose generation_pageserver is equal to
554 : /// the node that called /re-attach.
555 : #[tracing::instrument(skip_all, fields(node_id))]
556 : pub(crate) async fn re_attach(
557 : &self,
558 : input_node_id: NodeId,
559 : ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
560 : use crate::schema::nodes::dsl::{scheduling_policy, *};
561 : use crate::schema::tenant_shards::dsl::*;
562 : let updated = self
563 0 : .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
564 0 : Box::pin(async move {
565 0 : let rows_updated = diesel::update(tenant_shards)
566 0 : .filter(generation_pageserver.eq(input_node_id.0 as i64))
567 0 : .set(generation.eq(generation + 1))
568 0 : .execute(conn)
569 0 : .await?;
570 :
571 0 : tracing::info!("Incremented {} tenants' generations", rows_updated);
572 :
573 : // TODO: UPDATE+SELECT in one query
574 :
575 0 : let updated = tenant_shards
576 0 : .filter(generation_pageserver.eq(input_node_id.0 as i64))
577 0 : .select(TenantShardPersistence::as_select())
578 0 : .load(conn)
579 0 : .await?;
580 :
581 : // If the node went through a drain and restart phase before re-attaching,
582 : // then reset it's node scheduling policy to active.
583 0 : diesel::update(nodes)
584 0 : .filter(node_id.eq(input_node_id.0 as i64))
585 0 : .filter(
586 0 : scheduling_policy
587 0 : .eq(String::from(NodeSchedulingPolicy::PauseForRestart))
588 0 : .or(scheduling_policy
589 0 : .eq(String::from(NodeSchedulingPolicy::Draining)))
590 0 : .or(scheduling_policy
591 0 : .eq(String::from(NodeSchedulingPolicy::Filling))),
592 0 : )
593 0 : .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active)))
594 0 : .execute(conn)
595 0 : .await?;
596 :
597 0 : Ok(updated)
598 0 : })
599 0 : })
600 : .await?;
601 :
602 : let mut result = HashMap::new();
603 : for tsp in updated {
604 : let tenant_shard_id = TenantShardId {
605 : tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
606 0 : .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
607 : shard_number: ShardNumber(tsp.shard_number as u8),
608 : shard_count: ShardCount::new(tsp.shard_count as u8),
609 : };
610 :
611 : let Some(g) = tsp.generation else {
612 : // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
613 : // we only set generation_pageserver when setting generation.
614 : return Err(DatabaseError::Logical(
615 : "Generation should always be set after incrementing".to_string(),
616 : ));
617 : };
618 : result.insert(tenant_shard_id, Generation::new(g as u32));
619 : }
620 :
621 : Ok(result)
622 : }
623 :
624 : /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
625 : /// advancing generation number. We also store the NodeId for which the generation was issued, so that in
626 : /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
627 0 : pub(crate) async fn increment_generation(
628 0 : &self,
629 0 : tenant_shard_id: TenantShardId,
630 0 : node_id: NodeId,
631 0 : ) -> anyhow::Result<Generation> {
632 : use crate::schema::tenant_shards::dsl::*;
633 0 : let updated = self
634 0 : .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
635 0 : Box::pin(async move {
636 0 : let updated = diesel::update(tenant_shards)
637 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
638 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
639 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
640 0 : .set((
641 0 : generation.eq(generation + 1),
642 0 : generation_pageserver.eq(node_id.0 as i64),
643 0 : ))
644 0 : // TODO: only returning() the generation column
645 0 : .returning(TenantShardPersistence::as_returning())
646 0 : .get_result(conn)
647 0 : .await?;
648 :
649 0 : Ok(updated)
650 0 : })
651 0 : })
652 0 : .await?;
653 :
654 : // Generation is always non-null in the result: if the generation column had been NULL, then we
655 : // should have experienced an SQL conflict error while executing a query that tries to increment it.
656 0 : debug_assert!(updated.generation.is_some());
657 0 : let Some(g) = updated.generation else {
658 0 : return Err(DatabaseError::Logical(
659 0 : "Generation should always be set after incrementing".to_string(),
660 0 : )
661 0 : .into());
662 : };
663 :
664 0 : Ok(Generation::new(g as u32))
665 0 : }
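A sketch of how the two generation paths relate: `increment_generation` for a single shard immediately before an attach, and the batched path in [`Self::re_attach`] when a whole pageserver restarts. The function and its logging are illustrative, assuming the IDs are already known to the caller:

```rust
// Illustrative sketch; not part of this module's API surface.
async fn example_generation_paths(
    persistence: &Persistence,
    tenant_shard_id: TenantShardId,
    node_id: NodeId,
) -> anyhow::Result<()> {
    // Single shard: bump the generation and record which node it was issued to,
    // immediately before attaching that shard to `node_id`.
    let generation = persistence
        .increment_generation(tenant_shard_id, node_id)
        .await?;
    tracing::info!("issued {generation:?} for {tenant_shard_id} on node {node_id}");

    // Whole node: when the same pageserver later calls /re-attach, every shard whose
    // generation_pageserver matches it has its generation incremented in one batched UPDATE.
    let reattached = persistence.re_attach(node_id).await?;
    tracing::info!("re-attach incremented {} shard generations", reattached.len());

    Ok(())
}
```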
666 :
667 : /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
668 : /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
669 : /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
670 : /// latest generation)
671 : ///
672 : /// If the tenant doesn't exist, an empty vector is returned.
673 : ///
674 : /// Output is sorted by shard number
675 0 : pub(crate) async fn tenant_generations(
676 0 : &self,
677 0 : filter_tenant_id: TenantId,
678 0 : ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
679 : use crate::schema::tenant_shards::dsl::*;
680 0 : let rows = self
681 0 : .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
682 0 : Box::pin(async move {
683 0 : let result = tenant_shards
684 0 : .filter(tenant_id.eq(filter_tenant_id.to_string()))
685 0 : .select(TenantShardPersistence::as_select())
686 0 : .order(shard_number)
687 0 : .load(conn)
688 0 : .await?;
689 0 : Ok(result)
690 0 : })
691 0 : })
692 0 : .await?;
693 :
694 0 : Ok(rows
695 0 : .into_iter()
696 0 : .map(|p| ShardGenerationState {
697 0 : tenant_shard_id: p
698 0 : .get_tenant_shard_id()
699 0 : .expect("Corrupt tenant shard id in database"),
700 0 : generation: p.generation.map(|g| Generation::new(g as u32)),
701 0 : generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
702 0 : })
703 0 : .collect())
704 0 : }
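The doc comment above describes a read, call-out, re-check pattern. A minimal sketch of that re-check, assuming the timeline CRUD calls happen where indicated; the helper and its return convention are hypothetical:

```rust
// Illustrative sketch of the "re-check the generation afterwards" pattern.
async fn example_generation_recheck(
    persistence: &Persistence,
    tenant_id: TenantId,
) -> anyhow::Result<bool> {
    // Both reads are sorted by shard number, so positional comparison is meaningful.
    let before = persistence.tenant_generations(tenant_id).await?;

    // ... issue the timeline CRUD requests to the attached locations here ...

    let after = persistence.tenant_generations(tenant_id).await?;

    // The operation is only known to be durable in the latest generation if no shard's
    // generation advanced while we were talking to the pageservers.
    let unchanged = before.len() == after.len()
        && before.iter().zip(after.iter()).all(|(b, a)| {
            b.tenant_shard_id == a.tenant_shard_id && b.generation == a.generation
        });
    Ok(unchanged)
}
```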
705 :
706 : /// Read the generation number of specific tenant shards
707 : ///
708 : /// Output is unsorted. Output may not include values for all inputs, if they are missing in the database.
709 0 : pub(crate) async fn shard_generations(
710 0 : &self,
711 0 : mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
712 0 : ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
713 0 : let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);
714 :
715 : // We will chunk our input to avoid composing arbitrarily long `IN` clauses. Typically we are
716 : // called with a single digit number of IDs, but in principle we could be called with tens
717 : // of thousands (all the shards on one pageserver) from the generation validation API.
718 0 : loop {
719 0 : // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
720 0 : // large query strings.
721 0 : let chunk_ids = tenant_shard_ids.by_ref().take(32);
722 0 :
723 0 : // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
724 0 : let in_clause = chunk_ids
725 0 : .map(|tsid| {
726 0 : format!(
727 0 : "('{}', {}, {})",
728 0 : tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
729 0 : )
730 0 : })
731 0 : .join(",");
732 0 :
733 0 : // We are done when our iterator gives us nothing to filter on
734 0 : if in_clause.is_empty() {
735 0 : break;
736 0 : }
737 0 :
738 0 : let in_clause = &in_clause;
739 0 : let chunk_rows = self
740 0 : .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
741 0 : Box::pin(async move {
742 : // diesel doesn't support multi-column IN queries, so we compose raw SQL. No escaping is required because
743 : // the inputs are strongly typed and cannot carry any user-supplied raw string content.
744 0 : let result : Vec<TenantShardPersistence> = diesel::sql_query(
745 0 : format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
746 0 : ).load(conn).await?;
747 :
748 0 : Ok(result)
749 0 : })
750 0 : })
751 0 : .await?;
752 0 : rows.extend(chunk_rows.into_iter())
753 : }
754 :
755 0 : Ok(rows
756 0 : .into_iter()
757 0 : .map(|tsp| {
758 0 : (
759 0 : tsp.get_tenant_shard_id()
760 0 : .expect("Bad tenant ID in database"),
761 0 : tsp.generation.map(|g| Generation::new(g as u32)),
762 0 : )
763 0 : })
764 0 : .collect())
765 0 : }
766 :
767 : #[allow(non_local_definitions)]
768 : /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
769 : ///
770 : /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
771 : /// API: use [`Self::increment_generation`] instead. Setting the generation via this route is a one-time thing
772 : /// that we only do the first time a tenant is set to an attached policy via /location_config.
773 0 : pub(crate) async fn update_tenant_shard(
774 0 : &self,
775 0 : tenant: TenantFilter,
776 0 : input_placement_policy: Option<PlacementPolicy>,
777 0 : input_config: Option<TenantConfig>,
778 0 : input_generation: Option<Generation>,
779 0 : input_scheduling_policy: Option<ShardSchedulingPolicy>,
780 0 : ) -> DatabaseResult<()> {
781 : use crate::schema::tenant_shards::dsl::*;
782 :
783 0 : let tenant = &tenant;
784 0 : let input_placement_policy = &input_placement_policy;
785 0 : let input_config = &input_config;
786 0 : let input_generation = &input_generation;
787 0 : let input_scheduling_policy = &input_scheduling_policy;
788 0 : self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
789 0 : Box::pin(async move {
790 0 : let query = match tenant {
791 0 : TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
792 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
793 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
794 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
795 0 : .into_boxed(),
796 0 : TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
797 0 : .filter(tenant_id.eq(input_tenant_id.to_string()))
798 0 : .into_boxed(),
799 : };
800 :
801 : // Clear generation_pageserver if we are moving into a state where we won't have
802 : // any attached pageservers.
803 0 : let input_generation_pageserver = match input_placement_policy {
804 0 : None | Some(PlacementPolicy::Attached(_)) => None,
805 0 : Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
806 : };
807 :
808 0 : #[derive(AsChangeset)]
809 : #[diesel(table_name = crate::schema::tenant_shards)]
810 : struct ShardUpdate {
811 : generation: Option<i32>,
812 : placement_policy: Option<String>,
813 : config: Option<String>,
814 : scheduling_policy: Option<String>,
815 : generation_pageserver: Option<Option<i64>>,
816 : }
817 :
818 0 : let update = ShardUpdate {
819 0 : generation: input_generation.map(|g| g.into().unwrap() as i32),
820 0 : placement_policy: input_placement_policy
821 0 : .as_ref()
822 0 : .map(|p| serde_json::to_string(&p).unwrap()),
823 0 : config: input_config
824 0 : .as_ref()
825 0 : .map(|c| serde_json::to_string(&c).unwrap()),
826 0 : scheduling_policy: input_scheduling_policy
827 0 : .map(|p| serde_json::to_string(&p).unwrap()),
828 0 : generation_pageserver: input_generation_pageserver,
829 0 : };
830 0 :
831 0 : query.set(update).execute(conn).await?;
832 :
833 0 : Ok(())
834 0 : })
835 0 : })
836 0 : .await?;
837 :
838 0 : Ok(())
839 0 : }
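A sketch of a typical call, assuming the caller wants to durably record that a tenant is detached while leaving everything else untouched (passing `None` means "do not change this field"); the function name is illustrative:

```rust
// Illustrative sketch: persist a placement policy change for every shard of a tenant.
async fn example_persist_detach_policy(
    persistence: &Persistence,
    tenant_id: TenantId,
) -> DatabaseResult<()> {
    persistence
        .update_tenant_shard(
            TenantFilter::Tenant(tenant_id),
            Some(PlacementPolicy::Detached),
            None, // TenantConfig unchanged
            None, // generation unchanged (use increment_generation for that)
            None, // scheduling policy unchanged
        )
        .await
}
```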
840 :
841 : /// Note that passing None for a shard clears the preferred AZ (rather than leaving it unmodified)
842 0 : pub(crate) async fn set_tenant_shard_preferred_azs(
843 0 : &self,
844 0 : preferred_azs: Vec<(TenantShardId, Option<AvailabilityZone>)>,
845 0 : ) -> DatabaseResult<Vec<(TenantShardId, Option<AvailabilityZone>)>> {
846 : use crate::schema::tenant_shards::dsl::*;
847 :
848 0 : let preferred_azs = preferred_azs.as_slice();
849 0 : self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
850 0 : Box::pin(async move {
851 0 : let mut shards_updated = Vec::default();
852 :
853 0 : for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
854 0 : let updated = diesel::update(tenant_shards)
855 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
856 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
857 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
858 0 : .set(preferred_az_id.eq(preferred_az.as_ref().map(|az| az.0.clone())))
859 0 : .execute(conn)
860 0 : .await?;
861 :
862 0 : if updated == 1 {
863 0 : shards_updated.push((*tenant_shard_id, preferred_az.clone()));
864 0 : }
865 : }
866 :
867 0 : Ok(shards_updated)
868 0 : })
869 0 : })
870 0 : .await
871 0 : }
872 :
873 0 : pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
874 : use crate::schema::tenant_shards::dsl::*;
875 0 : self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
876 0 : Box::pin(async move {
877 0 : let updated = diesel::update(tenant_shards)
878 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
879 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
880 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
881 0 : .set((
882 0 : generation_pageserver.eq(Option::<i64>::None),
883 0 : placement_policy
884 0 : .eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
885 0 : ))
886 0 : .execute(conn)
887 0 : .await?;
888 :
889 0 : Ok(updated)
890 0 : })
891 0 : })
892 0 : .await?;
893 :
894 0 : Ok(())
895 0 : }
896 :
897 : // When we start shard splitting, we must durably mark the tenant so that
898 : // on restart, we know that we must go through recovery.
899 : //
900 : // We create the child shards here, so that they will be available for increment_generation calls
901 : // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
902 0 : pub(crate) async fn begin_shard_split(
903 0 : &self,
904 0 : old_shard_count: ShardCount,
905 0 : split_tenant_id: TenantId,
906 0 : parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
907 0 : ) -> DatabaseResult<()> {
908 : use crate::schema::tenant_shards::dsl::*;
909 0 : let parent_to_children = parent_to_children.as_slice();
910 0 : self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| {
911 0 : Box::pin(async move {
912 : // Mark parent shards as splitting
913 :
914 0 : let updated = diesel::update(tenant_shards)
915 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
916 0 : .filter(shard_count.eq(old_shard_count.literal() as i32))
917 0 : .set((splitting.eq(1),))
918 0 : .execute(conn).await?;
919 0 : if u8::try_from(updated)
920 0 : .map_err(|_| DatabaseError::Logical(
921 0 : format!("Overflow existing shard count {} while splitting", updated))
922 0 : )? != old_shard_count.count() {
923 : // Perhaps a deletion or another split raced with this attempt to split, mutating
924 : // the parent shards that we intend to split. In this case the split request should fail.
925 0 : return Err(DatabaseError::Logical(
926 0 : format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
927 0 : ));
928 0 : }
929 0 :
930 0 : // FIXME: spurious clone to sidestep closure move rules
931 0 : let parent_to_children = parent_to_children.to_vec();
932 :
933 : // Insert child shards
934 0 : for (parent_shard_id, children) in parent_to_children {
935 0 : let mut parent = crate::schema::tenant_shards::table
936 0 : .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
937 0 : .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
938 0 : .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
939 0 : .load::<TenantShardPersistence>(conn).await?;
940 0 : let parent = if parent.len() != 1 {
941 0 : return Err(DatabaseError::Logical(format!(
942 0 : "Parent shard {parent_shard_id} not found"
943 0 : )));
944 : } else {
945 0 : parent.pop().unwrap()
946 : };
947 0 : for mut shard in children {
948 : // Carry the parent's generation into the child
949 0 : shard.generation = parent.generation;
950 0 :
951 0 : debug_assert!(shard.splitting == SplitState::Splitting);
952 0 : diesel::insert_into(tenant_shards)
953 0 : .values(shard)
954 0 : .execute(conn).await?;
955 : }
956 : }
957 :
958 0 : Ok(())
959 0 : })
960 0 : })
961 0 : .await
962 0 : }
963 :
964 : // When we finish shard splitting, we must atomically clean up the old shards
965 : // and insert the new shards, and clear the splitting marker.
966 0 : pub(crate) async fn complete_shard_split(
967 0 : &self,
968 0 : split_tenant_id: TenantId,
969 0 : old_shard_count: ShardCount,
970 0 : new_shard_count: ShardCount,
971 0 : ) -> DatabaseResult<()> {
972 : use crate::schema::tenant_shards::dsl::*;
973 0 : self.with_measured_conn(DatabaseOperation::CompleteShardSplit, move |conn| {
974 0 : Box::pin(async move {
975 0 : // Sanity: child shards must still exist, as we're deleting parent shards
976 0 : let child_shards_query = tenant_shards
977 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
978 0 : .filter(shard_count.eq(new_shard_count.literal() as i32));
979 0 : let child_shards = child_shards_query
980 0 : .load::<TenantShardPersistence>(conn)
981 0 : .await?;
982 0 : if child_shards.len() != new_shard_count.count() as usize {
983 0 : return Err(DatabaseError::Logical(format!(
984 0 : "Unexpected child shard count {} while completing split to \
985 0 : count {new_shard_count:?} on tenant {split_tenant_id}",
986 0 : child_shards.len()
987 0 : )));
988 0 : }
989 0 :
990 0 : // Drop parent shards
991 0 : diesel::delete(tenant_shards)
992 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
993 0 : .filter(shard_count.eq(old_shard_count.literal() as i32))
994 0 : .execute(conn)
995 0 : .await?;
996 :
997 : // Clear the splitting flag
998 0 : let updated = diesel::update(tenant_shards)
999 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1000 0 : .set((splitting.eq(0),))
1001 0 : .execute(conn)
1002 0 : .await?;
1003 0 : debug_assert!(updated > 0);
1004 :
1005 0 : Ok(())
1006 0 : })
1007 0 : })
1008 0 : .await
1009 0 : }
1010 :
1011 : /// Used when the remote part of a shard split failed: we will revert the database state to have only
1012 : /// the parent shards, with SplitState::Idle.
1013 0 : pub(crate) async fn abort_shard_split(
1014 0 : &self,
1015 0 : split_tenant_id: TenantId,
1016 0 : new_shard_count: ShardCount,
1017 0 : ) -> DatabaseResult<AbortShardSplitStatus> {
1018 : use crate::schema::tenant_shards::dsl::*;
1019 0 : self.with_measured_conn(DatabaseOperation::AbortShardSplit, move |conn| {
1020 0 : Box::pin(async move {
1021 : // Clear the splitting state on parent shards
1022 0 : let updated = diesel::update(tenant_shards)
1023 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1024 0 : .filter(shard_count.ne(new_shard_count.literal() as i32))
1025 0 : .set((splitting.eq(0),))
1026 0 : .execute(conn)
1027 0 : .await?;
1028 :
1029 : // Parent shards are already gone: we cannot abort.
1030 0 : if updated == 0 {
1031 0 : return Ok(AbortShardSplitStatus::Complete);
1032 0 : }
1033 0 :
1034 0 : // Sanity check: if parent shards were present, their cardinality should
1035 0 : // be less than the number of child shards.
1036 0 : if updated >= new_shard_count.count() as usize {
1037 0 : return Err(DatabaseError::Logical(format!(
1038 0 : "Unexpected parent shard count {updated} while aborting split to \
1039 0 : count {new_shard_count:?} on tenant {split_tenant_id}"
1040 0 : )));
1041 0 : }
1042 0 :
1043 0 : // Erase child shards
1044 0 : diesel::delete(tenant_shards)
1045 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1046 0 : .filter(shard_count.eq(new_shard_count.literal() as i32))
1047 0 : .execute(conn)
1048 0 : .await?;
1049 :
1050 0 : Ok(AbortShardSplitStatus::Aborted)
1051 0 : })
1052 0 : })
1053 0 : .await
1054 0 : }
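The three split methods above form one lifecycle: mark parents and insert children, do the remote work, then either complete or abort. A sketch under the assumption that the remote pageserver split is represented by a caller-supplied future; the function name is hypothetical:

```rust
// Illustrative sketch of the begin/complete/abort split lifecycle.
async fn example_shard_split(
    persistence: &Persistence,
    tenant_id: TenantId,
    old_shard_count: ShardCount,
    new_shard_count: ShardCount,
    parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
    do_remote_split: impl std::future::Future<Output = anyhow::Result<()>>,
) -> anyhow::Result<()> {
    // Durably mark the parents as splitting and insert the child rows, so children can
    // already serve increment_generation if a pageserver restarts mid-split.
    persistence
        .begin_shard_split(old_shard_count, tenant_id, parent_to_children)
        .await?;

    match do_remote_split.await {
        Ok(()) => {
            // Atomically drop the parents and clear the splitting marker.
            persistence
                .complete_shard_split(tenant_id, old_shard_count, new_shard_count)
                .await?;
        }
        Err(e) => {
            // Revert the database to parents-only with SplitState::Idle, unless the
            // split had already been persisted as complete.
            match persistence.abort_shard_split(tenant_id, new_shard_count).await? {
                AbortShardSplitStatus::Aborted => tracing::warn!("split aborted: {e:#}"),
                AbortShardSplitStatus::Complete => {
                    tracing::warn!("split already persisted, not aborting: {e:#}")
                }
            }
        }
    }
    Ok(())
}
```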
1055 :
1056 : /// Stores all the latest metadata health updates durably. Updates existing entry on conflict.
1057 : ///
1058 : /// **Correctness:** `metadata_health_updates` should all belong to the tenant shards managed by the storage controller.
1059 : #[allow(dead_code)]
1060 0 : pub(crate) async fn update_metadata_health_records(
1061 0 : &self,
1062 0 : healthy_records: Vec<MetadataHealthPersistence>,
1063 0 : unhealthy_records: Vec<MetadataHealthPersistence>,
1064 0 : now: chrono::DateTime<chrono::Utc>,
1065 0 : ) -> DatabaseResult<()> {
1066 : use crate::schema::metadata_health::dsl::*;
1067 :
1068 0 : let healthy_records = healthy_records.as_slice();
1069 0 : let unhealthy_records = unhealthy_records.as_slice();
1070 0 : self.with_measured_conn(DatabaseOperation::UpdateMetadataHealth, move |conn| {
1071 0 : Box::pin(async move {
1072 0 : diesel::insert_into(metadata_health)
1073 0 : .values(healthy_records)
1074 0 : .on_conflict((tenant_id, shard_number, shard_count))
1075 0 : .do_update()
1076 0 : .set((healthy.eq(true), last_scrubbed_at.eq(now)))
1077 0 : .execute(conn)
1078 0 : .await?;
1079 :
1080 0 : diesel::insert_into(metadata_health)
1081 0 : .values(unhealthy_records)
1082 0 : .on_conflict((tenant_id, shard_number, shard_count))
1083 0 : .do_update()
1084 0 : .set((healthy.eq(false), last_scrubbed_at.eq(now)))
1085 0 : .execute(conn)
1086 0 : .await?;
1087 0 : Ok(())
1088 0 : })
1089 0 : })
1090 0 : .await
1091 0 : }
1092 :
1093 : /// Lists all the metadata health records.
1094 : #[allow(dead_code)]
1095 0 : pub(crate) async fn list_metadata_health_records(
1096 0 : &self,
1097 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1098 0 : self.with_measured_conn(DatabaseOperation::ListMetadataHealth, move |conn| {
1099 0 : Box::pin(async {
1100 0 : Ok(crate::schema::metadata_health::table
1101 0 : .load::<MetadataHealthPersistence>(conn)
1102 0 : .await?)
1103 0 : })
1104 0 : })
1105 0 : .await
1106 0 : }
1107 :
1108 : /// Lists all the metadata health records that are unhealthy.
1109 : #[allow(dead_code)]
1110 0 : pub(crate) async fn list_unhealthy_metadata_health_records(
1111 0 : &self,
1112 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1113 : use crate::schema::metadata_health::dsl::*;
1114 0 : self.with_measured_conn(
1115 0 : DatabaseOperation::ListMetadataHealthUnhealthy,
1116 0 : move |conn| {
1117 0 : Box::pin(async {
1118 0 : DatabaseResult::Ok(
1119 0 : crate::schema::metadata_health::table
1120 0 : .filter(healthy.eq(false))
1121 0 : .load::<MetadataHealthPersistence>(conn)
1122 0 : .await?,
1123 : )
1124 0 : })
1125 0 : },
1126 0 : )
1127 0 : .await
1128 0 : }
1129 :
1130 : /// Lists all the metadata health records that have not been updated since an `earlier` time.
1131 : #[allow(dead_code)]
1132 0 : pub(crate) async fn list_outdated_metadata_health_records(
1133 0 : &self,
1134 0 : earlier: chrono::DateTime<chrono::Utc>,
1135 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1136 : use crate::schema::metadata_health::dsl::*;
1137 :
1138 0 : self.with_measured_conn(DatabaseOperation::ListMetadataHealthOutdated, move |conn| {
1139 0 : Box::pin(async move {
1140 0 : let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
1141 0 : let res = query.load::<MetadataHealthPersistence>(conn).await?;
1142 :
1143 0 : Ok(res)
1144 0 : })
1145 0 : })
1146 0 : .await
1147 0 : }
1148 :
1149 : /// Get the current entry from the `leader` table if one exists.
1150 : /// It is an error for the table to contain more than one entry.
1151 0 : pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
1152 0 : let mut leader: Vec<ControllerPersistence> = self
1153 0 : .with_measured_conn(DatabaseOperation::GetLeader, move |conn| {
1154 0 : Box::pin(async move {
1155 0 : Ok(crate::schema::controllers::table
1156 0 : .load::<ControllerPersistence>(conn)
1157 0 : .await?)
1158 0 : })
1159 0 : })
1160 0 : .await?;
1161 :
1162 0 : if leader.len() > 1 {
1163 0 : return Err(DatabaseError::Logical(format!(
1164 0 : "More than one entry present in the leader table: {leader:?}"
1165 0 : )));
1166 0 : }
1167 0 :
1168 0 : Ok(leader.pop())
1169 0 : }
1170 :
1171 : /// Update the leader entry with compare-exchange semantics. If `prev` does not
1172 : /// match the current leader entry, then the update is treated as a failure.
1173 : /// When `prev` is not specified, the update is forced.
1174 0 : pub(crate) async fn update_leader(
1175 0 : &self,
1176 0 : prev: Option<ControllerPersistence>,
1177 0 : new: ControllerPersistence,
1178 0 : ) -> DatabaseResult<()> {
1179 : use crate::schema::controllers::dsl::*;
1180 :
1181 0 : let updated = self
1182 0 : .with_measured_conn(DatabaseOperation::UpdateLeader, move |conn| {
1183 0 : let prev = prev.clone();
1184 0 : let new = new.clone();
1185 0 : Box::pin(async move {
1186 0 : let updated = match &prev {
1187 0 : Some(prev) => {
1188 0 : diesel::update(controllers)
1189 0 : .filter(address.eq(prev.address.clone()))
1190 0 : .filter(started_at.eq(prev.started_at))
1191 0 : .set((
1192 0 : address.eq(new.address.clone()),
1193 0 : started_at.eq(new.started_at),
1194 0 : ))
1195 0 : .execute(conn)
1196 0 : .await?
1197 : }
1198 : None => {
1199 0 : diesel::insert_into(controllers)
1200 0 : .values(new.clone())
1201 0 : .execute(conn)
1202 0 : .await?
1203 : }
1204 : };
1205 :
1206 0 : Ok(updated)
1207 0 : })
1208 0 : })
1209 0 : .await?;
1210 :
1211 0 : if updated == 0 {
1212 0 : return Err(DatabaseError::Logical(
1213 0 : "Leader table update failed".to_string(),
1214 0 : ));
1215 0 : }
1216 0 :
1217 0 : Ok(())
1218 0 : }
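A sketch of a step-up path built on these two methods, assuming the caller constructs the new `ControllerPersistence` row itself (its fields are defined elsewhere in the crate); the function name is illustrative:

```rust
// Illustrative sketch: compare-exchange the leader row.
async fn example_become_leader(
    persistence: &Persistence,
    new: ControllerPersistence,
) -> DatabaseResult<()> {
    // At most one row may exist; more than one is reported as a logical error.
    let prev = persistence.get_leader().await?;

    // Fails with DatabaseError::Logical if `prev` no longer matches the stored row,
    // i.e. another controller won the race in between.
    persistence.update_leader(prev, new).await
}
```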
1219 :
1220 : /// At startup, populate the list of safekeepers that we know about
1221 0 : pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
1222 0 : let safekeepers: Vec<SafekeeperPersistence> = self
1223 0 : .with_measured_conn(DatabaseOperation::ListSafekeepers, move |conn| {
1224 0 : Box::pin(async move {
1225 0 : Ok(crate::schema::safekeepers::table
1226 0 : .load::<SafekeeperPersistence>(conn)
1227 0 : .await?)
1228 0 : })
1229 0 : })
1230 0 : .await?;
1231 :
1232 0 : tracing::info!("list_safekeepers: loaded {} nodes", safekeepers.len());
1233 :
1234 0 : Ok(safekeepers)
1235 0 : }
1236 :
1237 0 : pub(crate) async fn safekeeper_upsert(
1238 0 : &self,
1239 0 : record: SafekeeperUpsert,
1240 0 : ) -> Result<(), DatabaseError> {
1241 : use crate::schema::safekeepers::dsl::*;
1242 :
1243 0 : self.with_conn(move |conn| {
1244 0 : let record = record.clone();
1245 0 : Box::pin(async move {
1246 0 : let bind = record
1247 0 : .as_insert_or_update()
1248 0 : .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
1249 :
1250 0 : let inserted_updated = diesel::insert_into(safekeepers)
1251 0 : .values(&bind)
1252 0 : .on_conflict(id)
1253 0 : .do_update()
1254 0 : .set(&bind)
1255 0 : .execute(conn)
1256 0 : .await?;
1257 :
1258 0 : if inserted_updated != 1 {
1259 0 : return Err(DatabaseError::Logical(format!(
1260 0 : "unexpected number of rows ({})",
1261 0 : inserted_updated
1262 0 : )));
1263 0 : }
1264 0 :
1265 0 : Ok(())
1266 0 : })
1267 0 : })
1268 0 : .await
1269 0 : }
1270 :
1271 0 : pub(crate) async fn set_safekeeper_scheduling_policy(
1272 0 : &self,
1273 0 : id_: i64,
1274 0 : scheduling_policy_: SkSchedulingPolicy,
1275 0 : ) -> Result<(), DatabaseError> {
1276 : use crate::schema::safekeepers::dsl::*;
1277 :
1278 0 : self.with_conn(move |conn| {
1279 0 : Box::pin(async move {
1280 0 : #[derive(Insertable, AsChangeset)]
1281 : #[diesel(table_name = crate::schema::safekeepers)]
1282 : struct UpdateSkSchedulingPolicy<'a> {
1283 : id: i64,
1284 : scheduling_policy: &'a str,
1285 : }
1286 0 : let scheduling_policy_ = String::from(scheduling_policy_);
1287 :
1288 0 : let rows_affected = diesel::update(safekeepers.filter(id.eq(id_)))
1289 0 : .set(scheduling_policy.eq(scheduling_policy_))
1290 0 : .execute(conn)
1291 0 : .await?;
1292 :
1293 0 : if rows_affected != 1 {
1294 0 : return Err(DatabaseError::Logical(format!(
1295 0 : "unexpected number of rows ({rows_affected})",
1296 0 : )));
1297 0 : }
1298 0 :
1299 0 : Ok(())
1300 0 : })
1301 0 : })
1302 0 : .await
1303 0 : }
1304 :
1305 : /// Persist timeline. Returns whether the timeline was newly inserted. If it wasn't, we haven't done any writes.
1306 0 : pub(crate) async fn insert_timeline(&self, entry: TimelinePersistence) -> DatabaseResult<bool> {
1307 : use crate::schema::timelines;
1308 :
1309 0 : let entry = &entry;
1310 0 : self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
1311 0 : Box::pin(async move {
1312 0 : let inserted_updated = diesel::insert_into(timelines::table)
1313 0 : .values(entry)
1314 0 : .on_conflict((timelines::tenant_id, timelines::timeline_id))
1315 0 : .do_nothing()
1316 0 : .execute(conn)
1317 0 : .await?;
1318 :
1319 0 : match inserted_updated {
1320 0 : 0 => Ok(false),
1321 0 : 1 => Ok(true),
1322 0 : _ => Err(DatabaseError::Logical(format!(
1323 0 : "unexpected number of rows ({})",
1324 0 : inserted_updated
1325 0 : ))),
1326 : }
1327 0 : })
1328 0 : })
1329 0 : .await
1330 0 : }
1331 :
1332 : /// Load timeline from db. Returns `None` if not present.
1333 0 : pub(crate) async fn get_timeline(
1334 0 : &self,
1335 0 : tenant_id: TenantId,
1336 0 : timeline_id: TimelineId,
1337 0 : ) -> DatabaseResult<Option<TimelinePersistence>> {
1338 : use crate::schema::timelines::dsl;
1339 :
1340 0 : let tenant_id = &tenant_id;
1341 0 : let timeline_id = &timeline_id;
1342 0 : let timeline_from_db = self
1343 0 : .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
1344 0 : Box::pin(async move {
1345 0 : let mut from_db: Vec<TimelineFromDb> = dsl::timelines
1346 0 : .filter(
1347 0 : dsl::tenant_id
1348 0 : .eq(&tenant_id.to_string())
1349 0 : .and(dsl::timeline_id.eq(&timeline_id.to_string())),
1350 0 : )
1351 0 : .load(conn)
1352 0 : .await?;
1353 0 : if from_db.is_empty() {
1354 0 : return Ok(None);
1355 0 : }
1356 0 : if from_db.len() != 1 {
1357 0 : return Err(DatabaseError::Logical(format!(
1358 0 : "unexpected number of rows ({})",
1359 0 : from_db.len()
1360 0 : )));
1361 0 : }
1362 0 :
1363 0 : Ok(Some(from_db.pop().unwrap().into_persistence()))
1364 0 : })
1365 0 : })
1366 0 : .await?;
1367 :
1368 0 : Ok(timeline_from_db)
1369 0 : }
1370 : /// Persist pending op. Returns whether it was newly inserted. If it wasn't, we haven't done any writes.
1371 0 : pub(crate) async fn insert_pending_op(
1372 0 : &self,
1373 0 : entry: TimelinePendingOpPersistence,
1374 0 : ) -> DatabaseResult<bool> {
1375 : use crate::schema::safekeeper_timeline_pending_ops as skpo;
1376 : // This overrides the `filter` fn used in other functions, so contain the mayhem via a function-local use
1377 : use diesel::query_dsl::methods::FilterDsl;
1378 :
1379 0 : let entry = &entry;
1380 0 : self.with_measured_conn(DatabaseOperation::InsertTimelineReconcile, move |conn| {
1381 0 : Box::pin(async move {
1382 : // For simplicity it makes sense to keep only the last operation
1383 : // per (tenant, timeline, sk) tuple: if we migrated a timeline
1384 : // away from a node and later add it back, it is not necessary to remove
1385 : // data on it. Hence, generation is not part of the primary key and
1386 : // we override any rows with lower generations here.
1387 0 : let inserted_updated = diesel::insert_into(skpo::table)
1388 0 : .values(entry)
1389 0 : .on_conflict((skpo::tenant_id, skpo::timeline_id, skpo::sk_id))
1390 0 : .do_update()
1391 0 : .set(entry)
1392 0 : .filter(skpo::generation.lt(entry.generation))
1393 0 : .execute(conn)
1394 0 : .await?;
1395 :
1396 0 : match inserted_updated {
1397 0 : 0 => Ok(false),
1398 0 : 1 => Ok(true),
1399 0 : _ => Err(DatabaseError::Logical(format!(
1400 0 : "unexpected number of rows ({})",
1401 0 : inserted_updated
1402 0 : ))),
1403 : }
1404 0 : })
1405 0 : })
1406 0 : .await
1407 0 : }
1408 : /// Remove persisted pending op.
1409 0 : pub(crate) async fn remove_pending_op(
1410 0 : &self,
1411 0 : tenant_id: TenantId,
1412 0 : timeline_id: TimelineId,
1413 0 : sk_id: NodeId,
1414 0 : generation: u32,
1415 0 : ) -> DatabaseResult<()> {
1416 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1417 :
1418 0 : let tenant_id = &tenant_id;
1419 0 : let timeline_id = &timeline_id;
1420 0 : self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
1421 0 : Box::pin(async move {
1422 0 : diesel::delete(dsl::safekeeper_timeline_pending_ops)
1423 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1424 0 : .filter(dsl::timeline_id.eq(timeline_id.to_string()))
1425 0 : .filter(dsl::sk_id.eq(sk_id.0 as i64))
1426 0 : .filter(dsl::generation.eq(generation as i32))
1427 0 : .execute(conn)
1428 0 : .await?;
1429 0 : Ok(())
1430 0 : })
1431 0 : })
1432 0 : .await
1433 0 : }
1434 :
1435 : /// Load pending operations from db.
1436 0 : pub(crate) async fn list_pending_ops(
1437 0 : &self,
1438 0 : filter_for_sk: Option<NodeId>,
1439 0 : ) -> DatabaseResult<Vec<TimelinePendingOpPersistence>> {
1440 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1441 :
1442 0 : let filter_opt = filter_for_sk.map(|id| id.0 as i64);
1443 0 : let timeline_from_db = self
1444 0 : .with_measured_conn(DatabaseOperation::ListTimelineReconcile, move |conn| {
1445 0 : Box::pin(async move {
1446 : // If a safekeeper filter was supplied, restrict the query to that node;
1447 : // otherwise load the pending operations for every safekeeper.
1448 0 : let from_db: Vec<TimelinePendingOpPersistence> = match filter_opt {
1449 0 : Some(for_sk_id) => {
1450 0 : dsl::safekeeper_timeline_pending_ops
1451 0 : .filter(dsl::sk_id.eq(for_sk_id))
1452 0 : .load(conn)
1453 0 : .await?
1454 0 : }
1455 0 : None => dsl::safekeeper_timeline_pending_ops.load(conn).await?,
1456 0 : };
1457 0 : Ok(from_db)
1458 0 : })
1459 0 : })
1460 0 : .await?;
1461 :
1462 0 : Ok(timeline_from_db)
1463 0 : }
1464 : }
1465 :
1466 0 : pub(crate) fn load_certs() -> anyhow::Result<Arc<rustls::RootCertStore>> {
1467 0 : let der_certs = rustls_native_certs::load_native_certs();
1468 0 :
1469 0 : if !der_certs.errors.is_empty() {
1470 0 : anyhow::bail!("could not parse certificates: {:?}", der_certs.errors);
1471 0 : }
1472 0 :
1473 0 : let mut store = rustls::RootCertStore::empty();
1474 0 : store.add_parsable_certificates(der_certs.certs);
1475 0 : Ok(Arc::new(store))
1476 0 : }
1477 :
1478 : #[derive(Debug)]
1479 : /// A verifier that accepts all certificates (but logs an error still)
1480 : struct AcceptAll(Arc<WebPkiServerVerifier>);
1481 : impl ServerCertVerifier for AcceptAll {
1482 0 : fn verify_server_cert(
1483 0 : &self,
1484 0 : end_entity: &rustls::pki_types::CertificateDer<'_>,
1485 0 : intermediates: &[rustls::pki_types::CertificateDer<'_>],
1486 0 : server_name: &rustls::pki_types::ServerName<'_>,
1487 0 : ocsp_response: &[u8],
1488 0 : now: rustls::pki_types::UnixTime,
1489 0 : ) -> Result<ServerCertVerified, rustls::Error> {
1490 0 : let r =
1491 0 : self.0
1492 0 : .verify_server_cert(end_entity, intermediates, server_name, ocsp_response, now);
1493 0 : if let Err(err) = r {
1494 0 : tracing::info!(
1495 : ?server_name,
1496 0 : "ignoring db connection TLS validation error: {err:?}"
1497 : );
1498 0 : return Ok(ServerCertVerified::assertion());
1499 0 : }
1500 0 : r
1501 0 : }
1502 0 : fn verify_tls12_signature(
1503 0 : &self,
1504 0 : message: &[u8],
1505 0 : cert: &rustls::pki_types::CertificateDer<'_>,
1506 0 : dss: &rustls::DigitallySignedStruct,
1507 0 : ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
1508 0 : self.0.verify_tls12_signature(message, cert, dss)
1509 0 : }
1510 0 : fn verify_tls13_signature(
1511 0 : &self,
1512 0 : message: &[u8],
1513 0 : cert: &rustls::pki_types::CertificateDer<'_>,
1514 0 : dss: &rustls::DigitallySignedStruct,
1515 0 : ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
1516 0 : self.0.verify_tls13_signature(message, cert, dss)
1517 0 : }
1518 0 : fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
1519 0 : self.0.supported_verify_schemes()
1520 0 : }
1521 : }
1522 :
1523 : /// Loads the root certificates and constructs a client config suitable for connecting.
1524 : /// This function is blocking.
1525 0 : fn client_config_with_root_certs() -> anyhow::Result<rustls::ClientConfig> {
1526 0 : let client_config =
1527 0 : rustls::ClientConfig::builder_with_provider(Arc::new(ring::default_provider()))
1528 0 : .with_safe_default_protocol_versions()
1529 0 : .expect("ring should support the default protocol versions");
1530 : static DO_CERT_CHECKS: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
1531 0 : let do_cert_checks =
1532 0 : DO_CERT_CHECKS.get_or_init(|| std::env::var("STORCON_DB_CERT_CHECKS").is_ok());
1533 0 : Ok(if *do_cert_checks {
1534 0 : client_config
1535 0 : .with_root_certificates(load_certs()?)
1536 0 : .with_no_client_auth()
1537 : } else {
1538 0 : let verifier = AcceptAll(
1539 : WebPkiServerVerifier::builder_with_provider(
1540 0 : load_certs()?,
1541 0 : Arc::new(ring::default_provider()),
1542 0 : )
1543 0 : .build()?,
1544 : );
1545 0 : client_config
1546 0 : .dangerous()
1547 0 : .with_custom_certificate_verifier(Arc::new(verifier))
1548 0 : .with_no_client_auth()
1549 : })
1550 0 : }
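Since this helper blocks on the OS trust store, a caller running on the async runtime could keep it off the executor; a minimal sketch, assuming a tokio context and an enclosing function that returns `anyhow::Result` (not part of the original file):

// Strict verification is opted into purely by the *presence* of STORCON_DB_CERT_CHECKS
// (any value), and the OnceLock pins that choice for the lifetime of the process.
let client_config = tokio::task::spawn_blocking(client_config_with_root_certs).await??;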
1551 :
1552 0 : fn establish_connection_rustls(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
1553 0 : let fut = async {
1554 : // We first set up the way we want rustls to work.
1555 0 : let rustls_config = client_config_with_root_certs()
1556 0 : .map_err(|err| ConnectionError::BadConnection(format!("{err:?}")))?;
1557 0 : let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
1558 0 : let (client, conn) = tokio_postgres::connect(config, tls)
1559 0 : .await
1560 0 : .map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
1561 :
1562 0 : AsyncPgConnection::try_from_client_and_connection(client, conn).await
1563 0 : };
1564 0 : fut.boxed()
1565 0 : }
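For context, a setup callback like this is the piece a diesel-async bb8 pool plugs in through `ManagerConfig::custom_setup`; a minimal wiring sketch, with a hypothetical connection string and pool settings:

async fn build_pool(database_url: &str) -> anyhow::Result<Pool<AsyncPgConnection>> {
    let mut manager_config = ManagerConfig::default();
    // Route every new connection through the rustls-aware setup above.
    manager_config.custom_setup = Box::new(establish_connection_rustls);
    let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
        database_url,
        manager_config,
    );
    Ok(Pool::builder().build(manager).await?)
}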
1566 :
1567 : #[cfg_attr(test, test)]
1568 1 : fn test_config_debug_censors_password() {
1569 1 : let has_pw =
1570 1 : "host=/var/lib/postgresql,localhost port=1234 user=specialuser password='NOT ALLOWED TAG'";
1571 1 : let has_pw_cfg = has_pw.parse::<tokio_postgres::Config>().unwrap();
1572 1 : assert!(format!("{has_pw_cfg:?}").contains("specialuser"));
1573 : // Ensure that the password is not leaked by the debug impl
1574 1 : assert!(!format!("{has_pw_cfg:?}").contains("NOT ALLOWED TAG"));
1575 1 : }
1576 :
1577 0 : fn log_postgres_connstr_info(config_str: &str) -> anyhow::Result<()> {
1578 0 : let config = config_str
1579 0 : .parse::<tokio_postgres::Config>()
1580 0 : .map_err(|_e| anyhow::anyhow!("Couldn't parse config str"))?;
1581 : // We use debug formatting here, and use a unit test to ensure that we don't leak the password.
1582 : // To make extra sure the test gets run, run it every time the function is called
1583 : // (this is rather cold code, we can afford it).
1584 : #[cfg(not(test))]
1585 0 : test_config_debug_censors_password();
1586 0 : tracing::info!("database connection config: {config:?}");
1587 0 : Ok(())
1588 0 : }
1589 :
1590 : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
1591 : #[derive(
1592 0 : QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
1593 : )]
1594 : #[diesel(table_name = crate::schema::tenant_shards)]
1595 : pub(crate) struct TenantShardPersistence {
1596 : #[serde(default)]
1597 : pub(crate) tenant_id: String,
1598 : #[serde(default)]
1599 : pub(crate) shard_number: i32,
1600 : #[serde(default)]
1601 : pub(crate) shard_count: i32,
1602 : #[serde(default)]
1603 : pub(crate) shard_stripe_size: i32,
1604 :
1605 : // Latest generation number: next time we attach, increment this
1606 : // and use the incremented number when attaching.
1607 : //
1608 : // Generation is only None when first onboarding a tenant, where it may
1609 : // be in PlacementPolicy::Secondary and therefore have no valid generation state.
1610 : pub(crate) generation: Option<i32>,
1611 :
1612 : // Currently attached pageserver
1613 : #[serde(rename = "pageserver")]
1614 : pub(crate) generation_pageserver: Option<i64>,
1615 :
1616 : #[serde(default)]
1617 : pub(crate) placement_policy: String,
1618 : #[serde(default)]
1619 : pub(crate) splitting: SplitState,
1620 : #[serde(default)]
1621 : pub(crate) config: String,
1622 : #[serde(default)]
1623 : pub(crate) scheduling_policy: String,
1624 :
1625 : // Hint that we should attempt to schedule this tenant shard in the given
1626 : // availability zone in order to minimise the chances of cross-AZ communication
1627 : // with compute.
1628 : pub(crate) preferred_az_id: Option<String>,
1629 : }
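A sketch of the JSON this row (de)serializes to, illustrating the `pageserver` rename and the defaulted columns; the values are made up and `serde_json` is assumed to be available:

let json = r#"{
    "tenant_id": "1f359dd625e519a1a4e8d7509690f6fc",
    "shard_number": 0,
    "shard_count": 0,
    "shard_stripe_size": 32768,
    "generation": 1,
    "pageserver": 42,
    "placement_policy": "{\"Attached\":1}",
    "config": "{}",
    "scheduling_policy": "\"Active\"",
    "preferred_az_id": null
}"#;
let shard: TenantShardPersistence = serde_json::from_str(json).expect("valid shape");
// The column is called generation_pageserver in the database, but travels as "pageserver".
assert_eq!(shard.generation_pageserver, Some(42));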
1630 :
1631 : impl TenantShardPersistence {
1632 0 : fn get_shard_count(&self) -> Result<ShardCount, ShardConfigError> {
1633 0 : self.shard_count
1634 0 : .try_into()
1635 0 : .map(ShardCount)
1636 0 : .map_err(|_| ShardConfigError::InvalidCount)
1637 0 : }
1638 :
1639 0 : fn get_shard_number(&self) -> Result<ShardNumber, ShardConfigError> {
1640 0 : self.shard_number
1641 0 : .try_into()
1642 0 : .map(ShardNumber)
1643 0 : .map_err(|_| ShardConfigError::InvalidNumber)
1644 0 : }
1645 :
1646 0 : fn get_stripe_size(&self) -> Result<ShardStripeSize, ShardConfigError> {
1647 0 : self.shard_stripe_size
1648 0 : .try_into()
1649 0 : .map(ShardStripeSize)
1650 0 : .map_err(|_| ShardConfigError::InvalidStripeSize)
1651 0 : }
1652 :
1653 0 : pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
1654 0 : if self.shard_count == 0 {
1655 : // NB: carry over the stripe size from the persisted record, to avoid consistency check
1656 : // failures if the persisted value differs from the default stripe size. The stripe size
1657 : // doesn't really matter for unsharded tenants anyway.
1658 : Ok(ShardIdentity::unsharded_with_stripe_size(
1659 0 : self.get_stripe_size()?,
1660 : ))
1661 : } else {
1662 : Ok(ShardIdentity::new(
1663 0 : self.get_shard_number()?,
1664 0 : self.get_shard_count()?,
1665 0 : self.get_stripe_size()?,
1666 0 : )?)
1667 : }
1668 0 : }
1669 :
1670 0 : pub(crate) fn get_tenant_shard_id(&self) -> anyhow::Result<TenantShardId> {
1671 0 : Ok(TenantShardId {
1672 0 : tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
1673 0 : shard_number: self.get_shard_number()?,
1674 0 : shard_count: self.get_shard_count()?,
1675 : })
1676 0 : }
1677 : }
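The getters above are thin wrappers over `i32` to `u8` conversions, which is where corrupt rows surface as `ShardConfigError`s; the same conversions in isolation, with hypothetical values:

// A stored value that does not fit in u8 is rejected rather than truncated.
let too_big: Result<u8, _> = 300_i32.try_into();
assert!(too_big.is_err()); // would become ShardConfigError::InvalidCount via get_shard_count()

// shard_count == 0 is the "unsharded" marker handled by the first branch of get_shard_identity().
let count = ShardCount(4_i32.try_into().expect("fits in u8"));
assert_eq!(count.literal(), 4);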
1678 :
1679 : /// Parts of [`crate::node::Node`] that are stored durably
1680 0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
1681 : #[diesel(table_name = crate::schema::nodes)]
1682 : pub(crate) struct NodePersistence {
1683 : pub(crate) node_id: i64,
1684 : pub(crate) scheduling_policy: String,
1685 : pub(crate) listen_http_addr: String,
1686 : pub(crate) listen_http_port: i32,
1687 : pub(crate) listen_pg_addr: String,
1688 : pub(crate) listen_pg_port: i32,
1689 : pub(crate) availability_zone_id: String,
1690 : pub(crate) listen_https_port: Option<i32>,
1691 : }
1692 :
1693 : /// Tenant metadata health status that is stored durably.
1694 0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
1695 : #[diesel(table_name = crate::schema::metadata_health)]
1696 : pub(crate) struct MetadataHealthPersistence {
1697 : #[serde(default)]
1698 : pub(crate) tenant_id: String,
1699 : #[serde(default)]
1700 : pub(crate) shard_number: i32,
1701 : #[serde(default)]
1702 : pub(crate) shard_count: i32,
1703 :
1704 : pub(crate) healthy: bool,
1705 : pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
1706 : }
1707 :
1708 : impl MetadataHealthPersistence {
1709 0 : pub fn new(
1710 0 : tenant_shard_id: TenantShardId,
1711 0 : healthy: bool,
1712 0 : last_scrubbed_at: chrono::DateTime<chrono::Utc>,
1713 0 : ) -> Self {
1714 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1715 0 : let shard_number = tenant_shard_id.shard_number.0 as i32;
1716 0 : let shard_count = tenant_shard_id.shard_count.literal() as i32;
1717 0 :
1718 0 : MetadataHealthPersistence {
1719 0 : tenant_id,
1720 0 : shard_number,
1721 0 : shard_count,
1722 0 : healthy,
1723 0 : last_scrubbed_at,
1724 0 : }
1725 0 : }
1726 :
1727 : #[allow(dead_code)]
1728 0 : pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
1729 0 : Ok(TenantShardId {
1730 0 : tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
1731 0 : shard_number: ShardNumber(self.shard_number as u8),
1732 0 : shard_count: ShardCount::new(self.shard_count as u8),
1733 : })
1734 0 : }
1735 : }
1736 :
1737 : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
1738 0 : fn from(value: MetadataHealthPersistence) -> Self {
1739 0 : MetadataHealthRecord {
1740 0 : tenant_shard_id: value
1741 0 : .get_tenant_shard_id()
1742 0 : .expect("stored tenant id should be valid"),
1743 0 : healthy: value.healthy,
1744 0 : last_scrubbed_at: value.last_scrubbed_at,
1745 0 : }
1746 0 : }
1747 : }
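A round trip from an in-memory shard id to the record returned by the metadata health API; the id and timestamp below are made-up values:

let tenant_shard_id = TenantShardId {
    tenant_id: TenantId::from_str("1f359dd625e519a1a4e8d7509690f6fc").expect("valid hex id"),
    shard_number: ShardNumber(0),
    shard_count: ShardCount::new(1),
};
let persisted = MetadataHealthPersistence::new(tenant_shard_id, true, chrono::Utc::now());
// The From impl re-parses the stored strings/ints back into a typed record.
let record: MetadataHealthRecord = persisted.into();
assert!(record.healthy);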
1748 :
1749 : #[derive(
1750 0 : Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
1751 : )]
1752 : #[diesel(table_name = crate::schema::controllers)]
1753 : pub(crate) struct ControllerPersistence {
1754 : pub(crate) address: String,
1755 : pub(crate) started_at: chrono::DateTime<chrono::Utc>,
1756 : }
1757 :
1758 : // What we store in the database
1759 0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
1760 : #[diesel(table_name = crate::schema::safekeepers)]
1761 : pub(crate) struct SafekeeperPersistence {
1762 : pub(crate) id: i64,
1763 : pub(crate) region_id: String,
1764 : /// 1 is special: it means just created (not currently posted to storcon).
1765 : /// Zero or negative is not really expected.
1766 : /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
1767 : pub(crate) version: i64,
1768 : pub(crate) host: String,
1769 : pub(crate) port: i32,
1770 : pub(crate) http_port: i32,
1771 : pub(crate) availability_zone_id: String,
1772 : pub(crate) scheduling_policy: SkSchedulingPolicyFromSql,
1773 : pub(crate) https_port: Option<i32>,
1774 : }
1775 :
1776 : /// Wrapper struct around [`SkSchedulingPolicy`] because both it and [`FromSql`] are from foreign crates,
1777 : /// and we don't want to make [`safekeeper_api`] depend on [`diesel`].
1778 0 : #[derive(Serialize, Deserialize, FromSqlRow, Eq, PartialEq, Debug, Copy, Clone)]
1779 : pub(crate) struct SkSchedulingPolicyFromSql(pub(crate) SkSchedulingPolicy);
1780 :
1781 : impl From<SkSchedulingPolicy> for SkSchedulingPolicyFromSql {
1782 0 : fn from(value: SkSchedulingPolicy) -> Self {
1783 0 : SkSchedulingPolicyFromSql(value)
1784 0 : }
1785 : }
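At call sites the newtype only shows up at the database boundary: values convert in with `.into()` and back out through field `.0`. A sketch; the `Active` variant name is an assumption, see `SkSchedulingPolicy` for the real set:

let stored: SkSchedulingPolicyFromSql = SkSchedulingPolicy::Active.into(); // hypothetical variant name
let _unwrapped: SkSchedulingPolicy = stored.0; // and back out when handing it to scheduling code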
1786 :
1787 : impl FromSql<diesel::sql_types::VarChar, Pg> for SkSchedulingPolicyFromSql {
1788 0 : fn from_sql(
1789 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
1790 0 : ) -> diesel::deserialize::Result<Self> {
1791 0 : let bytes = bytes.as_bytes();
1792 0 : match core::str::from_utf8(bytes) {
1793 0 : Ok(s) => match SkSchedulingPolicy::from_str(s) {
1794 0 : Ok(policy) => Ok(SkSchedulingPolicyFromSql(policy)),
1795 0 : Err(e) => Err(format!("can't parse: {e}").into()),
1796 : },
1797 0 : Err(e) => Err(format!("invalid UTF-8 for scheduling policy: {e}").into()),
1798 : }
1799 0 : }
1800 : }
1801 :
1802 : impl SafekeeperPersistence {
1803 0 : pub(crate) fn from_upsert(
1804 0 : upsert: SafekeeperUpsert,
1805 0 : scheduling_policy: SkSchedulingPolicy,
1806 0 : ) -> Self {
1807 0 : crate::persistence::SafekeeperPersistence {
1808 0 : id: upsert.id,
1809 0 : region_id: upsert.region_id,
1810 0 : version: upsert.version,
1811 0 : host: upsert.host,
1812 0 : port: upsert.port,
1813 0 : http_port: upsert.http_port,
1814 0 : https_port: upsert.https_port,
1815 0 : availability_zone_id: upsert.availability_zone_id,
1816 0 : scheduling_policy: SkSchedulingPolicyFromSql(scheduling_policy),
1817 0 : }
1818 0 : }
1819 0 : pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
1820 0 : Ok(SafekeeperDescribeResponse {
1821 0 : id: NodeId(self.id as u64),
1822 0 : region_id: self.region_id.clone(),
1823 0 : version: self.version,
1824 0 : host: self.host.clone(),
1825 0 : port: self.port,
1826 0 : http_port: self.http_port,
1827 0 : https_port: self.https_port,
1828 0 : availability_zone_id: self.availability_zone_id.clone(),
1829 0 : scheduling_policy: self.scheduling_policy.0,
1830 0 : })
1831 0 : }
1832 : }
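How an upsert request flows into the describe API shape; every value below is made up and the `Active` scheduling policy variant name is an assumption:

let upsert = SafekeeperUpsert {
    id: 7,
    region_id: "eu-central-1".to_string(),
    version: 5957,
    host: "sk-7.local".to_string(),
    port: 6500,
    active: None, // ignored either way
    http_port: 7676,
    https_port: None,
    availability_zone_id: "eu-central-1a".to_string(),
};
let persisted = SafekeeperPersistence::from_upsert(upsert, SkSchedulingPolicy::Active);
let describe = persisted.as_describe_response().expect("fields are in range");
assert_eq!(describe.id, NodeId(7));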
1833 :
1834 : /// What we expect from the upsert http api
1835 0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
1836 : pub(crate) struct SafekeeperUpsert {
1837 : pub(crate) id: i64,
1838 : pub(crate) region_id: String,
1839 : /// 1 is special: it means just created (not currently posted to storcon).
1840 : /// Zero or negative is not really expected.
1841 : /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
1842 : pub(crate) version: i64,
1843 : pub(crate) host: String,
1844 : pub(crate) port: i32,
1845 : /// The active flag will not be stored in the database and will be ignored.
1846 : pub(crate) active: Option<bool>,
1847 : pub(crate) http_port: i32,
1848 : pub(crate) https_port: Option<i32>,
1849 : pub(crate) availability_zone_id: String,
1850 : }
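A request body matching this shape, with illustrative values and assuming `serde_json`; note that every field must be present (`https_port` may be `null` but not omitted) and that `active` is parsed only to be ignored:

let body = r#"{
    "id": 7,
    "region_id": "eu-central-1",
    "version": 5957,
    "host": "sk-7.local",
    "port": 6500,
    "active": true,
    "http_port": 7676,
    "https_port": null,
    "availability_zone_id": "eu-central-1a"
}"#;
let upsert: SafekeeperUpsert = serde_json::from_str(body).expect("valid body");
assert_eq!(upsert.active, Some(true)); // accepted on the wire, never stored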
1851 :
1852 : impl SafekeeperUpsert {
1853 0 : fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
1854 0 : if self.version < 0 {
1855 0 : anyhow::bail!("negative version: {}", self.version);
1856 0 : }
1857 0 : Ok(InsertUpdateSafekeeper {
1858 0 : id: self.id,
1859 0 : region_id: &self.region_id,
1860 0 : version: self.version,
1861 0 : host: &self.host,
1862 0 : port: self.port,
1863 0 : http_port: self.http_port,
1864 0 : https_port: self.https_port,
1865 0 : availability_zone_id: &self.availability_zone_id,
1866 0 : // None means we don't want to update this column; updates to it are exposed via other means.
1867 0 : scheduling_policy: None,
1868 0 : })
1869 0 : }
1870 : }
1871 :
1872 0 : #[derive(Insertable, AsChangeset)]
1873 : #[diesel(table_name = crate::schema::safekeepers)]
1874 : struct InsertUpdateSafekeeper<'a> {
1875 : id: i64,
1876 : region_id: &'a str,
1877 : version: i64,
1878 : host: &'a str,
1879 : port: i32,
1880 : http_port: i32,
1881 : https_port: Option<i32>,
1882 : availability_zone_id: &'a str,
1883 : scheduling_policy: Option<&'a str>,
1884 : }
1885 :
1886 0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
1887 : #[diesel(sql_type = crate::schema::sql_types::PgLsn)]
1888 : pub(crate) struct LsnWrapper(pub(crate) Lsn);
1889 :
1890 : impl From<Lsn> for LsnWrapper {
1891 0 : fn from(value: Lsn) -> Self {
1892 0 : LsnWrapper(value)
1893 0 : }
1894 : }
1895 :
1896 : impl FromSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
1897 0 : fn from_sql(
1898 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
1899 0 : ) -> diesel::deserialize::Result<Self> {
1900 0 : let byte_arr: diesel::deserialize::Result<[u8; 8]> = bytes
1901 0 : .as_bytes()
1902 0 : .try_into()
1903 0 : .map_err(|_| "Can't obtain lsn from sql".into());
1904 0 : Ok(LsnWrapper(Lsn(u64::from_be_bytes(byte_arr?))))
1905 0 : }
1906 : }
1907 :
1908 : impl ToSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
1909 0 : fn to_sql<'b>(
1910 0 : &'b self,
1911 0 : out: &mut diesel::serialize::Output<'b, '_, Pg>,
1912 0 : ) -> diesel::serialize::Result {
1913 0 : out.write_all(&u64::to_be_bytes(self.0.0))
1914 0 : .map(|_| IsNull::No)
1915 0 : .map_err(Into::into)
1916 0 : }
1917 : }
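The two `pg_lsn` impls above are mirror images of one encoding rule: the LSN is exactly its `u64` in big-endian byte order. A worked round trip with an arbitrary value:

let lsn = Lsn(0x16960E8); // displayed by utils::lsn as "0/16960E8"
let wire: [u8; 8] = u64::to_be_bytes(lsn.0); // what ToSql writes
assert_eq!(Lsn(u64::from_be_bytes(wire)), lsn); // what FromSql reconstructs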
1918 :
1919 0 : #[derive(Insertable, AsChangeset, Queryable, Selectable, Clone)]
1920 : #[diesel(table_name = crate::schema::timelines)]
1921 : pub(crate) struct TimelinePersistence {
1922 : pub(crate) tenant_id: String,
1923 : pub(crate) timeline_id: String,
1924 : pub(crate) start_lsn: LsnWrapper,
1925 : pub(crate) generation: i32,
1926 : pub(crate) sk_set: Vec<i64>,
1927 : pub(crate) new_sk_set: Option<Vec<i64>>,
1928 : pub(crate) cplane_notified_generation: i32,
1929 : pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
1930 : }
1931 :
1932 : /// This is separate from [TimelinePersistence] only because Postgres allows NULLs
1933 : /// in arrays and there is no way to forbid that at the schema level. Hence diesel
1934 : /// wants `sk_set` to be `Vec<Option<i64>>` instead of `Vec<i64>` for
1935 : /// Queryable/Selectable. Diesel does, however, allow insertions without the redundant
1936 : /// Option(s), so [TimelinePersistence] doesn't need them.
1937 0 : #[derive(Queryable, Selectable)]
1938 : #[diesel(table_name = crate::schema::timelines)]
1939 : pub(crate) struct TimelineFromDb {
1940 : pub(crate) tenant_id: String,
1941 : pub(crate) timeline_id: String,
1942 : pub(crate) start_lsn: LsnWrapper,
1943 : pub(crate) generation: i32,
1944 : pub(crate) sk_set: Vec<Option<i64>>,
1945 : pub(crate) new_sk_set: Option<Vec<Option<i64>>>,
1946 : pub(crate) cplane_notified_generation: i32,
1947 : pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
1948 : }
1949 :
1950 : impl TimelineFromDb {
1951 0 : fn into_persistence(self) -> TimelinePersistence {
1952 0 : // We should never encounter null entries in the sets, but we need to filter them out.
1953 0 : // There is no way to forbid NULL array entries at the schema level in a way diesel recognizes (to our knowledge).
1954 0 : let sk_set = self.sk_set.into_iter().flatten().collect::<Vec<_>>();
1955 0 : let new_sk_set = self
1956 0 : .new_sk_set
1957 0 : .map(|s| s.into_iter().flatten().collect::<Vec<_>>());
1958 0 : TimelinePersistence {
1959 0 : tenant_id: self.tenant_id,
1960 0 : timeline_id: self.timeline_id,
1961 0 : start_lsn: self.start_lsn,
1962 0 : generation: self.generation,
1963 0 : sk_set,
1964 0 : new_sk_set,
1965 0 : cplane_notified_generation: self.cplane_notified_generation,
1966 0 : deleted_at: self.deleted_at,
1967 0 : }
1968 0 : }
1969 : }
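What the `flatten()` calls above do to a NULL array entry (never expected, but representable by the schema):

let sk_set_from_db: Vec<Option<i64>> = vec![Some(1), None, Some(3)];
let sk_set: Vec<i64> = sk_set_from_db.into_iter().flatten().collect();
assert_eq!(sk_set, vec![1, 3]); // the NULL silently disappears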
1970 :
1971 0 : #[derive(Insertable, AsChangeset, Queryable, Selectable, Clone)]
1972 : #[diesel(table_name = crate::schema::safekeeper_timeline_pending_ops)]
1973 : pub(crate) struct TimelinePendingOpPersistence {
1974 : pub(crate) sk_id: i64,
1975 : pub(crate) tenant_id: String,
1976 : pub(crate) timeline_id: String,
1977 : pub(crate) generation: i32,
1978 : pub(crate) op_kind: SafekeeperTimelineOpKind,
1979 : }
1980 :
1981 0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
1982 : #[diesel(sql_type = diesel::sql_types::VarChar)]
1983 : pub(crate) enum SafekeeperTimelineOpKind {
1984 : Pull,
1985 : Exclude,
1986 : Delete,
1987 : }
1988 :
1989 : impl FromSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
1990 0 : fn from_sql(
1991 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
1992 0 : ) -> diesel::deserialize::Result<Self> {
1993 0 : let bytes = bytes.as_bytes();
1994 0 : match core::str::from_utf8(bytes) {
1995 0 : Ok(s) => match s {
1996 0 : "pull" => Ok(SafekeeperTimelineOpKind::Pull),
1997 0 : "exclude" => Ok(SafekeeperTimelineOpKind::Exclude),
1998 0 : "delete" => Ok(SafekeeperTimelineOpKind::Delete),
1999 0 : _ => Err(format!("can't parse: {s}").into()),
2000 : },
2001 0 : Err(e) => Err(format!("invalid UTF-8 for op_kind: {e}").into()),
2002 : }
2003 0 : }
2004 : }
2005 :
2006 : impl ToSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
2007 0 : fn to_sql<'b>(
2008 0 : &'b self,
2009 0 : out: &mut diesel::serialize::Output<'b, '_, Pg>,
2010 0 : ) -> diesel::serialize::Result {
2011 0 : let kind_str = match self {
2012 0 : SafekeeperTimelineOpKind::Pull => "pull",
2013 0 : SafekeeperTimelineOpKind::Exclude => "exclude",
2014 0 : SafekeeperTimelineOpKind::Delete => "delete",
2015 : };
2016 0 : out.write_all(kind_str.as_bytes())
2017 0 : .map(|_| IsNull::No)
2018 0 : .map_err(Into::into)
2019 0 : }
2020 : }
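Because `TimelinePendingOpPersistence` derives `Insertable` and `op_kind` goes through the `VarChar` impls above, queueing a pending op is a plain diesel insert; a sketch assuming an open `conn: &mut AsyncPgConnection` and made-up ids:

use crate::schema::safekeeper_timeline_pending_ops::dsl;

let op = TimelinePendingOpPersistence {
    sk_id: 7,
    tenant_id: "1f359dd625e519a1a4e8d7509690f6fc".to_string(),
    timeline_id: "4c6829ee9c2c4f8f8d3f7f44b7d7a9d0".to_string(),
    generation: 1,
    op_kind: SafekeeperTimelineOpKind::Pull, // stored as the string "pull"
};
diesel::insert_into(dsl::safekeeper_timeline_pending_ops)
    .values(&op)
    .execute(conn)
    .await?;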