Line data Source code
1 : pub(crate) mod split_state;
2 : use std::collections::HashMap;
3 : use std::io::Write;
4 : use std::str::FromStr;
5 : use std::sync::Arc;
6 : use std::time::{Duration, Instant};
7 :
8 : use diesel::deserialize::{FromSql, FromSqlRow};
9 : use diesel::expression::AsExpression;
10 : use diesel::pg::Pg;
11 : use diesel::prelude::*;
12 : use diesel::serialize::{IsNull, ToSql};
13 : use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
14 : use diesel_async::pooled_connection::bb8::Pool;
15 : use diesel_async::pooled_connection::{AsyncDieselConnectionManager, ManagerConfig};
16 : use diesel_async::{AsyncPgConnection, RunQueryDsl};
17 : use diesel_migrations::{EmbeddedMigrations, embed_migrations};
18 : use futures::FutureExt;
19 : use futures::future::BoxFuture;
20 : use itertools::Itertools;
21 : use pageserver_api::controller_api::{
22 : AvailabilityZone, MetadataHealthRecord, NodeSchedulingPolicy, PlacementPolicy,
23 : SafekeeperDescribeResponse, ShardSchedulingPolicy, SkSchedulingPolicy,
24 : };
25 : use pageserver_api::models::TenantConfig;
26 : use pageserver_api::shard::{
27 : ShardConfigError, ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
28 : };
29 : use rustls::client::WebPkiServerVerifier;
30 : use rustls::client::danger::{ServerCertVerified, ServerCertVerifier};
31 : use rustls::crypto::ring;
32 : use scoped_futures::ScopedBoxFuture;
33 : use serde::{Deserialize, Serialize};
34 : use utils::generation::Generation;
35 : use utils::id::{NodeId, TenantId, TimelineId};
36 : use utils::lsn::Lsn;
37 :
38 : use self::split_state::SplitState;
39 : use crate::metrics::{
40 : DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
41 : };
42 : use crate::node::Node;
43 : const MIGRATIONS: EmbeddedMigrations = embed_migrations!("./migrations");
44 :
45 : /// ## What do we store?
46 : ///
47 : /// The storage controller service does not store most of its state durably.
48 : ///
49 : /// The essential things to store durably are:
50 : /// - generation numbers, as these must always advance monotonically to ensure data safety.
51 : /// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
52 : /// - Node's scheduling policies, as the source of truth for these is something external.
53 : ///
54 : /// Other things we store durably as an implementation detail:
55 : /// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
56 : /// but it is operationally simpler to make this service the authority for which nodes
57 : /// it talks to.
58 : ///
59 : /// ## Performance/efficiency
60 : ///
61 : /// The storage controller service does not go via the database for most things: there are
62 : /// a couple of places where we must, and where efficiency matters:
63 : /// - Incrementing generation numbers: the Reconciler has to wait for this to complete
64 : /// before it can attach a tenant, so this acts as a bound on how fast things like
65 : /// failover can happen.
66 : /// - Pageserver re-attach: we will increment many shards' generations when this happens,
67 : /// so it is important to avoid e.g. issuing O(N) queries.
68 : ///
69 : /// Database calls relating to nodes have low performance requirements, as they are very rarely
70 : /// updated, and reads of nodes are always from memory, not the database. We only require that
71 : /// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
72 : pub struct Persistence {
73 : connection_pool: Pool<AsyncPgConnection>,
74 : }
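
// Illustrative startup sketch (not part of the service itself): wait for the database
// to become reachable, run migrations, then build the pool. The `database_url`
// parameter, the 30s timeout, and the error handling here are assumptions.
#[allow(dead_code)]
async fn example_startup(database_url: String) -> anyhow::Result<Persistence> {
    // Tolerate the database coming up concurrently with us (e.g. after a restart).
    Persistence::await_connection(&database_url, Duration::from_secs(30)).await?;
    let persistence = Persistence::new(database_url).await;
    // Apply any diesel migrations embedded in this binary before serving requests.
    persistence.migration_run().await?;
    Ok(persistence)
}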
75 :
76 : /// Legacy format, for use in JSON compat objects in test environment
77 0 : #[derive(Serialize, Deserialize)]
78 : struct JsonPersistence {
79 : tenants: HashMap<TenantShardId, TenantShardPersistence>,
80 : }
81 :
82 : #[derive(thiserror::Error, Debug)]
83 : pub(crate) enum DatabaseError {
84 : #[error(transparent)]
85 : Query(#[from] diesel::result::Error),
86 : #[error(transparent)]
87 : Connection(#[from] diesel::result::ConnectionError),
88 : #[error(transparent)]
89 : ConnectionPool(#[from] diesel_async::pooled_connection::bb8::RunError),
90 : #[error("Logical error: {0}")]
91 : Logical(String),
92 : #[error("Migration error: {0}")]
93 : Migration(String),
94 : }
95 :
96 : #[derive(measured::FixedCardinalityLabel, Copy, Clone)]
97 : pub(crate) enum DatabaseOperation {
98 : InsertNode,
99 : UpdateNode,
100 : DeleteNode,
101 : ListNodes,
102 : BeginShardSplit,
103 : CompleteShardSplit,
104 : AbortShardSplit,
105 : Detach,
106 : ReAttach,
107 : IncrementGeneration,
108 : TenantGenerations,
109 : ShardGenerations,
110 : ListTenantShards,
111 : LoadTenant,
112 : InsertTenantShards,
113 : UpdateTenantShard,
114 : DeleteTenant,
115 : UpdateTenantConfig,
116 : UpdateMetadataHealth,
117 : ListMetadataHealth,
118 : ListMetadataHealthUnhealthy,
119 : ListMetadataHealthOutdated,
120 : ListSafekeepers,
121 : GetLeader,
122 : UpdateLeader,
123 : SetPreferredAzs,
124 : InsertTimeline,
125 : GetTimeline,
126 : InsertTimelineReconcile,
127 : RemoveTimelineReconcile,
128 : ListTimelineReconcile,
129 : ListTimelineReconcileStartup,
130 : }
131 :
132 : #[must_use]
133 : pub(crate) enum AbortShardSplitStatus {
134 : /// We aborted the split in the database by reverting to the parent shards
135 : Aborted,
136 : /// The split had already been persisted.
137 : Complete,
138 : }
139 :
140 : pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;
141 :
142 : /// Some methods can operate on either a whole tenant or a single shard
143 : #[derive(Clone)]
144 : pub(crate) enum TenantFilter {
145 : Tenant(TenantId),
146 : Shard(TenantShardId),
147 : }
148 :
149 : /// Represents the results of looking up generation+pageserver for the shards of a tenant
150 : pub(crate) struct ShardGenerationState {
151 : pub(crate) tenant_shard_id: TenantShardId,
152 : pub(crate) generation: Option<Generation>,
153 : pub(crate) generation_pageserver: Option<NodeId>,
154 : }
155 :
156 : // A generous allowance for how many times we may retry serializable transactions
157 : // before giving up. This is not expected to be hit: it is a defensive measure in case we
158 : // somehow engineer a situation where duelling transactions might otherwise live-lock.
159 : const MAX_RETRIES: usize = 128;
160 :
161 : impl Persistence {
162 : // The default postgres connection limit is 100. We use up to 99, to leave one free for a human admin under
163 : // normal circumstances. This assumes we have exclusive use of the database cluster to which we connect.
164 : pub const MAX_CONNECTIONS: u32 = 99;
165 :
166 : // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
167 : const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
168 : const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
169 :
170 0 : pub async fn new(database_url: String) -> Self {
171 0 : let mut mgr_config = ManagerConfig::default();
172 0 : mgr_config.custom_setup = Box::new(establish_connection_rustls);
173 0 :
174 0 : let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(
175 0 : database_url,
176 0 : mgr_config,
177 0 : );
178 :
179 : // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
180 : // to execute queries (database queries are not generally on latency-sensitive paths).
181 0 : let connection_pool = Pool::builder()
182 0 : .max_size(Self::MAX_CONNECTIONS)
183 0 : .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
184 0 : .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
185 0 : // Always keep at least one connection ready to go
186 0 : .min_idle(Some(1))
187 0 : .test_on_check_out(true)
188 0 : .build(manager)
189 0 : .await
190 0 : .expect("Could not build connection pool");
191 0 :
192 0 : Self { connection_pool }
193 0 : }
194 :
195 : /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
196 : /// database and the storage controller; the database might therefore not be available right away
197 0 : pub async fn await_connection(
198 0 : database_url: &str,
199 0 : timeout: Duration,
200 0 : ) -> Result<(), diesel::ConnectionError> {
201 0 : let started_at = Instant::now();
202 0 : log_postgres_connstr_info(database_url)
203 0 : .map_err(|e| diesel::ConnectionError::InvalidConnectionUrl(e.to_string()))?;
204 : loop {
205 0 : match establish_connection_rustls(database_url).await {
206 : Ok(_) => {
207 0 : tracing::info!("Connected to database.");
208 0 : return Ok(());
209 : }
210 0 : Err(e) => {
211 0 : if started_at.elapsed() > timeout {
212 0 : return Err(e);
213 : } else {
214 0 : tracing::info!("Database not yet available, waiting... ({e})");
215 0 : tokio::time::sleep(Duration::from_millis(100)).await;
216 : }
217 : }
218 : }
219 : }
220 0 : }
221 :
222 : /// Execute the diesel migrations that are built into this binary
223 0 : pub(crate) async fn migration_run(&self) -> DatabaseResult<()> {
224 : use diesel_migrations::{HarnessWithOutput, MigrationHarness};
225 :
226 : // Can't use self.with_conn here, as we use spawn_blocking, which requires 'static.
227 0 : let conn = self
228 0 : .connection_pool
229 0 : .dedicated_connection()
230 0 : .await
231 0 : .map_err(|e| DatabaseError::Migration(e.to_string()))?;
232 0 : let mut async_wrapper: AsyncConnectionWrapper<AsyncPgConnection> =
233 0 : AsyncConnectionWrapper::from(conn);
234 0 : tokio::task::spawn_blocking(move || {
235 0 : let mut retry_count = 0;
236 0 : loop {
237 0 : let result = HarnessWithOutput::write_to_stdout(&mut async_wrapper)
238 0 : .run_pending_migrations(MIGRATIONS)
239 0 : .map(|_| ())
240 0 : .map_err(|e| DatabaseError::Migration(e.to_string()));
241 0 : match result {
242 0 : Ok(r) => break Ok(r),
243 : Err(
244 0 : err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
245 0 : diesel::result::DatabaseErrorKind::SerializationFailure,
246 0 : _,
247 0 : )),
248 0 : ) => {
249 0 : retry_count += 1;
250 0 : if retry_count > MAX_RETRIES {
251 0 : tracing::error!(
252 0 : "Exceeded max retries on SerializationFailure errors: {err:?}"
253 : );
254 0 : break Err(err);
255 : } else {
256 : // Retry on serialization errors: these are expected, because even though our
257 : // transactions don't fight for the same rows, they will occasionally collide
258 : // on index pages (e.g. increment_generation for unrelated shards can collide)
259 0 : tracing::debug!(
260 0 : "Retrying transaction on serialization failure {err:?}"
261 : );
262 0 : continue;
263 : }
264 : }
265 0 : Err(e) => break Err(e),
266 : }
267 : }
268 0 : })
269 0 : .await
270 0 : .map_err(|e| DatabaseError::Migration(e.to_string()))??;
271 0 : Ok(())
272 0 : }
273 :
274 : /// Wraps `with_conn` in order to collect latency and error metrics
275 0 : async fn with_measured_conn<'a, 'b, F, R>(
276 0 : &self,
277 0 : op: DatabaseOperation,
278 0 : func: F,
279 0 : ) -> DatabaseResult<R>
280 0 : where
281 0 : F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
282 0 : + Send
283 0 : + std::marker::Sync
284 0 : + 'a,
285 0 : R: Send + 'b,
286 0 : {
287 0 : let latency = &METRICS_REGISTRY
288 0 : .metrics_group
289 0 : .storage_controller_database_query_latency;
290 0 : let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });
291 :
292 0 : let res = self.with_conn(func).await;
293 :
294 0 : if let Err(err) = &res {
295 0 : let error_counter = &METRICS_REGISTRY
296 0 : .metrics_group
297 0 : .storage_controller_database_query_error;
298 0 : error_counter.inc(DatabaseQueryErrorLabelGroup {
299 0 : error_type: err.error_label(),
300 0 : operation: op,
301 0 : })
302 0 : }
303 :
304 0 : res
305 0 : }
306 :
307 : /// Call the provided function with a Diesel database connection in a retry loop
308 0 : async fn with_conn<'a, 'b, F, R>(&self, func: F) -> DatabaseResult<R>
309 0 : where
310 0 : F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'b, 'r, DatabaseResult<R>>
311 0 : + Send
312 0 : + std::marker::Sync
313 0 : + 'a,
314 0 : R: Send + 'b,
315 0 : {
316 0 : let mut retry_count = 0;
317 : loop {
318 0 : let mut conn = self.connection_pool.get().await?;
319 0 : match conn
320 0 : .build_transaction()
321 0 : .serializable()
322 0 : .run(|c| func(c))
323 0 : .await
324 : {
325 0 : Ok(r) => break Ok(r),
326 : Err(
327 0 : err @ DatabaseError::Query(diesel::result::Error::DatabaseError(
328 0 : diesel::result::DatabaseErrorKind::SerializationFailure,
329 0 : _,
330 0 : )),
331 0 : ) => {
332 0 : retry_count += 1;
333 0 : if retry_count > MAX_RETRIES {
334 0 : tracing::error!(
335 0 : "Exceeded max retries on SerializationFailure errors: {err:?}"
336 : );
337 0 : break Err(err);
338 : } else {
339 : // Retry on serialization errors: these are expected, because even though our
340 : // transactions don't fight for the same rows, they will occasionally collide
341 : // on index pages (e.g. increment_generation for unrelated shards can collide)
342 0 : tracing::debug!("Retrying transaction on serialization failure {err:?}");
343 0 : continue;
344 : }
345 : }
346 0 : Err(e) => break Err(e),
347 : }
348 : }
349 0 : }
350 :
351 : /// When a node is first registered, persist it before using it for anything
352 0 : pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
353 0 : let np = &node.to_persistent();
354 0 : self.with_measured_conn(DatabaseOperation::InsertNode, move |conn| {
355 0 : Box::pin(async move {
356 0 : diesel::insert_into(crate::schema::nodes::table)
357 0 : .values(np)
358 0 : .execute(conn)
359 0 : .await?;
360 0 : Ok(())
361 0 : })
362 0 : })
363 0 : .await
364 0 : }
365 :
366 : /// At startup, populate the list of nodes which our shards may be placed on
367 0 : pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
368 0 : let nodes: Vec<NodePersistence> = self
369 0 : .with_measured_conn(DatabaseOperation::ListNodes, move |conn| {
370 0 : Box::pin(async move {
371 0 : Ok(crate::schema::nodes::table
372 0 : .load::<NodePersistence>(conn)
373 0 : .await?)
374 0 : })
375 0 : })
376 0 : .await?;
377 :
378 0 : tracing::info!("list_nodes: loaded {} nodes", nodes.len());
379 :
380 0 : Ok(nodes)
381 0 : }
382 :
383 0 : pub(crate) async fn update_node<V>(
384 0 : &self,
385 0 : input_node_id: NodeId,
386 0 : values: V,
387 0 : ) -> DatabaseResult<()>
388 0 : where
389 0 : V: diesel::AsChangeset<Target = crate::schema::nodes::table> + Clone + Send + Sync,
390 0 : V::Changeset: diesel::query_builder::QueryFragment<diesel::pg::Pg> + Send, // valid Postgres SQL
391 0 : {
392 : use crate::schema::nodes::dsl::*;
393 0 : let updated = self
394 0 : .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
395 0 : let values = values.clone();
396 0 : Box::pin(async move {
397 0 : let updated = diesel::update(nodes)
398 0 : .filter(node_id.eq(input_node_id.0 as i64))
399 0 : .set(values)
400 0 : .execute(conn)
401 0 : .await?;
402 0 : Ok(updated)
403 0 : })
404 0 : })
405 0 : .await?;
406 :
407 0 : if updated != 1 {
408 0 : Err(DatabaseError::Logical(format!(
409 0 : "Node {node_id:?} not found for update",
410 0 : )))
411 : } else {
412 0 : Ok(())
413 : }
414 0 : }
415 :
416 0 : pub(crate) async fn update_node_scheduling_policy(
417 0 : &self,
418 0 : input_node_id: NodeId,
419 0 : input_scheduling: NodeSchedulingPolicy,
420 0 : ) -> DatabaseResult<()> {
421 : use crate::schema::nodes::dsl::*;
422 0 : self.update_node(
423 0 : input_node_id,
424 0 : scheduling_policy.eq(String::from(input_scheduling)),
425 0 : )
426 0 : .await
427 0 : }
428 :
429 0 : pub(crate) async fn update_node_on_registration(
430 0 : &self,
431 0 : input_node_id: NodeId,
432 0 : input_https_port: Option<u16>,
433 0 : ) -> DatabaseResult<()> {
434 : use crate::schema::nodes::dsl::*;
435 0 : self.update_node(
436 0 : input_node_id,
437 0 : listen_https_port.eq(input_https_port.map(|x| x as i32)),
438 0 : )
439 0 : .await
440 0 : }
441 :
442 : /// At startup, load the high level state for shards, such as their config + policy. This will
443 : /// be enriched at runtime with state discovered on pageservers.
444 : ///
445 : /// We exclude shards configured to be detached. During startup, if we see any attached locations
446 : /// for such shards, they will automatically be detached as 'orphans'.
447 0 : pub(crate) async fn load_active_tenant_shards(
448 0 : &self,
449 0 : ) -> DatabaseResult<Vec<TenantShardPersistence>> {
450 : use crate::schema::tenant_shards::dsl::*;
451 0 : self.with_measured_conn(DatabaseOperation::ListTenantShards, move |conn| {
452 0 : Box::pin(async move {
453 0 : let query = tenant_shards.filter(
454 0 : placement_policy.ne(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
455 0 : );
456 0 : let result = query.load::<TenantShardPersistence>(conn).await?;
457 :
458 0 : Ok(result)
459 0 : })
460 0 : })
461 0 : .await
462 0 : }
463 :
464 : /// When restoring a previously detached tenant into memory, load it from the database
465 0 : pub(crate) async fn load_tenant(
466 0 : &self,
467 0 : filter_tenant_id: TenantId,
468 0 : ) -> DatabaseResult<Vec<TenantShardPersistence>> {
469 : use crate::schema::tenant_shards::dsl::*;
470 0 : self.with_measured_conn(DatabaseOperation::LoadTenant, move |conn| {
471 0 : Box::pin(async move {
472 0 : let query = tenant_shards.filter(tenant_id.eq(filter_tenant_id.to_string()));
473 0 : let result = query.load::<TenantShardPersistence>(conn).await?;
474 :
475 0 : Ok(result)
476 0 : })
477 0 : })
478 0 : .await
479 0 : }
480 :
481 : /// Tenants must be persisted before we schedule them for the first time. This enables us
482 : /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
483 0 : pub(crate) async fn insert_tenant_shards(
484 0 : &self,
485 0 : shards: Vec<TenantShardPersistence>,
486 0 : ) -> DatabaseResult<()> {
487 : use crate::schema::{metadata_health, tenant_shards};
488 :
489 0 : let now = chrono::Utc::now();
490 0 :
491 0 : let metadata_health_records = shards
492 0 : .iter()
493 0 : .map(|t| MetadataHealthPersistence {
494 0 : tenant_id: t.tenant_id.clone(),
495 0 : shard_number: t.shard_number,
496 0 : shard_count: t.shard_count,
497 0 : healthy: true,
498 0 : last_scrubbed_at: now,
499 0 : })
500 0 : .collect::<Vec<_>>();
501 0 :
502 0 : let shards = &shards;
503 0 : let metadata_health_records = &metadata_health_records;
504 0 : self.with_measured_conn(DatabaseOperation::InsertTenantShards, move |conn| {
505 0 : Box::pin(async move {
506 0 : diesel::insert_into(tenant_shards::table)
507 0 : .values(shards)
508 0 : .execute(conn)
509 0 : .await?;
510 :
511 0 : diesel::insert_into(metadata_health::table)
512 0 : .values(metadata_health_records)
513 0 : .execute(conn)
514 0 : .await?;
515 0 : Ok(())
516 0 : })
517 0 : })
518 0 : .await
519 0 : }
520 :
521 : /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
522 : /// the tenant from memory on this server.
523 0 : pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
524 : use crate::schema::tenant_shards::dsl::*;
525 0 : self.with_measured_conn(DatabaseOperation::DeleteTenant, move |conn| {
526 0 : Box::pin(async move {
527 0 : // `metadata_health` status (if exists) is also deleted based on the cascade behavior.
528 0 : diesel::delete(tenant_shards)
529 0 : .filter(tenant_id.eq(del_tenant_id.to_string()))
530 0 : .execute(conn)
531 0 : .await?;
532 0 : Ok(())
533 0 : })
534 0 : })
535 0 : .await
536 0 : }
537 :
538 0 : pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
539 : use crate::schema::nodes::dsl::*;
540 0 : self.with_measured_conn(DatabaseOperation::DeleteNode, move |conn| {
541 0 : Box::pin(async move {
542 0 : diesel::delete(nodes)
543 0 : .filter(node_id.eq(del_node_id.0 as i64))
544 0 : .execute(conn)
545 0 : .await?;
546 :
547 0 : Ok(())
548 0 : })
549 0 : })
550 0 : .await
551 0 : }
552 :
553 : /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
554 : /// batched increment of the generations of all tenants whose generation_pageserver is equal to
555 : /// the node that called /re-attach.
556 : #[tracing::instrument(skip_all, fields(node_id))]
557 : pub(crate) async fn re_attach(
558 : &self,
559 : input_node_id: NodeId,
560 : ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
561 : use crate::schema::nodes::dsl::{scheduling_policy, *};
562 : use crate::schema::tenant_shards::dsl::*;
563 : let updated = self
564 0 : .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
565 0 : Box::pin(async move {
566 0 : let rows_updated = diesel::update(tenant_shards)
567 0 : .filter(generation_pageserver.eq(input_node_id.0 as i64))
568 0 : .set(generation.eq(generation + 1))
569 0 : .execute(conn)
570 0 : .await?;
571 :
572 0 : tracing::info!("Incremented {} tenants' generations", rows_updated);
573 :
574 : // TODO: UPDATE+SELECT in one query
575 :
576 0 : let updated = tenant_shards
577 0 : .filter(generation_pageserver.eq(input_node_id.0 as i64))
578 0 : .select(TenantShardPersistence::as_select())
579 0 : .load(conn)
580 0 : .await?;
581 :
582 : // If the node went through a drain and restart phase before re-attaching,
583 : // then reset its node scheduling policy to active.
584 0 : diesel::update(nodes)
585 0 : .filter(node_id.eq(input_node_id.0 as i64))
586 0 : .filter(
587 0 : scheduling_policy
588 0 : .eq(String::from(NodeSchedulingPolicy::PauseForRestart))
589 0 : .or(scheduling_policy
590 0 : .eq(String::from(NodeSchedulingPolicy::Draining)))
591 0 : .or(scheduling_policy
592 0 : .eq(String::from(NodeSchedulingPolicy::Filling))),
593 0 : )
594 0 : .set(scheduling_policy.eq(String::from(NodeSchedulingPolicy::Active)))
595 0 : .execute(conn)
596 0 : .await?;
597 :
598 0 : Ok(updated)
599 0 : })
600 0 : })
601 : .await?;
602 :
603 : let mut result = HashMap::new();
604 : for tsp in updated {
605 : let tenant_shard_id = TenantShardId {
606 : tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
607 0 : .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
608 : shard_number: ShardNumber(tsp.shard_number as u8),
609 : shard_count: ShardCount::new(tsp.shard_count as u8),
610 : };
611 :
612 : let Some(g) = tsp.generation else {
613 : // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
614 : // we only set generation_pageserver when setting generation.
615 : return Err(DatabaseError::Logical(
616 : "Generation should always be set after incrementing".to_string(),
617 : ));
618 : };
619 : result.insert(tenant_shard_id, Generation::new(g as u32));
620 : }
621 :
622 : Ok(result)
623 : }
624 :
625 : /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
626 : /// advancing generation number. We also store the NodeId for which the generation was issued, so that in
627 : /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
628 0 : pub(crate) async fn increment_generation(
629 0 : &self,
630 0 : tenant_shard_id: TenantShardId,
631 0 : node_id: NodeId,
632 0 : ) -> anyhow::Result<Generation> {
633 : use crate::schema::tenant_shards::dsl::*;
634 0 : let updated = self
635 0 : .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
636 0 : Box::pin(async move {
637 0 : let updated = diesel::update(tenant_shards)
638 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
639 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
640 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
641 0 : .set((
642 0 : generation.eq(generation + 1),
643 0 : generation_pageserver.eq(node_id.0 as i64),
644 0 : ))
645 0 : // TODO: only returning() the generation column
646 0 : .returning(TenantShardPersistence::as_returning())
647 0 : .get_result(conn)
648 0 : .await?;
649 :
650 0 : Ok(updated)
651 0 : })
652 0 : })
653 0 : .await?;
654 :
655 : // Generation is always non-null in the result: if the generation column had been NULL, then we
656 : // should have experienced an SQL conflict error while executing a query that tries to increment it.
657 0 : debug_assert!(updated.generation.is_some());
658 0 : let Some(g) = updated.generation else {
659 0 : return Err(DatabaseError::Logical(
660 0 : "Generation should always be set after incrementing".to_string(),
661 0 : )
662 0 : .into());
663 : };
664 :
665 0 : Ok(Generation::new(g as u32))
666 0 : }
667 :
668 : /// When we want to call out to the running shards for a tenant, e.g. during timeline CRUD operations,
669 : /// we need to know where the shard is attached, _and_ the generation, so that we can re-check the generation
670 : /// afterwards to confirm that our timeline CRUD operation is truly persistent (it must have happened in the
671 : /// latest generation)
672 : ///
673 : /// If the tenant doesn't exist, an empty vector is returned.
674 : ///
675 : /// Output is sorted by shard number
676 0 : pub(crate) async fn tenant_generations(
677 0 : &self,
678 0 : filter_tenant_id: TenantId,
679 0 : ) -> Result<Vec<ShardGenerationState>, DatabaseError> {
680 : use crate::schema::tenant_shards::dsl::*;
681 0 : let rows = self
682 0 : .with_measured_conn(DatabaseOperation::TenantGenerations, move |conn| {
683 0 : Box::pin(async move {
684 0 : let result = tenant_shards
685 0 : .filter(tenant_id.eq(filter_tenant_id.to_string()))
686 0 : .select(TenantShardPersistence::as_select())
687 0 : .order(shard_number)
688 0 : .load(conn)
689 0 : .await?;
690 0 : Ok(result)
691 0 : })
692 0 : })
693 0 : .await?;
694 :
695 0 : Ok(rows
696 0 : .into_iter()
697 0 : .map(|p| ShardGenerationState {
698 0 : tenant_shard_id: p
699 0 : .get_tenant_shard_id()
700 0 : .expect("Corrupt tenant shard id in database"),
701 0 : generation: p.generation.map(|g| Generation::new(g as u32)),
702 0 : generation_pageserver: p.generation_pageserver.map(|n| NodeId(n as u64)),
703 0 : })
704 0 : .collect())
705 0 : }
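
/// Illustrative sketch of the re-check pattern described above (a hypothetical
/// helper, not called by the service): capture generations before a timeline
/// operation, then confirm they did not advance while it ran. Assumes the usual
/// `PartialEq` derives on `TenantShardId` and `Generation`.
#[allow(dead_code)]
async fn example_recheck_generations(
    &self,
    tenant_id: TenantId,
    before: Vec<ShardGenerationState>,
) -> DatabaseResult<bool> {
    let after = self.tenant_generations(tenant_id).await?;
    // If any shard's generation moved on, the operation may have landed in a
    // stale generation and is not known to be durable.
    Ok(before.len() == after.len()
        && before
            .iter()
            .zip(after.iter())
            .all(|(b, a)| b.tenant_shard_id == a.tenant_shard_id && b.generation == a.generation))
}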
706 :
707 : /// Read the generation number of specific tenant shards
708 : ///
709 : /// Output is unsorted. Output may not include values for all inputs, if they are missing in the database.
710 0 : pub(crate) async fn shard_generations(
711 0 : &self,
712 0 : mut tenant_shard_ids: impl Iterator<Item = &TenantShardId>,
713 0 : ) -> Result<Vec<(TenantShardId, Option<Generation>)>, DatabaseError> {
714 0 : let mut rows = Vec::with_capacity(tenant_shard_ids.size_hint().0);
715 :
716 : // We will chunk our input to avoid composing arbitrarily long `IN` clauses. Typically we are
717 : // called with a single-digit number of IDs, but in principle we could be called with tens
718 : // of thousands (all the shards on one pageserver) from the generation validation API.
719 0 : loop {
720 0 : // A modest hardcoded chunk size to handle typical cases in a single query but never generate particularly
721 0 : // large query strings.
722 0 : let chunk_ids = tenant_shard_ids.by_ref().take(32);
723 0 :
724 0 : // Compose a comma separated list of tuples for matching on (tenant_id, shard_number, shard_count)
725 0 : let in_clause = chunk_ids
726 0 : .map(|tsid| {
727 0 : format!(
728 0 : "('{}', {}, {})",
729 0 : tsid.tenant_id, tsid.shard_number.0, tsid.shard_count.0
730 0 : )
731 0 : })
732 0 : .join(",");
733 0 :
734 0 : // We are done when our iterator gives us nothing to filter on
735 0 : if in_clause.is_empty() {
736 0 : break;
737 0 : }
738 0 :
739 0 : let in_clause = &in_clause;
740 0 : let chunk_rows = self
741 0 : .with_measured_conn(DatabaseOperation::ShardGenerations, move |conn| {
742 0 : Box::pin(async move {
743 : // diesel doesn't support multi-column IN queries, so we compose raw SQL. No escaping is required because
744 : // the inputs are strongly typed and cannot carry any user-supplied raw string content.
745 0 : let result : Vec<TenantShardPersistence> = diesel::sql_query(
746 0 : format!("SELECT * from tenant_shards where (tenant_id, shard_number, shard_count) in ({in_clause});").as_str()
747 0 : ).load(conn).await?;
748 :
749 0 : Ok(result)
750 0 : })
751 0 : })
752 0 : .await?;
753 0 : rows.extend(chunk_rows.into_iter())
754 : }
755 :
756 0 : Ok(rows
757 0 : .into_iter()
758 0 : .map(|tsp| {
759 0 : (
760 0 : tsp.get_tenant_shard_id()
761 0 : .expect("Bad tenant ID in database"),
762 0 : tsp.generation.map(|g| Generation::new(g as u32)),
763 0 : )
764 0 : })
765 0 : .collect())
766 0 : }
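
/// Illustrative caller-side sketch for the chunked lookup above (a hypothetical
/// helper, not called by the service): validate a batch of claimed shard
/// generations, as a generation validation caller might. `claims` and the
/// validity rule here are assumptions.
#[allow(dead_code)]
async fn example_validate_generations(
    &self,
    claims: &[(TenantShardId, Generation)],
) -> DatabaseResult<Vec<(TenantShardId, bool)>> {
    // One database query per 32 shards, regardless of batch size.
    let latest: HashMap<_, _> = self
        .shard_generations(claims.iter().map(|(id, _)| id))
        .await?
        .into_iter()
        .collect();
    // A claim is valid only if the database still records exactly that generation.
    Ok(claims
        .iter()
        .map(|(id, claimed)| (*id, latest.get(id) == Some(&Some(*claimed))))
        .collect())
}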
767 :
768 : #[allow(non_local_definitions)]
769 : /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
770 : ///
771 : /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
772 : /// API: use [`Self::increment_generation`] instead. Setting the generation via this route is a one-time thing
773 : /// that we only do the first time a tenant is set to an attached policy via /location_config.
774 0 : pub(crate) async fn update_tenant_shard(
775 0 : &self,
776 0 : tenant: TenantFilter,
777 0 : input_placement_policy: Option<PlacementPolicy>,
778 0 : input_config: Option<TenantConfig>,
779 0 : input_generation: Option<Generation>,
780 0 : input_scheduling_policy: Option<ShardSchedulingPolicy>,
781 0 : ) -> DatabaseResult<()> {
782 : use crate::schema::tenant_shards::dsl::*;
783 :
784 0 : let tenant = &tenant;
785 0 : let input_placement_policy = &input_placement_policy;
786 0 : let input_config = &input_config;
787 0 : let input_generation = &input_generation;
788 0 : let input_scheduling_policy = &input_scheduling_policy;
789 0 : self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
790 0 : Box::pin(async move {
791 0 : let query = match tenant {
792 0 : TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
793 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
794 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
795 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
796 0 : .into_boxed(),
797 0 : TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
798 0 : .filter(tenant_id.eq(input_tenant_id.to_string()))
799 0 : .into_boxed(),
800 : };
801 :
802 : // Clear generation_pageserver if we are moving into a state where we won't have
803 : // any attached pageservers.
804 0 : let input_generation_pageserver = match input_placement_policy {
805 0 : None | Some(PlacementPolicy::Attached(_)) => None,
806 0 : Some(PlacementPolicy::Detached | PlacementPolicy::Secondary) => Some(None),
807 : };
808 :
809 0 : #[derive(AsChangeset)]
810 : #[diesel(table_name = crate::schema::tenant_shards)]
811 : struct ShardUpdate {
812 : generation: Option<i32>,
813 : placement_policy: Option<String>,
814 : config: Option<String>,
815 : scheduling_policy: Option<String>,
816 : generation_pageserver: Option<Option<i64>>,
817 : }
818 :
819 0 : let update = ShardUpdate {
820 0 : generation: input_generation.map(|g| g.into().unwrap() as i32),
821 0 : placement_policy: input_placement_policy
822 0 : .as_ref()
823 0 : .map(|p| serde_json::to_string(&p).unwrap()),
824 0 : config: input_config
825 0 : .as_ref()
826 0 : .map(|c| serde_json::to_string(&c).unwrap()),
827 0 : scheduling_policy: input_scheduling_policy
828 0 : .map(|p| serde_json::to_string(&p).unwrap()),
829 0 : generation_pageserver: input_generation_pageserver,
830 0 : };
831 0 :
832 0 : query.set(update).execute(conn).await?;
833 :
834 0 : Ok(())
835 0 : })
836 0 : })
837 0 : .await?;
838 :
839 0 : Ok(())
840 0 : }
841 :
842 : /// Note that passing None for a shard clears the preferred AZ (rather than leaving it unmodified)
843 0 : pub(crate) async fn set_tenant_shard_preferred_azs(
844 0 : &self,
845 0 : preferred_azs: Vec<(TenantShardId, Option<AvailabilityZone>)>,
846 0 : ) -> DatabaseResult<Vec<(TenantShardId, Option<AvailabilityZone>)>> {
847 : use crate::schema::tenant_shards::dsl::*;
848 :
849 0 : let preferred_azs = preferred_azs.as_slice();
850 0 : self.with_measured_conn(DatabaseOperation::SetPreferredAzs, move |conn| {
851 0 : Box::pin(async move {
852 0 : let mut shards_updated = Vec::default();
853 :
854 0 : for (tenant_shard_id, preferred_az) in preferred_azs.iter() {
855 0 : let updated = diesel::update(tenant_shards)
856 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
857 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
858 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
859 0 : .set(preferred_az_id.eq(preferred_az.as_ref().map(|az| az.0.clone())))
860 0 : .execute(conn)
861 0 : .await?;
862 :
863 0 : if updated == 1 {
864 0 : shards_updated.push((*tenant_shard_id, preferred_az.clone()));
865 0 : }
866 : }
867 :
868 0 : Ok(shards_updated)
869 0 : })
870 0 : })
871 0 : .await
872 0 : }
873 :
874 0 : pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
875 : use crate::schema::tenant_shards::dsl::*;
876 0 : self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
877 0 : Box::pin(async move {
878 0 : let updated = diesel::update(tenant_shards)
879 0 : .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
880 0 : .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
881 0 : .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
882 0 : .set((
883 0 : generation_pageserver.eq(Option::<i64>::None),
884 0 : placement_policy
885 0 : .eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
886 0 : ))
887 0 : .execute(conn)
888 0 : .await?;
889 :
890 0 : Ok(updated)
891 0 : })
892 0 : })
893 0 : .await?;
894 :
895 0 : Ok(())
896 0 : }
897 :
898 : // When we start shard splitting, we must durably mark the tenant so that
899 : // on restart, we know that we must go through recovery.
900 : //
901 : // We create the child shards here, so that they will be available for increment_generation calls
902 : // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
903 0 : pub(crate) async fn begin_shard_split(
904 0 : &self,
905 0 : old_shard_count: ShardCount,
906 0 : split_tenant_id: TenantId,
907 0 : parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
908 0 : ) -> DatabaseResult<()> {
909 : use crate::schema::tenant_shards::dsl::*;
910 0 : let parent_to_children = parent_to_children.as_slice();
911 0 : self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| {
912 0 : Box::pin(async move {
913 : // Mark parent shards as splitting
914 :
915 0 : let updated = diesel::update(tenant_shards)
916 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
917 0 : .filter(shard_count.eq(old_shard_count.literal() as i32))
918 0 : .set((splitting.eq(1),))
919 0 : .execute(conn).await?;
920 0 : if u8::try_from(updated)
921 0 : .map_err(|_| DatabaseError::Logical(
922 0 : format!("Overflow existing shard count {} while splitting", updated))
923 0 : )? != old_shard_count.count() {
924 : // Perhaps a deletion or another split raced with this attempt to split, mutating
925 : // the parent shards that we intend to split. In this case the split request should fail.
926 0 : return Err(DatabaseError::Logical(
927 0 : format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
928 0 : ));
929 0 : }
930 0 :
931 0 : // FIXME: spurious clone to sidestep closure move rules
932 0 : let parent_to_children = parent_to_children.to_vec();
933 :
934 : // Insert child shards
935 0 : for (parent_shard_id, children) in parent_to_children {
936 0 : let mut parent = crate::schema::tenant_shards::table
937 0 : .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
938 0 : .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
939 0 : .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
940 0 : .load::<TenantShardPersistence>(conn).await?;
941 0 : let parent = if parent.len() != 1 {
942 0 : return Err(DatabaseError::Logical(format!(
943 0 : "Parent shard {parent_shard_id} not found"
944 0 : )));
945 : } else {
946 0 : parent.pop().unwrap()
947 : };
948 0 : for mut shard in children {
949 : // Carry the parent's generation into the child
950 0 : shard.generation = parent.generation;
951 0 :
952 0 : debug_assert!(shard.splitting == SplitState::Splitting);
953 0 : diesel::insert_into(tenant_shards)
954 0 : .values(shard)
955 0 : .execute(conn).await?;
956 : }
957 : }
958 :
959 0 : Ok(())
960 0 : })
961 0 : })
962 0 : .await
963 0 : }
964 :
965 : // When we finish shard splitting, we must atomically clean up the old shards
966 : // and insert the new shards, and clear the splitting marker.
967 0 : pub(crate) async fn complete_shard_split(
968 0 : &self,
969 0 : split_tenant_id: TenantId,
970 0 : old_shard_count: ShardCount,
971 0 : new_shard_count: ShardCount,
972 0 : ) -> DatabaseResult<()> {
973 : use crate::schema::tenant_shards::dsl::*;
974 0 : self.with_measured_conn(DatabaseOperation::CompleteShardSplit, move |conn| {
975 0 : Box::pin(async move {
976 0 : // Sanity: child shards must still exist, as we're deleting parent shards
977 0 : let child_shards_query = tenant_shards
978 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
979 0 : .filter(shard_count.eq(new_shard_count.literal() as i32));
980 0 : let child_shards = child_shards_query
981 0 : .load::<TenantShardPersistence>(conn)
982 0 : .await?;
983 0 : if child_shards.len() != new_shard_count.count() as usize {
984 0 : return Err(DatabaseError::Logical(format!(
985 0 : "Unexpected child shard count {} while completing split to \
986 0 : count {new_shard_count:?} on tenant {split_tenant_id}",
987 0 : child_shards.len()
988 0 : )));
989 0 : }
990 0 :
991 0 : // Drop parent shards
992 0 : diesel::delete(tenant_shards)
993 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
994 0 : .filter(shard_count.eq(old_shard_count.literal() as i32))
995 0 : .execute(conn)
996 0 : .await?;
997 :
998 : // Clear sharding flag
999 0 : let updated = diesel::update(tenant_shards)
1000 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1001 0 : .filter(shard_count.eq(new_shard_count.literal() as i32))
1002 0 : .set((splitting.eq(0),))
1003 0 : .execute(conn)
1004 0 : .await?;
1005 0 : assert!(updated == new_shard_count.count() as usize);
1006 :
1007 0 : Ok(())
1008 0 : })
1009 0 : })
1010 0 : .await
1011 0 : }
1012 :
1013 : /// Used when the remote part of a shard split failed: we will revert the database state to have only
1014 : /// the parent shards, with SplitState::Idle.
1015 0 : pub(crate) async fn abort_shard_split(
1016 0 : &self,
1017 0 : split_tenant_id: TenantId,
1018 0 : new_shard_count: ShardCount,
1019 0 : ) -> DatabaseResult<AbortShardSplitStatus> {
1020 : use crate::schema::tenant_shards::dsl::*;
1021 0 : self.with_measured_conn(DatabaseOperation::AbortShardSplit, move |conn| {
1022 0 : Box::pin(async move {
1023 : // Clear the splitting state on parent shards
1024 0 : let updated = diesel::update(tenant_shards)
1025 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1026 0 : .filter(shard_count.ne(new_shard_count.literal() as i32))
1027 0 : .set((splitting.eq(0),))
1028 0 : .execute(conn)
1029 0 : .await?;
1030 :
1031 : // Parent shards are already gone: we cannot abort.
1032 0 : if updated == 0 {
1033 0 : return Ok(AbortShardSplitStatus::Complete);
1034 0 : }
1035 0 :
1036 0 : // Sanity check: if parent shards were present, their cardinality should
1037 0 : // be less than the number of child shards.
1038 0 : if updated >= new_shard_count.count() as usize {
1039 0 : return Err(DatabaseError::Logical(format!(
1040 0 : "Unexpected parent shard count {updated} while aborting split to \
1041 0 : count {new_shard_count:?} on tenant {split_tenant_id}"
1042 0 : )));
1043 0 : }
1044 0 :
1045 0 : // Erase child shards
1046 0 : diesel::delete(tenant_shards)
1047 0 : .filter(tenant_id.eq(split_tenant_id.to_string()))
1048 0 : .filter(shard_count.eq(new_shard_count.literal() as i32))
1049 0 : .execute(conn)
1050 0 : .await?;
1051 :
1052 0 : Ok(AbortShardSplitStatus::Aborted)
1053 0 : })
1054 0 : })
1055 0 : .await
1056 0 : }
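
/// Illustrative sketch of the overall split flow these three methods support
/// (a hypothetical helper; the real orchestration lives in the service layer,
/// and `remote_split_ok` stands in for the pageserver-side split outcome):
#[allow(dead_code)]
async fn example_split_flow(
    &self,
    tenant_id: TenantId,
    old_count: ShardCount,
    new_count: ShardCount,
    parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
    remote_split_ok: bool,
) -> DatabaseResult<()> {
    // Durably mark parents as splitting and insert children before touching any
    // pageserver, so that a restart mid-split can recover.
    self.begin_shard_split(old_count, tenant_id, parent_to_children)
        .await?;

    if remote_split_ok {
        self.complete_shard_split(tenant_id, old_count, new_count)
            .await
    } else {
        // Revert to parents; the status tells us whether the split had in fact
        // already been persisted as complete.
        let _status = self.abort_shard_split(tenant_id, new_count).await?;
        Ok(())
    }
}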
1057 :
1058 : /// Stores all the latest metadata health updates durably. Updates existing entry on conflict.
1059 : ///
1060 : /// **Correctness:** `metadata_health_updates` should all belong to the tenant shards managed by the storage controller.
1061 : #[allow(dead_code)]
1062 0 : pub(crate) async fn update_metadata_health_records(
1063 0 : &self,
1064 0 : healthy_records: Vec<MetadataHealthPersistence>,
1065 0 : unhealthy_records: Vec<MetadataHealthPersistence>,
1066 0 : now: chrono::DateTime<chrono::Utc>,
1067 0 : ) -> DatabaseResult<()> {
1068 : use crate::schema::metadata_health::dsl::*;
1069 :
1070 0 : let healthy_records = healthy_records.as_slice();
1071 0 : let unhealthy_records = unhealthy_records.as_slice();
1072 0 : self.with_measured_conn(DatabaseOperation::UpdateMetadataHealth, move |conn| {
1073 0 : Box::pin(async move {
1074 0 : diesel::insert_into(metadata_health)
1075 0 : .values(healthy_records)
1076 0 : .on_conflict((tenant_id, shard_number, shard_count))
1077 0 : .do_update()
1078 0 : .set((healthy.eq(true), last_scrubbed_at.eq(now)))
1079 0 : .execute(conn)
1080 0 : .await?;
1081 :
1082 0 : diesel::insert_into(metadata_health)
1083 0 : .values(unhealthy_records)
1084 0 : .on_conflict((tenant_id, shard_number, shard_count))
1085 0 : .do_update()
1086 0 : .set((healthy.eq(false), last_scrubbed_at.eq(now)))
1087 0 : .execute(conn)
1088 0 : .await?;
1089 0 : Ok(())
1090 0 : })
1091 0 : })
1092 0 : .await
1093 0 : }
1094 :
1095 : /// Lists all the metadata health records.
1096 : #[allow(dead_code)]
1097 0 : pub(crate) async fn list_metadata_health_records(
1098 0 : &self,
1099 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1100 0 : self.with_measured_conn(DatabaseOperation::ListMetadataHealth, move |conn| {
1101 0 : Box::pin(async {
1102 0 : Ok(crate::schema::metadata_health::table
1103 0 : .load::<MetadataHealthPersistence>(conn)
1104 0 : .await?)
1105 0 : })
1106 0 : })
1107 0 : .await
1108 0 : }
1109 :
1110 : /// Lists all the metadata health records that are unhealthy.
1111 : #[allow(dead_code)]
1112 0 : pub(crate) async fn list_unhealthy_metadata_health_records(
1113 0 : &self,
1114 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1115 : use crate::schema::metadata_health::dsl::*;
1116 0 : self.with_measured_conn(
1117 0 : DatabaseOperation::ListMetadataHealthUnhealthy,
1118 0 : move |conn| {
1119 0 : Box::pin(async {
1120 0 : DatabaseResult::Ok(
1121 0 : crate::schema::metadata_health::table
1122 0 : .filter(healthy.eq(false))
1123 0 : .load::<MetadataHealthPersistence>(conn)
1124 0 : .await?,
1125 : )
1126 0 : })
1127 0 : },
1128 0 : )
1129 0 : .await
1130 0 : }
1131 :
1132 : /// Lists all the metadata health records that have not been updated since an `earlier` time.
1133 : #[allow(dead_code)]
1134 0 : pub(crate) async fn list_outdated_metadata_health_records(
1135 0 : &self,
1136 0 : earlier: chrono::DateTime<chrono::Utc>,
1137 0 : ) -> DatabaseResult<Vec<MetadataHealthPersistence>> {
1138 : use crate::schema::metadata_health::dsl::*;
1139 :
1140 0 : self.with_measured_conn(DatabaseOperation::ListMetadataHealthOutdated, move |conn| {
1141 0 : Box::pin(async move {
1142 0 : let query = metadata_health.filter(last_scrubbed_at.lt(earlier));
1143 0 : let res = query.load::<MetadataHealthPersistence>(conn).await?;
1144 :
1145 0 : Ok(res)
1146 0 : })
1147 0 : })
1148 0 : .await
1149 0 : }
1150 :
1151 : /// Get the current entry from the `leader` table if one exists.
1152 : /// It is an error for the table to contain more than one entry.
1153 0 : pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
1154 0 : let mut leader: Vec<ControllerPersistence> = self
1155 0 : .with_measured_conn(DatabaseOperation::GetLeader, move |conn| {
1156 0 : Box::pin(async move {
1157 0 : Ok(crate::schema::controllers::table
1158 0 : .load::<ControllerPersistence>(conn)
1159 0 : .await?)
1160 0 : })
1161 0 : })
1162 0 : .await?;
1163 :
1164 0 : if leader.len() > 1 {
1165 0 : return Err(DatabaseError::Logical(format!(
1166 0 : "More than one entry present in the leader table: {leader:?}"
1167 0 : )));
1168 0 : }
1169 0 :
1170 0 : Ok(leader.pop())
1171 0 : }
1172 :
1173 : /// Update the new leader with compare-exchange semantics. If `prev` does not
1174 : /// match the current leader entry, then the update is treated as a failure.
1175 : /// When `prev` is not specified, the update is forced.
1176 0 : pub(crate) async fn update_leader(
1177 0 : &self,
1178 0 : prev: Option<ControllerPersistence>,
1179 0 : new: ControllerPersistence,
1180 0 : ) -> DatabaseResult<()> {
1181 : use crate::schema::controllers::dsl::*;
1182 :
1183 0 : let updated = self
1184 0 : .with_measured_conn(DatabaseOperation::UpdateLeader, move |conn| {
1185 0 : let prev = prev.clone();
1186 0 : let new = new.clone();
1187 0 : Box::pin(async move {
1188 0 : let updated = match &prev {
1189 0 : Some(prev) => {
1190 0 : diesel::update(controllers)
1191 0 : .filter(address.eq(prev.address.clone()))
1192 0 : .filter(started_at.eq(prev.started_at))
1193 0 : .set((
1194 0 : address.eq(new.address.clone()),
1195 0 : started_at.eq(new.started_at),
1196 0 : ))
1197 0 : .execute(conn)
1198 0 : .await?
1199 : }
1200 : None => {
1201 0 : diesel::insert_into(controllers)
1202 0 : .values(new.clone())
1203 0 : .execute(conn)
1204 0 : .await?
1205 : }
1206 : };
1207 :
1208 0 : Ok(updated)
1209 0 : })
1210 0 : })
1211 0 : .await?;
1212 :
1213 0 : if updated == 0 {
1214 0 : return Err(DatabaseError::Logical(
1215 0 : "Leader table update failed".to_string(),
1216 0 : ));
1217 0 : }
1218 0 :
1219 0 : Ok(())
1220 0 : }
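
/// Illustrative sketch of how the compare-exchange above is used during a
/// leadership takeover (a hypothetical helper, not called by the service):
#[allow(dead_code)]
async fn example_become_leader(&self, new: ControllerPersistence) -> DatabaseResult<()> {
    // Read the current leader entry, then install ourselves only if it has not
    // changed in the meantime; a concurrent takeover makes the UPDATE match
    // zero rows and surfaces as a DatabaseError::Logical.
    let prev = self.get_leader().await?;
    self.update_leader(prev, new).await
}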
1221 :
1222 : /// At startup, populate the list of safekeepers which our timelines may be placed on
1223 0 : pub(crate) async fn list_safekeepers(&self) -> DatabaseResult<Vec<SafekeeperPersistence>> {
1224 0 : let safekeepers: Vec<SafekeeperPersistence> = self
1225 0 : .with_measured_conn(DatabaseOperation::ListSafekeepers, move |conn| {
1226 0 : Box::pin(async move {
1227 0 : Ok(crate::schema::safekeepers::table
1228 0 : .load::<SafekeeperPersistence>(conn)
1229 0 : .await?)
1230 0 : })
1231 0 : })
1232 0 : .await?;
1233 :
1234 0 : tracing::info!("list_safekeepers: loaded {} nodes", safekeepers.len());
1235 :
1236 0 : Ok(safekeepers)
1237 0 : }
1238 :
1239 0 : pub(crate) async fn safekeeper_upsert(
1240 0 : &self,
1241 0 : record: SafekeeperUpsert,
1242 0 : ) -> Result<(), DatabaseError> {
1243 : use crate::schema::safekeepers::dsl::*;
1244 :
1245 0 : self.with_conn(move |conn| {
1246 0 : let record = record.clone();
1247 0 : Box::pin(async move {
1248 0 : let bind = record
1249 0 : .as_insert_or_update()
1250 0 : .map_err(|e| DatabaseError::Logical(format!("{e}")))?;
1251 :
1252 0 : let inserted_updated = diesel::insert_into(safekeepers)
1253 0 : .values(&bind)
1254 0 : .on_conflict(id)
1255 0 : .do_update()
1256 0 : .set(&bind)
1257 0 : .execute(conn)
1258 0 : .await?;
1259 :
1260 0 : if inserted_updated != 1 {
1261 0 : return Err(DatabaseError::Logical(format!(
1262 0 : "unexpected number of rows ({})",
1263 0 : inserted_updated
1264 0 : )));
1265 0 : }
1266 0 :
1267 0 : Ok(())
1268 0 : })
1269 0 : })
1270 0 : .await
1271 0 : }
1272 :
1273 0 : pub(crate) async fn set_safekeeper_scheduling_policy(
1274 0 : &self,
1275 0 : id_: i64,
1276 0 : scheduling_policy_: SkSchedulingPolicy,
1277 0 : ) -> Result<(), DatabaseError> {
1278 : use crate::schema::safekeepers::dsl::*;
1279 :
1280 0 : self.with_conn(move |conn| {
1281 0 : Box::pin(async move {
1282 0 : #[derive(Insertable, AsChangeset)]
1283 : #[diesel(table_name = crate::schema::safekeepers)]
1284 : struct UpdateSkSchedulingPolicy<'a> {
1285 : id: i64,
1286 : scheduling_policy: &'a str,
1287 : }
1288 0 : let scheduling_policy_ = String::from(scheduling_policy_);
1289 :
1290 0 : let rows_affected = diesel::update(safekeepers.filter(id.eq(id_)))
1291 0 : .set(scheduling_policy.eq(scheduling_policy_))
1292 0 : .execute(conn)
1293 0 : .await?;
1294 :
1295 0 : if rows_affected != 1 {
1296 0 : return Err(DatabaseError::Logical(format!(
1297 0 : "unexpected number of rows ({rows_affected})",
1298 0 : )));
1299 0 : }
1300 0 :
1301 0 : Ok(())
1302 0 : })
1303 0 : })
1304 0 : .await
1305 0 : }
1306 :
1307 : /// Persist a timeline. Returns whether the timeline was newly inserted; if it wasn't, we haven't done any writes.
1308 0 : pub(crate) async fn insert_timeline(&self, entry: TimelinePersistence) -> DatabaseResult<bool> {
1309 : use crate::schema::timelines;
1310 :
1311 0 : let entry = &entry;
1312 0 : self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
1313 0 : Box::pin(async move {
1314 0 : let inserted_updated = diesel::insert_into(timelines::table)
1315 0 : .values(entry)
1316 0 : .on_conflict((timelines::tenant_id, timelines::timeline_id))
1317 0 : .do_nothing()
1318 0 : .execute(conn)
1319 0 : .await?;
1320 :
1321 0 : match inserted_updated {
1322 0 : 0 => Ok(false),
1323 0 : 1 => Ok(true),
1324 0 : _ => Err(DatabaseError::Logical(format!(
1325 0 : "unexpected number of rows ({})",
1326 0 : inserted_updated
1327 0 : ))),
1328 : }
1329 0 : })
1330 0 : })
1331 0 : .await
1332 0 : }
1333 :
1334 : /// Load timeline from db. Returns `None` if not present.
1335 0 : pub(crate) async fn get_timeline(
1336 0 : &self,
1337 0 : tenant_id: TenantId,
1338 0 : timeline_id: TimelineId,
1339 0 : ) -> DatabaseResult<Option<TimelinePersistence>> {
1340 : use crate::schema::timelines::dsl;
1341 :
1342 0 : let tenant_id = &tenant_id;
1343 0 : let timeline_id = &timeline_id;
1344 0 : let timeline_from_db = self
1345 0 : .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
1346 0 : Box::pin(async move {
1347 0 : let mut from_db: Vec<TimelineFromDb> = dsl::timelines
1348 0 : .filter(
1349 0 : dsl::tenant_id
1350 0 : .eq(&tenant_id.to_string())
1351 0 : .and(dsl::timeline_id.eq(&timeline_id.to_string())),
1352 0 : )
1353 0 : .load(conn)
1354 0 : .await?;
1355 0 : if from_db.is_empty() {
1356 0 : return Ok(None);
1357 0 : }
1358 0 : if from_db.len() != 1 {
1359 0 : return Err(DatabaseError::Logical(format!(
1360 0 : "unexpected number of rows ({})",
1361 0 : from_db.len()
1362 0 : )));
1363 0 : }
1364 0 :
1365 0 : Ok(Some(from_db.pop().unwrap().into_persistence()))
1366 0 : })
1367 0 : })
1368 0 : .await?;
1369 :
1370 0 : Ok(timeline_from_db)
1371 0 : }
1372 :
1373 : /// Set `delete_at` for the given timeline
1374 0 : pub(crate) async fn timeline_set_deleted_at(
1375 0 : &self,
1376 0 : tenant_id: TenantId,
1377 0 : timeline_id: TimelineId,
1378 0 : ) -> DatabaseResult<()> {
1379 : use crate::schema::timelines;
1380 :
1381 0 : let deletion_time = chrono::Local::now().to_utc();
1382 0 : self.with_measured_conn(DatabaseOperation::InsertTimeline, move |conn| {
1383 0 : Box::pin(async move {
1384 0 : let updated = diesel::update(timelines::table)
1385 0 : .filter(timelines::tenant_id.eq(tenant_id.to_string()))
1386 0 : .filter(timelines::timeline_id.eq(timeline_id.to_string()))
1387 0 : .set(timelines::deleted_at.eq(Some(deletion_time)))
1388 0 : .execute(conn)
1389 0 : .await?;
1390 :
1391 0 : match updated {
1392 0 : 0 => Ok(()),
1393 0 : 1 => Ok(()),
1394 0 : _ => Err(DatabaseError::Logical(format!(
1395 0 : "unexpected number of rows ({})",
1396 0 : updated
1397 0 : ))),
1398 : }
1399 0 : })
1400 0 : })
1401 0 : .await
1402 0 : }
1403 :
1404 : /// Delete the timeline from the db.
1405 : ///
1406 : /// Only works if `deleted_at` is set, so you should call [`Self::timeline_set_deleted_at`] before.
1407 0 : pub(crate) async fn delete_timeline(
1408 0 : &self,
1409 0 : tenant_id: TenantId,
1410 0 : timeline_id: TimelineId,
1411 0 : ) -> DatabaseResult<()> {
1412 : use crate::schema::timelines::dsl;
1413 :
1414 0 : let tenant_id = &tenant_id;
1415 0 : let timeline_id = &timeline_id;
1416 0 : self.with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
1417 0 : Box::pin(async move {
1418 0 : diesel::delete(dsl::timelines)
1419 0 : .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
1420 0 : .filter(dsl::timeline_id.eq(&timeline_id.to_string()))
1421 0 : .filter(dsl::deleted_at.is_not_null())
1422 0 : .execute(conn)
1423 0 : .await?;
1424 0 : Ok(())
1425 0 : })
1426 0 : })
1427 0 : .await?;
1428 :
1429 0 : Ok(())
1430 0 : }
1431 :
1432 : /// Loads all timelines for the given tenant from the database.
1433 0 : pub(crate) async fn list_timelines_for_tenant(
1434 0 : &self,
1435 0 : tenant_id: TenantId,
1436 0 : ) -> DatabaseResult<Vec<TimelinePersistence>> {
1437 : use crate::schema::timelines::dsl;
1438 :
1439 0 : let tenant_id = &tenant_id;
1440 0 : let timelines = self
1441 0 : .with_measured_conn(DatabaseOperation::GetTimeline, move |conn| {
1442 0 : Box::pin(async move {
1443 0 : let timelines: Vec<TimelineFromDb> = dsl::timelines
1444 0 : .filter(dsl::tenant_id.eq(&tenant_id.to_string()))
1445 0 : .load(conn)
1446 0 : .await?;
1447 0 : Ok(timelines)
1448 0 : })
1449 0 : })
1450 0 : .await?;
1451 :
1452 0 : let timelines = timelines
1453 0 : .into_iter()
1454 0 : .map(TimelineFromDb::into_persistence)
1455 0 : .collect();
1456 0 : Ok(timelines)
1457 0 : }
1458 :
1459 : /// Persist a pending op. Returns whether it was newly inserted; if it wasn't, we haven't done any writes.
1460 0 : pub(crate) async fn insert_pending_op(
1461 0 : &self,
1462 0 : entry: TimelinePendingOpPersistence,
1463 0 : ) -> DatabaseResult<bool> {
1464 : use crate::schema::safekeeper_timeline_pending_ops as skpo;
1465 : // This overrides the `filter` fn used in other functions, so contain the mayhem via a function-local use
1466 : use diesel::query_dsl::methods::FilterDsl;
1467 :
1468 0 : let entry = &entry;
1469 0 : self.with_measured_conn(DatabaseOperation::InsertTimelineReconcile, move |conn| {
1470 0 : Box::pin(async move {
1471 : // For simplicity it makes sense to keep only the last operation
1472 : // per (tenant, timeline, sk) tuple: if we migrated a timeline
1473 : // from a node and are adding it back, it is not necessary to remove
1474 : // data on it. Hence, generation is not part of the primary key and
1475 : // we override any rows with lower generations here.
1476 0 : let inserted_updated = diesel::insert_into(skpo::table)
1477 0 : .values(entry)
1478 0 : .on_conflict((skpo::tenant_id, skpo::timeline_id, skpo::sk_id))
1479 0 : .do_update()
1480 0 : .set(entry)
1481 0 : .filter(skpo::generation.lt(entry.generation))
1482 0 : .execute(conn)
1483 0 : .await?;
1484 :
1485 0 : match inserted_updated {
1486 0 : 0 => Ok(false),
1487 0 : 1 => Ok(true),
1488 0 : _ => Err(DatabaseError::Logical(format!(
1489 0 : "unexpected number of rows ({})",
1490 0 : inserted_updated
1491 0 : ))),
1492 : }
1493 0 : })
1494 0 : })
1495 0 : .await
1496 0 : }
1497 : /// Remove persisted pending op.
1498 0 : pub(crate) async fn remove_pending_op(
1499 0 : &self,
1500 0 : tenant_id: TenantId,
1501 0 : timeline_id: Option<TimelineId>,
1502 0 : sk_id: NodeId,
1503 0 : generation: u32,
1504 0 : ) -> DatabaseResult<()> {
1505 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1506 :
1507 0 : let tenant_id = &tenant_id;
1508 0 : let timeline_id = &timeline_id;
1509 0 : self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
1510 0 : let timeline_id_str = timeline_id.map(|tid| tid.to_string()).unwrap_or_default();
1511 0 : Box::pin(async move {
1512 0 : diesel::delete(dsl::safekeeper_timeline_pending_ops)
1513 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1514 0 : .filter(dsl::timeline_id.eq(timeline_id_str))
1515 0 : .filter(dsl::sk_id.eq(sk_id.0 as i64))
1516 0 : .filter(dsl::generation.eq(generation as i32))
1517 0 : .execute(conn)
1518 0 : .await?;
1519 0 : Ok(())
1520 0 : })
1521 0 : })
1522 0 : .await
1523 0 : }
1524 :
1525 : /// Load pending operations from db, joined together with timeline data.
1526 0 : pub(crate) async fn list_pending_ops_with_timelines(
1527 0 : &self,
1528 0 : ) -> DatabaseResult<Vec<(TimelinePendingOpPersistence, Option<TimelinePersistence>)>> {
1529 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1530 : use crate::schema::timelines;
1531 :
1532 0 : let timeline_from_db = self
1533 0 : .with_measured_conn(
1534 0 : DatabaseOperation::ListTimelineReconcileStartup,
1535 0 : move |conn| {
1536 0 : Box::pin(async move {
1537 0 : let from_db: Vec<(TimelinePendingOpPersistence, Option<TimelineFromDb>)> =
1538 0 : dsl::safekeeper_timeline_pending_ops
1539 0 : .left_join(
1540 0 : timelines::table.on(timelines::tenant_id
1541 0 : .eq(dsl::tenant_id)
1542 0 : .and(timelines::timeline_id.eq(dsl::timeline_id))),
1543 0 : )
1544 0 : .select((
1545 0 : TimelinePendingOpPersistence::as_select(),
1546 0 : Option::<TimelineFromDb>::as_select(),
1547 0 : ))
1548 0 : .load(conn)
1549 0 : .await?;
1550 0 : Ok(from_db)
1551 0 : })
1552 0 : },
1553 0 : )
1554 0 : .await?;
1555 :
1556 0 : Ok(timeline_from_db
1557 0 : .into_iter()
1558 0 : .map(|(op, tl_opt)| (op, tl_opt.map(|tl_opt| tl_opt.into_persistence())))
1559 0 : .collect())
1560 0 : }
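// Illustrative shape of the query above (a sketch, not diesel's exact output):
//
//   SELECT ops.*, t.*
//   FROM safekeeper_timeline_pending_ops ops
//   LEFT JOIN timelines t
//     ON t.tenant_id = ops.tenant_id AND t.timeline_id = ops.timeline_id;
//
// The LEFT JOIN is why the timeline half of each returned pair is an `Option`.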
1561 : /// List pending operations for a given timeline, including tenant-global ones (stored with an empty timeline_id).
1562 0 : pub(crate) async fn list_pending_ops_for_timeline(
1563 0 : &self,
1564 0 : tenant_id: TenantId,
1565 0 : timeline_id: TimelineId,
1566 0 : ) -> DatabaseResult<Vec<TimelinePendingOpPersistence>> {
1567 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1568 :
1569 0 : let timelines_from_db = self
1570 0 : .with_measured_conn(DatabaseOperation::ListTimelineReconcile, move |conn| {
1571 0 : Box::pin(async move {
1572 0 : let from_db: Vec<TimelinePendingOpPersistence> =
1573 0 : dsl::safekeeper_timeline_pending_ops
1574 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1575 0 : .filter(
1576 0 : dsl::timeline_id
1577 0 : .eq(timeline_id.to_string())
1578 0 : .or(dsl::timeline_id.eq("")),
1579 0 : )
1580 0 : .load(conn)
1581 0 : .await?;
1582 0 : Ok(from_db)
1583 0 : })
1584 0 : })
1585 0 : .await?;
1586 :
1587 0 : Ok(timelines_from_db)
1588 0 : }
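// Roughly (illustrative):
//
//   SELECT * FROM safekeeper_timeline_pending_ops
//   WHERE tenant_id = $1 AND (timeline_id = $2 OR timeline_id = '');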
1589 :
1590 : /// Delete all pending ops for the given timeline (`None` targets tenant-global ops, stored with an empty timeline_id).
1591 : ///
1592 : /// Use this only at timeline deletion; otherwise use the generation-based APIs.
1593 0 : pub(crate) async fn remove_pending_ops_for_timeline(
1594 0 : &self,
1595 0 : tenant_id: TenantId,
1596 0 : timeline_id: Option<TimelineId>,
1597 0 : ) -> DatabaseResult<()> {
1598 : use crate::schema::safekeeper_timeline_pending_ops::dsl;
1599 :
1600 0 : let tenant_id = &tenant_id;
1601 0 : let timeline_id = &timeline_id;
1602 0 : self.with_measured_conn(DatabaseOperation::RemoveTimelineReconcile, move |conn| {
1603 0 : let timeline_id_str = timeline_id.map(|tid| tid.to_string()).unwrap_or_default();
1604 0 : Box::pin(async move {
1605 0 : diesel::delete(dsl::safekeeper_timeline_pending_ops)
1606 0 : .filter(dsl::tenant_id.eq(tenant_id.to_string()))
1607 0 : .filter(dsl::timeline_id.eq(timeline_id_str))
1608 0 : .execute(conn)
1609 0 : .await?;
1610 0 : Ok(())
1611 0 : })
1612 0 : })
1613 0 : .await?;
1614 :
1615 0 : Ok(())
1616 0 : }
1617 : }
1618 :
1619 0 : pub(crate) fn load_certs() -> anyhow::Result<Arc<rustls::RootCertStore>> {
1620 0 : let der_certs = rustls_native_certs::load_native_certs();
1621 0 :
1622 0 : if !der_certs.errors.is_empty() {
1623 0 : anyhow::bail!("could not parse certificates: {:?}", der_certs.errors);
1624 0 : }
1625 0 :
1626 0 : let mut store = rustls::RootCertStore::empty();
1627 0 : store.add_parsable_certificates(der_certs.certs);
1628 0 : Ok(Arc::new(store))
1629 0 : }
1630 :
1631 : #[derive(Debug)]
1632 : /// A verifier that accepts all certificates (but still logs any validation error)
1633 : struct AcceptAll(Arc<WebPkiServerVerifier>);
1634 : impl ServerCertVerifier for AcceptAll {
1635 0 : fn verify_server_cert(
1636 0 : &self,
1637 0 : end_entity: &rustls::pki_types::CertificateDer<'_>,
1638 0 : intermediates: &[rustls::pki_types::CertificateDer<'_>],
1639 0 : server_name: &rustls::pki_types::ServerName<'_>,
1640 0 : ocsp_response: &[u8],
1641 0 : now: rustls::pki_types::UnixTime,
1642 0 : ) -> Result<ServerCertVerified, rustls::Error> {
1643 0 : let r =
1644 0 : self.0
1645 0 : .verify_server_cert(end_entity, intermediates, server_name, ocsp_response, now);
1646 0 : if let Err(err) = r {
1647 0 : tracing::info!(
1648 : ?server_name,
1649 0 : "ignoring db connection TLS validation error: {err:?}"
1650 : );
1651 0 : return Ok(ServerCertVerified::assertion());
1652 0 : }
1653 0 : r
1654 0 : }
1655 0 : fn verify_tls12_signature(
1656 0 : &self,
1657 0 : message: &[u8],
1658 0 : cert: &rustls::pki_types::CertificateDer<'_>,
1659 0 : dss: &rustls::DigitallySignedStruct,
1660 0 : ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
1661 0 : self.0.verify_tls12_signature(message, cert, dss)
1662 0 : }
1663 0 : fn verify_tls13_signature(
1664 0 : &self,
1665 0 : message: &[u8],
1666 0 : cert: &rustls::pki_types::CertificateDer<'_>,
1667 0 : dss: &rustls::DigitallySignedStruct,
1668 0 : ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
1669 0 : self.0.verify_tls13_signature(message, cert, dss)
1670 0 : }
1671 0 : fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
1672 0 : self.0.supported_verify_schemes()
1673 0 : }
1674 : }
1675 :
1676 : /// Loads the root certificates and constructs a client config suitable for connecting.
1677 : /// This function is blocking.
1678 0 : fn client_config_with_root_certs() -> anyhow::Result<rustls::ClientConfig> {
1679 0 : let client_config =
1680 0 : rustls::ClientConfig::builder_with_provider(Arc::new(ring::default_provider()))
1681 0 : .with_safe_default_protocol_versions()
1682 0 : .expect("ring should support the default protocol versions");
1683 : static DO_CERT_CHECKS: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
1684 0 : let do_cert_checks =
1685 0 : DO_CERT_CHECKS.get_or_init(|| std::env::var("STORCON_DB_CERT_CHECKS").is_ok());
1686 0 : Ok(if *do_cert_checks {
1687 0 : client_config
1688 0 : .with_root_certificates(load_certs()?)
1689 0 : .with_no_client_auth()
1690 : } else {
1691 0 : let verifier = AcceptAll(
1692 : WebPkiServerVerifier::builder_with_provider(
1693 0 : load_certs()?,
1694 0 : Arc::new(ring::default_provider()),
1695 0 : )
1696 0 : .build()?,
1697 : );
1698 0 : client_config
1699 0 : .dangerous()
1700 0 : .with_custom_certificate_verifier(Arc::new(verifier))
1701 0 : .with_no_client_auth()
1702 : })
1703 0 : }
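// Note: strict verification is opt-in. Setting STORCON_DB_CERT_CHECKS in the
// environment (to any value; only its presence is checked) selects the
// verifying branch above; otherwise the logging-only `AcceptAll` verifier is used.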
1704 :
1705 0 : fn establish_connection_rustls(config: &str) -> BoxFuture<ConnectionResult<AsyncPgConnection>> {
1706 0 : let fut = async {
1707 : // We first set up the way we want rustls to work.
1708 0 : let rustls_config = client_config_with_root_certs()
1709 0 : .map_err(|err| ConnectionError::BadConnection(format!("{err:?}")))?;
1710 0 : let tls = tokio_postgres_rustls::MakeRustlsConnect::new(rustls_config);
1711 0 : let (client, conn) = tokio_postgres::connect(config, tls)
1712 0 : .await
1713 0 : .map_err(|e| ConnectionError::BadConnection(e.to_string()))?;
1714 :
1715 0 : AsyncPgConnection::try_from_client_and_connection(client, conn).await
1716 0 : };
1717 0 : fut.boxed()
1718 0 : }
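// A minimal sketch of how a setup hook like this is typically wired into
// diesel-async's bb8 pool (names and error handling here are illustrative; the
// real pool construction lives elsewhere in this module):
//
// async fn make_pool(url: &str) -> anyhow::Result<Pool<AsyncPgConnection>> {
//     let mut config = ManagerConfig::default();
//     config.custom_setup = Box::new(establish_connection_rustls);
//     let mgr = AsyncDieselConnectionManager::<AsyncPgConnection>::new_with_config(url, config);
//     Ok(Pool::builder().build(mgr).await?)
// }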
1719 :
1720 : #[cfg_attr(test, test)]
1721 1 : fn test_config_debug_censors_password() {
1722 1 : let has_pw =
1723 1 : "host=/var/lib/postgresql,localhost port=1234 user=specialuser password='NOT ALLOWED TAG'";
1724 1 : let has_pw_cfg = has_pw.parse::<tokio_postgres::Config>().unwrap();
1725 1 : assert!(format!("{has_pw_cfg:?}").contains("specialuser"));
1726 : // Ensure that the password is not leaked by the debug impl
1727 1 : assert!(!format!("{has_pw_cfg:?}").contains("NOT ALLOWED TAG"));
1728 1 : }
1729 :
1730 0 : fn log_postgres_connstr_info(config_str: &str) -> anyhow::Result<()> {
1731 0 : let config = config_str
1732 0 : .parse::<tokio_postgres::Config>()
1733 0 : .map_err(|_e| anyhow::anyhow!("Couldn't parse config str"))?;
1734 : // We use debug formatting here, and use a unit test to ensure that we don't leak the password.
1735 : // To make extra sure the test gets run, run it every time the function is called
1736 : // (this is rather cold code, we can afford it).
1737 : #[cfg(not(test))]
1738 0 : test_config_debug_censors_password();
1739 0 : tracing::info!("database connection config: {config:?}");
1740 0 : Ok(())
1741 0 : }
1742 :
1743 : /// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
1744 : #[derive(
1745 0 : QueryableByName, Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq,
1746 : )]
1747 : #[diesel(table_name = crate::schema::tenant_shards)]
1748 : pub(crate) struct TenantShardPersistence {
1749 : #[serde(default)]
1750 : pub(crate) tenant_id: String,
1751 : #[serde(default)]
1752 : pub(crate) shard_number: i32,
1753 : #[serde(default)]
1754 : pub(crate) shard_count: i32,
1755 : #[serde(default)]
1756 : pub(crate) shard_stripe_size: i32,
1757 :
1758 : // Latest generation number: next time we attach, increment this
1759 : // and use the incremented number when attaching.
1760 : //
1761 : // Generation is only None when first onboarding a tenant, where it may
1762 : // be in PlacementPolicy::Secondary and therefore have no valid generation state.
1763 : pub(crate) generation: Option<i32>,
1764 :
1765 : // Currently attached pageserver
1766 : #[serde(rename = "pageserver")]
1767 : pub(crate) generation_pageserver: Option<i64>,
1768 :
1769 : #[serde(default)]
1770 : pub(crate) placement_policy: String,
1771 : #[serde(default)]
1772 : pub(crate) splitting: SplitState,
1773 : #[serde(default)]
1774 : pub(crate) config: String,
1775 : #[serde(default)]
1776 : pub(crate) scheduling_policy: String,
1777 :
1778 : // Hint that we should attempt to schedule this tenant shard in the given
1779 : // availability zone in order to minimise the chances of cross-AZ communication
1780 : // with compute.
1781 : pub(crate) preferred_az_id: Option<String>,
1782 : }
1783 :
1784 : impl TenantShardPersistence {
1785 0 : fn get_shard_count(&self) -> Result<ShardCount, ShardConfigError> {
1786 0 : self.shard_count
1787 0 : .try_into()
1788 0 : .map(ShardCount)
1789 0 : .map_err(|_| ShardConfigError::InvalidCount)
1790 0 : }
1791 :
1792 0 : fn get_shard_number(&self) -> Result<ShardNumber, ShardConfigError> {
1793 0 : self.shard_number
1794 0 : .try_into()
1795 0 : .map(ShardNumber)
1796 0 : .map_err(|_| ShardConfigError::InvalidNumber)
1797 0 : }
1798 :
1799 0 : fn get_stripe_size(&self) -> Result<ShardStripeSize, ShardConfigError> {
1800 0 : self.shard_stripe_size
1801 0 : .try_into()
1802 0 : .map(ShardStripeSize)
1803 0 : .map_err(|_| ShardConfigError::InvalidStripeSize)
1804 0 : }
1805 :
1806 0 : pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
1807 0 : if self.shard_count == 0 {
1808 : // NB: carry over the stripe size from the persisted record, to avoid consistency check
1809 : // failures if the persisted value differs from the default stripe size. The stripe size
1810 : // doesn't really matter for unsharded tenants anyway.
1811 : Ok(ShardIdentity::unsharded_with_stripe_size(
1812 0 : self.get_stripe_size()?,
1813 : ))
1814 : } else {
1815 : Ok(ShardIdentity::new(
1816 0 : self.get_shard_number()?,
1817 0 : self.get_shard_count()?,
1818 0 : self.get_stripe_size()?,
1819 0 : )?)
1820 : }
1821 0 : }
1822 :
1823 0 : pub(crate) fn get_tenant_shard_id(&self) -> anyhow::Result<TenantShardId> {
1824 0 : Ok(TenantShardId {
1825 0 : tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
1826 0 : shard_number: self.get_shard_number()?,
1827 0 : shard_count: self.get_shard_count()?,
1828 : })
1829 0 : }
1830 : }
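// Illustrative use of the accessors above (a sketch; `row` stands for a
// hypothetical TenantShardPersistence loaded from the tenant_shards table):
//
// let shard_id = row.get_tenant_shard_id()?;   // tenant id plus shard number/count
// let identity = row.get_shard_identity()?;    // unsharded iff shard_count == 0
// assert_eq!(identity.number.0 as i32, row.shard_number);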
1831 :
1832 : /// Parts of [`crate::node::Node`] that are stored durably
1833 0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
1834 : #[diesel(table_name = crate::schema::nodes)]
1835 : pub(crate) struct NodePersistence {
1836 : pub(crate) node_id: i64,
1837 : pub(crate) scheduling_policy: String,
1838 : pub(crate) listen_http_addr: String,
1839 : pub(crate) listen_http_port: i32,
1840 : pub(crate) listen_pg_addr: String,
1841 : pub(crate) listen_pg_port: i32,
1842 : pub(crate) availability_zone_id: String,
1843 : pub(crate) listen_https_port: Option<i32>,
1844 : }
1845 :
1846 : /// Tenant metadata health status, stored durably.
1847 0 : #[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
1848 : #[diesel(table_name = crate::schema::metadata_health)]
1849 : pub(crate) struct MetadataHealthPersistence {
1850 : #[serde(default)]
1851 : pub(crate) tenant_id: String,
1852 : #[serde(default)]
1853 : pub(crate) shard_number: i32,
1854 : #[serde(default)]
1855 : pub(crate) shard_count: i32,
1856 :
1857 : pub(crate) healthy: bool,
1858 : pub(crate) last_scrubbed_at: chrono::DateTime<chrono::Utc>,
1859 : }
1860 :
1861 : impl MetadataHealthPersistence {
1862 0 : pub fn new(
1863 0 : tenant_shard_id: TenantShardId,
1864 0 : healthy: bool,
1865 0 : last_scrubbed_at: chrono::DateTime<chrono::Utc>,
1866 0 : ) -> Self {
1867 0 : let tenant_id = tenant_shard_id.tenant_id.to_string();
1868 0 : let shard_number = tenant_shard_id.shard_number.0 as i32;
1869 0 : let shard_count = tenant_shard_id.shard_count.literal() as i32;
1870 0 :
1871 0 : MetadataHealthPersistence {
1872 0 : tenant_id,
1873 0 : shard_number,
1874 0 : shard_count,
1875 0 : healthy,
1876 0 : last_scrubbed_at,
1877 0 : }
1878 0 : }
1879 :
1880 : #[allow(dead_code)]
1881 0 : pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
1882 0 : Ok(TenantShardId {
1883 0 : tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
1884 0 : shard_number: ShardNumber(self.shard_number as u8),
1885 0 : shard_count: ShardCount::new(self.shard_count as u8),
1886 : })
1887 0 : }
1888 : }
1889 :
1890 : impl From<MetadataHealthPersistence> for MetadataHealthRecord {
1891 0 : fn from(value: MetadataHealthPersistence) -> Self {
1892 0 : MetadataHealthRecord {
1893 0 : tenant_shard_id: value
1894 0 : .get_tenant_shard_id()
1895 0 : .expect("stored tenant id should be valid"),
1896 0 : healthy: value.healthy,
1897 0 : last_scrubbed_at: value.last_scrubbed_at,
1898 0 : }
1899 0 : }
1900 : }
1901 :
1902 : #[derive(
1903 0 : Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq, Debug, Clone,
1904 : )]
1905 : #[diesel(table_name = crate::schema::controllers)]
1906 : pub(crate) struct ControllerPersistence {
1907 : pub(crate) address: String,
1908 : pub(crate) started_at: chrono::DateTime<chrono::Utc>,
1909 : }
1910 :
1911 : // What we store in the database about each safekeeper
1912 0 : #[derive(Serialize, Deserialize, Queryable, Selectable, Eq, PartialEq, Debug, Clone)]
1913 : #[diesel(table_name = crate::schema::safekeepers)]
1914 : pub(crate) struct SafekeeperPersistence {
1915 : pub(crate) id: i64,
1916 : pub(crate) region_id: String,
1917 : /// 1 is special: it means just created (not currently posted to storcon).
1918 : /// Zero or negative is not really expected.
1919 : /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
1920 : pub(crate) version: i64,
1921 : pub(crate) host: String,
1922 : pub(crate) port: i32,
1923 : pub(crate) http_port: i32,
1924 : pub(crate) availability_zone_id: String,
1925 : pub(crate) scheduling_policy: SkSchedulingPolicyFromSql,
1926 : pub(crate) https_port: Option<i32>,
1927 : }
1928 :
1929 : /// Wrapper struct around [`SkSchedulingPolicy`]: both it and [`FromSql`] are from foreign crates, so the
1930 : /// orphan rule forbids a direct impl, and we don't want to make [`safekeeper_api`] depend on [`diesel`].
1931 0 : #[derive(Serialize, Deserialize, FromSqlRow, Eq, PartialEq, Debug, Copy, Clone)]
1932 : pub(crate) struct SkSchedulingPolicyFromSql(pub(crate) SkSchedulingPolicy);
1933 :
1934 : impl From<SkSchedulingPolicy> for SkSchedulingPolicyFromSql {
1935 0 : fn from(value: SkSchedulingPolicy) -> Self {
1936 0 : SkSchedulingPolicyFromSql(value)
1937 0 : }
1938 : }
1939 :
1940 : impl FromSql<diesel::sql_types::VarChar, Pg> for SkSchedulingPolicyFromSql {
1941 0 : fn from_sql(
1942 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
1943 0 : ) -> diesel::deserialize::Result<Self> {
1944 0 : let bytes = bytes.as_bytes();
1945 0 : match core::str::from_utf8(bytes) {
1946 0 : Ok(s) => match SkSchedulingPolicy::from_str(s) {
1947 0 : Ok(policy) => Ok(SkSchedulingPolicyFromSql(policy)),
1948 0 : Err(e) => Err(format!("can't parse: {e}").into()),
1949 : },
1950 0 : Err(e) => Err(format!("invalid UTF-8 for scheduling policy: {e}").into()),
1951 : }
1952 0 : }
1953 : }
1954 :
1955 : impl SafekeeperPersistence {
1956 0 : pub(crate) fn from_upsert(
1957 0 : upsert: SafekeeperUpsert,
1958 0 : scheduling_policy: SkSchedulingPolicy,
1959 0 : ) -> Self {
1960 0 : crate::persistence::SafekeeperPersistence {
1961 0 : id: upsert.id,
1962 0 : region_id: upsert.region_id,
1963 0 : version: upsert.version,
1964 0 : host: upsert.host,
1965 0 : port: upsert.port,
1966 0 : http_port: upsert.http_port,
1967 0 : https_port: upsert.https_port,
1968 0 : availability_zone_id: upsert.availability_zone_id,
1969 0 : scheduling_policy: SkSchedulingPolicyFromSql(scheduling_policy),
1970 0 : }
1971 0 : }
1972 0 : pub(crate) fn as_describe_response(&self) -> Result<SafekeeperDescribeResponse, DatabaseError> {
1973 0 : Ok(SafekeeperDescribeResponse {
1974 0 : id: NodeId(self.id as u64),
1975 0 : region_id: self.region_id.clone(),
1976 0 : version: self.version,
1977 0 : host: self.host.clone(),
1978 0 : port: self.port,
1979 0 : http_port: self.http_port,
1980 0 : https_port: self.https_port,
1981 0 : availability_zone_id: self.availability_zone_id.clone(),
1982 0 : scheduling_policy: self.scheduling_policy.0,
1983 0 : })
1984 0 : }
1985 : }
1986 :
1987 : /// What we expect from the upsert HTTP API.
1988 0 : #[derive(Serialize, Deserialize, Eq, PartialEq, Debug, Clone)]
1989 : pub(crate) struct SafekeeperUpsert {
1990 : pub(crate) id: i64,
1991 : pub(crate) region_id: String,
1992 : /// 1 is special: it means just created (not currently posted to storcon).
1993 : /// Zero or negative is not really expected.
1994 : /// Otherwise the number from `release-$(number_of_commits_on_branch)` tag.
1995 : pub(crate) version: i64,
1996 : pub(crate) host: String,
1997 : pub(crate) port: i32,
1998 : /// The active flag will not be stored in the database and will be ignored.
1999 : pub(crate) active: Option<bool>,
2000 : pub(crate) http_port: i32,
2001 : pub(crate) https_port: Option<i32>,
2002 : pub(crate) availability_zone_id: String,
2003 : }
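// An illustrative request body matching the struct above (values made up):
//
// {
//   "id": 7, "region_id": "aws-eu-central-1", "version": 8012,
//   "host": "safekeeper-7.local", "port": 6500, "http_port": 7676,
//   "https_port": null, "active": true, "availability_zone_id": "eu-central-1a"
// }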
2004 :
2005 : impl SafekeeperUpsert {
2006 0 : fn as_insert_or_update(&self) -> anyhow::Result<InsertUpdateSafekeeper<'_>> {
2007 0 : if self.version < 0 {
2008 0 : anyhow::bail!("negative version: {}", self.version);
2009 0 : }
2010 0 : Ok(InsertUpdateSafekeeper {
2011 0 : id: self.id,
2012 0 : region_id: &self.region_id,
2013 0 : version: self.version,
2014 0 : host: &self.host,
2015 0 : port: self.port,
2016 0 : http_port: self.http_port,
2017 0 : https_port: self.https_port,
2018 0 : availability_zone_id: &self.availability_zone_id,
2019 0 : // `None` means a wish not to update this column; we expose the ability to update it via other means.
2020 0 : scheduling_policy: None,
2021 0 : })
2022 0 : }
2023 : }
2024 :
2025 0 : #[derive(Insertable, AsChangeset)]
2026 : #[diesel(table_name = crate::schema::safekeepers)]
2027 : struct InsertUpdateSafekeeper<'a> {
2028 : id: i64,
2029 : region_id: &'a str,
2030 : version: i64,
2031 : host: &'a str,
2032 : port: i32,
2033 : http_port: i32,
2034 : https_port: Option<i32>,
2035 : availability_zone_id: &'a str,
2036 : scheduling_policy: Option<&'a str>,
2037 : }
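// Note: `scheduling_policy` is written here in its raw string form, while reads
// decode it through `SkSchedulingPolicyFromSql` above.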
2038 :
2039 0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
2040 : #[diesel(sql_type = crate::schema::sql_types::PgLsn)]
2041 : pub(crate) struct LsnWrapper(pub(crate) Lsn);
2042 :
2043 : impl From<Lsn> for LsnWrapper {
2044 0 : fn from(value: Lsn) -> Self {
2045 0 : LsnWrapper(value)
2046 0 : }
2047 : }
2048 :
2049 : impl FromSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
2050 0 : fn from_sql(
2051 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
2052 0 : ) -> diesel::deserialize::Result<Self> {
2053 0 : let byte_arr: diesel::deserialize::Result<[u8; 8]> = bytes
2054 0 : .as_bytes()
2055 0 : .try_into()
2056 0 : .map_err(|_| "Can't obtain lsn from sql".into());
2057 0 : Ok(LsnWrapper(Lsn(u64::from_be_bytes(byte_arr?))))
2058 0 : }
2059 : }
2060 :
2061 : impl ToSql<crate::schema::sql_types::PgLsn, Pg> for LsnWrapper {
2062 0 : fn to_sql<'b>(
2063 0 : &'b self,
2064 0 : out: &mut diesel::serialize::Output<'b, '_, Pg>,
2065 0 : ) -> diesel::serialize::Result {
2066 0 : out.write_all(&u64::to_be_bytes(self.0.0))
2067 0 : .map(|_| IsNull::No)
2068 0 : .map_err(Into::into)
2069 0 : }
2070 : }
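// Worked example of the encoding above: pg_lsn's binary format is a 64-bit
// big-endian integer, so LSN 0/16B9188, i.e. Lsn(0x16B9188), round-trips as the
// bytes [0x00, 0x00, 0x00, 0x00, 0x01, 0x6B, 0x91, 0x88].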
2071 :
2072 0 : #[derive(Insertable, AsChangeset, Clone)]
2073 : #[diesel(table_name = crate::schema::timelines)]
2074 : pub(crate) struct TimelinePersistence {
2075 : pub(crate) tenant_id: String,
2076 : pub(crate) timeline_id: String,
2077 : pub(crate) start_lsn: LsnWrapper,
2078 : pub(crate) generation: i32,
2079 : pub(crate) sk_set: Vec<i64>,
2080 : pub(crate) new_sk_set: Option<Vec<i64>>,
2081 : pub(crate) cplane_notified_generation: i32,
2082 : pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
2083 : }
2084 :
2085 : /// This is separate from [TimelinePersistence] only because postgres allows NULLs
2086 : /// in arrays and there is no way to forbid that at the schema level. Hence diesel
2087 : /// wants `sk_set` to be `Vec<Option<i64>>` instead of `Vec<i64>` for
2088 : /// Queryable/Selectable. It does however allow insertions without redundant
2089 : /// `Option`s, so [TimelinePersistence] doesn't have them.
2090 0 : #[derive(Queryable, Selectable)]
2091 : #[diesel(table_name = crate::schema::timelines)]
2092 : pub(crate) struct TimelineFromDb {
2093 : pub(crate) tenant_id: String,
2094 : pub(crate) timeline_id: String,
2095 : pub(crate) start_lsn: LsnWrapper,
2096 : pub(crate) generation: i32,
2097 : pub(crate) sk_set: Vec<Option<i64>>,
2098 : pub(crate) new_sk_set: Option<Vec<Option<i64>>>,
2099 : pub(crate) cplane_notified_generation: i32,
2100 : pub(crate) deleted_at: Option<chrono::DateTime<chrono::Utc>>,
2101 : }
2102 :
2103 : impl TimelineFromDb {
2104 0 : fn into_persistence(self) -> TimelinePersistence {
2105 0 : // We should never encounter null entries in the sets, but we need to filter them out.
2106 0 : // There is no way to forbid this at the schema level in a form diesel recognizes (to our knowledge).
2107 0 : let sk_set = self.sk_set.into_iter().flatten().collect::<Vec<_>>();
2108 0 : let new_sk_set = self
2109 0 : .new_sk_set
2110 0 : .map(|s| s.into_iter().flatten().collect::<Vec<_>>());
2111 0 : TimelinePersistence {
2112 0 : tenant_id: self.tenant_id,
2113 0 : timeline_id: self.timeline_id,
2114 0 : start_lsn: self.start_lsn,
2115 0 : generation: self.generation,
2116 0 : sk_set,
2117 0 : new_sk_set,
2118 0 : cplane_notified_generation: self.cplane_notified_generation,
2119 0 : deleted_at: self.deleted_at,
2120 0 : }
2121 0 : }
2122 : }
2123 :
2124 0 : #[derive(Insertable, AsChangeset, Queryable, Selectable, Clone)]
2125 : #[diesel(table_name = crate::schema::safekeeper_timeline_pending_ops)]
2126 : pub(crate) struct TimelinePendingOpPersistence {
2127 : pub(crate) sk_id: i64,
2128 : pub(crate) tenant_id: String,
2129 : pub(crate) timeline_id: String,
2130 : pub(crate) generation: i32,
2131 : pub(crate) op_kind: SafekeeperTimelineOpKind,
2132 : }
2133 :
2134 0 : #[derive(Serialize, Deserialize, FromSqlRow, AsExpression, Eq, PartialEq, Debug, Copy, Clone)]
2135 : #[diesel(sql_type = diesel::sql_types::VarChar)]
2136 : pub(crate) enum SafekeeperTimelineOpKind {
2137 : Pull,
2138 : Exclude,
2139 : Delete,
2140 : }
2141 :
2142 : impl FromSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
2143 0 : fn from_sql(
2144 0 : bytes: <Pg as diesel::backend::Backend>::RawValue<'_>,
2145 0 : ) -> diesel::deserialize::Result<Self> {
2146 0 : let bytes = bytes.as_bytes();
2147 0 : match core::str::from_utf8(bytes) {
2148 0 : Ok(s) => match s {
2149 0 : "pull" => Ok(SafekeeperTimelineOpKind::Pull),
2150 0 : "exclude" => Ok(SafekeeperTimelineOpKind::Exclude),
2151 0 : "delete" => Ok(SafekeeperTimelineOpKind::Delete),
2152 0 : _ => Err(format!("can't parse: {s}").into()),
2153 : },
2154 0 : Err(e) => Err(format!("invalid UTF-8 for op_kind: {e}").into()),
2155 : }
2156 0 : }
2157 : }
2158 :
2159 : impl ToSql<diesel::sql_types::VarChar, Pg> for SafekeeperTimelineOpKind {
2160 0 : fn to_sql<'b>(
2161 0 : &'b self,
2162 0 : out: &mut diesel::serialize::Output<'b, '_, Pg>,
2163 0 : ) -> diesel::serialize::Result {
2164 0 : let kind_str = match self {
2165 0 : SafekeeperTimelineOpKind::Pull => "pull",
2166 0 : SafekeeperTimelineOpKind::Exclude => "exclude",
2167 0 : SafekeeperTimelineOpKind::Delete => "delete",
2168 : };
2169 0 : out.write_all(kind_str.as_bytes())
2170 0 : .map(|_| IsNull::No)
2171 0 : .map_err(Into::into)
2172 0 : }
2173 : }