pub(crate) mod split_state;
use std::collections::HashMap;
use std::str::FromStr;
use std::time::Duration;
use std::time::Instant;

use self::split_state::SplitState;
use camino::Utf8Path;
use camino::Utf8PathBuf;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel::Connection;
use pageserver_api::controller_api::ShardSchedulingPolicy;
use pageserver_api::controller_api::{NodeSchedulingPolicy, PlacementPolicy};
use pageserver_api::models::TenantConfig;
use pageserver_api::shard::ShardConfigError;
use pageserver_api::shard::ShardIdentity;
use pageserver_api::shard::ShardStripeSize;
use pageserver_api::shard::{ShardCount, ShardNumber, TenantShardId};
use serde::{Deserialize, Serialize};
use utils::generation::Generation;
use utils::id::{NodeId, TenantId};

use crate::metrics::{
    DatabaseQueryErrorLabelGroup, DatabaseQueryLatencyLabelGroup, METRICS_REGISTRY,
};
use crate::node::Node;
/// ## What do we store?
///
/// The storage controller service does not store most of its state durably.
///
/// The essential things to store durably are:
/// - generation numbers, as these must always advance monotonically to ensure data safety.
/// - Tenant's PlacementPolicy and TenantConfig, as the source of truth for these is something external.
/// - Node's scheduling policies, as the source of truth for these is something external.
///
/// Other things we store durably as an implementation detail:
/// - Node's host/port: this could be avoided if we made nodes emit a self-registering heartbeat,
///   but it is operationally simpler to make this service the authority for which nodes
///   it talks to.
///
/// ## Performance/efficiency
///
/// The storage controller service does not go via the database for most things: there are
/// a couple of places where we must, and where efficiency matters:
/// - Incrementing generation numbers: the Reconciler has to wait for this to complete
///   before it can attach a tenant, so this acts as a bound on how fast things like
///   failover can happen.
/// - Pageserver re-attach: we will increment many shards' generations when this happens,
///   so it is important to avoid e.g. issuing O(N) queries.
///
/// Database calls relating to nodes have low performance requirements, as they are very rarely
/// updated, and reads of nodes are always from memory, not the database. We only require that
/// we can UPDATE a node's scheduling mode reasonably quickly to mark a bad node offline.
pub struct Persistence {
    connection_pool: diesel::r2d2::Pool<diesel::r2d2::ConnectionManager<PgConnection>>,

    // In test environments, we support loading+saving a JSON file. This is temporary, for the benefit of
    // test_compatibility.py, so that we don't have to commit to making the database contents fully backward/forward
    // compatible just yet.
    json_path: Option<Utf8PathBuf>,
}

/// Legacy format, for use in JSON compat objects in test environment
#[derive(Serialize, Deserialize)]
struct JsonPersistence {
    tenants: HashMap<TenantShardId, TenantShardPersistence>,
}

#[derive(thiserror::Error, Debug)]
pub(crate) enum DatabaseError {
    #[error(transparent)]
    Query(#[from] diesel::result::Error),
    #[error(transparent)]
    Connection(#[from] diesel::result::ConnectionError),
    #[error(transparent)]
    ConnectionPool(#[from] r2d2::Error),
    #[error("Logical error: {0}")]
    Logical(String),
}

#[derive(measured::FixedCardinalityLabel, Copy, Clone)]
pub(crate) enum DatabaseOperation {
    InsertNode,
    UpdateNode,
    DeleteNode,
    ListNodes,
    BeginShardSplit,
    CompleteShardSplit,
    AbortShardSplit,
    Detach,
    ReAttach,
    IncrementGeneration,
    ListTenantShards,
    InsertTenantShards,
    UpdateTenantShard,
    DeleteTenant,
    UpdateTenantConfig,
}

#[must_use]
pub(crate) enum AbortShardSplitStatus {
    /// We aborted the split in the database by reverting to the parent shards
    Aborted,
    /// The split had already been persisted.
    Complete,
}

pub(crate) type DatabaseResult<T> = Result<T, DatabaseError>;

/// Some methods can operate on either a whole tenant or a single shard
pub(crate) enum TenantFilter {
    Tenant(TenantId),
    Shard(TenantShardId),
}
117 :
118 : impl Persistence {
119 : // The default postgres connection limit is 100. We use up to 99, to leave one free for a human admin under
120 : // normal circumstances. This assumes we have exclusive use of the database cluster to which we connect.
121 : pub const MAX_CONNECTIONS: u32 = 99;
122 :
123 : // We don't want to keep a lot of connections alive: close them down promptly if they aren't being used.
124 : const IDLE_CONNECTION_TIMEOUT: Duration = Duration::from_secs(10);
125 : const MAX_CONNECTION_LIFETIME: Duration = Duration::from_secs(60);
126 :
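    /// Construct a [`Persistence`] backed by a connection pool. A minimal usage
    /// sketch (the URL below is illustrative, not a real deployment value):
    ///
    /// ```ignore
    /// let persistence = Persistence::new(
    ///     "postgresql://localhost:5432/storage_controller".to_string(),
    ///     None, // no JSON compat file outside test environments
    /// );
    /// ```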
    pub fn new(database_url: String, json_path: Option<Utf8PathBuf>) -> Self {
        let manager = diesel::r2d2::ConnectionManager::<PgConnection>::new(database_url);

        // We will use a connection pool: this is primarily to _limit_ our connection count, rather than to optimize time
        // to execute queries (database queries are not generally on latency-sensitive paths).
        let connection_pool = diesel::r2d2::Pool::builder()
            .max_size(Self::MAX_CONNECTIONS)
            .max_lifetime(Some(Self::MAX_CONNECTION_LIFETIME))
            .idle_timeout(Some(Self::IDLE_CONNECTION_TIMEOUT))
            // Always keep at least one connection ready to go
            .min_idle(Some(1))
            .test_on_check_out(true)
            .build(manager)
            .expect("Could not build connection pool");

        Self {
            connection_pool,
            json_path,
        }
    }

    /// A helper for use during startup, where we would like to tolerate concurrent restarts of the
    /// database and the storage controller, so the database might not be available right away.
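    ///
    /// Illustrative startup sequence (a sketch of how the two constructors here compose,
    /// not a call site that exists in this file):
    ///
    /// ```ignore
    /// Persistence::await_connection(&database_url, Duration::from_secs(30)).await?;
    /// let persistence = Persistence::new(database_url, None);
    /// ```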
    pub async fn await_connection(
        database_url: &str,
        timeout: Duration,
    ) -> Result<(), diesel::ConnectionError> {
        let started_at = Instant::now();
        loop {
            match PgConnection::establish(database_url) {
                Ok(_) => {
                    tracing::info!("Connected to database.");
                    return Ok(());
                }
                Err(e) => {
                    if started_at.elapsed() > timeout {
                        return Err(e);
                    } else {
                        tracing::info!("Database not yet available, waiting... ({e})");
                        tokio::time::sleep(Duration::from_millis(100)).await;
                    }
                }
            }
        }
    }

    /// Wraps `with_conn` in order to collect latency and error metrics
    async fn with_measured_conn<F, R>(&self, op: DatabaseOperation, func: F) -> DatabaseResult<R>
    where
        F: FnOnce(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
        R: Send + 'static,
    {
        let latency = &METRICS_REGISTRY
            .metrics_group
            .storage_controller_database_query_latency;
        let _timer = latency.start_timer(DatabaseQueryLatencyLabelGroup { operation: op });

        let res = self.with_conn(func).await;

        if let Err(err) = &res {
            let error_counter = &METRICS_REGISTRY
                .metrics_group
                .storage_controller_database_query_error;
            error_counter.inc(DatabaseQueryErrorLabelGroup {
                error_type: err.error_label(),
                operation: op,
            })
        }

        res
    }

    /// Call the provided function in a tokio blocking thread, with a Diesel database connection.
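    ///
    /// Callers pass a synchronous closure that runs Diesel queries; for example (an
    /// illustrative sketch, not a method that exists in this file):
    ///
    /// ```ignore
    /// let node_count: i64 = self
    ///     .with_conn(|conn| Ok(crate::schema::nodes::table.count().get_result(conn)?))
    ///     .await?;
    /// ```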
    async fn with_conn<F, R>(&self, func: F) -> DatabaseResult<R>
    where
        F: FnOnce(&mut PgConnection) -> DatabaseResult<R> + Send + 'static,
        R: Send + 'static,
    {
        let mut conn = self.connection_pool.get()?;
        tokio::task::spawn_blocking(move || -> DatabaseResult<R> { func(&mut conn) })
            .await
            .expect("Task panic")
    }

    /// When a node is first registered, persist it before using it for anything
    pub(crate) async fn insert_node(&self, node: &Node) -> DatabaseResult<()> {
        let np = node.to_persistent();
        self.with_measured_conn(
            DatabaseOperation::InsertNode,
            move |conn| -> DatabaseResult<()> {
                diesel::insert_into(crate::schema::nodes::table)
                    .values(&np)
                    .execute(conn)?;
                Ok(())
            },
        )
        .await
    }

    /// At startup, populate the list of nodes which our shards may be placed on
    pub(crate) async fn list_nodes(&self) -> DatabaseResult<Vec<NodePersistence>> {
        let nodes: Vec<NodePersistence> = self
            .with_measured_conn(
                DatabaseOperation::ListNodes,
                move |conn| -> DatabaseResult<_> {
                    Ok(crate::schema::nodes::table.load::<NodePersistence>(conn)?)
                },
            )
            .await?;

        tracing::info!("list_nodes: loaded {} nodes", nodes.len());

        Ok(nodes)
    }

    pub(crate) async fn update_node(
        &self,
        input_node_id: NodeId,
        input_scheduling: NodeSchedulingPolicy,
    ) -> DatabaseResult<()> {
        use crate::schema::nodes::dsl::*;
        let updated = self
            .with_measured_conn(DatabaseOperation::UpdateNode, move |conn| {
                let updated = diesel::update(nodes)
                    .filter(node_id.eq(input_node_id.0 as i64))
                    .set((scheduling_policy.eq(String::from(input_scheduling)),))
                    .execute(conn)?;
                Ok(updated)
            })
            .await?;

        if updated != 1 {
            // Note: format the input NodeId, not the `node_id` Diesel column that the
            // dsl import brings into scope.
            Err(DatabaseError::Logical(format!(
                "Node {input_node_id} not found for update",
            )))
        } else {
            Ok(())
        }
    }

    /// At startup, load the high level state for shards, such as their config + policy. This will
    /// be enriched at runtime with state discovered on pageservers.
    pub(crate) async fn list_tenant_shards(&self) -> DatabaseResult<Vec<TenantShardPersistence>> {
        let loaded = self
            .with_measured_conn(
                DatabaseOperation::ListTenantShards,
                move |conn| -> DatabaseResult<_> {
                    Ok(crate::schema::tenant_shards::table.load::<TenantShardPersistence>(conn)?)
                },
            )
            .await?;

        if loaded.is_empty() {
            if let Some(path) = &self.json_path {
                if tokio::fs::try_exists(path)
                    .await
                    .map_err(|e| DatabaseError::Logical(format!("Error stat'ing JSON file: {e}")))?
                {
                    tracing::info!("Importing from legacy JSON format at {path}");
                    return self.list_tenant_shards_json(path).await;
                }
            }
        }
        Ok(loaded)
    }

    /// Shim for automated compatibility tests: load tenants from a JSON file instead of database
    pub(crate) async fn list_tenant_shards_json(
        &self,
        path: &Utf8Path,
    ) -> DatabaseResult<Vec<TenantShardPersistence>> {
        let bytes = tokio::fs::read(path)
            .await
            .map_err(|e| DatabaseError::Logical(format!("Failed to load JSON: {e}")))?;

        let mut decoded = serde_json::from_slice::<JsonPersistence>(&bytes)
            .map_err(|e| DatabaseError::Logical(format!("Deserialization error: {e}")))?;
        for shard in decoded.tenants.values_mut() {
            if shard.placement_policy == "\"Single\"" {
                // Backward compat for test data after PR https://github.com/neondatabase/neon/pull/7165
                shard.placement_policy = "{\"Attached\":0}".to_string();
            }

            if shard.scheduling_policy.is_empty() {
                shard.scheduling_policy =
                    serde_json::to_string(&ShardSchedulingPolicy::default()).unwrap();
            }
        }

        let tenants: Vec<TenantShardPersistence> = decoded.tenants.into_values().collect();

        // Synchronize database with what is in the JSON file
        self.insert_tenant_shards(tenants.clone()).await?;

        Ok(tenants)
    }

    /// For use in testing environments, where we dump out JSON on shutdown.
    pub async fn write_tenants_json(&self) -> anyhow::Result<()> {
        let Some(path) = &self.json_path else {
            anyhow::bail!("Cannot write JSON if path isn't set (test environment bug)");
        };
        tracing::info!("Writing state to {path}...");
        let tenants = self.list_tenant_shards().await?;
        let mut tenants_map = HashMap::new();
        for tsp in tenants {
            let tenant_shard_id = TenantShardId {
                tenant_id: TenantId::from_str(tsp.tenant_id.as_str())?,
                shard_number: ShardNumber(tsp.shard_number as u8),
                shard_count: ShardCount::new(tsp.shard_count as u8),
            };

            tenants_map.insert(tenant_shard_id, tsp);
        }
        let json = serde_json::to_string(&JsonPersistence {
            tenants: tenants_map,
        })?;

        tokio::fs::write(path, &json).await?;
        tracing::info!("Wrote {} bytes to {path}...", json.len());

        Ok(())
    }

    /// Tenants must be persisted before we schedule them for the first time. This enables us
    /// to correctly retain generation monotonicity, and the externally provided placement policy & config.
    pub(crate) async fn insert_tenant_shards(
        &self,
        shards: Vec<TenantShardPersistence>,
    ) -> DatabaseResult<()> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::InsertTenantShards,
            move |conn| -> DatabaseResult<()> {
                conn.transaction(|conn| -> QueryResult<()> {
                    for tenant in &shards {
                        diesel::insert_into(tenant_shards)
                            .values(tenant)
                            .execute(conn)?;
                    }
                    Ok(())
                })?;
                Ok(())
            },
        )
        .await
    }

    /// Ordering: call this _after_ deleting the tenant on pageservers, but _before_ dropping state for
    /// the tenant from memory on this server.
    pub(crate) async fn delete_tenant(&self, del_tenant_id: TenantId) -> DatabaseResult<()> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::DeleteTenant,
            move |conn| -> DatabaseResult<()> {
                diesel::delete(tenant_shards)
                    .filter(tenant_id.eq(del_tenant_id.to_string()))
                    .execute(conn)?;

                Ok(())
            },
        )
        .await
    }

    pub(crate) async fn delete_node(&self, del_node_id: NodeId) -> DatabaseResult<()> {
        use crate::schema::nodes::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::DeleteNode,
            move |conn| -> DatabaseResult<()> {
                diesel::delete(nodes)
                    .filter(node_id.eq(del_node_id.0 as i64))
                    .execute(conn)?;

                Ok(())
            },
        )
        .await
    }

    /// When a pageserver invokes the /re-attach API, this function is responsible for doing an efficient
    /// batched increment of the generations of all tenants whose generation_pageserver is equal to
    /// the node that called /re-attach.
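    ///
    /// The whole node is handled in two statements rather than one query per shard; in SQL
    /// terms (an illustrative rendering of the Diesel code below, not literally what it emits):
    ///
    /// ```text
    /// UPDATE tenant_shards SET generation = generation + 1 WHERE generation_pageserver = $node_id;
    /// SELECT * FROM tenant_shards WHERE generation_pageserver = $node_id;
    /// ```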
    #[tracing::instrument(skip_all, fields(node_id))]
    pub(crate) async fn re_attach(
        &self,
        node_id: NodeId,
    ) -> DatabaseResult<HashMap<TenantShardId, Generation>> {
        use crate::schema::tenant_shards::dsl::*;
        let updated = self
            .with_measured_conn(DatabaseOperation::ReAttach, move |conn| {
                let rows_updated = diesel::update(tenant_shards)
                    .filter(generation_pageserver.eq(node_id.0 as i64))
                    .set(generation.eq(generation + 1))
                    .execute(conn)?;

                tracing::info!("Incremented {} tenants' generations", rows_updated);

                // TODO: UPDATE+SELECT in one query

                let updated = tenant_shards
                    .filter(generation_pageserver.eq(node_id.0 as i64))
                    .select(TenantShardPersistence::as_select())
                    .load(conn)?;
                Ok(updated)
            })
            .await?;

        let mut result = HashMap::new();
        for tsp in updated {
            let tenant_shard_id = TenantShardId {
                tenant_id: TenantId::from_str(tsp.tenant_id.as_str())
                    .map_err(|e| DatabaseError::Logical(format!("Malformed tenant id: {e}")))?,
                shard_number: ShardNumber(tsp.shard_number as u8),
                shard_count: ShardCount::new(tsp.shard_count as u8),
            };

            let Some(g) = tsp.generation else {
                // If the generation_pageserver column was non-NULL, then the generation column should also be non-NULL:
                // we only set generation_pageserver when setting generation.
                return Err(DatabaseError::Logical(
                    "Generation should always be set after incrementing".to_string(),
                ));
            };
            result.insert(tenant_shard_id, Generation::new(g as u32));
        }

        Ok(result)
    }

    /// Reconciler calls this immediately before attaching to a new pageserver, to acquire a unique, monotonically
    /// advancing generation number. We also store the NodeId for which the generation was issued, so that in
    /// [`Self::re_attach`] we can do a bulk UPDATE on the generations for that node.
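    ///
    /// In SQL terms this is a single statement (an illustrative rendering of the Diesel
    /// code below):
    ///
    /// ```text
    /// UPDATE tenant_shards
    ///    SET generation = generation + 1, generation_pageserver = $node_id
    ///  WHERE tenant_id = $tenant AND shard_number = $number AND shard_count = $count
    /// RETURNING *;
    /// ```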
    pub(crate) async fn increment_generation(
        &self,
        tenant_shard_id: TenantShardId,
        node_id: NodeId,
    ) -> anyhow::Result<Generation> {
        use crate::schema::tenant_shards::dsl::*;
        let updated = self
            .with_measured_conn(DatabaseOperation::IncrementGeneration, move |conn| {
                let updated = diesel::update(tenant_shards)
                    .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
                    .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
                    .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
                    .set((
                        generation.eq(generation + 1),
                        generation_pageserver.eq(node_id.0 as i64),
                    ))
                    // TODO: only returning() the generation column
                    .returning(TenantShardPersistence::as_returning())
                    .get_result(conn)?;

                Ok(updated)
            })
            .await?;

        // Generation is always non-null in the result: if the generation column had been NULL, then we
        // should have experienced an SQL Conflict error while executing a query that tries to increment it.
        debug_assert!(updated.generation.is_some());
        let Some(g) = updated.generation else {
            return Err(DatabaseError::Logical(
                "Generation should always be set after incrementing".to_string(),
            )
            .into());
        };

        Ok(Generation::new(g as u32))
    }

    /// For use when updating a persistent property of a tenant, such as its config or placement_policy.
    ///
    /// Do not use this for setting generation, unless in the special onboarding code path (/location_config)
    /// API: use [`Self::increment_generation`] instead. Setting the generation via this route is a one-time thing
    /// that we only do the first time a tenant is set to an attached policy via /location_config.
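    ///
    /// For example, to update only the config for every shard of a tenant (an illustrative
    /// call site, not one that exists in this file):
    ///
    /// ```ignore
    /// persistence
    ///     .update_tenant_shard(TenantFilter::Tenant(tenant_id), None, Some(new_config), None, None)
    ///     .await?;
    /// ```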
    pub(crate) async fn update_tenant_shard(
        &self,
        tenant: TenantFilter,
        input_placement_policy: Option<PlacementPolicy>,
        input_config: Option<TenantConfig>,
        input_generation: Option<Generation>,
        input_scheduling_policy: Option<ShardSchedulingPolicy>,
    ) -> DatabaseResult<()> {
        use crate::schema::tenant_shards::dsl::*;

        self.with_measured_conn(DatabaseOperation::UpdateTenantShard, move |conn| {
            let query = match tenant {
                TenantFilter::Shard(tenant_shard_id) => diesel::update(tenant_shards)
                    .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
                    .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
                    .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
                    .into_boxed(),
                TenantFilter::Tenant(input_tenant_id) => diesel::update(tenant_shards)
                    .filter(tenant_id.eq(input_tenant_id.to_string()))
                    .into_boxed(),
            };

            #[derive(AsChangeset)]
            #[diesel(table_name = crate::schema::tenant_shards)]
            struct ShardUpdate {
                generation: Option<i32>,
                placement_policy: Option<String>,
                config: Option<String>,
                scheduling_policy: Option<String>,
            }

            let update = ShardUpdate {
                generation: input_generation.map(|g| g.into().unwrap() as i32),
                placement_policy: input_placement_policy
                    .map(|p| serde_json::to_string(&p).unwrap()),
                config: input_config.map(|c| serde_json::to_string(&c).unwrap()),
                scheduling_policy: input_scheduling_policy
                    .map(|p| serde_json::to_string(&p).unwrap()),
            };

            query.set(update).execute(conn)?;

            Ok(())
        })
        .await?;

        Ok(())
    }

    pub(crate) async fn detach(&self, tenant_shard_id: TenantShardId) -> anyhow::Result<()> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(DatabaseOperation::Detach, move |conn| {
            let updated = diesel::update(tenant_shards)
                .filter(tenant_id.eq(tenant_shard_id.tenant_id.to_string()))
                .filter(shard_number.eq(tenant_shard_id.shard_number.0 as i32))
                .filter(shard_count.eq(tenant_shard_id.shard_count.literal() as i32))
                .set((
                    generation_pageserver.eq(Option::<i64>::None),
                    placement_policy.eq(serde_json::to_string(&PlacementPolicy::Detached).unwrap()),
                ))
                .execute(conn)?;

            Ok(updated)
        })
        .await?;

        Ok(())
    }

    // When we start shard splitting, we must durably mark the tenant so that
    // on restart, we know that we must go through recovery.
    //
    // We create the child shards here, so that they will be available for increment_generation calls
    // if some pageserver holding a child shard needs to restart before the overall tenant split is complete.
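    //
    // The overall split lifecycle, as reflected in this file (a summary of the three
    // methods below, not additional behavior):
    //   1. begin_shard_split: mark parents splitting=1 and insert child shards, carrying
    //      each parent's generation into its children.
    //   2. On success, complete_shard_split deletes the parents and clears the splitting flag.
    //   3. On failure, abort_shard_split deletes the children and clears the flag on the parents.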
    pub(crate) async fn begin_shard_split(
        &self,
        old_shard_count: ShardCount,
        split_tenant_id: TenantId,
        parent_to_children: Vec<(TenantShardId, Vec<TenantShardPersistence>)>,
    ) -> DatabaseResult<()> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(DatabaseOperation::BeginShardSplit, move |conn| -> DatabaseResult<()> {
            conn.transaction(|conn| -> DatabaseResult<()> {
                // Mark parent shards as splitting

                let updated = diesel::update(tenant_shards)
                    .filter(tenant_id.eq(split_tenant_id.to_string()))
                    .filter(shard_count.eq(old_shard_count.literal() as i32))
                    .set((splitting.eq(1),))
                    .execute(conn)?;
                if u8::try_from(updated)
                    .map_err(|_| DatabaseError::Logical(
                        format!("Overflow existing shard count {} while splitting", updated))
                    )? != old_shard_count.count() {
                    // Perhaps a deletion or another split raced with this attempt to split, mutating
                    // the parent shards that we intend to split. In this case the split request should fail.
                    return Err(DatabaseError::Logical(
                        format!("Unexpected existing shard count {updated} when preparing tenant for split (expected {})", old_shard_count.count())
                    ));
                }

                // FIXME: spurious clone to sidestep closure move rules
                let parent_to_children = parent_to_children.clone();

                // Insert child shards
                for (parent_shard_id, children) in parent_to_children {
                    let mut parent = crate::schema::tenant_shards::table
                        .filter(tenant_id.eq(parent_shard_id.tenant_id.to_string()))
                        .filter(shard_number.eq(parent_shard_id.shard_number.0 as i32))
                        .filter(shard_count.eq(parent_shard_id.shard_count.literal() as i32))
                        .load::<TenantShardPersistence>(conn)?;
                    let parent = if parent.len() != 1 {
                        return Err(DatabaseError::Logical(format!(
                            "Parent shard {parent_shard_id} not found"
                        )));
                    } else {
                        parent.pop().unwrap()
                    };
                    for mut shard in children {
                        // Carry the parent's generation into the child
                        shard.generation = parent.generation;

                        debug_assert!(shard.splitting == SplitState::Splitting);
                        diesel::insert_into(tenant_shards)
                            .values(shard)
                            .execute(conn)?;
                    }
                }

                Ok(())
            })?;

            Ok(())
        })
        .await
    }

    // When we finish shard splitting, we must atomically clean up the old shards
    // and insert the new shards, and clear the splitting marker.
    pub(crate) async fn complete_shard_split(
        &self,
        split_tenant_id: TenantId,
        old_shard_count: ShardCount,
    ) -> DatabaseResult<()> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::CompleteShardSplit,
            move |conn| -> DatabaseResult<()> {
                conn.transaction(|conn| -> QueryResult<()> {
                    // Drop parent shards
                    diesel::delete(tenant_shards)
                        .filter(tenant_id.eq(split_tenant_id.to_string()))
                        .filter(shard_count.eq(old_shard_count.literal() as i32))
                        .execute(conn)?;

                    // Clear splitting flag on the remaining (child) shards
                    let updated = diesel::update(tenant_shards)
                        .filter(tenant_id.eq(split_tenant_id.to_string()))
                        .set((splitting.eq(0),))
                        .execute(conn)?;
                    debug_assert!(updated > 0);

                    Ok(())
                })?;

                Ok(())
            },
        )
        .await
    }

    /// Used when the remote part of a shard split failed: we will revert the database state to have only
    /// the parent shards, with SplitState::Idle.
    pub(crate) async fn abort_shard_split(
        &self,
        split_tenant_id: TenantId,
        new_shard_count: ShardCount,
    ) -> DatabaseResult<AbortShardSplitStatus> {
        use crate::schema::tenant_shards::dsl::*;
        self.with_measured_conn(
            DatabaseOperation::AbortShardSplit,
            move |conn| -> DatabaseResult<AbortShardSplitStatus> {
                let aborted =
                    conn.transaction(|conn| -> DatabaseResult<AbortShardSplitStatus> {
                        // Clear the splitting state on parent shards
                        let updated = diesel::update(tenant_shards)
                            .filter(tenant_id.eq(split_tenant_id.to_string()))
                            .filter(shard_count.ne(new_shard_count.literal() as i32))
                            .set((splitting.eq(0),))
                            .execute(conn)?;

                        // Parent shards are already gone: we cannot abort.
                        if updated == 0 {
                            return Ok(AbortShardSplitStatus::Complete);
                        }

                        // Sanity check: if parent shards were present, their cardinality should
                        // be less than the number of child shards.
                        if updated >= new_shard_count.count() as usize {
                            return Err(DatabaseError::Logical(format!(
                                "Unexpected parent shard count {updated} while aborting split to \
                                count {new_shard_count:?} on tenant {split_tenant_id}"
                            )));
                        }

                        // Erase child shards
                        diesel::delete(tenant_shards)
                            .filter(tenant_id.eq(split_tenant_id.to_string()))
                            .filter(shard_count.eq(new_shard_count.literal() as i32))
                            .execute(conn)?;

                        Ok(AbortShardSplitStatus::Aborted)
                    })?;

                Ok(aborted)
            },
        )
        .await
    }
}

/// Parts of [`crate::tenant_shard::TenantShard`] that are stored durably
#[derive(Queryable, Selectable, Insertable, Serialize, Deserialize, Clone, Eq, PartialEq)]
#[diesel(table_name = crate::schema::tenant_shards)]
pub(crate) struct TenantShardPersistence {
    #[serde(default)]
    pub(crate) tenant_id: String,
    #[serde(default)]
    pub(crate) shard_number: i32,
    #[serde(default)]
    pub(crate) shard_count: i32,
    #[serde(default)]
    pub(crate) shard_stripe_size: i32,

    // Latest generation number: next time we attach, increment this
    // and use the incremented number when attaching.
    //
    // Generation is only None when first onboarding a tenant, where it may
    // be in PlacementPolicy::Secondary and therefore have no valid generation state.
    pub(crate) generation: Option<i32>,

    // Currently attached pageserver
    #[serde(rename = "pageserver")]
    pub(crate) generation_pageserver: Option<i64>,

    #[serde(default)]
    pub(crate) placement_policy: String,
    #[serde(default)]
    pub(crate) splitting: SplitState,
    #[serde(default)]
    pub(crate) config: String,
    #[serde(default)]
    pub(crate) scheduling_policy: String,
}

impl TenantShardPersistence {
    pub(crate) fn get_shard_identity(&self) -> Result<ShardIdentity, ShardConfigError> {
        if self.shard_count == 0 {
            Ok(ShardIdentity::unsharded())
        } else {
            Ok(ShardIdentity::new(
                ShardNumber(self.shard_number as u8),
                ShardCount::new(self.shard_count as u8),
                ShardStripeSize(self.shard_stripe_size as u32),
            )?)
        }
    }

    pub(crate) fn get_tenant_shard_id(&self) -> Result<TenantShardId, hex::FromHexError> {
        Ok(TenantShardId {
            tenant_id: TenantId::from_str(self.tenant_id.as_str())?,
            shard_number: ShardNumber(self.shard_number as u8),
            shard_count: ShardCount::new(self.shard_count as u8),
        })
    }
}

/// Parts of [`crate::node::Node`] that are stored durably
#[derive(Serialize, Deserialize, Queryable, Selectable, Insertable, Eq, PartialEq)]
#[diesel(table_name = crate::schema::nodes)]
pub(crate) struct NodePersistence {
    pub(crate) node_id: i64,
    pub(crate) scheduling_policy: String,
    pub(crate) listen_http_addr: String,
    pub(crate) listen_http_port: i32,
    pub(crate) listen_pg_addr: String,
    pub(crate) listen_pg_port: i32,
}
789 : }
|