Line data Source code
1 : use std::{
2 : borrow::Cow,
3 : cmp::Ordering,
4 : collections::{BTreeMap, HashMap, HashSet},
5 : str::FromStr,
6 : sync::Arc,
7 : time::{Duration, Instant},
8 : };
9 :
10 : use crate::{
11 : id_lock_map::IdLockMap,
12 : persistence::{AbortShardSplitStatus, TenantFilter},
13 : reconciler::ReconcileError,
14 : scheduler::{ScheduleContext, ScheduleMode},
15 : };
16 : use anyhow::Context;
17 : use control_plane::storage_controller::{
18 : AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
19 : };
20 : use diesel::result::DatabaseErrorKind;
21 : use futures::{stream::FuturesUnordered, StreamExt};
22 : use hyper::StatusCode;
23 : use itertools::Itertools;
24 : use pageserver_api::{
25 : controller_api::{
26 : NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy,
27 : ShardSchedulingPolicy, TenantCreateResponse, TenantCreateResponseShard,
28 : TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse,
29 : TenantPolicyRequest, TenantShardMigrateRequest, TenantShardMigrateResponse,
30 : UtilizationScore,
31 : },
32 : models::{SecondaryProgress, TenantConfigRequest},
33 : };
34 :
35 : use crate::pageserver_client::PageserverClient;
36 : use pageserver_api::{
37 : models::{
38 : self, LocationConfig, LocationConfigListResponse, LocationConfigMode,
39 : PageserverUtilization, ShardParameters, TenantConfig, TenantCreateRequest,
40 : TenantLocationConfigRequest, TenantLocationConfigResponse, TenantShardLocation,
41 : TenantShardSplitRequest, TenantShardSplitResponse, TenantTimeTravelRequest,
42 : TimelineCreateRequest, TimelineInfo,
43 : },
44 : shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId},
45 : upcall_api::{
46 : ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest,
47 : ValidateResponse, ValidateResponseTenant,
48 : },
49 : };
50 : use pageserver_client::mgmt_api;
51 : use tokio::sync::OwnedRwLockWriteGuard;
52 : use tokio_util::sync::CancellationToken;
53 : use tracing::instrument;
54 : use utils::{
55 : completion::Barrier,
56 : generation::Generation,
57 : http::error::ApiError,
58 : id::{NodeId, TenantId, TimelineId},
59 : sync::gate::Gate,
60 : };
61 :
62 : use crate::{
63 : compute_hook::{self, ComputeHook},
64 : heartbeater::{Heartbeater, PageserverState},
65 : node::{AvailabilityTransition, Node},
66 : persistence::{split_state::SplitState, DatabaseError, Persistence, TenantShardPersistence},
67 : reconciler::attached_location_conf,
68 : scheduler::Scheduler,
69 : tenant_shard::{
70 : IntentState, ObservedState, ObservedStateLocation, ReconcileResult, ReconcileWaitError,
71 : ReconcilerWaiter, TenantShard,
72 : },
73 : };
74 :
75 : // For operations that should be quick, like attaching a new tenant
76 : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
77 :
78 : // For operations that might be slow, like migrating a tenant with
79 : // some data in it.
80 : const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
81 :
82 : // If we receive a call using Secondary mode initially, it will omit generation. We will initialize
83 : // tenant shards into this generation, and as long as it remains in this generation, we will accept
84 : // input generation from future requests as authoritative.
85 : const INITIAL_GENERATION: Generation = Generation::new(0);
86 :
87 : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
88 : /// up on unresponsive pageservers and proceed.
89 : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
90 :
91 : pub const MAX_UNAVAILABLE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
92 :
93 : // Top level state available to all HTTP handlers
94 : struct ServiceState {
95 : tenants: BTreeMap<TenantShardId, TenantShard>,
96 :
97 : nodes: Arc<HashMap<NodeId, Node>>,
98 :
99 : scheduler: Scheduler,
100 : }
101 :
102 : impl ServiceState {
103 0 : fn new(
104 0 : nodes: HashMap<NodeId, Node>,
105 0 : tenants: BTreeMap<TenantShardId, TenantShard>,
106 0 : scheduler: Scheduler,
107 0 : ) -> Self {
108 0 : Self {
109 0 : tenants,
110 0 : nodes: Arc::new(nodes),
111 0 : scheduler,
112 0 : }
113 0 : }
114 :
115 0 : fn parts_mut(
116 0 : &mut self,
117 0 : ) -> (
118 0 : &mut Arc<HashMap<NodeId, Node>>,
119 0 : &mut BTreeMap<TenantShardId, TenantShard>,
120 0 : &mut Scheduler,
121 0 : ) {
122 0 : (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
123 0 : }
124 : }
125 :
126 : #[derive(Clone)]
127 : pub struct Config {
128 : // All pageservers managed by one instance of this service must have
129 : // the same public key. This JWT token will be used to authenticate
130 : // this service to the pageservers it manages.
131 : pub jwt_token: Option<String>,
132 :
133 : // This JWT token will be used to authenticate this service to the control plane.
134 : pub control_plane_jwt_token: Option<String>,
135 :
136 : /// Where the compute hook should send notifications of pageserver attachment locations
137 : /// (this URL points to the control plane in prod). If this is None, the compute hook will
138 : /// assume it is running in a test environment and try to update neon_local.
139 : pub compute_hook_url: Option<String>,
140 :
141 : /// Grace period within which a pageserver does not respond to heartbeats, but is still
142 : /// considered active. Once the grace period elapses, the next heartbeat failure will
143 : /// mark the pagseserver offline.
144 : pub max_unavailable_interval: Duration,
145 : }
146 :
147 : impl From<DatabaseError> for ApiError {
148 0 : fn from(err: DatabaseError) -> ApiError {
149 0 : match err {
150 0 : DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
151 : // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
152 : DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
153 0 : ApiError::ShuttingDown
154 : }
155 0 : DatabaseError::Logical(reason) => {
156 0 : ApiError::InternalServerError(anyhow::anyhow!(reason))
157 : }
158 : }
159 0 : }
160 : }
161 :
162 : pub struct Service {
163 : inner: Arc<std::sync::RwLock<ServiceState>>,
164 : config: Config,
165 : persistence: Arc<Persistence>,
166 : compute_hook: Arc<ComputeHook>,
167 : result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResult>,
168 :
169 : heartbeater: Heartbeater,
170 :
171 : // Channel for background cleanup from failed operations that require cleanup, such as shard split
172 : abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
173 :
174 : // Locking on a tenant granularity (covers all shards in the tenant):
175 : // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
176 : // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
177 : tenant_op_locks: IdLockMap<TenantId>,
178 :
179 : // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
180 : // that transition it to/from Active.
181 : node_op_locks: IdLockMap<NodeId>,
182 :
183 : // Process shutdown will fire this token
184 : cancel: CancellationToken,
185 :
186 : // Background tasks will hold this gate
187 : gate: Gate,
188 :
189 : /// This waits for initial reconciliation with pageservers to complete. Until this barrier
190 : /// passes, it isn't safe to do any actions that mutate tenants.
191 : pub(crate) startup_complete: Barrier,
192 : }
193 :
194 : impl From<ReconcileWaitError> for ApiError {
195 0 : fn from(value: ReconcileWaitError) -> Self {
196 0 : match value {
197 0 : ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
198 0 : e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
199 0 : e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
200 : }
201 0 : }
202 : }
203 :
204 : #[allow(clippy::large_enum_variant)]
205 : enum TenantCreateOrUpdate {
206 : Create(TenantCreateRequest),
207 : Update(Vec<ShardUpdate>),
208 : }
209 :
210 : struct ShardSplitParams {
211 : old_shard_count: ShardCount,
212 : new_shard_count: ShardCount,
213 : new_stripe_size: Option<ShardStripeSize>,
214 : targets: Vec<ShardSplitTarget>,
215 : policy: PlacementPolicy,
216 : config: TenantConfig,
217 : shard_ident: ShardIdentity,
218 : }
219 :
220 : // When preparing for a shard split, we may either choose to proceed with the split,
221 : // or find that the work is already done and return NoOp.
222 : enum ShardSplitAction {
223 : Split(ShardSplitParams),
224 : NoOp(TenantShardSplitResponse),
225 : }
226 :
227 : // A parent shard which will be split
228 : struct ShardSplitTarget {
229 : parent_id: TenantShardId,
230 : node: Node,
231 : child_ids: Vec<TenantShardId>,
232 : }
233 :
234 : /// When we tenant shard split operation fails, we may not be able to clean up immediately, because nodes
235 : /// might not be available. We therefore use a queue of abort operations processed in the background.
236 : struct TenantShardSplitAbort {
237 : tenant_id: TenantId,
238 : /// The target values from the request that failed
239 : new_shard_count: ShardCount,
240 : new_stripe_size: Option<ShardStripeSize>,
241 : /// Until this abort op is complete, no other operations may be done on the tenant
242 : _tenant_lock: tokio::sync::OwnedRwLockWriteGuard<()>,
243 : }
244 :
245 0 : #[derive(thiserror::Error, Debug)]
246 : enum TenantShardSplitAbortError {
247 : #[error(transparent)]
248 : Database(#[from] DatabaseError),
249 : #[error(transparent)]
250 : Remote(#[from] mgmt_api::Error),
251 : #[error("Unavailable")]
252 : Unavailable,
253 : }
254 :
255 : struct ShardUpdate {
256 : tenant_shard_id: TenantShardId,
257 : placement_policy: PlacementPolicy,
258 : tenant_config: TenantConfig,
259 :
260 : /// If this is None, generation is not updated.
261 : generation: Option<Generation>,
262 : }
263 :
264 : impl Service {
265 0 : pub fn get_config(&self) -> &Config {
266 0 : &self.config
267 0 : }
268 :
269 : /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
270 : /// view of the world, and determine which pageservers are responsive.
271 0 : #[instrument(skip_all)]
272 : async fn startup_reconcile(self: &Arc<Service>) {
273 : // For all tenant shards, a vector of observed states on nodes (where None means
274 : // indeterminate, same as in [`ObservedStateLocation`])
275 : let mut observed: HashMap<TenantShardId, Vec<(NodeId, Option<LocationConfig>)>> =
276 : HashMap::new();
277 :
278 : // Startup reconciliation does I/O to other services: whether they
279 : // are responsive or not, we should aim to finish within our deadline, because:
280 : // - If we don't, a k8s readiness hook watching /ready will kill us.
281 : // - While we're waiting for startup reconciliation, we are not fully
282 : // available for end user operations like creating/deleting tenants and timelines.
283 : //
284 : // We set multiple deadlines to break up the time available between the phases of work: this is
285 : // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
286 : let start_at = Instant::now();
287 : let node_scan_deadline = start_at
288 : .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
289 : .expect("Reconcile timeout is a modest constant");
290 :
291 : let compute_notify_deadline = start_at
292 : .checked_add((STARTUP_RECONCILE_TIMEOUT / 4) * 3)
293 : .expect("Reconcile timeout is a modest constant");
294 :
295 : // Accumulate a list of any tenant locations that ought to be detached
296 : let mut cleanup = Vec::new();
297 :
298 : let node_listings = self.scan_node_locations(node_scan_deadline).await;
299 : // Send initial heartbeat requests to nodes that replied to the location listing above.
300 : let nodes_online = self.initial_heartbeat_round(node_listings.keys()).await;
301 :
302 : for (node_id, list_response) in node_listings {
303 : let tenant_shards = list_response.tenant_shards;
304 0 : tracing::info!(
305 0 : "Received {} shard statuses from pageserver {}, setting it to Active",
306 0 : tenant_shards.len(),
307 0 : node_id
308 0 : );
309 :
310 : for (tenant_shard_id, conf_opt) in tenant_shards {
311 : let shard_observations = observed.entry(tenant_shard_id).or_default();
312 : shard_observations.push((node_id, conf_opt));
313 : }
314 : }
315 :
316 : // List of tenants for which we will attempt to notify compute of their location at startup
317 : let mut compute_notifications = Vec::new();
318 :
319 : // Populate intent and observed states for all tenants, based on reported state on pageservers
320 : let shard_count = {
321 : let mut locked = self.inner.write().unwrap();
322 : let (nodes, tenants, scheduler) = locked.parts_mut();
323 :
324 : // Mark nodes online if they responded to us: nodes are offline by default after a restart.
325 : let mut new_nodes = (**nodes).clone();
326 : for (node_id, node) in new_nodes.iter_mut() {
327 : if let Some(utilization) = nodes_online.get(node_id) {
328 : node.set_availability(NodeAvailability::Active(UtilizationScore(
329 : utilization.utilization_score,
330 : )));
331 : scheduler.node_upsert(node);
332 : }
333 : }
334 : *nodes = Arc::new(new_nodes);
335 :
336 : for (tenant_shard_id, shard_observations) in observed {
337 : for (node_id, observed_loc) in shard_observations {
338 : let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
339 : cleanup.push((tenant_shard_id, node_id));
340 : continue;
341 : };
342 : tenant_shard
343 : .observed
344 : .locations
345 : .insert(node_id, ObservedStateLocation { conf: observed_loc });
346 : }
347 : }
348 :
349 : // Populate each tenant's intent state
350 : let mut schedule_context = ScheduleContext::default();
351 : for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
352 : if tenant_shard_id.shard_number == ShardNumber(0) {
353 : // Reset scheduling context each time we advance to the next Tenant
354 : schedule_context = ScheduleContext::default();
355 : }
356 :
357 : tenant_shard.intent_from_observed(scheduler);
358 : if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
359 : // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
360 : // not enough pageservers are available. The tenant may well still be available
361 : // to clients.
362 0 : tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
363 : } else {
364 : // If we're both intending and observed to be attached at a particular node, we will
365 : // emit a compute notification for this. In the case where our observed state does not
366 : // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
367 : if let Some(attached_at) = tenant_shard.stably_attached() {
368 : compute_notifications.push((
369 : *tenant_shard_id,
370 : attached_at,
371 : tenant_shard.shard.stripe_size,
372 : ));
373 : }
374 : }
375 : }
376 :
377 : tenants.len()
378 : };
379 :
380 : // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
381 : // generation_pageserver in the database.
382 :
383 : // Emit compute hook notifications for all tenants which are already stably attached. Other tenants
384 : // will emit compute hook notifications when they reconcile.
385 : //
386 : // Ordering: we must complete these notification attempts before doing any other reconciliation for the
387 : // tenants named here, because otherwise our calls to notify() might race with more recent values
388 : // generated by reconciliation.
389 : let notify_failures = self
390 : .compute_notify_many(compute_notifications, compute_notify_deadline)
391 : .await;
392 :
393 : // Compute notify is fallible. If it fails here, do not delay overall startup: set the
394 : // flag on these shards that they have a pending notification.
395 : // Update tenant state for any that failed to do their initial compute notify, so that they'll retry later.
396 : {
397 : let mut locked = self.inner.write().unwrap();
398 : for tenant_shard_id in notify_failures.into_iter() {
399 : if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
400 : shard.pending_compute_notification = true;
401 : }
402 : }
403 : }
404 :
405 : // Finally, now that the service is up and running, launch reconcile operations for any tenants
406 : // which require it: under normal circumstances this should only include tenants that were in some
407 : // transient state before we restarted, or any tenants whose compute hooks failed above.
408 : let reconcile_tasks = self.reconcile_all();
409 : // We will not wait for these reconciliation tasks to run here: we're now done with startup and
410 : // normal operations may proceed.
411 :
412 : // Clean up any tenants that were found on pageservers but are not known to us. Do this in the
413 : // background because it does not need to complete in order to proceed with other work.
414 : if !cleanup.is_empty() {
415 0 : tracing::info!("Cleaning up {} locations in the background", cleanup.len());
416 : tokio::task::spawn({
417 : let cleanup_self = self.clone();
418 0 : async move { cleanup_self.cleanup_locations(cleanup).await }
419 : });
420 : }
421 :
422 0 : tracing::info!("Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)");
423 : }
424 :
425 0 : async fn initial_heartbeat_round<'a>(
426 0 : &self,
427 0 : node_ids: impl Iterator<Item = &'a NodeId>,
428 0 : ) -> HashMap<NodeId, PageserverUtilization> {
429 0 : assert!(!self.startup_complete.is_ready());
430 :
431 0 : let all_nodes = {
432 0 : let locked = self.inner.read().unwrap();
433 0 : locked.nodes.clone()
434 0 : };
435 0 :
436 0 : let mut nodes_to_heartbeat = HashMap::new();
437 0 : for node_id in node_ids {
438 0 : match all_nodes.get(node_id) {
439 0 : Some(node) => {
440 0 : nodes_to_heartbeat.insert(*node_id, node.clone());
441 0 : }
442 : None => {
443 0 : tracing::warn!("Node {node_id} was removed during start-up");
444 : }
445 : }
446 : }
447 :
448 0 : let res = self
449 0 : .heartbeater
450 0 : .heartbeat(Arc::new(nodes_to_heartbeat))
451 0 : .await;
452 :
453 0 : let mut online_nodes = HashMap::new();
454 0 : if let Ok(deltas) = res {
455 0 : for (node_id, status) in deltas.0 {
456 0 : match status {
457 0 : PageserverState::Available { utilization, .. } => {
458 0 : online_nodes.insert(node_id, utilization);
459 0 : }
460 0 : PageserverState::Offline => {}
461 : }
462 : }
463 0 : }
464 :
465 0 : online_nodes
466 0 : }
467 :
468 : /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
469 : ///
470 : /// The result includes only nodes which responded within the deadline
471 0 : async fn scan_node_locations(
472 0 : &self,
473 0 : deadline: Instant,
474 0 : ) -> HashMap<NodeId, LocationConfigListResponse> {
475 0 : let nodes = {
476 0 : let locked = self.inner.read().unwrap();
477 0 : locked.nodes.clone()
478 0 : };
479 0 :
480 0 : let mut node_results = HashMap::new();
481 0 :
482 0 : let mut node_list_futs = FuturesUnordered::new();
483 :
484 0 : for node in nodes.values() {
485 0 : node_list_futs.push({
486 0 : async move {
487 0 : tracing::info!("Scanning shards on node {node}...");
488 0 : let timeout = Duration::from_secs(1);
489 0 : let response = node
490 0 : .with_client_retries(
491 0 : |client| async move { client.list_location_config().await },
492 0 : &self.config.jwt_token,
493 0 : 1,
494 0 : 5,
495 0 : timeout,
496 0 : &self.cancel,
497 0 : )
498 0 : .await;
499 0 : (node.get_id(), response)
500 0 : }
501 0 : });
502 0 : }
503 :
504 : loop {
505 0 : let (node_id, result) = tokio::select! {
506 0 : next = node_list_futs.next() => {
507 : match next {
508 : Some(result) => result,
509 : None =>{
510 : // We got results for all our nodes
511 : break;
512 : }
513 :
514 : }
515 : },
516 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
517 : // Give up waiting for anyone who hasn't responded: we will yield the results that we have
518 0 : tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
519 : break;
520 : }
521 : };
522 :
523 0 : let Some(list_response) = result else {
524 0 : tracing::info!("Shutdown during startup_reconcile");
525 0 : break;
526 : };
527 :
528 0 : match list_response {
529 0 : Err(e) => {
530 0 : tracing::warn!("Could not scan node {} ({e})", node_id);
531 : }
532 0 : Ok(listing) => {
533 0 : node_results.insert(node_id, listing);
534 0 : }
535 : }
536 : }
537 :
538 0 : node_results
539 0 : }
540 :
541 : /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
542 : ///
543 : /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
544 : /// tenants, then it is probably something incompletely deleted before: we will not fight with any
545 : /// other task trying to attach it.
546 0 : #[instrument(skip_all)]
547 : async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
548 : let nodes = self.inner.read().unwrap().nodes.clone();
549 :
550 : for (tenant_shard_id, node_id) in cleanup {
551 : // A node reported a tenant_shard_id which is unknown to us: detach it.
552 : let Some(node) = nodes.get(&node_id) else {
553 : // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
554 : // a location to clean up on a node that has since been removed.
555 0 : tracing::info!(
556 0 : "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
557 0 : );
558 : continue;
559 : };
560 :
561 : if self.cancel.is_cancelled() {
562 : break;
563 : }
564 :
565 : let client = PageserverClient::new(
566 : node.get_id(),
567 : node.base_url(),
568 : self.config.jwt_token.as_deref(),
569 : );
570 : match client
571 : .location_config(
572 : tenant_shard_id,
573 : LocationConfig {
574 : mode: LocationConfigMode::Detached,
575 : generation: None,
576 : secondary_conf: None,
577 : shard_number: tenant_shard_id.shard_number.0,
578 : shard_count: tenant_shard_id.shard_count.literal(),
579 : shard_stripe_size: 0,
580 : tenant_conf: models::TenantConfig::default(),
581 : },
582 : None,
583 : false,
584 : )
585 : .await
586 : {
587 : Ok(()) => {
588 0 : tracing::info!(
589 0 : "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
590 0 : );
591 : }
592 : Err(e) => {
593 : // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
594 : // break anything.
595 0 : tracing::error!(
596 0 : "Failed to detach unknkown shard {tenant_shard_id} on pageserver {node_id}: {e}"
597 0 : );
598 : }
599 : }
600 : }
601 : }
602 :
603 : /// Used during [`Self::startup_reconcile`]: issue many concurrent compute notifications.
604 : ///
605 : /// Returns a set of any shards for which notifications where not acked within the deadline.
606 0 : async fn compute_notify_many(
607 0 : &self,
608 0 : notifications: Vec<(TenantShardId, NodeId, ShardStripeSize)>,
609 0 : deadline: Instant,
610 0 : ) -> HashSet<TenantShardId> {
611 0 : let attempt_shards = notifications.iter().map(|i| i.0).collect::<HashSet<_>>();
612 0 : let mut success_shards = HashSet::new();
613 0 :
614 0 : // Construct an async stream of futures to invoke the compute notify function: we do this
615 0 : // in order to subsequently use .buffered() on the stream to execute with bounded parallelism.
616 0 : let mut stream = futures::stream::iter(notifications.into_iter())
617 0 : .map(|(tenant_shard_id, node_id, stripe_size)| {
618 0 : let compute_hook = self.compute_hook.clone();
619 0 : let cancel = self.cancel.clone();
620 0 : async move {
621 0 : if let Err(e) = compute_hook
622 0 : .notify(tenant_shard_id, node_id, stripe_size, &cancel)
623 0 : .await
624 : {
625 0 : tracing::error!(
626 0 : %tenant_shard_id,
627 0 : %node_id,
628 0 : "Failed to notify compute on startup for shard: {e}"
629 0 : );
630 0 : None
631 : } else {
632 0 : Some(tenant_shard_id)
633 : }
634 0 : }
635 0 : })
636 0 : .buffered(compute_hook::API_CONCURRENCY);
637 :
638 0 : loop {
639 0 : tokio::select! {
640 0 : next = stream.next() => {
641 : match next {
642 : Some(Some(success_shard)) => {
643 : // A notification succeeded
644 : success_shards.insert(success_shard);
645 : },
646 : Some(None) => {
647 : // A notification that failed
648 : },
649 : None => {
650 0 : tracing::info!("Successfully sent all compute notifications");
651 : break;
652 : }
653 : }
654 : },
655 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
656 : // Give up sending any that didn't succeed yet
657 0 : tracing::info!("Reached deadline while sending compute notifications");
658 : break;
659 : }
660 0 : };
661 0 : }
662 :
663 0 : attempt_shards
664 0 : .difference(&success_shards)
665 0 : .cloned()
666 0 : .collect()
667 0 : }
668 :
669 : /// Long running background task that periodically wakes up and looks for shards that need
670 : /// reconciliation. Reconciliation is fallible, so any reconciliation tasks that fail during
671 : /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
672 : /// for those retries.
673 0 : #[instrument(skip_all)]
674 : async fn background_reconcile(&self) {
675 : self.startup_complete.clone().wait().await;
676 :
677 : const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
678 :
679 : let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
680 : while !self.cancel.is_cancelled() {
681 0 : tokio::select! {
682 0 : _ = interval.tick() => {
683 0 : let reconciles_spawned = self.reconcile_all();
684 0 : if reconciles_spawned == 0 {
685 0 : // Run optimizer only when we didn't find any other work to do
686 0 : self.optimize_all();
687 0 : }
688 0 : }
689 0 : _ = self.cancel.cancelled() => return
690 0 : }
691 : }
692 : }
693 0 : #[instrument(skip_all)]
694 : async fn spawn_heartbeat_driver(&self) {
695 : self.startup_complete.clone().wait().await;
696 :
697 : const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);
698 :
699 : let mut interval = tokio::time::interval(HEARTBEAT_INTERVAL);
700 : while !self.cancel.is_cancelled() {
701 0 : tokio::select! {
702 0 : _ = interval.tick() => { }
703 0 : _ = self.cancel.cancelled() => return
704 0 : };
705 :
706 : let nodes = {
707 : let locked = self.inner.read().unwrap();
708 : locked.nodes.clone()
709 : };
710 :
711 : let res = self.heartbeater.heartbeat(nodes).await;
712 : if let Ok(deltas) = res {
713 : for (node_id, state) in deltas.0 {
714 : let new_availability = match state {
715 : PageserverState::Available { utilization, .. } => NodeAvailability::Active(
716 : UtilizationScore(utilization.utilization_score),
717 : ),
718 : PageserverState::Offline => NodeAvailability::Offline,
719 : };
720 : let res = self
721 : .node_configure(node_id, Some(new_availability), None)
722 : .await;
723 :
724 : match res {
725 : Ok(()) => {}
726 : Err(ApiError::NotFound(_)) => {
727 : // This should be rare, but legitimate since the heartbeats are done
728 : // on a snapshot of the nodes.
729 0 : tracing::info!("Node {} was not found after heartbeat round", node_id);
730 : }
731 : Err(err) => {
732 0 : tracing::error!(
733 0 : "Failed to update node {} after heartbeat round: {}",
734 0 : node_id,
735 0 : err
736 0 : );
737 : }
738 : }
739 : }
740 : }
741 : }
742 : }
743 :
744 : /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
745 : /// was successful, this will update the observed state of the tenant such that subsequent
746 : /// calls to [`TenantShard::maybe_reconcile`] will do nothing.
747 0 : #[instrument(skip_all, fields(
748 0 : tenant_id=%result.tenant_shard_id.tenant_id, shard_id=%result.tenant_shard_id.shard_slug(),
749 0 : sequence=%result.sequence
750 0 : ))]
751 : fn process_result(&self, result: ReconcileResult) {
752 : let mut locked = self.inner.write().unwrap();
753 : let Some(tenant) = locked.tenants.get_mut(&result.tenant_shard_id) else {
754 : // A reconciliation result might race with removing a tenant: drop results for
755 : // tenants that aren't in our map.
756 : return;
757 : };
758 :
759 : // Usually generation should only be updated via this path, so the max() isn't
760 : // needed, but it is used to handle out-of-band updates via. e.g. test hook.
761 : tenant.generation = std::cmp::max(tenant.generation, result.generation);
762 :
763 : // If the reconciler signals that it failed to notify compute, set this state on
764 : // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
765 : tenant.pending_compute_notification = result.pending_compute_notification;
766 :
767 : // Let the TenantShard know it is idle.
768 : tenant.reconcile_complete(result.sequence);
769 :
770 : match result.result {
771 : Ok(()) => {
772 : for (node_id, loc) in &result.observed.locations {
773 : if let Some(conf) = &loc.conf {
774 0 : tracing::info!("Updating observed location {}: {:?}", node_id, conf);
775 : } else {
776 0 : tracing::info!("Setting observed location {} to None", node_id,)
777 : }
778 : }
779 : tenant.observed = result.observed;
780 : tenant.waiter.advance(result.sequence);
781 : }
782 : Err(e) => {
783 : match e {
784 : ReconcileError::Cancel => {
785 0 : tracing::info!("Reconciler was cancelled");
786 : }
787 : ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
788 : // This might be due to the reconciler getting cancelled, or it might
789 : // be due to the `Node` being marked offline.
790 0 : tracing::info!("Reconciler cancelled during pageserver API call");
791 : }
792 : _ => {
793 0 : tracing::warn!("Reconcile error: {}", e);
794 : }
795 : }
796 :
797 : // Ordering: populate last_error before advancing error_seq,
798 : // so that waiters will see the correct error after waiting.
799 : *(tenant.last_error.lock().unwrap()) = format!("{e}");
800 : tenant.error_waiter.advance(result.sequence);
801 :
802 : for (node_id, o) in result.observed.locations {
803 : tenant.observed.locations.insert(node_id, o);
804 : }
805 : }
806 : }
807 : }
808 :
809 0 : async fn process_results(
810 0 : &self,
811 0 : mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResult>,
812 0 : ) {
813 0 : loop {
814 0 : // Wait for the next result, or for cancellation
815 0 : let result = tokio::select! {
816 0 : r = result_rx.recv() => {
817 : match r {
818 : Some(result) => {result},
819 : None => {break;}
820 : }
821 : }
822 : _ = self.cancel.cancelled() => {
823 : break;
824 : }
825 0 : };
826 0 :
827 0 : self.process_result(result);
828 0 : }
829 0 : }
830 :
831 0 : async fn process_aborts(
832 0 : &self,
833 0 : mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
834 0 : ) {
835 : loop {
836 : // Wait for the next result, or for cancellation
837 0 : let op = tokio::select! {
838 0 : r = abort_rx.recv() => {
839 : match r {
840 : Some(op) => {op},
841 : None => {break;}
842 : }
843 : }
844 : _ = self.cancel.cancelled() => {
845 : break;
846 : }
847 : };
848 :
849 : // Retry until shutdown: we must keep this request object alive until it is properly
850 : // processed, as it holds a lock guard that prevents other operations trying to do things
851 : // to the tenant while it is in a weird part-split state.
852 0 : while !self.cancel.is_cancelled() {
853 0 : match self.abort_tenant_shard_split(&op).await {
854 0 : Ok(_) => break,
855 0 : Err(e) => {
856 0 : tracing::warn!(
857 0 : "Failed to abort shard split on {}, will retry: {e}",
858 0 : op.tenant_id
859 0 : );
860 :
861 : // If a node is unavailable, we hope that it has been properly marked Offline
862 : // when we retry, so that the abort op will succeed. If the abort op is failing
863 : // for some other reason, we will keep retrying forever, or until a human notices
864 : // and does something about it (either fixing a pageserver or restarting the controller).
865 0 : tokio::time::timeout(Duration::from_secs(5), self.cancel.cancelled())
866 0 : .await
867 0 : .ok();
868 : }
869 : }
870 : }
871 : }
872 0 : }
873 :
874 0 : pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
875 0 : let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
876 0 : let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
877 0 :
878 0 : tracing::info!("Loading nodes from database...");
879 0 : let nodes = persistence
880 0 : .list_nodes()
881 0 : .await?
882 0 : .into_iter()
883 0 : .map(Node::from_persistent)
884 0 : .collect::<Vec<_>>();
885 0 : let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
886 0 : tracing::info!("Loaded {} nodes from database.", nodes.len());
887 :
888 0 : tracing::info!("Loading shards from database...");
889 0 : let mut tenant_shard_persistence = persistence.list_tenant_shards().await?;
890 0 : tracing::info!(
891 0 : "Loaded {} shards from database.",
892 0 : tenant_shard_persistence.len()
893 0 : );
894 :
895 : // If any shard splits were in progress, reset the database state to abort them
896 0 : let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
897 0 : HashMap::new();
898 0 : for tsp in &mut tenant_shard_persistence {
899 0 : let shard = tsp.get_shard_identity()?;
900 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
901 0 : let entry = tenant_shard_count_min_max
902 0 : .entry(tenant_shard_id.tenant_id)
903 0 : .or_insert_with(|| (shard.count, shard.count));
904 0 : entry.0 = std::cmp::min(entry.0, shard.count);
905 0 : entry.1 = std::cmp::max(entry.1, shard.count);
906 : }
907 :
908 0 : for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
909 0 : if count_min != count_max {
910 : // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
911 : // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
912 : // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
913 0 : tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
914 0 : let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
915 :
916 : // We may never see the Complete status here: if the split was complete, we wouldn't have
917 : // identified this tenant has having mismatching min/max counts.
918 0 : assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
919 :
920 : // Clear the splitting status in-memory, to reflect that we just aborted in the database
921 0 : tenant_shard_persistence.iter_mut().for_each(|tsp| {
922 0 : // Set idle split state on those shards that we will retain.
923 0 : let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
924 0 : if tsp_tenant_id == tenant_id
925 0 : && tsp.get_shard_identity().unwrap().count == count_min
926 0 : {
927 0 : tsp.splitting = SplitState::Idle;
928 0 : } else if tsp_tenant_id == tenant_id {
929 : // Leave the splitting state on the child shards: this will be used next to
930 : // drop them.
931 0 : tracing::info!(
932 0 : "Shard {tsp_tenant_id} will be dropped after shard split abort",
933 0 : );
934 0 : }
935 0 : });
936 0 :
937 0 : // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
938 0 : tenant_shard_persistence.retain(|tsp| {
939 0 : TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
940 0 : || tsp.splitting == SplitState::Idle
941 0 : });
942 0 : }
943 : }
944 :
945 0 : let mut tenants = BTreeMap::new();
946 0 :
947 0 : let mut scheduler = Scheduler::new(nodes.values());
948 0 :
949 0 : #[cfg(feature = "testing")]
950 0 : {
951 0 : // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
952 0 : // tests only store the shards, not the nodes. The nodes will be loaded shortly
953 0 : // after when pageservers start up and register.
954 0 : let mut node_ids = HashSet::new();
955 0 : for tsp in &tenant_shard_persistence {
956 0 : if let Some(node_id) = tsp.generation_pageserver {
957 0 : node_ids.insert(node_id);
958 0 : }
959 : }
960 0 : for node_id in node_ids {
961 0 : tracing::info!("Creating node {} in scheduler for tests", node_id);
962 0 : let node = Node::new(
963 0 : NodeId(node_id as u64),
964 0 : "".to_string(),
965 0 : 123,
966 0 : "".to_string(),
967 0 : 123,
968 0 : );
969 0 :
970 0 : scheduler.node_upsert(&node);
971 : }
972 : }
973 0 : for tsp in tenant_shard_persistence {
974 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
975 :
976 : // We will populate intent properly later in [`Self::startup_reconcile`], initially populate
977 : // it with what we can infer: the node for which a generation was most recently issued.
978 0 : let mut intent = IntentState::new();
979 0 : if let Some(generation_pageserver) = tsp.generation_pageserver {
980 0 : intent.set_attached(&mut scheduler, Some(NodeId(generation_pageserver as u64)));
981 0 : }
982 0 : let new_tenant = TenantShard::from_persistent(tsp, intent)?;
983 :
984 0 : tenants.insert(tenant_shard_id, new_tenant);
985 : }
986 :
987 0 : let (startup_completion, startup_complete) = utils::completion::channel();
988 0 :
989 0 : let cancel = CancellationToken::new();
990 0 : let heartbeater = Heartbeater::new(
991 0 : config.jwt_token.clone(),
992 0 : config.max_unavailable_interval,
993 0 : cancel.clone(),
994 0 : );
995 0 : let this = Arc::new(Self {
996 0 : inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
997 0 : nodes, tenants, scheduler,
998 0 : ))),
999 0 : config: config.clone(),
1000 0 : persistence,
1001 0 : compute_hook: Arc::new(ComputeHook::new(config)),
1002 0 : result_tx,
1003 0 : heartbeater,
1004 0 : abort_tx,
1005 0 : startup_complete: startup_complete.clone(),
1006 0 : cancel,
1007 0 : gate: Gate::default(),
1008 0 : tenant_op_locks: Default::default(),
1009 0 : node_op_locks: Default::default(),
1010 0 : });
1011 0 :
1012 0 : let result_task_this = this.clone();
1013 0 : tokio::task::spawn(async move {
1014 : // Block shutdown until we're done (we must respect self.cancel)
1015 0 : if let Ok(_gate) = result_task_this.gate.enter() {
1016 0 : result_task_this.process_results(result_rx).await
1017 0 : }
1018 0 : });
1019 0 :
1020 0 : tokio::task::spawn({
1021 0 : let this = this.clone();
1022 0 : async move {
1023 : // Block shutdown until we're done (we must respect self.cancel)
1024 0 : if let Ok(_gate) = this.gate.enter() {
1025 0 : this.process_aborts(abort_rx).await
1026 0 : }
1027 0 : }
1028 0 : });
1029 0 :
1030 0 : tokio::task::spawn({
1031 0 : let this = this.clone();
1032 0 : async move {
1033 0 : if let Ok(_gate) = this.gate.enter() {
1034 0 : loop {
1035 0 : tokio::select! {
1036 0 : _ = this.cancel.cancelled() => {
1037 0 : break;
1038 0 : },
1039 0 : _ = tokio::time::sleep(Duration::from_secs(60)) => {}
1040 0 : };
1041 0 : this.tenant_op_locks.housekeeping();
1042 0 : }
1043 0 : }
1044 0 : }
1045 0 : });
1046 0 :
1047 0 : tokio::task::spawn({
1048 0 : let this = this.clone();
1049 0 : // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
1050 0 : // is done.
1051 0 : let startup_completion = startup_completion.clone();
1052 0 : async move {
1053 : // Block shutdown until we're done (we must respect self.cancel)
1054 0 : let Ok(_gate) = this.gate.enter() else {
1055 0 : return;
1056 : };
1057 :
1058 0 : this.startup_reconcile().await;
1059 0 : drop(startup_completion);
1060 0 : }
1061 0 : });
1062 0 :
1063 0 : tokio::task::spawn({
1064 0 : let this = this.clone();
1065 0 : let startup_complete = startup_complete.clone();
1066 0 : async move {
1067 0 : startup_complete.wait().await;
1068 0 : this.background_reconcile().await;
1069 0 : }
1070 0 : });
1071 0 :
1072 0 : tokio::task::spawn({
1073 0 : let this = this.clone();
1074 0 : let startup_complete = startup_complete.clone();
1075 0 : async move {
1076 0 : startup_complete.wait().await;
1077 0 : this.spawn_heartbeat_driver().await;
1078 0 : }
1079 0 : });
1080 0 :
1081 0 : Ok(this)
1082 0 : }
1083 :
1084 0 : pub(crate) async fn attach_hook(
1085 0 : &self,
1086 0 : attach_req: AttachHookRequest,
1087 0 : ) -> anyhow::Result<AttachHookResponse> {
1088 0 : // This is a test hook. To enable using it on tenants that were created directly with
1089 0 : // the pageserver API (not via this service), we will auto-create any missing tenant
1090 0 : // shards with default state.
1091 0 : let insert = {
1092 0 : let locked = self.inner.write().unwrap();
1093 0 : !locked.tenants.contains_key(&attach_req.tenant_shard_id)
1094 0 : };
1095 0 : if insert {
1096 0 : let tsp = TenantShardPersistence {
1097 0 : tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
1098 0 : shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
1099 0 : shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
1100 0 : shard_stripe_size: 0,
1101 0 : generation: Some(0),
1102 0 : generation_pageserver: None,
1103 0 : placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
1104 0 : config: serde_json::to_string(&TenantConfig::default()).unwrap(),
1105 0 : splitting: SplitState::default(),
1106 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1107 0 : .unwrap(),
1108 0 : };
1109 0 :
1110 0 : match self.persistence.insert_tenant_shards(vec![tsp]).await {
1111 0 : Err(e) => match e {
1112 : DatabaseError::Query(diesel::result::Error::DatabaseError(
1113 : DatabaseErrorKind::UniqueViolation,
1114 : _,
1115 : )) => {
1116 0 : tracing::info!(
1117 0 : "Raced with another request to insert tenant {}",
1118 0 : attach_req.tenant_shard_id
1119 0 : )
1120 : }
1121 0 : _ => return Err(e.into()),
1122 : },
1123 : Ok(()) => {
1124 0 : tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
1125 :
1126 0 : let mut locked = self.inner.write().unwrap();
1127 0 : locked.tenants.insert(
1128 0 : attach_req.tenant_shard_id,
1129 0 : TenantShard::new(
1130 0 : attach_req.tenant_shard_id,
1131 0 : ShardIdentity::unsharded(),
1132 0 : PlacementPolicy::Attached(0),
1133 0 : ),
1134 0 : );
1135 0 : tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
1136 : }
1137 : }
1138 0 : }
1139 :
1140 0 : let new_generation = if let Some(req_node_id) = attach_req.node_id {
1141 0 : let maybe_tenant_conf = {
1142 0 : let locked = self.inner.write().unwrap();
1143 0 : locked
1144 0 : .tenants
1145 0 : .get(&attach_req.tenant_shard_id)
1146 0 : .map(|t| t.config.clone())
1147 0 : };
1148 0 :
1149 0 : match maybe_tenant_conf {
1150 0 : Some(conf) => {
1151 0 : let new_generation = self
1152 0 : .persistence
1153 0 : .increment_generation(attach_req.tenant_shard_id, req_node_id)
1154 0 : .await?;
1155 :
1156 : // Persist the placement policy update. This is required
1157 : // when we reattaching a detached tenant.
1158 0 : self.persistence
1159 0 : .update_tenant_shard(
1160 0 : TenantFilter::Shard(attach_req.tenant_shard_id),
1161 0 : Some(PlacementPolicy::Attached(0)),
1162 0 : Some(conf),
1163 0 : None,
1164 0 : None,
1165 0 : )
1166 0 : .await?;
1167 0 : Some(new_generation)
1168 : }
1169 : None => {
1170 0 : anyhow::bail!("Attach hook handling raced with tenant removal")
1171 : }
1172 : }
1173 : } else {
1174 0 : self.persistence.detach(attach_req.tenant_shard_id).await?;
1175 0 : None
1176 : };
1177 :
1178 0 : let mut locked = self.inner.write().unwrap();
1179 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
1180 0 :
1181 0 : let tenant_shard = tenants
1182 0 : .get_mut(&attach_req.tenant_shard_id)
1183 0 : .expect("Checked for existence above");
1184 :
1185 0 : if let Some(new_generation) = new_generation {
1186 0 : tenant_shard.generation = Some(new_generation);
1187 0 : tenant_shard.policy = PlacementPolicy::Attached(0);
1188 0 : } else {
1189 : // This is a detach notification. We must update placement policy to avoid re-attaching
1190 : // during background scheduling/reconciliation, or during storage controller restart.
1191 0 : assert!(attach_req.node_id.is_none());
1192 0 : tenant_shard.policy = PlacementPolicy::Detached;
1193 : }
1194 :
1195 0 : if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
1196 0 : tracing::info!(
1197 0 : tenant_id = %attach_req.tenant_shard_id,
1198 0 : ps_id = %attaching_pageserver,
1199 0 : generation = ?tenant_shard.generation,
1200 0 : "issuing",
1201 0 : );
1202 0 : } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
1203 0 : tracing::info!(
1204 0 : tenant_id = %attach_req.tenant_shard_id,
1205 0 : %ps_id,
1206 0 : generation = ?tenant_shard.generation,
1207 0 : "dropping",
1208 0 : );
1209 : } else {
1210 0 : tracing::info!(
1211 0 : tenant_id = %attach_req.tenant_shard_id,
1212 0 : "no-op: tenant already has no pageserver");
1213 : }
1214 0 : tenant_shard
1215 0 : .intent
1216 0 : .set_attached(scheduler, attach_req.node_id);
1217 0 :
1218 0 : tracing::info!(
1219 0 : "attach_hook: tenant {} set generation {:?}, pageserver {}",
1220 0 : attach_req.tenant_shard_id,
1221 0 : tenant_shard.generation,
1222 0 : // TODO: this is an odd number of 0xf's
1223 0 : attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff))
1224 0 : );
1225 :
1226 : // Trick the reconciler into not doing anything for this tenant: this helps
1227 : // tests that manually configure a tenant on the pagesrever, and then call this
1228 : // attach hook: they don't want background reconciliation to modify what they
1229 : // did to the pageserver.
1230 : #[cfg(feature = "testing")]
1231 : {
1232 0 : if let Some(node_id) = attach_req.node_id {
1233 0 : tenant_shard.observed.locations = HashMap::from([(
1234 0 : node_id,
1235 0 : ObservedStateLocation {
1236 0 : conf: Some(attached_location_conf(
1237 0 : tenant_shard.generation.unwrap(),
1238 0 : &tenant_shard.shard,
1239 0 : &tenant_shard.config,
1240 0 : false,
1241 0 : )),
1242 0 : },
1243 0 : )]);
1244 0 : } else {
1245 0 : tenant_shard.observed.locations.clear();
1246 0 : }
1247 : }
1248 :
1249 0 : Ok(AttachHookResponse {
1250 0 : gen: attach_req
1251 0 : .node_id
1252 0 : .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
1253 0 : })
1254 0 : }
1255 :
1256 0 : pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
1257 0 : let locked = self.inner.read().unwrap();
1258 0 :
1259 0 : let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
1260 0 :
1261 0 : InspectResponse {
1262 0 : attachment: tenant_shard.and_then(|s| {
1263 0 : s.intent
1264 0 : .get_attached()
1265 0 : .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
1266 0 : }),
1267 0 : }
1268 0 : }
1269 :
1270 : // When the availability state of a node transitions to active, we must do a full reconciliation
1271 : // of LocationConfigs on that node. This is because while a node was offline:
1272 : // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
1273 : // - aborting a tenant shard split might have left rogue child shards behind on this node.
1274 : //
1275 : // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
1276 : // Reconcilers might communicate with the node, and these must not overlap with the work we do in
1277 : // this function.
1278 : //
1279 : // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
1280 : // for written for a single node rather than as a batch job for all nodes.
1281 0 : #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
1282 : async fn node_activate_reconcile(
1283 : &self,
1284 : mut node: Node,
1285 : _lock: &OwnedRwLockWriteGuard<()>,
1286 : ) -> Result<(), ApiError> {
1287 : // This Node is a mutable local copy: we will set it active so that we can use its
1288 : // API client to reconcile with the node. The Node in [`Self::nodes`] will get updated
1289 : // later.
1290 : node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
1291 :
1292 : let configs = match node
1293 : .with_client_retries(
1294 0 : |client| async move { client.list_location_config().await },
1295 : &self.config.jwt_token,
1296 : 1,
1297 : 5,
1298 : SHORT_RECONCILE_TIMEOUT,
1299 : &self.cancel,
1300 : )
1301 : .await
1302 : {
1303 : None => {
1304 : // We're shutting down (the Node's cancellation token can't have fired, because
1305 : // we're the only scope that has a reference to it, and we didn't fire it).
1306 : return Err(ApiError::ShuttingDown);
1307 : }
1308 : Some(Err(e)) => {
1309 : // This node didn't succeed listing its locations: it may not proceed to active state
1310 : // as it is apparently unavailable.
1311 : return Err(ApiError::PreconditionFailed(
1312 : format!("Failed to query node location configs, cannot activate ({e})").into(),
1313 : ));
1314 : }
1315 : Some(Ok(configs)) => configs,
1316 : };
1317 0 : tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
1318 :
1319 : let mut cleanup = Vec::new();
1320 : {
1321 : let mut locked = self.inner.write().unwrap();
1322 :
1323 : for (tenant_shard_id, observed_loc) in configs.tenant_shards {
1324 : let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
1325 : cleanup.push(tenant_shard_id);
1326 : continue;
1327 : };
1328 : tenant_shard
1329 : .observed
1330 : .locations
1331 : .insert(node.get_id(), ObservedStateLocation { conf: observed_loc });
1332 : }
1333 : }
1334 :
1335 : for tenant_shard_id in cleanup {
1336 0 : tracing::info!("Detaching {tenant_shard_id}");
1337 : match node
1338 : .with_client_retries(
1339 0 : |client| async move {
1340 0 : let config = LocationConfig {
1341 0 : mode: LocationConfigMode::Detached,
1342 0 : generation: None,
1343 0 : secondary_conf: None,
1344 0 : shard_number: tenant_shard_id.shard_number.0,
1345 0 : shard_count: tenant_shard_id.shard_count.literal(),
1346 0 : shard_stripe_size: 0,
1347 0 : tenant_conf: models::TenantConfig::default(),
1348 0 : };
1349 0 : client
1350 0 : .location_config(tenant_shard_id, config, None, false)
1351 0 : .await
1352 0 : },
1353 : &self.config.jwt_token,
1354 : 1,
1355 : 5,
1356 : SHORT_RECONCILE_TIMEOUT,
1357 : &self.cancel,
1358 : )
1359 : .await
1360 : {
1361 : None => {
1362 : // We're shutting down (the Node's cancellation token can't have fired, because
1363 : // we're the only scope that has a reference to it, and we didn't fire it).
1364 : return Err(ApiError::ShuttingDown);
1365 : }
1366 : Some(Err(e)) => {
1367 : // Do not let the node proceed to Active state if it is not responsive to requests
1368 : // to detach. This could happen if e.g. a shutdown bug in the pageserver is preventing
1369 : // detach completing: we should not let this node back into the set of nodes considered
1370 : // okay for scheduling.
1371 : return Err(ApiError::Conflict(format!(
1372 : "Node {node} failed to detach {tenant_shard_id}: {e}"
1373 : )));
1374 : }
1375 : Some(Ok(_)) => {}
1376 : };
1377 : }
1378 :
1379 : Ok(())
1380 : }
1381 :
1382 0 : pub(crate) async fn re_attach(
1383 0 : &self,
1384 0 : reattach_req: ReAttachRequest,
1385 0 : ) -> Result<ReAttachResponse, ApiError> {
1386 0 : if let Some(register_req) = reattach_req.register {
1387 0 : self.node_register(register_req).await?;
1388 0 : }
1389 :
1390 : // Ordering: we must persist generation number updates before making them visible in the in-memory state
1391 0 : let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
1392 :
1393 0 : tracing::info!(
1394 0 : node_id=%reattach_req.node_id,
1395 0 : "Incremented {} tenant shards' generations",
1396 0 : incremented_generations.len()
1397 0 : );
1398 :
1399 : // Apply the updated generation to our in-memory state, and
1400 : // gather discover secondary locations.
1401 0 : let mut locked = self.inner.write().unwrap();
1402 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1403 0 :
1404 0 : let mut response = ReAttachResponse {
1405 0 : tenants: Vec::new(),
1406 0 : };
1407 :
1408 : // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
1409 : // to call location_conf API with an old generation. Wait for cancellation to complete
1410 : // before responding to this request. Requires well implemented CancellationToken logic
1411 : // all the way to where we call location_conf. Even then, there can still be a location_conf
1412 : // request in flight over the network: TODO handle that by making location_conf API refuse
1413 : // to go backward in generations.
1414 :
1415 : // Scan through all shards, applying updates for ones where we updated generation
1416 : // and identifying shards that intend to have a secondary location on this node.
1417 0 : for (tenant_shard_id, shard) in tenants {
1418 0 : if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
1419 0 : let new_gen = *new_gen;
1420 0 : response.tenants.push(ReAttachResponseTenant {
1421 0 : id: *tenant_shard_id,
1422 0 : gen: Some(new_gen.into().unwrap()),
1423 0 : // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
1424 0 : // execution. If a pageserver is restarted during that process, then the reconcile pass will
1425 0 : // fail, and start from scratch, so it doesn't make sense for us to try and preserve
1426 0 : // the stale/multi states at this point.
1427 0 : mode: LocationConfigMode::AttachedSingle,
1428 0 : });
1429 0 :
1430 0 : shard.generation = std::cmp::max(shard.generation, Some(new_gen));
1431 0 : if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
1432 : // Why can we update `observed` even though we're not sure our response will be received
1433 : // by the pageserver? Because the pageserver will not proceed with startup until
1434 : // it has processed response: if it loses it, we'll see another request and increment
1435 : // generation again, avoiding any uncertainty about dirtiness of tenant's state.
1436 0 : if let Some(conf) = observed.conf.as_mut() {
1437 0 : conf.generation = new_gen.into();
1438 0 : }
1439 0 : } else {
1440 0 : // This node has no observed state for the shard: perhaps it was offline
1441 0 : // when the pageserver restarted. Insert a None, so that the Reconciler
1442 0 : // will be prompted to learn the location's state before it makes changes.
1443 0 : shard
1444 0 : .observed
1445 0 : .locations
1446 0 : .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
1447 0 : }
1448 0 : } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
1449 0 : // Ordering: pageserver will not accept /location_config requests until it has
1450 0 : // finished processing the response from re-attach. So we can update our in-memory state
1451 0 : // now, and be confident that we are not stamping on the result of some later location config.
1452 0 : // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
1453 0 : // so we might update observed state here, and then get over-written by some racing
1454 0 : // ReconcileResult. The impact is low however, since we have set state on pageserver something
1455 0 : // that matches intent, so worst case if we race then we end up doing a spurious reconcile.
1456 0 :
1457 0 : response.tenants.push(ReAttachResponseTenant {
1458 0 : id: *tenant_shard_id,
1459 0 : gen: None,
1460 0 : mode: LocationConfigMode::Secondary,
1461 0 : });
1462 0 :
1463 0 : // We must not update observed, because we have no guarantee that our
1464 0 : // response will be received by the pageserver. This could leave it
1465 0 : // falsely dirty, but the resulting reconcile should be idempotent.
1466 0 : }
1467 : }
1468 :
1469 : // We consider a node Active once we have composed a re-attach response, but we
1470 : // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
1471 : // implicitly synchronizes the LocationConfigs on the node.
1472 : //
1473 : // Setting a node active unblocks any Reconcilers that might write to the location config API,
1474 : // but those requests will not be accepted by the node until it has finished processing
1475 : // the re-attach response.
1476 0 : if let Some(node) = nodes.get(&reattach_req.node_id) {
1477 0 : if !node.is_available() {
1478 0 : let mut new_nodes = (**nodes).clone();
1479 0 : if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
1480 0 : node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
1481 0 : scheduler.node_upsert(node);
1482 0 : }
1483 0 : let new_nodes = Arc::new(new_nodes);
1484 0 : *nodes = new_nodes;
1485 0 : }
1486 0 : }
1487 :
1488 0 : Ok(response)
1489 0 : }
1490 :
1491 0 : pub(crate) fn validate(&self, validate_req: ValidateRequest) -> ValidateResponse {
1492 0 : let locked = self.inner.read().unwrap();
1493 0 :
1494 0 : let mut response = ValidateResponse {
1495 0 : tenants: Vec::new(),
1496 0 : };
1497 :
1498 0 : for req_tenant in validate_req.tenants {
1499 0 : if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
1500 0 : let valid = tenant_shard.generation == Some(Generation::new(req_tenant.gen));
1501 0 : tracing::info!(
1502 0 : "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
1503 0 : req_tenant.id,
1504 0 : req_tenant.gen,
1505 0 : tenant_shard.generation
1506 0 : );
1507 0 : response.tenants.push(ValidateResponseTenant {
1508 0 : id: req_tenant.id,
1509 0 : valid,
1510 0 : });
1511 0 : } else {
1512 0 : // After tenant deletion, we may approve any validation. This avoids
1513 0 : // spurious warnings on the pageserver if it has pending LSN updates
1514 0 : // at the point a deletion happens.
1515 0 : response.tenants.push(ValidateResponseTenant {
1516 0 : id: req_tenant.id,
1517 0 : valid: true,
1518 0 : });
1519 0 : }
1520 : }
1521 0 : response
1522 0 : }
1523 :
1524 0 : pub(crate) async fn tenant_create(
1525 0 : &self,
1526 0 : create_req: TenantCreateRequest,
1527 0 : ) -> Result<TenantCreateResponse, ApiError> {
1528 0 : let tenant_id = create_req.new_tenant_id.tenant_id;
1529 :
1530 : // Exclude any concurrent attempts to create/access the same tenant ID
1531 0 : let _tenant_lock = self
1532 0 : .tenant_op_locks
1533 0 : .exclusive(create_req.new_tenant_id.tenant_id)
1534 0 : .await;
1535 :
1536 0 : let (response, waiters) = self.do_tenant_create(create_req).await?;
1537 :
1538 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
1539 : // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
1540 : // accept compute notifications while it is in the process of creating. Reconciliation will
1541 : // be retried in the background.
1542 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
1543 0 : }
1544 0 : Ok(response)
1545 0 : }
1546 :
1547 0 : pub(crate) async fn do_tenant_create(
1548 0 : &self,
1549 0 : create_req: TenantCreateRequest,
1550 0 : ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
1551 0 : let placement_policy = create_req
1552 0 : .placement_policy
1553 0 : .clone()
1554 0 : // As a default, zero secondaries is convenient for tests that don't choose a policy.
1555 0 : .unwrap_or(PlacementPolicy::Attached(0));
1556 :
1557 : // This service expects to handle sharding itself: it is an error to try and directly create
1558 : // a particular shard here.
1559 0 : let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
1560 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
1561 0 : "Attempted to create a specific shard, this API is for creating the whole tenant"
1562 0 : )));
1563 : } else {
1564 0 : create_req.new_tenant_id.tenant_id
1565 0 : };
1566 0 :
1567 0 : tracing::info!(
1568 0 : "Creating tenant {}, shard_count={:?}",
1569 0 : create_req.new_tenant_id,
1570 0 : create_req.shard_parameters.count,
1571 0 : );
1572 :
1573 0 : let create_ids = (0..create_req.shard_parameters.count.count())
1574 0 : .map(|i| TenantShardId {
1575 0 : tenant_id,
1576 0 : shard_number: ShardNumber(i),
1577 0 : shard_count: create_req.shard_parameters.count,
1578 0 : })
1579 0 : .collect::<Vec<_>>();
1580 :
1581 : // If the caller specifies a None generation, it means "start from default". This is different
1582 : // to [`Self::tenant_location_config`], where a None generation is used to represent
1583 : // an incompletely-onboarded tenant.
1584 0 : let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
1585 0 : tracing::info!(
1586 0 : "tenant_create: secondary mode, generation is_some={}",
1587 0 : create_req.generation.is_some()
1588 0 : );
1589 0 : create_req.generation.map(Generation::new)
1590 : } else {
1591 0 : tracing::info!(
1592 0 : "tenant_create: not secondary mode, generation is_some={}",
1593 0 : create_req.generation.is_some()
1594 0 : );
1595 0 : Some(
1596 0 : create_req
1597 0 : .generation
1598 0 : .map(Generation::new)
1599 0 : .unwrap_or(INITIAL_GENERATION),
1600 0 : )
1601 : };
1602 :
1603 : // Ordering: we persist tenant shards before creating them on the pageserver. This enables a caller
1604 : // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
1605 : // during the creation, rather than risking leaving orphan objects in S3.
1606 0 : let persist_tenant_shards = create_ids
1607 0 : .iter()
1608 0 : .map(|tenant_shard_id| TenantShardPersistence {
1609 0 : tenant_id: tenant_shard_id.tenant_id.to_string(),
1610 0 : shard_number: tenant_shard_id.shard_number.0 as i32,
1611 0 : shard_count: tenant_shard_id.shard_count.literal() as i32,
1612 0 : shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
1613 0 : generation: initial_generation.map(|g| g.into().unwrap() as i32),
1614 0 : // The pageserver is not known until scheduling happens: we will set this column when
1615 0 : // incrementing the generation the first time we attach to a pageserver.
1616 0 : generation_pageserver: None,
1617 0 : placement_policy: serde_json::to_string(&placement_policy).unwrap(),
1618 0 : config: serde_json::to_string(&create_req.config).unwrap(),
1619 0 : splitting: SplitState::default(),
1620 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1621 0 : .unwrap(),
1622 0 : })
1623 0 : .collect();
1624 0 :
1625 0 : match self
1626 0 : .persistence
1627 0 : .insert_tenant_shards(persist_tenant_shards)
1628 0 : .await
1629 : {
1630 0 : Ok(_) => {}
1631 : Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
1632 : DatabaseErrorKind::UniqueViolation,
1633 : _,
1634 : ))) => {
1635 : // Unique key violation: this is probably a retry. Because the shard count is part of the unique key,
1636 : // if we see a unique key violation it means that the creation request's shard count matches the previous
1637 : // creation's shard count.
1638 0 : tracing::info!("Tenant shards already present in database, proceeding with idempotent creation...");
1639 : }
1640 : // Any other database error is unexpected and a bug.
1641 0 : Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
1642 : };
1643 :
1644 0 : let mut schedule_context = ScheduleContext::default();
1645 :
1646 0 : let (waiters, response_shards) = {
1647 0 : let mut locked = self.inner.write().unwrap();
1648 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1649 0 :
1650 0 : let mut response_shards = Vec::new();
1651 0 : let mut schcedule_error = None;
1652 :
1653 0 : for tenant_shard_id in create_ids {
1654 0 : tracing::info!("Creating shard {tenant_shard_id}...");
1655 :
1656 : use std::collections::btree_map::Entry;
1657 0 : match tenants.entry(tenant_shard_id) {
1658 0 : Entry::Occupied(mut entry) => {
1659 0 : tracing::info!(
1660 0 : "Tenant shard {tenant_shard_id} already exists while creating"
1661 0 : );
1662 :
1663 : // TODO: schedule() should take an anti-affinity expression that pushes
1664 : // attached and secondary locations (independently) away frorm those
1665 : // pageservers also holding a shard for this tenant.
1666 :
1667 0 : entry
1668 0 : .get_mut()
1669 0 : .schedule(scheduler, &mut schedule_context)
1670 0 : .map_err(|e| {
1671 0 : ApiError::Conflict(format!(
1672 0 : "Failed to schedule shard {tenant_shard_id}: {e}"
1673 0 : ))
1674 0 : })?;
1675 :
1676 0 : if let Some(node_id) = entry.get().intent.get_attached() {
1677 0 : let generation = entry
1678 0 : .get()
1679 0 : .generation
1680 0 : .expect("Generation is set when in attached mode");
1681 0 : response_shards.push(TenantCreateResponseShard {
1682 0 : shard_id: tenant_shard_id,
1683 0 : node_id: *node_id,
1684 0 : generation: generation.into().unwrap(),
1685 0 : });
1686 0 : }
1687 :
1688 0 : continue;
1689 : }
1690 0 : Entry::Vacant(entry) => {
1691 0 : let state = entry.insert(TenantShard::new(
1692 0 : tenant_shard_id,
1693 0 : ShardIdentity::from_params(
1694 0 : tenant_shard_id.shard_number,
1695 0 : &create_req.shard_parameters,
1696 0 : ),
1697 0 : placement_policy.clone(),
1698 0 : ));
1699 0 :
1700 0 : state.generation = initial_generation;
1701 0 : state.config = create_req.config.clone();
1702 0 : if let Err(e) = state.schedule(scheduler, &mut schedule_context) {
1703 0 : schcedule_error = Some(e);
1704 0 : }
1705 :
1706 : // Only include shards in result if we are attaching: the purpose
1707 : // of the response is to tell the caller where the shards are attached.
1708 0 : if let Some(node_id) = state.intent.get_attached() {
1709 0 : let generation = state
1710 0 : .generation
1711 0 : .expect("Generation is set when in attached mode");
1712 0 : response_shards.push(TenantCreateResponseShard {
1713 0 : shard_id: tenant_shard_id,
1714 0 : node_id: *node_id,
1715 0 : generation: generation.into().unwrap(),
1716 0 : });
1717 0 : }
1718 : }
1719 : };
1720 : }
1721 :
1722 : // If we failed to schedule shards, then they are still created in the controller,
1723 : // but we return an error to the requester to avoid a silent failure when someone
1724 : // tries to e.g. create a tenant whose placement policy requires more nodes than
1725 : // are present in the system. We do this here rather than in the above loop, to
1726 : // avoid situations where we only create a subset of shards in the tenant.
1727 0 : if let Some(e) = schcedule_error {
1728 0 : return Err(ApiError::Conflict(format!(
1729 0 : "Failed to schedule shard(s): {e}"
1730 0 : )));
1731 0 : }
1732 0 :
1733 0 : let waiters = tenants
1734 0 : .range_mut(TenantShardId::tenant_range(tenant_id))
1735 0 : .filter_map(|(_shard_id, shard)| self.maybe_reconcile_shard(shard, nodes))
1736 0 : .collect::<Vec<_>>();
1737 0 : (waiters, response_shards)
1738 0 : };
1739 0 :
1740 0 : Ok((
1741 0 : TenantCreateResponse {
1742 0 : shards: response_shards,
1743 0 : },
1744 0 : waiters,
1745 0 : ))
1746 0 : }
1747 :
1748 : /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
1749 : /// wait for reconciliation to complete before responding.
1750 0 : async fn await_waiters(
1751 0 : &self,
1752 0 : waiters: Vec<ReconcilerWaiter>,
1753 0 : timeout: Duration,
1754 0 : ) -> Result<(), ReconcileWaitError> {
1755 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
1756 0 : for waiter in waiters {
1757 0 : let timeout = deadline.duration_since(Instant::now());
1758 0 : waiter.wait_timeout(timeout).await?;
1759 : }
1760 :
1761 0 : Ok(())
1762 0 : }
1763 :
1764 : /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
1765 : /// and transform it into either a tenant creation of a series of shard updates.
1766 : ///
1767 : /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
1768 : /// still be returned.
1769 0 : fn tenant_location_config_prepare(
1770 0 : &self,
1771 0 : tenant_id: TenantId,
1772 0 : req: TenantLocationConfigRequest,
1773 0 : ) -> TenantCreateOrUpdate {
1774 0 : let mut updates = Vec::new();
1775 0 : let mut locked = self.inner.write().unwrap();
1776 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
1777 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1778 :
1779 : // Use location config mode as an indicator of policy.
1780 0 : let placement_policy = match req.config.mode {
1781 0 : LocationConfigMode::Detached => PlacementPolicy::Detached,
1782 0 : LocationConfigMode::Secondary => PlacementPolicy::Secondary,
1783 : LocationConfigMode::AttachedMulti
1784 : | LocationConfigMode::AttachedSingle
1785 : | LocationConfigMode::AttachedStale => {
1786 0 : if nodes.len() > 1 {
1787 0 : PlacementPolicy::Attached(1)
1788 : } else {
1789 : // Convenience for dev/test: if we just have one pageserver, import
1790 : // tenants into non-HA mode so that scheduling will succeed.
1791 0 : PlacementPolicy::Attached(0)
1792 : }
1793 : }
1794 : };
1795 :
1796 0 : let mut create = true;
1797 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
1798 : // Saw an existing shard: this is not a creation
1799 0 : create = false;
1800 :
1801 : // Shards may have initially been created by a Secondary request, where we
1802 : // would have left generation as None.
1803 : //
1804 : // We only update generation the first time we see an attached-mode request,
1805 : // and if there is no existing generation set. The caller is responsible for
1806 : // ensuring that no non-storage-controller pageserver ever uses a higher
1807 : // generation than they passed in here.
1808 : use LocationConfigMode::*;
1809 0 : let set_generation = match req.config.mode {
1810 0 : AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
1811 0 : req.config.generation.map(Generation::new)
1812 : }
1813 0 : _ => None,
1814 : };
1815 :
1816 0 : updates.push(ShardUpdate {
1817 0 : tenant_shard_id: *shard_id,
1818 0 : placement_policy: placement_policy.clone(),
1819 0 : tenant_config: req.config.tenant_conf.clone(),
1820 0 : generation: set_generation,
1821 0 : });
1822 : }
1823 :
1824 0 : if create {
1825 : use LocationConfigMode::*;
1826 0 : let generation = match req.config.mode {
1827 0 : AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
1828 : // If a caller provided a generation in a non-attached request, ignore it
1829 : // and leave our generation as None: this enables a subsequent update to set
1830 : // the generation when setting an attached mode for the first time.
1831 0 : _ => None,
1832 : };
1833 :
1834 0 : TenantCreateOrUpdate::Create(
1835 0 : // Synthesize a creation request
1836 0 : TenantCreateRequest {
1837 0 : new_tenant_id: tenant_shard_id,
1838 0 : generation,
1839 0 : shard_parameters: ShardParameters {
1840 0 : count: tenant_shard_id.shard_count,
1841 0 : // We only import un-sharded or single-sharded tenants, so stripe
1842 0 : // size can be made up arbitrarily here.
1843 0 : stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE,
1844 0 : },
1845 0 : placement_policy: Some(placement_policy),
1846 0 : config: req.config.tenant_conf,
1847 0 : },
1848 0 : )
1849 : } else {
1850 0 : assert!(!updates.is_empty());
1851 0 : TenantCreateOrUpdate::Update(updates)
1852 : }
1853 0 : }
1854 :
1855 : /// This API is used by the cloud control plane to migrate unsharded tenants that it created
1856 : /// directly with pageservers into this service.
1857 : ///
1858 : /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
1859 : /// has attempted to call this API. Failure to oblige to this rule may lead to S3 corruption.
1860 : /// Think of the first attempt to call this API as a transfer of absolute authority over the
1861 : /// tenant's source of generation numbers.
1862 : ///
1863 : /// The mode in this request coarse-grained control of tenants:
1864 : /// - Call with mode Attached* to upsert the tenant.
1865 : /// - Call with mode Secondary to either onboard a tenant without attaching it, or
1866 : /// to set an existing tenant to PolicyMode::Secondary
1867 : /// - Call with mode Detached to switch to PolicyMode::Detached
1868 0 : pub(crate) async fn tenant_location_config(
1869 0 : &self,
1870 0 : tenant_shard_id: TenantShardId,
1871 0 : req: TenantLocationConfigRequest,
1872 0 : ) -> Result<TenantLocationConfigResponse, ApiError> {
1873 : // We require an exclusive lock, because we are updating both persistent and in-memory state
1874 0 : let _tenant_lock = self
1875 0 : .tenant_op_locks
1876 0 : .exclusive(tenant_shard_id.tenant_id)
1877 0 : .await;
1878 :
1879 0 : if !tenant_shard_id.is_unsharded() {
1880 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
1881 0 : "This API is for importing single-sharded or unsharded tenants"
1882 0 : )));
1883 0 : }
1884 0 :
1885 0 : // First check if this is a creation or an update
1886 0 : let create_or_update = self.tenant_location_config_prepare(tenant_shard_id.tenant_id, req);
1887 0 :
1888 0 : let mut result = TenantLocationConfigResponse {
1889 0 : shards: Vec::new(),
1890 0 : stripe_size: None,
1891 0 : };
1892 0 : let waiters = match create_or_update {
1893 0 : TenantCreateOrUpdate::Create(create_req) => {
1894 0 : let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
1895 0 : result.shards = create_resp
1896 0 : .shards
1897 0 : .into_iter()
1898 0 : .map(|s| TenantShardLocation {
1899 0 : node_id: s.node_id,
1900 0 : shard_id: s.shard_id,
1901 0 : })
1902 0 : .collect();
1903 0 : waiters
1904 : }
1905 0 : TenantCreateOrUpdate::Update(updates) => {
1906 0 : // Persist updates
1907 0 : // Ordering: write to the database before applying changes in-memory, so that
1908 0 : // we will not appear time-travel backwards on a restart.
1909 0 : let mut schedule_context = ScheduleContext::default();
1910 : for ShardUpdate {
1911 0 : tenant_shard_id,
1912 0 : placement_policy,
1913 0 : tenant_config,
1914 0 : generation,
1915 0 : } in &updates
1916 : {
1917 0 : self.persistence
1918 0 : .update_tenant_shard(
1919 0 : TenantFilter::Shard(*tenant_shard_id),
1920 0 : Some(placement_policy.clone()),
1921 0 : Some(tenant_config.clone()),
1922 0 : *generation,
1923 0 : None,
1924 0 : )
1925 0 : .await?;
1926 : }
1927 :
1928 : // Apply updates in-memory
1929 0 : let mut waiters = Vec::new();
1930 0 : {
1931 0 : let mut locked = self.inner.write().unwrap();
1932 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1933 :
1934 : for ShardUpdate {
1935 0 : tenant_shard_id,
1936 0 : placement_policy,
1937 0 : tenant_config,
1938 0 : generation: update_generation,
1939 0 : } in updates
1940 : {
1941 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
1942 0 : tracing::warn!("Shard {tenant_shard_id} removed while updating");
1943 0 : continue;
1944 : };
1945 :
1946 : // Update stripe size
1947 0 : if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
1948 0 : result.stripe_size = Some(shard.shard.stripe_size);
1949 0 : }
1950 :
1951 0 : shard.policy = placement_policy;
1952 0 : shard.config = tenant_config;
1953 0 : if let Some(generation) = update_generation {
1954 0 : shard.generation = Some(generation);
1955 0 : }
1956 :
1957 0 : shard.schedule(scheduler, &mut schedule_context)?;
1958 :
1959 0 : let maybe_waiter = self.maybe_reconcile_shard(shard, nodes);
1960 0 : if let Some(waiter) = maybe_waiter {
1961 0 : waiters.push(waiter);
1962 0 : }
1963 :
1964 0 : if let Some(node_id) = shard.intent.get_attached() {
1965 0 : result.shards.push(TenantShardLocation {
1966 0 : shard_id: tenant_shard_id,
1967 0 : node_id: *node_id,
1968 0 : })
1969 0 : }
1970 : }
1971 : }
1972 0 : waiters
1973 : }
1974 : };
1975 :
1976 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
1977 : // Do not treat a reconcile error as fatal: we have already applied any requested
1978 : // Intent changes, and the reconcile can fail for external reasons like unavailable
1979 : // compute notification API. In these cases, it is important that we do not
1980 : // cause the cloud control plane to retry forever on this API.
1981 0 : tracing::warn!(
1982 0 : "Failed to reconcile after /location_config: {e}, returning success anyway"
1983 0 : );
1984 0 : }
1985 :
1986 : // Logging the full result is useful because it lets us cross-check what the cloud control
1987 : // plane's tenant_shards table should contain.
1988 0 : tracing::info!("Complete, returning {result:?}");
1989 :
1990 0 : Ok(result)
1991 0 : }
1992 :
1993 0 : pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
1994 : // We require an exclusive lock, because we are updating persistent and in-memory state
1995 0 : let _tenant_lock = self.tenant_op_locks.exclusive(req.tenant_id).await;
1996 :
1997 0 : let tenant_id = req.tenant_id;
1998 0 : let config = req.config;
1999 0 :
2000 0 : self.persistence
2001 0 : .update_tenant_shard(
2002 0 : TenantFilter::Tenant(req.tenant_id),
2003 0 : None,
2004 0 : Some(config.clone()),
2005 0 : None,
2006 0 : None,
2007 0 : )
2008 0 : .await?;
2009 :
2010 0 : let waiters = {
2011 0 : let mut waiters = Vec::new();
2012 0 : let mut locked = self.inner.write().unwrap();
2013 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2014 0 : for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2015 0 : shard.config = config.clone();
2016 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2017 0 : waiters.push(waiter);
2018 0 : }
2019 : }
2020 0 : waiters
2021 : };
2022 :
2023 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2024 : // Treat this as success because we have stored the configuration. If e.g.
2025 : // a node was unavailable at this time, it should not stop us accepting a
2026 : // configuration change.
2027 0 : tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
2028 0 : }
2029 :
2030 0 : Ok(())
2031 0 : }
2032 :
2033 0 : pub(crate) fn tenant_config_get(
2034 0 : &self,
2035 0 : tenant_id: TenantId,
2036 0 : ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
2037 0 : let config = {
2038 0 : let locked = self.inner.read().unwrap();
2039 0 :
2040 0 : match locked
2041 0 : .tenants
2042 0 : .range(TenantShardId::tenant_range(tenant_id))
2043 0 : .next()
2044 : {
2045 0 : Some((_tenant_shard_id, shard)) => shard.config.clone(),
2046 : None => {
2047 0 : return Err(ApiError::NotFound(
2048 0 : anyhow::anyhow!("Tenant not found").into(),
2049 0 : ))
2050 : }
2051 : }
2052 : };
2053 :
2054 : // Unlike the pageserver, we do not have a set of global defaults: the config is
2055 : // entirely per-tenant. Therefore the distinction between `tenant_specific_overrides`
2056 : // and `effective_config` in the response is meaningless, but we retain that syntax
2057 : // in order to remain compatible with the pageserver API.
2058 :
2059 0 : let response = HashMap::from([
2060 : (
2061 : "tenant_specific_overrides",
2062 0 : serde_json::to_value(&config)
2063 0 : .context("serializing tenant specific overrides")
2064 0 : .map_err(ApiError::InternalServerError)?,
2065 : ),
2066 : (
2067 0 : "effective_config",
2068 0 : serde_json::to_value(&config)
2069 0 : .context("serializing effective config")
2070 0 : .map_err(ApiError::InternalServerError)?,
2071 : ),
2072 : ]);
2073 :
2074 0 : Ok(response)
2075 0 : }
2076 :
2077 0 : pub(crate) async fn tenant_time_travel_remote_storage(
2078 0 : &self,
2079 0 : time_travel_req: &TenantTimeTravelRequest,
2080 0 : tenant_id: TenantId,
2081 0 : timestamp: Cow<'_, str>,
2082 0 : done_if_after: Cow<'_, str>,
2083 0 : ) -> Result<(), ApiError> {
2084 0 : let _tenant_lock = self.tenant_op_locks.exclusive(tenant_id).await;
2085 :
2086 0 : let node = {
2087 0 : let locked = self.inner.read().unwrap();
2088 : // Just a sanity check to prevent misuse: the API expects that the tenant is fully
2089 : // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
2090 : // but only at the start of the process, so it's really just to prevent operator
2091 : // mistakes.
2092 0 : for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
2093 0 : if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
2094 : {
2095 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2096 0 : "We want tenant to be attached in shard with tenant_shard_id={shard_id}"
2097 0 : )));
2098 0 : }
2099 0 : let maybe_attached = shard
2100 0 : .observed
2101 0 : .locations
2102 0 : .iter()
2103 0 : .filter_map(|(node_id, observed_location)| {
2104 0 : observed_location
2105 0 : .conf
2106 0 : .as_ref()
2107 0 : .map(|loc| (node_id, observed_location, loc.mode))
2108 0 : })
2109 0 : .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
2110 0 : if let Some((node_id, _observed_location, mode)) = maybe_attached {
2111 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}")));
2112 0 : }
2113 : }
2114 0 : let scheduler = &locked.scheduler;
2115 : // Right now we only perform the operation on a single node without parallelization
2116 : // TODO fan out the operation to multiple nodes for better performance
2117 0 : let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
2118 0 : let node = locked
2119 0 : .nodes
2120 0 : .get(&node_id)
2121 0 : .expect("Pageservers may not be deleted while lock is active");
2122 0 : node.clone()
2123 0 : };
2124 0 :
2125 0 : // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
2126 0 : let mut counts = time_travel_req
2127 0 : .shard_counts
2128 0 : .iter()
2129 0 : .copied()
2130 0 : .collect::<HashSet<_>>()
2131 0 : .into_iter()
2132 0 : .collect::<Vec<_>>();
2133 0 : counts.sort_unstable();
2134 :
2135 0 : for count in counts {
2136 0 : let shard_ids = (0..count.count())
2137 0 : .map(|i| TenantShardId {
2138 0 : tenant_id,
2139 0 : shard_number: ShardNumber(i),
2140 0 : shard_count: count,
2141 0 : })
2142 0 : .collect::<Vec<_>>();
2143 0 : for tenant_shard_id in shard_ids {
2144 0 : let client = PageserverClient::new(
2145 0 : node.get_id(),
2146 0 : node.base_url(),
2147 0 : self.config.jwt_token.as_deref(),
2148 0 : );
2149 0 :
2150 0 : tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
2151 :
2152 0 : client
2153 0 : .tenant_time_travel_remote_storage(
2154 0 : tenant_shard_id,
2155 0 : ×tamp,
2156 0 : &done_if_after,
2157 0 : )
2158 0 : .await
2159 0 : .map_err(|e| {
2160 0 : ApiError::InternalServerError(anyhow::anyhow!(
2161 0 : "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
2162 0 : node
2163 0 : ))
2164 0 : })?;
2165 : }
2166 : }
2167 0 : Ok(())
2168 0 : }
2169 :
2170 0 : pub(crate) async fn tenant_secondary_download(
2171 0 : &self,
2172 0 : tenant_id: TenantId,
2173 0 : wait: Option<Duration>,
2174 0 : ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
2175 0 : let _tenant_lock = self.tenant_op_locks.shared(tenant_id).await;
2176 :
2177 : // Acquire lock and yield the collection of shard-node tuples which we will send requests onward to
2178 0 : let targets = {
2179 0 : let locked = self.inner.read().unwrap();
2180 0 : let mut targets = Vec::new();
2181 :
2182 0 : for (tenant_shard_id, shard) in
2183 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2184 : {
2185 0 : for node_id in shard.intent.get_secondary() {
2186 0 : let node = locked
2187 0 : .nodes
2188 0 : .get(node_id)
2189 0 : .expect("Pageservers may not be deleted while referenced");
2190 0 :
2191 0 : targets.push((*tenant_shard_id, node.clone()));
2192 0 : }
2193 : }
2194 0 : targets
2195 0 : };
2196 0 :
2197 0 : // Issue concurrent requests to all shards' locations
2198 0 : let mut futs = FuturesUnordered::new();
2199 0 : for (tenant_shard_id, node) in targets {
2200 0 : let client = PageserverClient::new(
2201 0 : node.get_id(),
2202 0 : node.base_url(),
2203 0 : self.config.jwt_token.as_deref(),
2204 0 : );
2205 0 : futs.push(async move {
2206 0 : let result = client
2207 0 : .tenant_secondary_download(tenant_shard_id, wait)
2208 0 : .await;
2209 0 : (result, node, tenant_shard_id)
2210 0 : })
2211 : }
2212 :
2213 : // Handle any errors returned by pageservers. This includes cases like this request racing with
2214 : // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
2215 : // well as more general cases like 503s, 500s, or timeouts.
2216 0 : let mut aggregate_progress = SecondaryProgress::default();
2217 0 : let mut aggregate_status: Option<StatusCode> = None;
2218 0 : let mut error: Option<mgmt_api::Error> = None;
2219 0 : while let Some((result, node, tenant_shard_id)) = futs.next().await {
2220 0 : match result {
2221 0 : Err(e) => {
2222 0 : // Secondary downloads are always advisory: if something fails, we nevertheless report success, so that whoever
2223 0 : // is calling us will proceed with whatever migration they're doing, albeit with a slightly less warm cache
2224 0 : // than they had hoped for.
2225 0 : tracing::warn!("Secondary download error from pageserver {node}: {e}",);
2226 0 : error = Some(e)
2227 : }
2228 0 : Ok((status_code, progress)) => {
2229 0 : tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
2230 0 : aggregate_progress.layers_downloaded += progress.layers_downloaded;
2231 0 : aggregate_progress.layers_total += progress.layers_total;
2232 0 : aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
2233 0 : aggregate_progress.bytes_total += progress.bytes_total;
2234 0 : aggregate_progress.heatmap_mtime =
2235 0 : std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
2236 0 : aggregate_status = match aggregate_status {
2237 0 : None => Some(status_code),
2238 0 : Some(StatusCode::OK) => Some(status_code),
2239 0 : Some(cur) => {
2240 0 : // Other status codes (e.g. 202) -- do not overwrite.
2241 0 : Some(cur)
2242 : }
2243 : };
2244 : }
2245 : }
2246 : }
2247 :
2248 : // If any of the shards return 202, indicate our result as 202.
2249 0 : match aggregate_status {
2250 : None => {
2251 0 : match error {
2252 0 : Some(e) => {
2253 0 : // No successes, and an error: surface it
2254 0 : Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
2255 : }
2256 : None => {
2257 : // No shards found
2258 0 : Err(ApiError::NotFound(
2259 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
2260 0 : ))
2261 : }
2262 : }
2263 : }
2264 0 : Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
2265 : }
2266 0 : }
2267 :
2268 0 : pub(crate) async fn tenant_delete(&self, tenant_id: TenantId) -> Result<StatusCode, ApiError> {
2269 0 : let _tenant_lock = self.tenant_op_locks.exclusive(tenant_id).await;
2270 :
2271 0 : self.ensure_attached_wait(tenant_id).await?;
2272 :
2273 : // TODO: refactor into helper
2274 0 : let targets = {
2275 0 : let locked = self.inner.read().unwrap();
2276 0 : let mut targets = Vec::new();
2277 :
2278 0 : for (tenant_shard_id, shard) in
2279 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2280 0 : {
2281 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2282 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2283 0 : })?;
2284 0 : let node = locked
2285 0 : .nodes
2286 0 : .get(&node_id)
2287 0 : .expect("Pageservers may not be deleted while referenced");
2288 0 :
2289 0 : targets.push((*tenant_shard_id, node.clone()));
2290 : }
2291 0 : targets
2292 0 : };
2293 0 :
2294 0 : // Phase 1: delete on the pageservers
2295 0 : let mut any_pending = false;
2296 0 : for (tenant_shard_id, node) in targets {
2297 0 : let client = PageserverClient::new(
2298 0 : node.get_id(),
2299 0 : node.base_url(),
2300 0 : self.config.jwt_token.as_deref(),
2301 0 : );
2302 : // TODO: this, like many other places, requires proper retry handling for 503, timeout: those should not
2303 : // surface immediately as an error to our caller.
2304 0 : let status = client.tenant_delete(tenant_shard_id).await.map_err(|e| {
2305 0 : ApiError::InternalServerError(anyhow::anyhow!(
2306 0 : "Error deleting shard {tenant_shard_id} on node {node}: {e}",
2307 0 : ))
2308 0 : })?;
2309 0 : tracing::info!(
2310 0 : "Shard {tenant_shard_id} on node {node}, delete returned {}",
2311 0 : status
2312 0 : );
2313 0 : if status == StatusCode::ACCEPTED {
2314 0 : any_pending = true;
2315 0 : }
2316 : }
2317 :
2318 0 : if any_pending {
2319 : // Caller should call us again later. When we eventually see 404s from
2320 : // all the shards, we may proceed to delete our records of the tenant.
2321 0 : tracing::info!(
2322 0 : "Tenant {} has some shards pending deletion, returning 202",
2323 0 : tenant_id
2324 0 : );
2325 0 : return Ok(StatusCode::ACCEPTED);
2326 0 : }
2327 0 :
2328 0 : // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
2329 0 : // our in-memory state and database state.
2330 0 :
2331 0 : // Ordering: we delete persistent state first: if we then
2332 0 : // crash, we will drop the in-memory state.
2333 0 :
2334 0 : // Drop persistent state.
2335 0 : self.persistence.delete_tenant(tenant_id).await?;
2336 :
2337 : // Drop in-memory state
2338 : {
2339 0 : let mut locked = self.inner.write().unwrap();
2340 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
2341 :
2342 : // Dereference Scheduler from shards before dropping them
2343 0 : for (_tenant_shard_id, shard) in
2344 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2345 0 : {
2346 0 : shard.intent.clear(scheduler);
2347 0 : }
2348 :
2349 0 : tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
2350 0 : tracing::info!(
2351 0 : "Deleted tenant {tenant_id}, now have {} tenants",
2352 0 : locked.tenants.len()
2353 0 : );
2354 : };
2355 :
2356 : // Success is represented as 404, to imitate the existing pageserver deletion API
2357 0 : Ok(StatusCode::NOT_FOUND)
2358 0 : }
2359 :
2360 : /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
2361 : /// for a tenant. The TenantConfig is passed through to pageservers, whereas this function modifies
2362 : /// the tenant's policies (configuration) within the storage controller
2363 0 : pub(crate) async fn tenant_update_policy(
2364 0 : &self,
2365 0 : tenant_id: TenantId,
2366 0 : req: TenantPolicyRequest,
2367 0 : ) -> Result<(), ApiError> {
2368 : // We require an exclusive lock, because we are updating persistent and in-memory state
2369 0 : let _tenant_lock = self.tenant_op_locks.exclusive(tenant_id).await;
2370 :
2371 : let TenantPolicyRequest {
2372 0 : placement,
2373 0 : scheduling,
2374 0 : } = req;
2375 0 :
2376 0 : self.persistence
2377 0 : .update_tenant_shard(
2378 0 : TenantFilter::Tenant(tenant_id),
2379 0 : placement.clone(),
2380 0 : None,
2381 0 : None,
2382 0 : scheduling,
2383 0 : )
2384 0 : .await?;
2385 :
2386 0 : let mut schedule_context = ScheduleContext::default();
2387 0 : let mut locked = self.inner.write().unwrap();
2388 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2389 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2390 0 : if let Some(placement) = &placement {
2391 0 : shard.policy = placement.clone();
2392 0 :
2393 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2394 0 : "Updated placement policy to {placement:?}");
2395 0 : }
2396 :
2397 0 : if let Some(scheduling) = &scheduling {
2398 0 : shard.set_scheduling_policy(*scheduling);
2399 0 :
2400 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2401 0 : "Updated scheduling policy to {scheduling:?}");
2402 0 : }
2403 :
2404 : // In case scheduling is being switched back on, try it now.
2405 0 : shard.schedule(scheduler, &mut schedule_context).ok();
2406 0 : self.maybe_reconcile_shard(shard, nodes);
2407 : }
2408 :
2409 0 : Ok(())
2410 0 : }
2411 :
2412 0 : pub(crate) async fn tenant_timeline_create(
2413 0 : &self,
2414 0 : tenant_id: TenantId,
2415 0 : mut create_req: TimelineCreateRequest,
2416 0 : ) -> Result<TimelineInfo, ApiError> {
2417 0 : tracing::info!(
2418 0 : "Creating timeline {}/{}",
2419 0 : tenant_id,
2420 0 : create_req.new_timeline_id,
2421 0 : );
2422 :
2423 0 : let _tenant_lock = self.tenant_op_locks.shared(tenant_id).await;
2424 :
2425 0 : self.ensure_attached_wait(tenant_id).await?;
2426 :
2427 0 : let mut targets = {
2428 0 : let locked = self.inner.read().unwrap();
2429 0 : let mut targets = Vec::new();
2430 :
2431 0 : for (tenant_shard_id, shard) in
2432 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2433 0 : {
2434 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2435 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2436 0 : })?;
2437 0 : let node = locked
2438 0 : .nodes
2439 0 : .get(&node_id)
2440 0 : .expect("Pageservers may not be deleted while referenced");
2441 0 :
2442 0 : targets.push((*tenant_shard_id, node.clone()));
2443 : }
2444 0 : targets
2445 0 : };
2446 0 :
2447 0 : if targets.is_empty() {
2448 0 : return Err(ApiError::NotFound(
2449 0 : anyhow::anyhow!("Tenant not found").into(),
2450 0 : ));
2451 0 : };
2452 0 : let shard_zero = targets.remove(0);
2453 :
2454 0 : async fn create_one(
2455 0 : tenant_shard_id: TenantShardId,
2456 0 : node: Node,
2457 0 : jwt: Option<String>,
2458 0 : create_req: TimelineCreateRequest,
2459 0 : ) -> Result<TimelineInfo, ApiError> {
2460 0 : tracing::info!(
2461 0 : "Creating timeline on shard {}/{}, attached to node {node}",
2462 0 : tenant_shard_id,
2463 0 : create_req.new_timeline_id,
2464 0 : );
2465 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2466 0 :
2467 0 : client
2468 0 : .timeline_create(tenant_shard_id, &create_req)
2469 0 : .await
2470 0 : .map_err(|e| match e {
2471 0 : mgmt_api::Error::ApiError(status, msg)
2472 0 : if status == StatusCode::INTERNAL_SERVER_ERROR
2473 0 : || status == StatusCode::NOT_ACCEPTABLE =>
2474 0 : {
2475 0 : // TODO: handle more error codes, e.g. 503 should be passed through. Make a general wrapper
2476 0 : // for pass-through API calls.
2477 0 : ApiError::InternalServerError(anyhow::anyhow!(msg))
2478 : }
2479 0 : _ => ApiError::Conflict(format!("Failed to create timeline: {e}")),
2480 0 : })
2481 0 : }
2482 :
2483 : // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
2484 : // use whatever LSN that shard picked when creating on subsequent shards. We arbitrarily use shard zero as the shard
2485 : // that will get the first creation request, and propagate the LSN to all the >0 shards.
2486 0 : let timeline_info = create_one(
2487 0 : shard_zero.0,
2488 0 : shard_zero.1,
2489 0 : self.config.jwt_token.clone(),
2490 0 : create_req.clone(),
2491 0 : )
2492 0 : .await?;
2493 :
2494 : // Propagate the LSN that shard zero picked, if caller didn't provide one
2495 0 : if create_req.ancestor_timeline_id.is_some() && create_req.ancestor_start_lsn.is_none() {
2496 0 : create_req.ancestor_start_lsn = timeline_info.ancestor_lsn;
2497 0 : }
2498 :
2499 : // Create timeline on remaining shards with number >0
2500 0 : if !targets.is_empty() {
2501 : // If we had multiple shards, issue requests for the remainder now.
2502 0 : let jwt = self.config.jwt_token.clone();
2503 0 : self.tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
2504 0 : let create_req = create_req.clone();
2505 0 : Box::pin(create_one(tenant_shard_id, node, jwt.clone(), create_req))
2506 0 : })
2507 0 : .await?;
2508 0 : }
2509 :
2510 0 : Ok(timeline_info)
2511 0 : }
2512 :
2513 : /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
2514 : ///
2515 : /// On success, the returned vector contains exactly the same number of elements as the input `locations`.
2516 0 : async fn tenant_for_shards<F, R>(
2517 0 : &self,
2518 0 : locations: Vec<(TenantShardId, Node)>,
2519 0 : mut req_fn: F,
2520 0 : ) -> Result<Vec<R>, ApiError>
2521 0 : where
2522 0 : F: FnMut(
2523 0 : TenantShardId,
2524 0 : Node,
2525 0 : )
2526 0 : -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
2527 0 : {
2528 0 : let mut futs = FuturesUnordered::new();
2529 0 : let mut results = Vec::with_capacity(locations.len());
2530 :
2531 0 : for (tenant_shard_id, node) in locations {
2532 0 : futs.push(req_fn(tenant_shard_id, node));
2533 0 : }
2534 :
2535 0 : while let Some(r) = futs.next().await {
2536 0 : results.push(r?);
2537 : }
2538 :
2539 0 : Ok(results)
2540 0 : }
2541 :
2542 0 : pub(crate) async fn tenant_timeline_delete(
2543 0 : &self,
2544 0 : tenant_id: TenantId,
2545 0 : timeline_id: TimelineId,
2546 0 : ) -> Result<StatusCode, ApiError> {
2547 0 : tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
2548 0 : let _tenant_lock = self.tenant_op_locks.shared(tenant_id).await;
2549 :
2550 0 : self.ensure_attached_wait(tenant_id).await?;
2551 :
2552 0 : let mut targets = {
2553 0 : let locked = self.inner.read().unwrap();
2554 0 : let mut targets = Vec::new();
2555 :
2556 0 : for (tenant_shard_id, shard) in
2557 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2558 0 : {
2559 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2560 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2561 0 : })?;
2562 0 : let node = locked
2563 0 : .nodes
2564 0 : .get(&node_id)
2565 0 : .expect("Pageservers may not be deleted while referenced");
2566 0 :
2567 0 : targets.push((*tenant_shard_id, node.clone()));
2568 : }
2569 0 : targets
2570 0 : };
2571 0 :
2572 0 : if targets.is_empty() {
2573 0 : return Err(ApiError::NotFound(
2574 0 : anyhow::anyhow!("Tenant not found").into(),
2575 0 : ));
2576 0 : }
2577 0 : let shard_zero = targets.remove(0);
2578 :
2579 0 : async fn delete_one(
2580 0 : tenant_shard_id: TenantShardId,
2581 0 : timeline_id: TimelineId,
2582 0 : node: Node,
2583 0 : jwt: Option<String>,
2584 0 : ) -> Result<StatusCode, ApiError> {
2585 0 : tracing::info!(
2586 0 : "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
2587 0 : );
2588 :
2589 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2590 0 : client
2591 0 : .timeline_delete(tenant_shard_id, timeline_id)
2592 0 : .await
2593 0 : .map_err(|e| {
2594 0 : ApiError::InternalServerError(anyhow::anyhow!(
2595 0 : "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
2596 0 : ))
2597 0 : })
2598 0 : }
2599 :
2600 0 : let statuses = self
2601 0 : .tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
2602 0 : Box::pin(delete_one(
2603 0 : tenant_shard_id,
2604 0 : timeline_id,
2605 0 : node,
2606 0 : self.config.jwt_token.clone(),
2607 0 : ))
2608 0 : })
2609 0 : .await?;
2610 :
2611 : // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero
2612 0 : if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
2613 0 : return Ok(StatusCode::ACCEPTED);
2614 0 : }
2615 :
2616 : // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
2617 : // to shard zero, it gives a more obvious behavior that a GET returns 404 once the deletion is done.
2618 0 : let shard_zero_status = delete_one(
2619 0 : shard_zero.0,
2620 0 : timeline_id,
2621 0 : shard_zero.1,
2622 0 : self.config.jwt_token.clone(),
2623 0 : )
2624 0 : .await?;
2625 :
2626 0 : Ok(shard_zero_status)
2627 0 : }
2628 :
2629 : /// When you need to send an HTTP request to the pageserver that holds shard0 of a tenant, this
2630 : /// function looks up and returns node. If the tenant isn't found, returns Err(ApiError::NotFound)
2631 0 : pub(crate) fn tenant_shard0_node(
2632 0 : &self,
2633 0 : tenant_id: TenantId,
2634 0 : ) -> Result<(Node, TenantShardId), ApiError> {
2635 0 : let locked = self.inner.read().unwrap();
2636 0 : let Some((tenant_shard_id, shard)) = locked
2637 0 : .tenants
2638 0 : .range(TenantShardId::tenant_range(tenant_id))
2639 0 : .next()
2640 : else {
2641 0 : return Err(ApiError::NotFound(
2642 0 : anyhow::anyhow!("Tenant {tenant_id} not found").into(),
2643 0 : ));
2644 : };
2645 :
2646 : // TODO: should use the ID last published to compute_hook, rather than the intent: the intent might
2647 : // point to somewhere we haven't attached yet.
2648 0 : let Some(node_id) = shard.intent.get_attached() else {
2649 0 : tracing::warn!(
2650 0 : tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
2651 0 : "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
2652 0 : shard.policy
2653 0 : );
2654 0 : return Err(ApiError::Conflict(
2655 0 : "Cannot call timeline API on non-attached tenant".to_string(),
2656 0 : ));
2657 : };
2658 :
2659 0 : let Some(node) = locked.nodes.get(node_id) else {
2660 : // This should never happen
2661 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2662 0 : "Shard refers to nonexistent node"
2663 0 : )));
2664 : };
2665 :
2666 0 : Ok((node.clone(), *tenant_shard_id))
2667 0 : }
2668 :
2669 0 : pub(crate) fn tenant_locate(
2670 0 : &self,
2671 0 : tenant_id: TenantId,
2672 0 : ) -> Result<TenantLocateResponse, ApiError> {
2673 0 : let locked = self.inner.read().unwrap();
2674 0 : tracing::info!("Locating shards for tenant {tenant_id}");
2675 :
2676 0 : let mut result = Vec::new();
2677 0 : let mut shard_params: Option<ShardParameters> = None;
2678 :
2679 0 : for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2680 : {
2681 0 : let node_id =
2682 0 : shard
2683 0 : .intent
2684 0 : .get_attached()
2685 0 : .ok_or(ApiError::BadRequest(anyhow::anyhow!(
2686 0 : "Cannot locate a tenant that is not attached"
2687 0 : )))?;
2688 :
2689 0 : let node = locked
2690 0 : .nodes
2691 0 : .get(&node_id)
2692 0 : .expect("Pageservers may not be deleted while referenced");
2693 0 :
2694 0 : result.push(node.shard_location(*tenant_shard_id));
2695 0 :
2696 0 : match &shard_params {
2697 0 : None => {
2698 0 : shard_params = Some(ShardParameters {
2699 0 : stripe_size: shard.shard.stripe_size,
2700 0 : count: shard.shard.count,
2701 0 : });
2702 0 : }
2703 0 : Some(params) => {
2704 0 : if params.stripe_size != shard.shard.stripe_size {
2705 : // This should never happen. We enforce at runtime because it's simpler than
2706 : // adding an extra per-tenant data structure to store the things that should be the same
2707 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2708 0 : "Inconsistent shard stripe size parameters!"
2709 0 : )));
2710 0 : }
2711 : }
2712 : }
2713 : }
2714 :
2715 0 : if result.is_empty() {
2716 0 : return Err(ApiError::NotFound(
2717 0 : anyhow::anyhow!("No shards for this tenant ID found").into(),
2718 0 : ));
2719 0 : }
2720 0 : let shard_params = shard_params.expect("result is non-empty, therefore this is set");
2721 0 : tracing::info!(
2722 0 : "Located tenant {} with params {:?} on shards {}",
2723 0 : tenant_id,
2724 0 : shard_params,
2725 0 : result
2726 0 : .iter()
2727 0 : .map(|s| format!("{:?}", s))
2728 0 : .collect::<Vec<_>>()
2729 0 : .join(",")
2730 0 : );
2731 :
2732 0 : Ok(TenantLocateResponse {
2733 0 : shards: result,
2734 0 : shard_params,
2735 0 : })
2736 0 : }
2737 :
2738 : /// Returns None if the input iterator of shards does not include a shard with number=0
2739 0 : fn tenant_describe_impl<'a>(
2740 0 : &self,
2741 0 : shards: impl Iterator<Item = &'a TenantShard>,
2742 0 : ) -> Option<TenantDescribeResponse> {
2743 0 : let mut shard_zero = None;
2744 0 : let mut describe_shards = Vec::new();
2745 :
2746 0 : for shard in shards {
2747 0 : if shard.tenant_shard_id.is_shard_zero() {
2748 0 : shard_zero = Some(shard);
2749 0 : }
2750 :
2751 0 : describe_shards.push(TenantDescribeResponseShard {
2752 0 : tenant_shard_id: shard.tenant_shard_id,
2753 0 : node_attached: *shard.intent.get_attached(),
2754 0 : node_secondary: shard.intent.get_secondary().to_vec(),
2755 0 : last_error: shard.last_error.lock().unwrap().clone(),
2756 0 : is_reconciling: shard.reconciler.is_some(),
2757 0 : is_pending_compute_notification: shard.pending_compute_notification,
2758 0 : is_splitting: matches!(shard.splitting, SplitState::Splitting),
2759 0 : scheduling_policy: *shard.get_scheduling_policy(),
2760 : })
2761 : }
2762 :
2763 0 : let shard_zero = shard_zero?;
2764 :
2765 0 : Some(TenantDescribeResponse {
2766 0 : tenant_id: shard_zero.tenant_shard_id.tenant_id,
2767 0 : shards: describe_shards,
2768 0 : stripe_size: shard_zero.shard.stripe_size,
2769 0 : policy: shard_zero.policy.clone(),
2770 0 : config: shard_zero.config.clone(),
2771 0 : })
2772 0 : }
2773 :
2774 0 : pub(crate) fn tenant_describe(
2775 0 : &self,
2776 0 : tenant_id: TenantId,
2777 0 : ) -> Result<TenantDescribeResponse, ApiError> {
2778 0 : let locked = self.inner.read().unwrap();
2779 0 :
2780 0 : self.tenant_describe_impl(
2781 0 : locked
2782 0 : .tenants
2783 0 : .range(TenantShardId::tenant_range(tenant_id))
2784 0 : .map(|(_k, v)| v),
2785 0 : )
2786 0 : .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
2787 0 : }
2788 :
2789 0 : pub(crate) fn tenant_list(&self) -> Vec<TenantDescribeResponse> {
2790 0 : let locked = self.inner.read().unwrap();
2791 0 :
2792 0 : let mut result = Vec::new();
2793 0 : for (_tenant_id, tenant_shards) in
2794 0 : &locked.tenants.iter().group_by(|(id, _shard)| id.tenant_id)
2795 0 : {
2796 0 : result.push(
2797 0 : self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
2798 0 : .expect("Groups are always non-empty"),
2799 0 : );
2800 0 : }
2801 :
2802 0 : result
2803 0 : }
2804 :
2805 0 : #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
2806 : async fn abort_tenant_shard_split(
2807 : &self,
2808 : op: &TenantShardSplitAbort,
2809 : ) -> Result<(), TenantShardSplitAbortError> {
2810 : // Cleaning up a split:
2811 : // - Parent shards are not destroyed during a split, just detached.
2812 : // - Failed pageserver split API calls can leave the remote node with just the parent attached,
2813 : // just the children attached, or both.
2814 : //
2815 : // Therefore our work to do is to:
2816 : // 1. Clean up storage controller's internal state to just refer to parents, no children
2817 : // 2. Call out to pageservers to ensure that children are detached
2818 : // 3. Call out to pageservers to ensure that parents are attached.
2819 : //
2820 : // Crash safety:
2821 : // - If the storage controller stops running during this cleanup *after* clearing the splitting state
2822 : // from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
2823 : // and detach them.
2824 : // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
2825 : // from our database, then we will re-enter this cleanup routine on startup.
2826 :
2827 : let TenantShardSplitAbort {
2828 : tenant_id,
2829 : new_shard_count,
2830 : new_stripe_size,
2831 : ..
2832 : } = op;
2833 :
2834 : // First abort persistent state, if any exists.
2835 : match self
2836 : .persistence
2837 : .abort_shard_split(*tenant_id, *new_shard_count)
2838 : .await?
2839 : {
2840 : AbortShardSplitStatus::Aborted => {
2841 : // Proceed to roll back any child shards created on pageservers
2842 : }
2843 : AbortShardSplitStatus::Complete => {
2844 : // The split completed (we might hit that path if e.g. our database transaction
2845 : // to write the completion landed in the database, but we dropped connection
2846 : // before seeing the result).
2847 : //
2848 : // We must update in-memory state to reflect the successful split.
2849 : self.tenant_shard_split_commit_inmem(
2850 : *tenant_id,
2851 : *new_shard_count,
2852 : *new_stripe_size,
2853 : );
2854 : return Ok(());
2855 : }
2856 : }
2857 :
2858 : // Clean up in-memory state, and accumulate the list of child locations that need detaching
2859 : let detach_locations: Vec<(Node, TenantShardId)> = {
2860 : let mut detach_locations = Vec::new();
2861 : let mut locked = self.inner.write().unwrap();
2862 : let (nodes, tenants, scheduler) = locked.parts_mut();
2863 :
2864 : for (tenant_shard_id, shard) in
2865 : tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
2866 : {
2867 : if shard.shard.count == op.new_shard_count {
2868 : // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
2869 : // is infallible, so if we got an error we shouldn't have got that far.
2870 0 : tracing::warn!(
2871 0 : "During split abort, child shard {tenant_shard_id} found in-memory"
2872 0 : );
2873 : continue;
2874 : }
2875 :
2876 : // Add the children of this shard to this list of things to detach
2877 : if let Some(node_id) = shard.intent.get_attached() {
2878 : for child_id in tenant_shard_id.split(*new_shard_count) {
2879 : detach_locations.push((
2880 : nodes
2881 : .get(node_id)
2882 : .expect("Intent references nonexistent node")
2883 : .clone(),
2884 : child_id,
2885 : ));
2886 : }
2887 : } else {
2888 0 : tracing::warn!(
2889 0 : "During split abort, shard {tenant_shard_id} has no attached location"
2890 0 : );
2891 : }
2892 :
2893 0 : tracing::info!("Restoring parent shard {tenant_shard_id}");
2894 : shard.splitting = SplitState::Idle;
2895 : if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
2896 : // If this shard can't be scheduled now (perhaps due to offline nodes or
2897 : // capacity issues), that must not prevent us rolling back a split. In this
2898 : // case it should be eventually scheduled in the background.
2899 0 : tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
2900 : }
2901 :
2902 : self.maybe_reconcile_shard(shard, nodes);
2903 : }
2904 :
2905 : // We don't expect any new_shard_count shards to exist here, but drop them just in case
2906 0 : tenants.retain(|_id, s| s.shard.count != *new_shard_count);
2907 :
2908 : detach_locations
2909 : };
2910 :
2911 : for (node, child_id) in detach_locations {
2912 : if !node.is_available() {
2913 : // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
2914 : // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
2915 : // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
2916 : // them from the node.
2917 0 : tracing::warn!("Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated.");
2918 : continue;
2919 : }
2920 :
2921 : // Detach the remote child. If the pageserver split API call is still in progress, this call will get
2922 : // a 503 and retry, up to our limit.
2923 0 : tracing::info!("Detaching {child_id} on {node}...");
2924 : match node
2925 : .with_client_retries(
2926 0 : |client| async move {
2927 0 : let config = LocationConfig {
2928 0 : mode: LocationConfigMode::Detached,
2929 0 : generation: None,
2930 0 : secondary_conf: None,
2931 0 : shard_number: child_id.shard_number.0,
2932 0 : shard_count: child_id.shard_count.literal(),
2933 0 : // Stripe size and tenant config don't matter when detaching
2934 0 : shard_stripe_size: 0,
2935 0 : tenant_conf: TenantConfig::default(),
2936 0 : };
2937 0 :
2938 0 : client.location_config(child_id, config, None, false).await
2939 0 : },
2940 : &self.config.jwt_token,
2941 : 1,
2942 : 10,
2943 : Duration::from_secs(5),
2944 : &self.cancel,
2945 : )
2946 : .await
2947 : {
2948 : Some(Ok(_)) => {}
2949 : Some(Err(e)) => {
2950 : // We failed to communicate with the remote node. This is problematic: we may be
2951 : // leaving it with a rogue child shard.
2952 0 : tracing::warn!(
2953 0 : "Failed to detach child {child_id} from node {node} during abort"
2954 0 : );
2955 : return Err(e.into());
2956 : }
2957 : None => {
2958 : // Cancellation: we were shutdown or the node went offline. Shutdown is fine, we'll
2959 : // clean up on restart. The node going offline requires a retry.
2960 : return Err(TenantShardSplitAbortError::Unavailable);
2961 : }
2962 : };
2963 : }
2964 :
2965 0 : tracing::info!("Successfully aborted split");
2966 : Ok(())
2967 : }
2968 :
2969 : /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
2970 : /// of the tenant map to reflect the child shards that exist after the split.
2971 0 : fn tenant_shard_split_commit_inmem(
2972 0 : &self,
2973 0 : tenant_id: TenantId,
2974 0 : new_shard_count: ShardCount,
2975 0 : new_stripe_size: Option<ShardStripeSize>,
2976 0 : ) -> (
2977 0 : TenantShardSplitResponse,
2978 0 : Vec<(TenantShardId, NodeId, ShardStripeSize)>,
2979 0 : ) {
2980 0 : let mut response = TenantShardSplitResponse {
2981 0 : new_shards: Vec::new(),
2982 0 : };
2983 0 : let mut child_locations = Vec::new();
2984 0 : {
2985 0 : let mut locked = self.inner.write().unwrap();
2986 0 :
2987 0 : let parent_ids = locked
2988 0 : .tenants
2989 0 : .range(TenantShardId::tenant_range(tenant_id))
2990 0 : .map(|(shard_id, _)| *shard_id)
2991 0 : .collect::<Vec<_>>();
2992 0 :
2993 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2994 0 : for parent_id in parent_ids {
2995 0 : let child_ids = parent_id.split(new_shard_count);
2996 :
2997 0 : let (pageserver, generation, policy, parent_ident, config) = {
2998 0 : let mut old_state = tenants
2999 0 : .remove(&parent_id)
3000 0 : .expect("It was present, we just split it");
3001 0 :
3002 0 : // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
3003 0 : // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
3004 0 : // nothing else can clear this.
3005 0 : assert!(matches!(old_state.splitting, SplitState::Splitting));
3006 :
3007 0 : let old_attached = old_state.intent.get_attached().unwrap();
3008 0 : old_state.intent.clear(scheduler);
3009 0 : let generation = old_state.generation.expect("Shard must have been attached");
3010 0 : (
3011 0 : old_attached,
3012 0 : generation,
3013 0 : old_state.policy,
3014 0 : old_state.shard,
3015 0 : old_state.config,
3016 0 : )
3017 0 : };
3018 0 :
3019 0 : let mut schedule_context = ScheduleContext::default();
3020 0 : for child in child_ids {
3021 0 : let mut child_shard = parent_ident;
3022 0 : child_shard.number = child.shard_number;
3023 0 : child_shard.count = child.shard_count;
3024 0 : if let Some(stripe_size) = new_stripe_size {
3025 0 : child_shard.stripe_size = stripe_size;
3026 0 : }
3027 :
3028 0 : let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
3029 0 : child_observed.insert(
3030 0 : pageserver,
3031 0 : ObservedStateLocation {
3032 0 : conf: Some(attached_location_conf(
3033 0 : generation,
3034 0 : &child_shard,
3035 0 : &config,
3036 0 : matches!(policy, PlacementPolicy::Attached(n) if n > 0),
3037 : )),
3038 : },
3039 : );
3040 :
3041 0 : let mut child_state = TenantShard::new(child, child_shard, policy.clone());
3042 0 : child_state.intent = IntentState::single(scheduler, Some(pageserver));
3043 0 : child_state.observed = ObservedState {
3044 0 : locations: child_observed,
3045 0 : };
3046 0 : child_state.generation = Some(generation);
3047 0 : child_state.config = config.clone();
3048 0 :
3049 0 : // The child's TenantShard::splitting is intentionally left at the default value of Idle,
3050 0 : // as at this point in the split process we have succeeded and this part is infallible:
3051 0 : // we will never need to do any special recovery from this state.
3052 0 :
3053 0 : child_locations.push((child, pageserver, child_shard.stripe_size));
3054 :
3055 0 : if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
3056 : // This is not fatal, because we've implicitly already got an attached
3057 : // location for the child shard. Failure here just means we couldn't
3058 : // find a secondary (e.g. because cluster is overloaded).
3059 0 : tracing::warn!("Failed to schedule child shard {child}: {e}");
3060 0 : }
3061 : // In the background, attach secondary locations for the new shards
3062 0 : self.maybe_reconcile_shard(&mut child_state, nodes);
3063 0 :
3064 0 : tenants.insert(child, child_state);
3065 0 : response.new_shards.push(child);
3066 : }
3067 : }
3068 :
3069 0 : (response, child_locations)
3070 0 : }
3071 0 : }
3072 :
3073 0 : pub(crate) async fn tenant_shard_split(
3074 0 : &self,
3075 0 : tenant_id: TenantId,
3076 0 : split_req: TenantShardSplitRequest,
3077 0 : ) -> Result<TenantShardSplitResponse, ApiError> {
3078 : // TODO: return 503 if we get stuck waiting for this lock
3079 : // (issue https://github.com/neondatabase/neon/issues/7108)
3080 0 : let _tenant_lock = self.tenant_op_locks.exclusive(tenant_id).await;
3081 :
3082 0 : let new_shard_count = ShardCount::new(split_req.new_shard_count);
3083 0 : let new_stripe_size = split_req.new_stripe_size;
3084 :
3085 : // Validate the request and construct parameters. This phase is fallible, but does not require
3086 : // rollback on errors, as it does no I/O and mutates no state.
3087 0 : let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
3088 0 : ShardSplitAction::NoOp(resp) => return Ok(resp),
3089 0 : ShardSplitAction::Split(params) => params,
3090 : };
3091 :
3092 : // Execute this split: this phase mutates state and does remote I/O on pageservers. If it fails,
3093 : // we must roll back.
3094 0 : let r = self
3095 0 : .do_tenant_shard_split(tenant_id, shard_split_params)
3096 0 : .await;
3097 :
3098 0 : match r {
3099 0 : Ok(r) => Ok(r),
3100 0 : Err(e) => {
3101 0 : // Split might be part-done, we must do work to abort it.
3102 0 : tracing::warn!("Enqueuing background abort of split on {tenant_id}");
3103 0 : self.abort_tx
3104 0 : .send(TenantShardSplitAbort {
3105 0 : tenant_id,
3106 0 : new_shard_count,
3107 0 : new_stripe_size,
3108 0 : _tenant_lock,
3109 0 : })
3110 0 : // Ignore error sending: that just means we're shutting down: aborts are ephemeral so it's fine to drop it.
3111 0 : .ok();
3112 0 : Err(e)
3113 : }
3114 : }
3115 0 : }
3116 :
3117 0 : fn prepare_tenant_shard_split(
3118 0 : &self,
3119 0 : tenant_id: TenantId,
3120 0 : split_req: TenantShardSplitRequest,
3121 0 : ) -> Result<ShardSplitAction, ApiError> {
3122 0 : fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
3123 0 : anyhow::anyhow!("failpoint")
3124 0 : )));
3125 :
3126 0 : let mut policy = None;
3127 0 : let mut config = None;
3128 0 : let mut shard_ident = None;
3129 : // Validate input, and calculate which shards we will create
3130 0 : let (old_shard_count, targets) =
3131 : {
3132 0 : let locked = self.inner.read().unwrap();
3133 0 :
3134 0 : let pageservers = locked.nodes.clone();
3135 0 :
3136 0 : let mut targets = Vec::new();
3137 0 :
3138 0 : // In case this is a retry, count how many already-split shards we found
3139 0 : let mut children_found = Vec::new();
3140 0 : let mut old_shard_count = None;
3141 :
3142 0 : for (tenant_shard_id, shard) in
3143 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3144 : {
3145 0 : match shard.shard.count.count().cmp(&split_req.new_shard_count) {
3146 : Ordering::Equal => {
3147 : // Already split this
3148 0 : children_found.push(*tenant_shard_id);
3149 0 : continue;
3150 : }
3151 : Ordering::Greater => {
3152 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3153 0 : "Requested count {} but already have shards at count {}",
3154 0 : split_req.new_shard_count,
3155 0 : shard.shard.count.count()
3156 0 : )));
3157 : }
3158 0 : Ordering::Less => {
3159 0 : // Fall through: this shard has lower count than requested,
3160 0 : // is a candidate for splitting.
3161 0 : }
3162 0 : }
3163 0 :
3164 0 : match old_shard_count {
3165 0 : None => old_shard_count = Some(shard.shard.count),
3166 0 : Some(old_shard_count) => {
3167 0 : if old_shard_count != shard.shard.count {
3168 : // We may hit this case if a caller asked for two splits to
3169 : // different sizes, before the first one is complete.
3170 : // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
3171 : // of shard_count=1 and shard_count=2 shards in the map.
3172 0 : return Err(ApiError::Conflict(
3173 0 : "Cannot split, currently mid-split".to_string(),
3174 0 : ));
3175 0 : }
3176 : }
3177 : }
3178 0 : if policy.is_none() {
3179 0 : policy = Some(shard.policy.clone());
3180 0 : }
3181 0 : if shard_ident.is_none() {
3182 0 : shard_ident = Some(shard.shard);
3183 0 : }
3184 0 : if config.is_none() {
3185 0 : config = Some(shard.config.clone());
3186 0 : }
3187 :
3188 0 : if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
3189 0 : tracing::info!(
3190 0 : "Tenant shard {} already has shard count {}",
3191 0 : tenant_shard_id,
3192 0 : split_req.new_shard_count
3193 0 : );
3194 0 : continue;
3195 0 : }
3196 :
3197 0 : let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
3198 0 : anyhow::anyhow!("Cannot split a tenant that is not attached"),
3199 0 : ))?;
3200 :
3201 0 : let node = pageservers
3202 0 : .get(&node_id)
3203 0 : .expect("Pageservers may not be deleted while referenced");
3204 0 :
3205 0 : targets.push(ShardSplitTarget {
3206 0 : parent_id: *tenant_shard_id,
3207 0 : node: node.clone(),
3208 0 : child_ids: tenant_shard_id
3209 0 : .split(ShardCount::new(split_req.new_shard_count)),
3210 0 : });
3211 : }
3212 :
3213 0 : if targets.is_empty() {
3214 0 : if children_found.len() == split_req.new_shard_count as usize {
3215 0 : return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
3216 0 : new_shards: children_found,
3217 0 : }));
3218 : } else {
3219 : // No shards found to split, and no existing children found: the
3220 : // tenant doesn't exist at all.
3221 0 : return Err(ApiError::NotFound(
3222 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
3223 0 : ));
3224 : }
3225 0 : }
3226 0 :
3227 0 : (old_shard_count, targets)
3228 0 : };
3229 0 :
3230 0 : // unwrap safety: we would have returned above if we didn't find at least one shard to split
3231 0 : let old_shard_count = old_shard_count.unwrap();
3232 0 : let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
3233 : // This ShardIdentity will be used as the template for all children, so this implicitly
3234 : // applies the new stripe size to the children.
3235 0 : let mut shard_ident = shard_ident.unwrap();
3236 0 : if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
3237 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards", shard_ident.stripe_size)));
3238 0 : }
3239 0 :
3240 0 : shard_ident.stripe_size = new_stripe_size;
3241 0 : tracing::info!("applied stripe size {}", shard_ident.stripe_size.0);
3242 0 : shard_ident
3243 : } else {
3244 0 : shard_ident.unwrap()
3245 : };
3246 0 : let policy = policy.unwrap();
3247 0 : let config = config.unwrap();
3248 0 :
3249 0 : Ok(ShardSplitAction::Split(ShardSplitParams {
3250 0 : old_shard_count,
3251 0 : new_shard_count: ShardCount::new(split_req.new_shard_count),
3252 0 : new_stripe_size: split_req.new_stripe_size,
3253 0 : targets,
3254 0 : policy,
3255 0 : config,
3256 0 : shard_ident,
3257 0 : }))
3258 0 : }
3259 :
3260 0 : async fn do_tenant_shard_split(
3261 0 : &self,
3262 0 : tenant_id: TenantId,
3263 0 : params: ShardSplitParams,
3264 0 : ) -> Result<TenantShardSplitResponse, ApiError> {
3265 0 : // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
3266 0 : // request could occur here, deleting or mutating the tenant. begin_shard_split checks that the
3267 0 : // parent shards exist as expected, but it would be neater to do the above pre-checks within the
3268 0 : // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
3269 0 : // (https://github.com/neondatabase/neon/issues/6676)
3270 0 :
3271 0 : let ShardSplitParams {
3272 0 : old_shard_count,
3273 0 : new_shard_count,
3274 0 : new_stripe_size,
3275 0 : mut targets,
3276 0 : policy,
3277 0 : config,
3278 0 : shard_ident,
3279 0 : } = params;
3280 :
3281 : // Drop any secondary locations: pageservers do not support splitting these, and in any case the
3282 : // end-state for a split tenant will usually be to have secondary locations on different nodes.
3283 : // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
3284 : // at the time of split.
3285 0 : let waiters = {
3286 0 : let mut locked = self.inner.write().unwrap();
3287 0 : let mut waiters = Vec::new();
3288 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3289 0 : for target in &mut targets {
3290 0 : let Some(shard) = tenants.get_mut(&target.parent_id) else {
3291 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
3292 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3293 0 : "Shard {} not found",
3294 0 : target.parent_id
3295 0 : )));
3296 : };
3297 :
3298 0 : if shard.intent.get_attached() != &Some(target.node.get_id()) {
3299 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
3300 0 : return Err(ApiError::Conflict(format!(
3301 0 : "Shard {} unexpectedly rescheduled during split",
3302 0 : target.parent_id
3303 0 : )));
3304 0 : }
3305 0 :
3306 0 : // Irrespective of PlacementPolicy, clear secondary locations from intent
3307 0 : shard.intent.clear_secondary(scheduler);
3308 :
3309 : // Run Reconciler to execute detach fo secondary locations.
3310 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
3311 0 : waiters.push(waiter);
3312 0 : }
3313 : }
3314 0 : waiters
3315 0 : };
3316 0 : self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
3317 :
3318 : // Before creating any new child shards in memory or on the pageservers, persist them: this
3319 : // enables us to ensure that we will always be able to clean up if something goes wrong. This also
3320 : // acts as the protection against two concurrent attempts to split: one of them will get a database
3321 : // error trying to insert the child shards.
3322 0 : let mut child_tsps = Vec::new();
3323 0 : for target in &targets {
3324 0 : let mut this_child_tsps = Vec::new();
3325 0 : for child in &target.child_ids {
3326 0 : let mut child_shard = shard_ident;
3327 0 : child_shard.number = child.shard_number;
3328 0 : child_shard.count = child.shard_count;
3329 0 :
3330 0 : tracing::info!(
3331 0 : "Create child shard persistence with stripe size {}",
3332 0 : shard_ident.stripe_size.0
3333 0 : );
3334 :
3335 0 : this_child_tsps.push(TenantShardPersistence {
3336 0 : tenant_id: child.tenant_id.to_string(),
3337 0 : shard_number: child.shard_number.0 as i32,
3338 0 : shard_count: child.shard_count.literal() as i32,
3339 0 : shard_stripe_size: shard_ident.stripe_size.0 as i32,
3340 0 : // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
3341 0 : // populate the correct generation as part of its transaction, to protect us
3342 0 : // against racing with changes in the state of the parent.
3343 0 : generation: None,
3344 0 : generation_pageserver: Some(target.node.get_id().0 as i64),
3345 0 : placement_policy: serde_json::to_string(&policy).unwrap(),
3346 0 : config: serde_json::to_string(&config).unwrap(),
3347 0 : splitting: SplitState::Splitting,
3348 0 :
3349 0 : // Scheduling policies do not carry through to children
3350 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
3351 0 : .unwrap(),
3352 0 : });
3353 : }
3354 :
3355 0 : child_tsps.push((target.parent_id, this_child_tsps));
3356 : }
3357 :
3358 0 : if let Err(e) = self
3359 0 : .persistence
3360 0 : .begin_shard_split(old_shard_count, tenant_id, child_tsps)
3361 0 : .await
3362 : {
3363 0 : match e {
3364 : DatabaseError::Query(diesel::result::Error::DatabaseError(
3365 : DatabaseErrorKind::UniqueViolation,
3366 : _,
3367 : )) => {
3368 : // Inserting a child shard violated a unique constraint: we raced with another call to
3369 : // this function
3370 0 : tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
3371 0 : return Err(ApiError::Conflict("Tenant is already splitting".into()));
3372 : }
3373 0 : _ => return Err(ApiError::InternalServerError(e.into())),
3374 : }
3375 0 : }
3376 0 : fail::fail_point!("shard-split-post-begin", |_| Err(
3377 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
3378 0 : ));
3379 :
3380 : // Now that I have persisted the splitting state, apply it in-memory. This is infallible, so
3381 : // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
3382 : // is not set in memory, then it was not persisted.
3383 : {
3384 0 : let mut locked = self.inner.write().unwrap();
3385 0 : for target in &targets {
3386 0 : if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
3387 0 : parent_shard.splitting = SplitState::Splitting;
3388 0 : // Put the observed state to None, to reflect that it is indeterminate once we start the
3389 0 : // split operation.
3390 0 : parent_shard
3391 0 : .observed
3392 0 : .locations
3393 0 : .insert(target.node.get_id(), ObservedStateLocation { conf: None });
3394 0 : }
3395 : }
3396 : }
3397 :
3398 : // TODO: issue split calls concurrently (this only matters once we're splitting
3399 : // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
3400 :
3401 0 : for target in &targets {
3402 : let ShardSplitTarget {
3403 0 : parent_id,
3404 0 : node,
3405 0 : child_ids,
3406 0 : } = target;
3407 0 : let client = PageserverClient::new(
3408 0 : node.get_id(),
3409 0 : node.base_url(),
3410 0 : self.config.jwt_token.as_deref(),
3411 0 : );
3412 0 : let response = client
3413 0 : .tenant_shard_split(
3414 0 : *parent_id,
3415 0 : TenantShardSplitRequest {
3416 0 : new_shard_count: new_shard_count.literal(),
3417 0 : new_stripe_size,
3418 0 : },
3419 0 : )
3420 0 : .await
3421 0 : .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
3422 :
3423 0 : fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
3424 0 : "failpoint".to_string()
3425 0 : )));
3426 :
3427 0 : tracing::info!(
3428 0 : "Split {} into {}",
3429 0 : parent_id,
3430 0 : response
3431 0 : .new_shards
3432 0 : .iter()
3433 0 : .map(|s| format!("{:?}", s))
3434 0 : .collect::<Vec<_>>()
3435 0 : .join(",")
3436 0 : );
3437 :
3438 0 : if &response.new_shards != child_ids {
3439 : // This should never happen: the pageserver should agree with us on how shard splits work.
3440 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3441 0 : "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
3442 0 : parent_id,
3443 0 : response.new_shards,
3444 0 : child_ids
3445 0 : )));
3446 0 : }
3447 : }
3448 :
3449 : // TODO: if the pageserver restarted concurrently with our split API call,
3450 : // the actual generation of the child shard might differ from the generation
3451 : // we expect it to have. In order for our in-database generation to end up
3452 : // correct, we should carry the child generation back in the response and apply it here
3453 : // in complete_shard_split (and apply the correct generation in memory)
3454 : // (or, we can carry generation in the request and reject the request if
3455 : // it doesn't match, but that requires more retry logic on this side)
3456 :
3457 0 : self.persistence
3458 0 : .complete_shard_split(tenant_id, old_shard_count)
3459 0 : .await?;
3460 :
3461 0 : fail::fail_point!("shard-split-post-complete", |_| Err(
3462 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
3463 0 : ));
3464 :
3465 : // Replace all the shards we just split with their children: this phase is infallible.
3466 0 : let (response, child_locations) =
3467 0 : self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
3468 0 :
3469 0 : // Send compute notifications for all the new shards
3470 0 : let mut failed_notifications = Vec::new();
3471 0 : for (child_id, child_ps, stripe_size) in child_locations {
3472 0 : if let Err(e) = self
3473 0 : .compute_hook
3474 0 : .notify(child_id, child_ps, stripe_size, &self.cancel)
3475 0 : .await
3476 : {
3477 0 : tracing::warn!("Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
3478 0 : child_id, child_ps);
3479 0 : failed_notifications.push(child_id);
3480 0 : }
3481 : }
3482 :
3483 : // If we failed any compute notifications, make a note to retry later.
3484 0 : if !failed_notifications.is_empty() {
3485 0 : let mut locked = self.inner.write().unwrap();
3486 0 : for failed in failed_notifications {
3487 0 : if let Some(shard) = locked.tenants.get_mut(&failed) {
3488 0 : shard.pending_compute_notification = true;
3489 0 : }
3490 : }
3491 0 : }
3492 :
3493 0 : Ok(response)
3494 0 : }
3495 :
3496 0 : pub(crate) async fn tenant_shard_migrate(
3497 0 : &self,
3498 0 : tenant_shard_id: TenantShardId,
3499 0 : migrate_req: TenantShardMigrateRequest,
3500 0 : ) -> Result<TenantShardMigrateResponse, ApiError> {
3501 0 : let waiter = {
3502 0 : let mut locked = self.inner.write().unwrap();
3503 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3504 :
3505 0 : let Some(node) = nodes.get(&migrate_req.node_id) else {
3506 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3507 0 : "Node {} not found",
3508 0 : migrate_req.node_id
3509 0 : )));
3510 : };
3511 :
3512 0 : if !node.is_available() {
3513 : // Warn but proceed: the caller may intend to manually adjust the placement of
3514 : // a shard even if the node is down, e.g. if intervening during an incident.
3515 0 : tracing::warn!("Migrating to unavailable node {node}");
3516 0 : }
3517 :
3518 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
3519 0 : return Err(ApiError::NotFound(
3520 0 : anyhow::anyhow!("Tenant shard not found").into(),
3521 0 : ));
3522 : };
3523 :
3524 0 : if shard.intent.get_attached() == &Some(migrate_req.node_id) {
3525 : // No-op case: we will still proceed to wait for reconciliation in case it is
3526 : // incomplete from an earlier update to the intent.
3527 0 : tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
3528 : } else {
3529 0 : let old_attached = *shard.intent.get_attached();
3530 0 :
3531 0 : match shard.policy {
3532 0 : PlacementPolicy::Attached(n) => {
3533 0 : // If our new attached node was a secondary, it no longer should be.
3534 0 : shard.intent.remove_secondary(scheduler, migrate_req.node_id);
3535 :
3536 : // If we were already attached to something, demote that to a secondary
3537 0 : if let Some(old_attached) = old_attached {
3538 0 : if n > 0 {
3539 : // Remove other secondaries to make room for the location we'll demote
3540 0 : while shard.intent.get_secondary().len() >= n {
3541 0 : shard.intent.pop_secondary(scheduler);
3542 0 : }
3543 :
3544 0 : shard.intent.push_secondary(scheduler, old_attached);
3545 0 : }
3546 0 : }
3547 :
3548 0 : shard.intent.set_attached(scheduler, Some(migrate_req.node_id));
3549 : }
3550 0 : PlacementPolicy::Secondary => {
3551 0 : shard.intent.clear(scheduler);
3552 0 : shard.intent.push_secondary(scheduler, migrate_req.node_id);
3553 0 : }
3554 : PlacementPolicy::Detached => {
3555 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3556 0 : "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
3557 0 : )))
3558 : }
3559 : }
3560 :
3561 0 : tracing::info!("Migrating: new intent {:?}", shard.intent);
3562 0 : shard.sequence = shard.sequence.next();
3563 : }
3564 :
3565 0 : self.maybe_reconcile_shard(shard, nodes)
3566 : };
3567 :
3568 0 : if let Some(waiter) = waiter {
3569 0 : waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
3570 : } else {
3571 0 : tracing::info!("Migration is a no-op");
3572 : }
3573 :
3574 0 : Ok(TenantShardMigrateResponse {})
3575 0 : }
3576 :
3577 : /// This is for debug/support only: we simply drop all state for a tenant, without
3578 : /// detaching or deleting it on pageservers.
3579 0 : pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
3580 0 : self.persistence.delete_tenant(tenant_id).await?;
3581 :
3582 0 : let mut locked = self.inner.write().unwrap();
3583 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
3584 0 : let mut shards = Vec::new();
3585 0 : for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
3586 0 : shards.push(*tenant_shard_id);
3587 0 : }
3588 :
3589 0 : for shard_id in shards {
3590 0 : if let Some(mut shard) = tenants.remove(&shard_id) {
3591 0 : shard.intent.clear(scheduler);
3592 0 : }
3593 : }
3594 :
3595 0 : Ok(())
3596 0 : }
3597 :
3598 : /// For debug/support: a full JSON dump of TenantShards. Returns a response so that
3599 : /// we don't have to make TenantShard clonable in the return path.
3600 0 : pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
3601 0 : let serialized = {
3602 0 : let locked = self.inner.read().unwrap();
3603 0 : let result = locked.tenants.values().collect::<Vec<_>>();
3604 0 : serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
3605 : };
3606 :
3607 0 : hyper::Response::builder()
3608 0 : .status(hyper::StatusCode::OK)
3609 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
3610 0 : .body(hyper::Body::from(serialized))
3611 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
3612 0 : }
3613 :
3614 : /// Check the consistency of in-memory state vs. persistent state, and check that the
3615 : /// scheduler's statistics are up to date.
3616 : ///
3617 : /// These consistency checks expect an **idle** system. If changes are going on while
3618 : /// we run, then we can falsely indicate a consistency issue. This is sufficient for end-of-test
3619 : /// checks, but not suitable for running continuously in the background in the field.
3620 0 : pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
3621 0 : let (mut expect_nodes, mut expect_shards) = {
3622 0 : let locked = self.inner.read().unwrap();
3623 0 :
3624 0 : locked
3625 0 : .scheduler
3626 0 : .consistency_check(locked.nodes.values(), locked.tenants.values())
3627 0 : .context("Scheduler checks")
3628 0 : .map_err(ApiError::InternalServerError)?;
3629 :
3630 0 : let expect_nodes = locked
3631 0 : .nodes
3632 0 : .values()
3633 0 : .map(|n| n.to_persistent())
3634 0 : .collect::<Vec<_>>();
3635 0 :
3636 0 : let expect_shards = locked
3637 0 : .tenants
3638 0 : .values()
3639 0 : .map(|t| t.to_persistent())
3640 0 : .collect::<Vec<_>>();
3641 :
3642 : // This method can only validate the state of an idle system: if a reconcile is in
3643 : // progress, fail out early to avoid giving false errors on state that won't match
3644 : // between database and memory under a ReconcileResult is processed.
3645 0 : for t in locked.tenants.values() {
3646 0 : if t.reconciler.is_some() {
3647 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3648 0 : "Shard {} reconciliation in progress",
3649 0 : t.tenant_shard_id
3650 0 : )));
3651 0 : }
3652 : }
3653 :
3654 0 : (expect_nodes, expect_shards)
3655 : };
3656 :
3657 0 : let mut nodes = self.persistence.list_nodes().await?;
3658 0 : expect_nodes.sort_by_key(|n| n.node_id);
3659 0 : nodes.sort_by_key(|n| n.node_id);
3660 0 :
3661 0 : if nodes != expect_nodes {
3662 0 : tracing::error!("Consistency check failed on nodes.");
3663 0 : tracing::error!(
3664 0 : "Nodes in memory: {}",
3665 0 : serde_json::to_string(&expect_nodes)
3666 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
3667 0 : );
3668 0 : tracing::error!(
3669 0 : "Nodes in database: {}",
3670 0 : serde_json::to_string(&nodes)
3671 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
3672 0 : );
3673 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3674 0 : "Node consistency failure"
3675 0 : )));
3676 0 : }
3677 :
3678 0 : let mut shards = self.persistence.list_tenant_shards().await?;
3679 0 : shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
3680 0 : expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
3681 0 :
3682 0 : if shards != expect_shards {
3683 0 : tracing::error!("Consistency check failed on shards.");
3684 0 : tracing::error!(
3685 0 : "Shards in memory: {}",
3686 0 : serde_json::to_string(&expect_shards)
3687 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
3688 0 : );
3689 0 : tracing::error!(
3690 0 : "Shards in database: {}",
3691 0 : serde_json::to_string(&shards)
3692 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
3693 0 : );
3694 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3695 0 : "Shard consistency failure"
3696 0 : )));
3697 0 : }
3698 0 :
3699 0 : Ok(())
3700 0 : }
3701 :
3702 : /// For debug/support: a JSON dump of the [`Scheduler`]. Returns a response so that
3703 : /// we don't have to make TenantShard clonable in the return path.
3704 0 : pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
3705 0 : let serialized = {
3706 0 : let locked = self.inner.read().unwrap();
3707 0 : serde_json::to_string(&locked.scheduler)
3708 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
3709 : };
3710 :
3711 0 : hyper::Response::builder()
3712 0 : .status(hyper::StatusCode::OK)
3713 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
3714 0 : .body(hyper::Body::from(serialized))
3715 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
3716 0 : }
3717 :
3718 : /// This is for debug/support only: we simply drop all state for a tenant, without
3719 : /// detaching or deleting it on pageservers. We do not try and re-schedule any
3720 : /// tenants that were on this node.
3721 : ///
3722 : /// TODO: proper node deletion API that unhooks things more gracefully
3723 0 : pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
3724 0 : self.persistence.delete_node(node_id).await?;
3725 :
3726 0 : let mut locked = self.inner.write().unwrap();
3727 :
3728 0 : for shard in locked.tenants.values_mut() {
3729 0 : shard.deref_node(node_id);
3730 0 : }
3731 :
3732 0 : let mut nodes = (*locked.nodes).clone();
3733 0 : nodes.remove(&node_id);
3734 0 : locked.nodes = Arc::new(nodes);
3735 0 :
3736 0 : locked.scheduler.node_remove(node_id);
3737 0 :
3738 0 : Ok(())
3739 0 : }
3740 :
3741 0 : pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
3742 0 : let nodes = {
3743 0 : self.inner
3744 0 : .read()
3745 0 : .unwrap()
3746 0 : .nodes
3747 0 : .values()
3748 0 : .cloned()
3749 0 : .collect::<Vec<_>>()
3750 0 : };
3751 0 :
3752 0 : Ok(nodes)
3753 0 : }
3754 :
3755 0 : pub(crate) async fn node_register(
3756 0 : &self,
3757 0 : register_req: NodeRegisterRequest,
3758 0 : ) -> Result<(), ApiError> {
3759 0 : let _node_lock = self.node_op_locks.exclusive(register_req.node_id).await;
3760 :
3761 : // Pre-check for an already-existing node
3762 : {
3763 0 : let locked = self.inner.read().unwrap();
3764 0 : if let Some(node) = locked.nodes.get(®ister_req.node_id) {
3765 : // Note that we do not do a total equality of the struct, because we don't require
3766 : // the availability/scheduling states to agree for a POST to be idempotent.
3767 0 : if node.registration_match(®ister_req) {
3768 0 : tracing::info!(
3769 0 : "Node {} re-registered with matching address",
3770 0 : register_req.node_id
3771 0 : );
3772 0 : return Ok(());
3773 : } else {
3774 : // TODO: decide if we want to allow modifying node addresses without removing and re-adding
3775 : // the node. Safest/simplest thing is to refuse it, and usually we deploy with
3776 : // a fixed address through the lifetime of a node.
3777 0 : tracing::warn!(
3778 0 : "Node {} tried to register with different address",
3779 0 : register_req.node_id
3780 0 : );
3781 0 : return Err(ApiError::Conflict(
3782 0 : "Node is already registered with different address".to_string(),
3783 0 : ));
3784 : }
3785 0 : }
3786 0 : }
3787 0 :
3788 0 : // We do not require that a node is actually online when registered (it will start life
3789 0 : // with it's availability set to Offline), but we _do_ require that its DNS record exists. We're
3790 0 : // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
3791 0 : // that register themselves with a broken DNS config. We check only the HTTP hostname, because
3792 0 : // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
3793 0 : if tokio::net::lookup_host(format!(
3794 0 : "{}:{}",
3795 0 : register_req.listen_http_addr, register_req.listen_http_port
3796 0 : ))
3797 0 : .await
3798 0 : .is_err()
3799 : {
3800 : // If we have a transient DNS issue, it's up to the caller to retry their registration. Because
3801 : // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
3802 : // we return a soft 503 error, to encourage callers to retry past transient issues.
3803 0 : return Err(ApiError::ResourceUnavailable(
3804 0 : format!(
3805 0 : "Node {} tried to register with unknown DNS name '{}'",
3806 0 : register_req.node_id, register_req.listen_http_addr
3807 0 : )
3808 0 : .into(),
3809 0 : ));
3810 0 : }
3811 0 :
3812 0 : // Ordering: we must persist the new node _before_ adding it to in-memory state.
3813 0 : // This ensures that before we use it for anything or expose it via any external
3814 0 : // API, it is guaranteed to be available after a restart.
3815 0 : let new_node = Node::new(
3816 0 : register_req.node_id,
3817 0 : register_req.listen_http_addr,
3818 0 : register_req.listen_http_port,
3819 0 : register_req.listen_pg_addr,
3820 0 : register_req.listen_pg_port,
3821 0 : );
3822 0 :
3823 0 : // TODO: idempotency if the node already exists in the database
3824 0 : self.persistence.insert_node(&new_node).await?;
3825 :
3826 0 : let mut locked = self.inner.write().unwrap();
3827 0 : let mut new_nodes = (*locked.nodes).clone();
3828 0 :
3829 0 : locked.scheduler.node_upsert(&new_node);
3830 0 : new_nodes.insert(register_req.node_id, new_node);
3831 0 :
3832 0 : locked.nodes = Arc::new(new_nodes);
3833 0 :
3834 0 : tracing::info!(
3835 0 : "Registered pageserver {}, now have {} pageservers",
3836 0 : register_req.node_id,
3837 0 : locked.nodes.len()
3838 0 : );
3839 0 : Ok(())
3840 0 : }
3841 :
3842 0 : pub(crate) async fn node_configure(
3843 0 : &self,
3844 0 : node_id: NodeId,
3845 0 : availability: Option<NodeAvailability>,
3846 0 : scheduling: Option<NodeSchedulingPolicy>,
3847 0 : ) -> Result<(), ApiError> {
3848 0 : let _node_lock = self.node_op_locks.exclusive(node_id).await;
3849 :
3850 0 : if let Some(scheduling) = scheduling {
3851 : // Scheduling is a persistent part of Node: we must write updates to the database before
3852 : // applying them in memory
3853 0 : self.persistence.update_node(node_id, scheduling).await?;
3854 0 : }
3855 :
3856 : // If we're activating a node, then before setting it active we must reconcile any shard locations
3857 : // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
3858 : // by calling [`Self::node_activate_reconcile`]
3859 : //
3860 : // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
3861 : // nothing else can mutate its availability while we run.
3862 0 : let availability_transition = if let Some(input_availability) = availability {
3863 0 : let (activate_node, availability_transition) = {
3864 0 : let locked = self.inner.read().unwrap();
3865 0 : let Some(node) = locked.nodes.get(&node_id) else {
3866 0 : return Err(ApiError::NotFound(
3867 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
3868 0 : ));
3869 : };
3870 :
3871 0 : (
3872 0 : node.clone(),
3873 0 : node.get_availability_transition(input_availability),
3874 0 : )
3875 : };
3876 :
3877 0 : if matches!(availability_transition, AvailabilityTransition::ToActive) {
3878 0 : self.node_activate_reconcile(activate_node, &_node_lock)
3879 0 : .await?;
3880 0 : }
3881 0 : availability_transition
3882 : } else {
3883 0 : AvailabilityTransition::Unchanged
3884 : };
3885 :
3886 : // Apply changes from the request to our in-memory state for the Node
3887 0 : let mut locked = self.inner.write().unwrap();
3888 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3889 0 :
3890 0 : let mut new_nodes = (**nodes).clone();
3891 :
3892 0 : let Some(node) = new_nodes.get_mut(&node_id) else {
3893 0 : return Err(ApiError::NotFound(
3894 0 : anyhow::anyhow!("Node not registered").into(),
3895 0 : ));
3896 : };
3897 :
3898 0 : if let Some(availability) = &availability {
3899 0 : node.set_availability(*availability);
3900 0 : }
3901 :
3902 0 : if let Some(scheduling) = scheduling {
3903 0 : node.set_scheduling(scheduling);
3904 0 :
3905 0 : // TODO: once we have a background scheduling ticker for fill/drain, kick it
3906 0 : // to wake up and start working.
3907 0 : }
3908 :
3909 : // Update the scheduler, in case the elegibility of the node for new shards has changed
3910 0 : scheduler.node_upsert(node);
3911 0 :
3912 0 : let new_nodes = Arc::new(new_nodes);
3913 0 :
3914 0 : // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
3915 0 : match availability_transition {
3916 : AvailabilityTransition::ToOffline => {
3917 0 : tracing::info!("Node {} transition to offline", node_id);
3918 0 : let mut tenants_affected: usize = 0;
3919 :
3920 0 : for (tenant_shard_id, tenant_shard) in tenants {
3921 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
3922 0 : // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
3923 0 : // not assume our knowledge of the node's configuration is accurate until it comes back online
3924 0 : observed_loc.conf = None;
3925 0 : }
3926 :
3927 0 : if new_nodes.len() == 1 {
3928 : // Special case for single-node cluster: there is no point trying to reschedule
3929 : // any tenant shards: avoid doing so, in order to avoid spewing warnings about
3930 : // failures to schedule them.
3931 0 : continue;
3932 0 : }
3933 0 :
3934 0 : if tenant_shard.intent.demote_attached(node_id) {
3935 0 : tenant_shard.sequence = tenant_shard.sequence.next();
3936 0 :
3937 0 : // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters
3938 0 : // for tenants without secondary locations: if they have a secondary location, then this
3939 0 : // schedule() call is just promoting an existing secondary)
3940 0 : let mut schedule_context = ScheduleContext::default();
3941 0 :
3942 0 : match tenant_shard.schedule(scheduler, &mut schedule_context) {
3943 0 : Err(e) => {
3944 0 : // It is possible that some tenants will become unschedulable when too many pageservers
3945 0 : // go offline: in this case there isn't much we can do other than make the issue observable.
3946 0 : // TODO: give TenantShard a scheduling error attribute to be queried later.
3947 0 : tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
3948 : }
3949 : Ok(()) => {
3950 0 : if self
3951 0 : .maybe_reconcile_shard(tenant_shard, &new_nodes)
3952 0 : .is_some()
3953 0 : {
3954 0 : tenants_affected += 1;
3955 0 : };
3956 : }
3957 : }
3958 0 : }
3959 : }
3960 0 : tracing::info!(
3961 0 : "Launched {} reconciler tasks for tenants affected by node {} going offline",
3962 0 : tenants_affected,
3963 0 : node_id
3964 0 : )
3965 : }
3966 : AvailabilityTransition::ToActive => {
3967 0 : tracing::info!("Node {} transition to active", node_id);
3968 : // When a node comes back online, we must reconcile any tenant that has a None observed
3969 : // location on the node.
3970 0 : for tenant_shard in locked.tenants.values_mut() {
3971 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
3972 0 : if observed_loc.conf.is_none() {
3973 0 : self.maybe_reconcile_shard(tenant_shard, &new_nodes);
3974 0 : }
3975 0 : }
3976 : }
3977 :
3978 : // TODO: in the background, we should balance work back onto this pageserver
3979 : }
3980 : AvailabilityTransition::Unchanged => {
3981 0 : tracing::info!("Node {} no change during config", node_id);
3982 : }
3983 : }
3984 :
3985 0 : locked.nodes = new_nodes;
3986 0 :
3987 0 : Ok(())
3988 0 : }
3989 :
3990 : /// Helper for methods that will try and call pageserver APIs for
3991 : /// a tenant, such as timeline CRUD: they cannot proceed unless the tenant
3992 : /// is attached somewhere.
3993 0 : fn ensure_attached_schedule(
3994 0 : &self,
3995 0 : mut locked: std::sync::RwLockWriteGuard<'_, ServiceState>,
3996 0 : tenant_id: TenantId,
3997 0 : ) -> Result<Vec<ReconcilerWaiter>, anyhow::Error> {
3998 0 : let mut waiters = Vec::new();
3999 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4000 0 :
4001 0 : let mut schedule_context = ScheduleContext::default();
4002 0 : for (tenant_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
4003 0 : shard.schedule(scheduler, &mut schedule_context)?;
4004 :
4005 : // The shard's policies may not result in an attached location being scheduled: this
4006 : // is an error because our caller needs it attached somewhere.
4007 0 : if shard.intent.get_attached().is_none() {
4008 0 : return Err(anyhow::anyhow!(
4009 0 : "Tenant {tenant_id} not scheduled to be attached"
4010 0 : ));
4011 0 : };
4012 0 :
4013 0 : if shard.stably_attached().is_some() {
4014 : // We do not require the shard to be totally up to date on reconciliation: we just require
4015 : // that it has been attached on the intended node. Other dirty state such as unattached secondary
4016 : // locations, or compute hook notifications can be ignored.
4017 0 : continue;
4018 0 : }
4019 :
4020 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
4021 0 : tracing::info!("Waiting for shard {tenant_shard_id} to reconcile, in order to ensure it is attached");
4022 0 : waiters.push(waiter);
4023 0 : }
4024 : }
4025 0 : Ok(waiters)
4026 0 : }
4027 :
4028 0 : async fn ensure_attached_wait(&self, tenant_id: TenantId) -> Result<(), ApiError> {
4029 0 : let ensure_waiters = {
4030 0 : let locked = self.inner.write().unwrap();
4031 :
4032 : // Check if the tenant is splitting: in this case, even if it is attached,
4033 : // we must act as if it is not: this blocks e.g. timeline creation/deletion
4034 : // operations during the split.
4035 0 : for (_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
4036 0 : if !matches!(shard.splitting, SplitState::Idle) {
4037 0 : return Err(ApiError::ResourceUnavailable(
4038 0 : "Tenant shards are currently splitting".into(),
4039 0 : ));
4040 0 : }
4041 : }
4042 :
4043 0 : self.ensure_attached_schedule(locked, tenant_id)
4044 0 : .map_err(ApiError::InternalServerError)?
4045 : };
4046 :
4047 0 : let deadline = Instant::now().checked_add(Duration::from_secs(5)).unwrap();
4048 0 : for waiter in ensure_waiters {
4049 0 : let timeout = deadline.duration_since(Instant::now());
4050 0 : waiter.wait_timeout(timeout).await?;
4051 : }
4052 :
4053 0 : Ok(())
4054 0 : }
4055 :
4056 : /// Convenience wrapper around [`TenantShard::maybe_reconcile`] that provides
4057 : /// all the references to parts of Self that are needed
4058 0 : fn maybe_reconcile_shard(
4059 0 : &self,
4060 0 : shard: &mut TenantShard,
4061 0 : nodes: &Arc<HashMap<NodeId, Node>>,
4062 0 : ) -> Option<ReconcilerWaiter> {
4063 0 : shard.maybe_reconcile(
4064 0 : &self.result_tx,
4065 0 : nodes,
4066 0 : &self.compute_hook,
4067 0 : &self.config,
4068 0 : &self.persistence,
4069 0 : &self.gate,
4070 0 : &self.cancel,
4071 0 : )
4072 0 : }
4073 :
4074 : /// Check all tenants for pending reconciliation work, and reconcile those in need.
4075 : /// Additionally, reschedule tenants that require it.
4076 : ///
4077 : /// Returns how many reconciliation tasks were started
4078 0 : fn reconcile_all(&self) -> usize {
4079 0 : let mut locked = self.inner.write().unwrap();
4080 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
4081 0 : let pageservers = nodes.clone();
4082 0 :
4083 0 : let mut schedule_context = ScheduleContext::default();
4084 0 :
4085 0 : let mut reconciles_spawned = 0;
4086 0 : for (tenant_shard_id, shard) in tenants.iter_mut() {
4087 0 : if tenant_shard_id.is_shard_zero() {
4088 0 : schedule_context = ScheduleContext::default();
4089 0 : }
4090 :
4091 : // Eventual consistency: if an earlier reconcile job failed, and the shard is still
4092 : // dirty, spawn another rone
4093 0 : if self.maybe_reconcile_shard(shard, &pageservers).is_some() {
4094 0 : reconciles_spawned += 1;
4095 0 : }
4096 :
4097 0 : schedule_context.avoid(&shard.intent.all_pageservers());
4098 : }
4099 :
4100 0 : reconciles_spawned
4101 0 : }
4102 :
4103 : /// `optimize` in this context means identifying shards which have valid scheduled locations, but
4104 : /// could be scheduled somewhere better:
4105 : /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
4106 : /// * e.g. after a node fails then recovers, to move some work back to it
4107 : /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
4108 : /// * e.g. after a shard split, the initial attached locations will all be on the node where
4109 : /// we did the split, but are probably better placed elsewhere.
4110 : /// - Creating new secondary locations if it improves the spreading of a sharded tenant
4111 : /// * e.g. after a shard split, some locations will be on the same node (where the split
4112 : /// happened), and will probably be better placed elsewhere.
4113 : ///
4114 : /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
4115 : /// the time of scheduling, this function looks for cases where a better-scoring location is available
4116 : /// according to those same soft constraints.
4117 0 : fn optimize_all(&self) -> usize {
4118 0 : let mut locked = self.inner.write().unwrap();
4119 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4120 0 : let pageservers = nodes.clone();
4121 0 :
4122 0 : let mut schedule_context = ScheduleContext::default();
4123 0 :
4124 0 : let mut reconciles_spawned = 0;
4125 0 :
4126 0 : let mut tenant_shards: Vec<&TenantShard> = Vec::new();
4127 0 :
4128 0 : // Limit on how many shards' optmizations each call to this function will execute. Combined
4129 0 : // with the frequency of background calls, this acts as an implicit rate limit that runs a small
4130 0 : // trickle of optimizations in the background, rather than executing a large number in parallel
4131 0 : // when a change occurs.
4132 0 : const MAX_OPTIMIZATIONS_PER_PASS: usize = 2;
4133 0 :
4134 0 : let mut work = Vec::new();
4135 :
4136 0 : for (tenant_shard_id, shard) in tenants.iter() {
4137 0 : if tenant_shard_id.is_shard_zero() {
4138 0 : // Reset accumulators on the first shard in a tenant
4139 0 : schedule_context = ScheduleContext::default();
4140 0 : schedule_context.mode = ScheduleMode::Speculative;
4141 0 : tenant_shards.clear();
4142 0 : }
4143 :
4144 0 : if work.len() >= MAX_OPTIMIZATIONS_PER_PASS {
4145 0 : break;
4146 0 : }
4147 0 :
4148 0 : match shard.get_scheduling_policy() {
4149 0 : ShardSchedulingPolicy::Active => {
4150 0 : // Ok to do optimization
4151 0 : }
4152 : ShardSchedulingPolicy::Essential
4153 : | ShardSchedulingPolicy::Pause
4154 : | ShardSchedulingPolicy::Stop => {
4155 : // Policy prevents optimizing this shard.
4156 0 : continue;
4157 : }
4158 : }
4159 :
4160 : // Accumulate the schedule context for all the shards in a tenant: we must have
4161 : // the total view of all shards before we can try to optimize any of them.
4162 0 : schedule_context.avoid(&shard.intent.all_pageservers());
4163 0 : if let Some(attached) = shard.intent.get_attached() {
4164 0 : schedule_context.push_attached(*attached);
4165 0 : }
4166 0 : tenant_shards.push(shard);
4167 0 :
4168 0 : // Once we have seen the last shard in the tenant, proceed to search across all shards
4169 0 : // in the tenant for optimizations
4170 0 : if shard.shard.number.0 == shard.shard.count.count() - 1 {
4171 0 : if tenant_shards.iter().any(|s| s.reconciler.is_some()) {
4172 : // Do not start any optimizations while another change to the tenant is ongoing: this
4173 : // is not necessary for correctness, but simplifies operations and implicitly throttles
4174 : // optimization changes to happen in a "trickle" over time.
4175 0 : continue;
4176 0 : }
4177 0 :
4178 0 : if tenant_shards.iter().any(|s| {
4179 0 : !matches!(s.splitting, SplitState::Idle)
4180 0 : || matches!(s.policy, PlacementPolicy::Detached)
4181 0 : }) {
4182 : // Never attempt to optimize a tenant that is currently being split, or
4183 : // a tenant that is meant to be detached
4184 0 : continue;
4185 0 : }
4186 :
4187 : // TODO: optimization calculations are relatively expensive: create some fast-path for
4188 : // the common idle case (avoiding the search on tenants that we have recently checked)
4189 :
4190 0 : for shard in &tenant_shards {
4191 0 : if let Some(optimization) =
4192 : // If idle, maybe ptimize attachments: if a shard has a secondary location that is preferable to
4193 : // its primary location based on soft constraints, cut it over.
4194 0 : shard.optimize_attachment(nodes, &schedule_context)
4195 : {
4196 0 : work.push((shard.tenant_shard_id, optimization));
4197 0 : break;
4198 0 : } else if let Some(optimization) =
4199 : // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
4200 : // better placed on another node, based on ScheduleContext, then adjust it. This
4201 : // covers cases like after a shard split, where we might have too many shards
4202 : // in the same tenant with secondary locations on the node where they originally split.
4203 0 : shard.optimize_secondary(scheduler, &schedule_context)
4204 : {
4205 0 : work.push((shard.tenant_shard_id, optimization));
4206 0 : break;
4207 0 : }
4208 :
4209 : // TODO: extend this mechanism to prefer attaching on nodes with fewer attached
4210 : // tenants (i.e. extend schedule state to distinguish attached from secondary counts),
4211 : // for the total number of attachments on a node (not just within a tenant.)
4212 : }
4213 0 : }
4214 : }
4215 :
4216 0 : for (tenant_shard_id, optimization) in work {
4217 0 : let shard = tenants
4218 0 : .get_mut(&tenant_shard_id)
4219 0 : .expect("We held lock from place we got this ID");
4220 0 : shard.apply_optimization(scheduler, optimization);
4221 0 :
4222 0 : if self.maybe_reconcile_shard(shard, &pageservers).is_some() {
4223 0 : reconciles_spawned += 1;
4224 0 : }
4225 : }
4226 :
4227 0 : reconciles_spawned
4228 0 : }
4229 :
4230 : /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
4231 : /// also wait for any generated Reconcilers to complete. Calling this until it returns zero should
4232 : /// put the system into a quiescent state where future background reconciliations won't do anything.
4233 0 : pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
4234 0 : let reconciles_spawned = self.reconcile_all();
4235 0 : if reconciles_spawned == 0 {
4236 0 : // Only optimize when we are otherwise idle
4237 0 : self.optimize_all();
4238 0 : }
4239 :
4240 0 : let waiters = {
4241 0 : let mut waiters = Vec::new();
4242 0 : let locked = self.inner.read().unwrap();
4243 0 : for (_tenant_shard_id, shard) in locked.tenants.iter() {
4244 0 : if let Some(waiter) = shard.get_waiter() {
4245 0 : waiters.push(waiter);
4246 0 : }
4247 : }
4248 0 : waiters
4249 0 : };
4250 0 :
4251 0 : let waiter_count = waiters.len();
4252 0 : self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
4253 0 : Ok(waiter_count)
4254 0 : }
4255 :
4256 0 : pub async fn shutdown(&self) {
4257 0 : // Note that this already stops processing any results from reconciles: so
4258 0 : // we do not expect that our [`TenantShard`] objects will reach a neat
4259 0 : // final state.
4260 0 : self.cancel.cancel();
4261 0 :
4262 0 : // The cancellation tokens in [`crate::reconciler::Reconciler`] are children
4263 0 : // of our cancellation token, so we do not need to explicitly cancel each of
4264 0 : // them.
4265 0 :
4266 0 : // Background tasks and reconcilers hold gate guards: this waits for them all
4267 0 : // to complete.
4268 0 : self.gate.close().await;
4269 0 : }
4270 : }
|