Line data Source code
1 : use std::{
2 : borrow::Cow,
3 : cmp::Ordering,
4 : collections::{BTreeMap, HashMap, HashSet},
5 : path::PathBuf,
6 : str::FromStr,
7 : sync::Arc,
8 : time::{Duration, Instant},
9 : };
10 :
11 : use crate::{
12 : background_node_operations::{
13 : Drain, Fill, Operation, OperationError, OperationHandler, MAX_RECONCILES_PER_OPERATION,
14 : },
15 : compute_hook::NotifyError,
16 : id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, TracingExclusiveGuard},
17 : persistence::{AbortShardSplitStatus, TenantFilter},
18 : reconciler::{ReconcileError, ReconcileUnits},
19 : scheduler::{MaySchedule, ScheduleContext, ScheduleMode},
20 : tenant_shard::{
21 : MigrateAttachment, ReconcileNeeded, ReconcilerStatus, ScheduleOptimization,
22 : ScheduleOptimizationAction,
23 : },
24 : };
25 : use anyhow::Context;
26 : use control_plane::storage_controller::{
27 : AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
28 : };
29 : use diesel::result::DatabaseErrorKind;
30 : use futures::{stream::FuturesUnordered, StreamExt};
31 : use itertools::Itertools;
32 : use pageserver_api::{
33 : controller_api::{
34 : NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy,
35 : ShardSchedulingPolicy, TenantCreateResponse, TenantCreateResponseShard,
36 : TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse,
37 : TenantPolicyRequest, TenantShardMigrateRequest, TenantShardMigrateResponse,
38 : UtilizationScore,
39 : },
40 : models::{SecondaryProgress, TenantConfigRequest, TopTenantShardsRequest},
41 : };
42 : use reqwest::StatusCode;
43 : use tracing::{instrument, Instrument};
44 :
45 : use crate::pageserver_client::PageserverClient;
46 : use pageserver_api::{
47 : models::{
48 : self, LocationConfig, LocationConfigListResponse, LocationConfigMode,
49 : PageserverUtilization, ShardParameters, TenantConfig, TenantCreateRequest,
50 : TenantLocationConfigRequest, TenantLocationConfigResponse, TenantShardLocation,
51 : TenantShardSplitRequest, TenantShardSplitResponse, TenantTimeTravelRequest,
52 : TimelineCreateRequest, TimelineInfo,
53 : },
54 : shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId},
55 : upcall_api::{
56 : ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest,
57 : ValidateResponse, ValidateResponseTenant,
58 : },
59 : };
60 : use pageserver_client::mgmt_api;
61 : use tokio::sync::mpsc::error::TrySendError;
62 : use tokio_util::sync::CancellationToken;
63 : use utils::{
64 : completion::Barrier,
65 : failpoint_support,
66 : generation::Generation,
67 : http::error::ApiError,
68 : id::{NodeId, TenantId, TimelineId},
69 : sync::gate::Gate,
70 : };
71 :
72 : use crate::{
73 : compute_hook::ComputeHook,
74 : heartbeater::{Heartbeater, PageserverState},
75 : node::{AvailabilityTransition, Node},
76 : persistence::{split_state::SplitState, DatabaseError, Persistence, TenantShardPersistence},
77 : reconciler::attached_location_conf,
78 : scheduler::Scheduler,
79 : tenant_shard::{
80 : IntentState, ObservedState, ObservedStateLocation, ReconcileResult, ReconcileWaitError,
81 : ReconcilerWaiter, TenantShard,
82 : },
83 : };
84 :
85 : // For operations that should be quick, like attaching a new tenant
86 : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
87 :
88 : // For operations that might be slow, like migrating a tenant with
89 : // some data in it.
90 : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
91 :
92 : // If we receive a call using Secondary mode initially, it will omit generation. We will initialize
93 : // tenant shards into this generation, and as long as it remains in this generation, we will accept
94 : // input generation from future requests as authoritative.
95 : const INITIAL_GENERATION: Generation = Generation::new(0);
96 :
97 : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
98 : /// up on unresponsive pageservers and proceed.
99 : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
100 :
101 : /// How long a node may be unresponsive to heartbeats before we declare it offline.
102 : /// This must be long enough to cover node restarts as well as normal operations: in future
103 : /// it should be separated into distinct timeouts for startup vs. normal operation
104 : /// (`<https://github.com/neondatabase/neon/issues/7552>`)
105 : pub const MAX_UNAVAILABLE_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
106 :
107 0 : #[derive(Clone, strum_macros::Display)]
108 : enum TenantOperations {
109 : Create,
110 : LocationConfig,
111 : ConfigSet,
112 : TimeTravelRemoteStorage,
113 : Delete,
114 : UpdatePolicy,
115 : ShardSplit,
116 : SecondaryDownload,
117 : TimelineCreate,
118 : TimelineDelete,
119 : }
120 :
121 0 : #[derive(Clone, strum_macros::Display)]
122 : enum NodeOperations {
123 : Register,
124 : Configure,
125 : }
126 :
127 : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
128 :
129 : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
130 : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
131 : // than they're being pushed onto the queue.
132 : const MAX_DELAYED_RECONCILES: usize = 10000;
133 :
134 : // Top level state available to all HTTP handlers
135 : struct ServiceState {
136 : tenants: BTreeMap<TenantShardId, TenantShard>,
137 :
138 : nodes: Arc<HashMap<NodeId, Node>>,
139 :
140 : scheduler: Scheduler,
141 :
142 : /// Ongoing background operation on the cluster if any is running.
143 : /// Note that only one such operation may run at any given time,
144 : /// hence the type choice.
145 : ongoing_operation: Option<OperationHandler>,
146 :
147 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
148 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
149 : }
150 :
151 : /// Transform an error from a pageserver into an error to return to callers of a storage
152 : /// controller API.
153 0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
154 0 : match e {
155 0 : mgmt_api::Error::ReceiveErrorBody(str) => {
156 0 : // Presume errors receiving body are connectivity/availability issues
157 0 : ApiError::ResourceUnavailable(
158 0 : format!("{node} error receiving error body: {str}").into(),
159 0 : )
160 : }
161 0 : mgmt_api::Error::ReceiveBody(str) => {
162 0 : // Presume errors receiving body are connectivity/availability issues
163 0 : ApiError::ResourceUnavailable(format!("{node} error receiving body: {str}").into())
164 : }
165 0 : mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
166 0 : ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
167 : }
168 0 : mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
169 0 : ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
170 : }
171 0 : mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
172 0 : | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
173 : // Auth errors talking to a pageserver are not auth errors for the caller: they are
174 : // internal server errors, showing that something is wrong with the pageserver or
175 : // storage controller's auth configuration.
176 0 : ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
177 : }
178 0 : mgmt_api::Error::ApiError(status, msg) => {
179 0 : // Presume general case of pageserver API errors is that we tried to do something
180 0 : // that can't be done right now.
181 0 : ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
182 : }
183 0 : mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
184 : }
185 0 : }
186 :
187 : impl ServiceState {
188 0 : fn new(
189 0 : nodes: HashMap<NodeId, Node>,
190 0 : tenants: BTreeMap<TenantShardId, TenantShard>,
191 0 : scheduler: Scheduler,
192 0 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
193 0 : ) -> Self {
194 0 : Self {
195 0 : tenants,
196 0 : nodes: Arc::new(nodes),
197 0 : scheduler,
198 0 : ongoing_operation: None,
199 0 : delayed_reconcile_rx,
200 0 : }
201 0 : }
202 :
203 0 : fn parts_mut(
204 0 : &mut self,
205 0 : ) -> (
206 0 : &mut Arc<HashMap<NodeId, Node>>,
207 0 : &mut BTreeMap<TenantShardId, TenantShard>,
208 0 : &mut Scheduler,
209 0 : ) {
210 0 : (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
211 0 : }
212 : }
213 :
214 : #[derive(Clone)]
215 : pub struct Config {
216 : // All pageservers managed by one instance of this service must have
217 : // the same public key. This JWT token will be used to authenticate
218 : // this service to the pageservers it manages.
219 : pub jwt_token: Option<String>,
220 :
221 : // This JWT token will be used to authenticate this service to the control plane.
222 : pub control_plane_jwt_token: Option<String>,
223 :
224 : /// Where the compute hook should send notifications of pageserver attachment locations
225 : /// (this URL points to the control plane in prod). If this is None, the compute hook will
226 : /// assume it is running in a test environment and try to update neon_local.
227 : pub compute_hook_url: Option<String>,
228 :
229 : /// Grace period within which a pageserver does not respond to heartbeats, but is still
230 : /// considered active. Once the grace period elapses, the next heartbeat failure will
231 : /// mark the pagseserver offline.
232 : pub max_unavailable_interval: Duration,
233 :
234 : /// How many Reconcilers may be spawned concurrently
235 : pub reconciler_concurrency: usize,
236 :
237 : /// How large must a shard grow in bytes before we split it?
238 : /// None disables auto-splitting.
239 : pub split_threshold: Option<u64>,
240 :
241 : // TODO: make this cfg(feature = "testing")
242 : pub neon_local_repo_dir: Option<PathBuf>,
243 : }
244 :
245 : impl From<DatabaseError> for ApiError {
246 0 : fn from(err: DatabaseError) -> ApiError {
247 0 : match err {
248 0 : DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
249 : // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
250 : DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
251 0 : ApiError::ShuttingDown
252 : }
253 0 : DatabaseError::Logical(reason) => {
254 0 : ApiError::InternalServerError(anyhow::anyhow!(reason))
255 : }
256 : }
257 0 : }
258 : }
259 :
260 : pub struct Service {
261 : inner: Arc<std::sync::RwLock<ServiceState>>,
262 : config: Config,
263 : persistence: Arc<Persistence>,
264 : compute_hook: Arc<ComputeHook>,
265 : result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResult>,
266 :
267 : heartbeater: Heartbeater,
268 :
269 : // Channel for background cleanup from failed operations that require cleanup, such as shard split
270 : abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
271 :
272 : // Locking on a tenant granularity (covers all shards in the tenant):
273 : // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
274 : // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
275 : tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
276 :
277 : // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
278 : // that transition it to/from Active.
279 : node_op_locks: IdLockMap<NodeId, NodeOperations>,
280 :
281 : // Limit how many Reconcilers we will spawn concurrently
282 : reconciler_concurrency: Arc<tokio::sync::Semaphore>,
283 :
284 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
285 : /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
286 : ///
287 : /// Note that this state logically lives inside ServiceInner, but carrying Sender here makes the code simpler
288 : /// by avoiding needing a &mut ref to something inside the ServiceInner. This could be optimized to
289 : /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
290 : delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
291 :
292 : // Process shutdown will fire this token
293 : cancel: CancellationToken,
294 :
295 : // Background tasks will hold this gate
296 : gate: Gate,
297 :
298 : /// This waits for initial reconciliation with pageservers to complete. Until this barrier
299 : /// passes, it isn't safe to do any actions that mutate tenants.
300 : pub(crate) startup_complete: Barrier,
301 : }
302 :
303 : impl From<ReconcileWaitError> for ApiError {
304 0 : fn from(value: ReconcileWaitError) -> Self {
305 0 : match value {
306 0 : ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
307 0 : e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
308 0 : e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
309 : }
310 0 : }
311 : }
312 :
313 : impl From<OperationError> for ApiError {
314 0 : fn from(value: OperationError) -> Self {
315 0 : match value {
316 0 : OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => {
317 0 : ApiError::InternalServerError(anyhow::anyhow!(err))
318 : }
319 0 : OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
320 : }
321 0 : }
322 : }
323 :
324 : #[allow(clippy::large_enum_variant)]
325 : enum TenantCreateOrUpdate {
326 : Create(TenantCreateRequest),
327 : Update(Vec<ShardUpdate>),
328 : }
329 :
330 : struct ShardSplitParams {
331 : old_shard_count: ShardCount,
332 : new_shard_count: ShardCount,
333 : new_stripe_size: Option<ShardStripeSize>,
334 : targets: Vec<ShardSplitTarget>,
335 : policy: PlacementPolicy,
336 : config: TenantConfig,
337 : shard_ident: ShardIdentity,
338 : }
339 :
340 : // When preparing for a shard split, we may either choose to proceed with the split,
341 : // or find that the work is already done and return NoOp.
342 : enum ShardSplitAction {
343 : Split(ShardSplitParams),
344 : NoOp(TenantShardSplitResponse),
345 : }
346 :
347 : // A parent shard which will be split
348 : struct ShardSplitTarget {
349 : parent_id: TenantShardId,
350 : node: Node,
351 : child_ids: Vec<TenantShardId>,
352 : }
353 :
354 : /// When we tenant shard split operation fails, we may not be able to clean up immediately, because nodes
355 : /// might not be available. We therefore use a queue of abort operations processed in the background.
356 : struct TenantShardSplitAbort {
357 : tenant_id: TenantId,
358 : /// The target values from the request that failed
359 : new_shard_count: ShardCount,
360 : new_stripe_size: Option<ShardStripeSize>,
361 : /// Until this abort op is complete, no other operations may be done on the tenant
362 : _tenant_lock: TracingExclusiveGuard<TenantOperations>,
363 : }
364 :
365 0 : #[derive(thiserror::Error, Debug)]
366 : enum TenantShardSplitAbortError {
367 : #[error(transparent)]
368 : Database(#[from] DatabaseError),
369 : #[error(transparent)]
370 : Remote(#[from] mgmt_api::Error),
371 : #[error("Unavailable")]
372 : Unavailable,
373 : }
374 :
375 : struct ShardUpdate {
376 : tenant_shard_id: TenantShardId,
377 : placement_policy: PlacementPolicy,
378 : tenant_config: TenantConfig,
379 :
380 : /// If this is None, generation is not updated.
381 : generation: Option<Generation>,
382 : }
383 :
384 : impl Service {
385 0 : pub fn get_config(&self) -> &Config {
386 0 : &self.config
387 0 : }
388 :
389 : /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
390 : /// view of the world, and determine which pageservers are responsive.
391 0 : #[instrument(skip_all)]
392 : async fn startup_reconcile(
393 : self: &Arc<Service>,
394 : bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
395 : Result<(), (TenantShardId, NotifyError)>,
396 : >,
397 : ) {
398 : // For all tenant shards, a vector of observed states on nodes (where None means
399 : // indeterminate, same as in [`ObservedStateLocation`])
400 : let mut observed: HashMap<TenantShardId, Vec<(NodeId, Option<LocationConfig>)>> =
401 : HashMap::new();
402 :
403 : // Startup reconciliation does I/O to other services: whether they
404 : // are responsive or not, we should aim to finish within our deadline, because:
405 : // - If we don't, a k8s readiness hook watching /ready will kill us.
406 : // - While we're waiting for startup reconciliation, we are not fully
407 : // available for end user operations like creating/deleting tenants and timelines.
408 : //
409 : // We set multiple deadlines to break up the time available between the phases of work: this is
410 : // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
411 : let start_at = Instant::now();
412 : let node_scan_deadline = start_at
413 : .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
414 : .expect("Reconcile timeout is a modest constant");
415 :
416 : // Accumulate a list of any tenant locations that ought to be detached
417 : let mut cleanup = Vec::new();
418 :
419 : let node_listings = self.scan_node_locations(node_scan_deadline).await;
420 : // Send initial heartbeat requests to nodes that replied to the location listing above.
421 : let nodes_online = self.initial_heartbeat_round(node_listings.keys()).await;
422 :
423 : for (node_id, list_response) in node_listings {
424 : let tenant_shards = list_response.tenant_shards;
425 : tracing::info!(
426 : "Received {} shard statuses from pageserver {}, setting it to Active",
427 : tenant_shards.len(),
428 : node_id
429 : );
430 :
431 : for (tenant_shard_id, conf_opt) in tenant_shards {
432 : let shard_observations = observed.entry(tenant_shard_id).or_default();
433 : shard_observations.push((node_id, conf_opt));
434 : }
435 : }
436 :
437 : // List of tenants for which we will attempt to notify compute of their location at startup
438 : let mut compute_notifications = Vec::new();
439 :
440 : // Populate intent and observed states for all tenants, based on reported state on pageservers
441 : tracing::info!("Populating tenant shards' states from initial pageserver scan...");
442 : let shard_count = {
443 : let mut locked = self.inner.write().unwrap();
444 : let (nodes, tenants, scheduler) = locked.parts_mut();
445 :
446 : // Mark nodes online if they responded to us: nodes are offline by default after a restart.
447 : let mut new_nodes = (**nodes).clone();
448 : for (node_id, node) in new_nodes.iter_mut() {
449 : if let Some(utilization) = nodes_online.get(node_id) {
450 : node.set_availability(NodeAvailability::Active(UtilizationScore(
451 : utilization.utilization_score,
452 : )));
453 : scheduler.node_upsert(node);
454 : }
455 : }
456 : *nodes = Arc::new(new_nodes);
457 :
458 : for (tenant_shard_id, shard_observations) in observed {
459 : for (node_id, observed_loc) in shard_observations {
460 : let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
461 : cleanup.push((tenant_shard_id, node_id));
462 : continue;
463 : };
464 : tenant_shard
465 : .observed
466 : .locations
467 : .insert(node_id, ObservedStateLocation { conf: observed_loc });
468 : }
469 : }
470 :
471 : // Populate each tenant's intent state
472 : let mut schedule_context = ScheduleContext::default();
473 : for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
474 : if tenant_shard_id.shard_number == ShardNumber(0) {
475 : // Reset scheduling context each time we advance to the next Tenant
476 : schedule_context = ScheduleContext::default();
477 : }
478 :
479 : tenant_shard.intent_from_observed(scheduler);
480 : if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
481 : // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
482 : // not enough pageservers are available. The tenant may well still be available
483 : // to clients.
484 : tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
485 : } else {
486 : // If we're both intending and observed to be attached at a particular node, we will
487 : // emit a compute notification for this. In the case where our observed state does not
488 : // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
489 : if let Some(attached_at) = tenant_shard.stably_attached() {
490 : compute_notifications.push((
491 : *tenant_shard_id,
492 : attached_at,
493 : tenant_shard.shard.stripe_size,
494 : ));
495 : }
496 : }
497 : }
498 :
499 : tenants.len()
500 : };
501 :
502 : // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
503 : // generation_pageserver in the database.
504 :
505 : // Emit compute hook notifications for all tenants which are already stably attached. Other tenants
506 : // will emit compute hook notifications when they reconcile.
507 : //
508 : // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later
509 : // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
510 : // calls will be correctly ordered wrt these.
511 : //
512 : // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them
513 : // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore
514 : // unit and start doing I/O.
515 : tracing::info!(
516 : "Sending {} compute notifications",
517 : compute_notifications.len()
518 : );
519 : self.compute_hook.notify_background(
520 : compute_notifications,
521 : bg_compute_notify_result_tx.clone(),
522 : &self.cancel,
523 : );
524 :
525 : // Finally, now that the service is up and running, launch reconcile operations for any tenants
526 : // which require it: under normal circumstances this should only include tenants that were in some
527 : // transient state before we restarted, or any tenants whose compute hooks failed above.
528 : tracing::info!("Checking for shards in need of reconciliation...");
529 : let reconcile_tasks = self.reconcile_all();
530 : // We will not wait for these reconciliation tasks to run here: we're now done with startup and
531 : // normal operations may proceed.
532 :
533 : // Clean up any tenants that were found on pageservers but are not known to us. Do this in the
534 : // background because it does not need to complete in order to proceed with other work.
535 : if !cleanup.is_empty() {
536 : tracing::info!("Cleaning up {} locations in the background", cleanup.len());
537 : tokio::task::spawn({
538 : let cleanup_self = self.clone();
539 0 : async move { cleanup_self.cleanup_locations(cleanup).await }
540 : });
541 : }
542 :
543 : tracing::info!("Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)");
544 : }
545 :
546 0 : async fn initial_heartbeat_round<'a>(
547 0 : &self,
548 0 : node_ids: impl Iterator<Item = &'a NodeId>,
549 0 : ) -> HashMap<NodeId, PageserverUtilization> {
550 0 : assert!(!self.startup_complete.is_ready());
551 :
552 0 : let all_nodes = {
553 0 : let locked = self.inner.read().unwrap();
554 0 : locked.nodes.clone()
555 0 : };
556 0 :
557 0 : let mut nodes_to_heartbeat = HashMap::new();
558 0 : for node_id in node_ids {
559 0 : match all_nodes.get(node_id) {
560 0 : Some(node) => {
561 0 : nodes_to_heartbeat.insert(*node_id, node.clone());
562 0 : }
563 : None => {
564 0 : tracing::warn!("Node {node_id} was removed during start-up");
565 : }
566 : }
567 : }
568 :
569 0 : tracing::info!("Sending initial heartbeats...");
570 0 : let res = self
571 0 : .heartbeater
572 0 : .heartbeat(Arc::new(nodes_to_heartbeat))
573 0 : .await;
574 :
575 0 : let mut online_nodes = HashMap::new();
576 0 : if let Ok(deltas) = res {
577 0 : for (node_id, status) in deltas.0 {
578 0 : match status {
579 0 : PageserverState::Available { utilization, .. } => {
580 0 : online_nodes.insert(node_id, utilization);
581 0 : }
582 0 : PageserverState::Offline => {}
583 : }
584 : }
585 0 : }
586 :
587 0 : online_nodes
588 0 : }
589 :
590 : /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
591 : ///
592 : /// The result includes only nodes which responded within the deadline
593 0 : async fn scan_node_locations(
594 0 : &self,
595 0 : deadline: Instant,
596 0 : ) -> HashMap<NodeId, LocationConfigListResponse> {
597 0 : let nodes = {
598 0 : let locked = self.inner.read().unwrap();
599 0 : locked.nodes.clone()
600 0 : };
601 0 :
602 0 : let mut node_results = HashMap::new();
603 0 :
604 0 : let mut node_list_futs = FuturesUnordered::new();
605 0 :
606 0 : tracing::info!("Scanning shards on {} nodes...", nodes.len());
607 0 : for node in nodes.values() {
608 0 : node_list_futs.push({
609 0 : async move {
610 0 : tracing::info!("Scanning shards on node {node}...");
611 0 : let timeout = Duration::from_secs(1);
612 0 : let response = node
613 0 : .with_client_retries(
614 0 : |client| async move { client.list_location_config().await },
615 0 : &self.config.jwt_token,
616 0 : 1,
617 0 : 5,
618 0 : timeout,
619 0 : &self.cancel,
620 0 : )
621 0 : .await;
622 0 : (node.get_id(), response)
623 0 : }
624 0 : });
625 0 : }
626 :
627 : loop {
628 0 : let (node_id, result) = tokio::select! {
629 : next = node_list_futs.next() => {
630 : match next {
631 : Some(result) => result,
632 : None =>{
633 : // We got results for all our nodes
634 : break;
635 : }
636 :
637 : }
638 : },
639 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
640 : // Give up waiting for anyone who hasn't responded: we will yield the results that we have
641 : tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
642 : break;
643 : }
644 : };
645 :
646 0 : let Some(list_response) = result else {
647 0 : tracing::info!("Shutdown during startup_reconcile");
648 0 : break;
649 : };
650 :
651 0 : match list_response {
652 0 : Err(e) => {
653 0 : tracing::warn!("Could not scan node {} ({e})", node_id);
654 : }
655 0 : Ok(listing) => {
656 0 : node_results.insert(node_id, listing);
657 0 : }
658 : }
659 : }
660 :
661 0 : node_results
662 0 : }
663 :
664 : /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
665 : ///
666 : /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
667 : /// tenants, then it is probably something incompletely deleted before: we will not fight with any
668 : /// other task trying to attach it.
669 0 : #[instrument(skip_all)]
670 : async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
671 : let nodes = self.inner.read().unwrap().nodes.clone();
672 :
673 : for (tenant_shard_id, node_id) in cleanup {
674 : // A node reported a tenant_shard_id which is unknown to us: detach it.
675 : let Some(node) = nodes.get(&node_id) else {
676 : // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
677 : // a location to clean up on a node that has since been removed.
678 : tracing::info!(
679 : "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
680 : );
681 : continue;
682 : };
683 :
684 : if self.cancel.is_cancelled() {
685 : break;
686 : }
687 :
688 : let client = PageserverClient::new(
689 : node.get_id(),
690 : node.base_url(),
691 : self.config.jwt_token.as_deref(),
692 : );
693 : match client
694 : .location_config(
695 : tenant_shard_id,
696 : LocationConfig {
697 : mode: LocationConfigMode::Detached,
698 : generation: None,
699 : secondary_conf: None,
700 : shard_number: tenant_shard_id.shard_number.0,
701 : shard_count: tenant_shard_id.shard_count.literal(),
702 : shard_stripe_size: 0,
703 : tenant_conf: models::TenantConfig::default(),
704 : },
705 : None,
706 : false,
707 : )
708 : .await
709 : {
710 : Ok(()) => {
711 : tracing::info!(
712 : "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
713 : );
714 : }
715 : Err(e) => {
716 : // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
717 : // break anything.
718 : tracing::error!(
719 : "Failed to detach unknkown shard {tenant_shard_id} on pageserver {node_id}: {e}"
720 : );
721 : }
722 : }
723 : }
724 : }
725 :
726 : /// Long running background task that periodically wakes up and looks for shards that need
727 : /// reconciliation. Reconciliation is fallible, so any reconciliation tasks that fail during
728 : /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
729 : /// for those retries.
730 0 : #[instrument(skip_all)]
731 : async fn background_reconcile(self: &Arc<Self>) {
732 : self.startup_complete.clone().wait().await;
733 :
734 : const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
735 :
736 : let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
737 : while !self.cancel.is_cancelled() {
738 : tokio::select! {
739 : _ = interval.tick() => {
740 : let reconciles_spawned = self.reconcile_all();
741 : if reconciles_spawned == 0 {
742 : // Run optimizer only when we didn't find any other work to do
743 : let optimizations = self.optimize_all().await;
744 : if optimizations == 0 {
745 : // Run new splits only when no optimizations are pending
746 : self.autosplit_tenants().await;
747 : }
748 : }
749 : }
750 : _ = self.cancel.cancelled() => return
751 : }
752 : }
753 : }
754 0 : #[instrument(skip_all)]
755 : async fn spawn_heartbeat_driver(&self) {
756 : self.startup_complete.clone().wait().await;
757 :
758 : const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);
759 :
760 : let mut interval = tokio::time::interval(HEARTBEAT_INTERVAL);
761 : while !self.cancel.is_cancelled() {
762 : tokio::select! {
763 : _ = interval.tick() => { }
764 : _ = self.cancel.cancelled() => return
765 : };
766 :
767 : let nodes = {
768 : let locked = self.inner.read().unwrap();
769 : locked.nodes.clone()
770 : };
771 :
772 : let res = self.heartbeater.heartbeat(nodes).await;
773 : if let Ok(deltas) = res {
774 : for (node_id, state) in deltas.0 {
775 : let (new_node, new_availability) = match state {
776 : PageserverState::Available {
777 : utilization, new, ..
778 : } => (
779 : new,
780 : NodeAvailability::Active(UtilizationScore(
781 : utilization.utilization_score,
782 : )),
783 : ),
784 : PageserverState::Offline => (false, NodeAvailability::Offline),
785 : };
786 :
787 : if new_node {
788 : // When the heartbeats detect a newly added node, we don't wish
789 : // to attempt to reconcile the shards assigned to it. The node
790 : // is likely handling it's re-attach response, so reconciling now
791 : // would be counterproductive.
792 : //
793 : // Instead, update the in-memory state with the details learned about the
794 : // node.
795 : let mut locked = self.inner.write().unwrap();
796 : let (nodes, _tenants, scheduler) = locked.parts_mut();
797 :
798 : let mut new_nodes = (**nodes).clone();
799 :
800 : if let Some(node) = new_nodes.get_mut(&node_id) {
801 : node.set_availability(new_availability);
802 : scheduler.node_upsert(node);
803 : }
804 :
805 : locked.nodes = Arc::new(new_nodes);
806 : } else {
807 : // This is the code path for geniune availability transitions (i.e node
808 : // goes unavailable and/or comes back online).
809 : let res = self
810 : .node_configure(node_id, Some(new_availability), None)
811 : .await;
812 :
813 : match res {
814 : Ok(()) => {}
815 : Err(ApiError::NotFound(_)) => {
816 : // This should be rare, but legitimate since the heartbeats are done
817 : // on a snapshot of the nodes.
818 : tracing::info!(
819 : "Node {} was not found after heartbeat round",
820 : node_id
821 : );
822 : }
823 : Err(err) => {
824 : tracing::error!(
825 : "Failed to update node {} after heartbeat round: {}",
826 : node_id,
827 : err
828 : );
829 : }
830 : }
831 : }
832 : }
833 : }
834 : }
835 : }
836 :
837 : /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
838 : /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
839 : /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
840 : /// will indicate that reconciliation is not needed.
841 0 : #[instrument(skip_all, fields(
842 : tenant_id=%result.tenant_shard_id.tenant_id, shard_id=%result.tenant_shard_id.shard_slug(),
843 : sequence=%result.sequence
844 0 : ))]
845 : fn process_result(&self, result: ReconcileResult) {
846 : let mut locked = self.inner.write().unwrap();
847 : let Some(tenant) = locked.tenants.get_mut(&result.tenant_shard_id) else {
848 : // A reconciliation result might race with removing a tenant: drop results for
849 : // tenants that aren't in our map.
850 : return;
851 : };
852 :
853 : // Usually generation should only be updated via this path, so the max() isn't
854 : // needed, but it is used to handle out-of-band updates via. e.g. test hook.
855 : tenant.generation = std::cmp::max(tenant.generation, result.generation);
856 :
857 : // If the reconciler signals that it failed to notify compute, set this state on
858 : // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
859 : tenant.pending_compute_notification = result.pending_compute_notification;
860 :
861 : // Let the TenantShard know it is idle.
862 : tenant.reconcile_complete(result.sequence);
863 :
864 : match result.result {
865 : Ok(()) => {
866 : for (node_id, loc) in &result.observed.locations {
867 : if let Some(conf) = &loc.conf {
868 : tracing::info!("Updating observed location {}: {:?}", node_id, conf);
869 : } else {
870 : tracing::info!("Setting observed location {} to None", node_id,)
871 : }
872 : }
873 : tenant.observed = result.observed;
874 : tenant.waiter.advance(result.sequence);
875 : }
876 : Err(e) => {
877 : match e {
878 : ReconcileError::Cancel => {
879 : tracing::info!("Reconciler was cancelled");
880 : }
881 : ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
882 : // This might be due to the reconciler getting cancelled, or it might
883 : // be due to the `Node` being marked offline.
884 : tracing::info!("Reconciler cancelled during pageserver API call");
885 : }
886 : _ => {
887 : tracing::warn!("Reconcile error: {}", e);
888 : }
889 : }
890 :
891 : // Ordering: populate last_error before advancing error_seq,
892 : // so that waiters will see the correct error after waiting.
893 : tenant.set_last_error(result.sequence, e);
894 :
895 : for (node_id, o) in result.observed.locations {
896 : tenant.observed.locations.insert(node_id, o);
897 : }
898 : }
899 : }
900 :
901 : // Maybe some other work can proceed now that this job finished.
902 : if self.reconciler_concurrency.available_permits() > 0 {
903 : while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
904 : let (nodes, tenants, _scheduler) = locked.parts_mut();
905 : if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
906 : shard.delayed_reconcile = false;
907 : self.maybe_reconcile_shard(shard, nodes);
908 : }
909 :
910 : if self.reconciler_concurrency.available_permits() == 0 {
911 : break;
912 : }
913 : }
914 : }
915 : }
916 :
917 0 : async fn process_results(
918 0 : &self,
919 0 : mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResult>,
920 0 : mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
921 0 : Result<(), (TenantShardId, NotifyError)>,
922 0 : >,
923 0 : ) {
924 0 : loop {
925 0 : // Wait for the next result, or for cancellation
926 0 : tokio::select! {
927 : r = result_rx.recv() => {
928 : match r {
929 : Some(result) => {self.process_result(result);},
930 : None => {break;}
931 : }
932 : }
933 0 : _ = async{
934 0 : match bg_compute_hook_result_rx.recv().await {
935 0 : Some(result) => {
936 0 : if let Err((tenant_shard_id, notify_error)) = result {
937 0 : tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
938 0 : let mut locked = self.inner.write().unwrap();
939 0 : if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
940 0 : shard.pending_compute_notification = true;
941 0 : }
942 :
943 0 : }
944 : },
945 : None => {
946 : // This channel is dead, but we don't want to terminate the outer loop{}: just wait for shutdown
947 0 : self.cancel.cancelled().await;
948 : }
949 : }
950 0 : } => {},
951 : _ = self.cancel.cancelled() => {
952 : break;
953 : }
954 0 : };
955 0 : }
956 :
957 : // We should only fall through on shutdown
958 0 : assert!(self.cancel.is_cancelled());
959 0 : }
960 :
961 0 : async fn process_aborts(
962 0 : &self,
963 0 : mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
964 0 : ) {
965 : loop {
966 : // Wait for the next result, or for cancellation
967 0 : let op = tokio::select! {
968 : r = abort_rx.recv() => {
969 : match r {
970 : Some(op) => {op},
971 : None => {break;}
972 : }
973 : }
974 : _ = self.cancel.cancelled() => {
975 : break;
976 : }
977 : };
978 :
979 : // Retry until shutdown: we must keep this request object alive until it is properly
980 : // processed, as it holds a lock guard that prevents other operations trying to do things
981 : // to the tenant while it is in a weird part-split state.
982 0 : while !self.cancel.is_cancelled() {
983 0 : match self.abort_tenant_shard_split(&op).await {
984 0 : Ok(_) => break,
985 0 : Err(e) => {
986 0 : tracing::warn!(
987 0 : "Failed to abort shard split on {}, will retry: {e}",
988 : op.tenant_id
989 : );
990 :
991 : // If a node is unavailable, we hope that it has been properly marked Offline
992 : // when we retry, so that the abort op will succeed. If the abort op is failing
993 : // for some other reason, we will keep retrying forever, or until a human notices
994 : // and does something about it (either fixing a pageserver or restarting the controller).
995 0 : tokio::time::timeout(Duration::from_secs(5), self.cancel.cancelled())
996 0 : .await
997 0 : .ok();
998 : }
999 : }
1000 : }
1001 : }
1002 0 : }
1003 :
1004 0 : pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
1005 0 : let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
1006 0 : let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
1007 0 :
1008 0 : tracing::info!("Loading nodes from database...");
1009 0 : let nodes = persistence
1010 0 : .list_nodes()
1011 0 : .await?
1012 0 : .into_iter()
1013 0 : .map(Node::from_persistent)
1014 0 : .collect::<Vec<_>>();
1015 0 : let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
1016 0 : tracing::info!("Loaded {} nodes from database.", nodes.len());
1017 :
1018 0 : tracing::info!("Loading shards from database...");
1019 0 : let mut tenant_shard_persistence = persistence.list_tenant_shards().await?;
1020 0 : tracing::info!(
1021 0 : "Loaded {} shards from database.",
1022 0 : tenant_shard_persistence.len()
1023 : );
1024 :
1025 : // If any shard splits were in progress, reset the database state to abort them
1026 0 : let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
1027 0 : HashMap::new();
1028 0 : for tsp in &mut tenant_shard_persistence {
1029 0 : let shard = tsp.get_shard_identity()?;
1030 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
1031 0 : let entry = tenant_shard_count_min_max
1032 0 : .entry(tenant_shard_id.tenant_id)
1033 0 : .or_insert_with(|| (shard.count, shard.count));
1034 0 : entry.0 = std::cmp::min(entry.0, shard.count);
1035 0 : entry.1 = std::cmp::max(entry.1, shard.count);
1036 : }
1037 :
1038 0 : for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
1039 0 : if count_min != count_max {
1040 : // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
1041 : // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
1042 : // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
1043 0 : tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
1044 0 : let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
1045 :
1046 : // We may never see the Complete status here: if the split was complete, we wouldn't have
1047 : // identified this tenant has having mismatching min/max counts.
1048 0 : assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
1049 :
1050 : // Clear the splitting status in-memory, to reflect that we just aborted in the database
1051 0 : tenant_shard_persistence.iter_mut().for_each(|tsp| {
1052 0 : // Set idle split state on those shards that we will retain.
1053 0 : let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
1054 0 : if tsp_tenant_id == tenant_id
1055 0 : && tsp.get_shard_identity().unwrap().count == count_min
1056 0 : {
1057 0 : tsp.splitting = SplitState::Idle;
1058 0 : } else if tsp_tenant_id == tenant_id {
1059 : // Leave the splitting state on the child shards: this will be used next to
1060 : // drop them.
1061 0 : tracing::info!(
1062 0 : "Shard {tsp_tenant_id} will be dropped after shard split abort",
1063 : );
1064 0 : }
1065 0 : });
1066 0 :
1067 0 : // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
1068 0 : tenant_shard_persistence.retain(|tsp| {
1069 0 : TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
1070 0 : || tsp.splitting == SplitState::Idle
1071 0 : });
1072 0 : }
1073 : }
1074 :
1075 0 : let mut tenants = BTreeMap::new();
1076 0 :
1077 0 : let mut scheduler = Scheduler::new(nodes.values());
1078 0 :
1079 0 : #[cfg(feature = "testing")]
1080 0 : {
1081 0 : // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
1082 0 : // tests only store the shards, not the nodes. The nodes will be loaded shortly
1083 0 : // after when pageservers start up and register.
1084 0 : let mut node_ids = HashSet::new();
1085 0 : for tsp in &tenant_shard_persistence {
1086 0 : if let Some(node_id) = tsp.generation_pageserver {
1087 0 : node_ids.insert(node_id);
1088 0 : }
1089 : }
1090 0 : for node_id in node_ids {
1091 0 : tracing::info!("Creating node {} in scheduler for tests", node_id);
1092 0 : let node = Node::new(
1093 0 : NodeId(node_id as u64),
1094 0 : "".to_string(),
1095 0 : 123,
1096 0 : "".to_string(),
1097 0 : 123,
1098 0 : );
1099 0 :
1100 0 : scheduler.node_upsert(&node);
1101 : }
1102 : }
1103 0 : for tsp in tenant_shard_persistence {
1104 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
1105 :
1106 : // We will populate intent properly later in [`Self::startup_reconcile`], initially populate
1107 : // it with what we can infer: the node for which a generation was most recently issued.
1108 0 : let mut intent = IntentState::new();
1109 0 : if let Some(generation_pageserver) = tsp.generation_pageserver {
1110 0 : intent.set_attached(&mut scheduler, Some(NodeId(generation_pageserver as u64)));
1111 0 : }
1112 0 : let new_tenant = TenantShard::from_persistent(tsp, intent)?;
1113 :
1114 0 : tenants.insert(tenant_shard_id, new_tenant);
1115 : }
1116 :
1117 0 : let (startup_completion, startup_complete) = utils::completion::channel();
1118 0 :
1119 0 : // This channel is continuously consumed by process_results, so doesn't need to be very large.
1120 0 : let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
1121 0 : tokio::sync::mpsc::channel(512);
1122 0 :
1123 0 : let (delayed_reconcile_tx, delayed_reconcile_rx) =
1124 0 : tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
1125 0 :
1126 0 : let cancel = CancellationToken::new();
1127 0 : let heartbeater = Heartbeater::new(
1128 0 : config.jwt_token.clone(),
1129 0 : config.max_unavailable_interval,
1130 0 : cancel.clone(),
1131 0 : );
1132 0 : let this = Arc::new(Self {
1133 0 : inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
1134 0 : nodes,
1135 0 : tenants,
1136 0 : scheduler,
1137 0 : delayed_reconcile_rx,
1138 0 : ))),
1139 0 : config: config.clone(),
1140 0 : persistence,
1141 0 : compute_hook: Arc::new(ComputeHook::new(config.clone())),
1142 0 : result_tx,
1143 0 : heartbeater,
1144 0 : reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
1145 0 : config.reconciler_concurrency,
1146 0 : )),
1147 0 : delayed_reconcile_tx,
1148 0 : abort_tx,
1149 0 : startup_complete: startup_complete.clone(),
1150 0 : cancel,
1151 0 : gate: Gate::default(),
1152 0 : tenant_op_locks: Default::default(),
1153 0 : node_op_locks: Default::default(),
1154 0 : });
1155 0 :
1156 0 : let result_task_this = this.clone();
1157 0 : tokio::task::spawn(async move {
1158 : // Block shutdown until we're done (we must respect self.cancel)
1159 0 : if let Ok(_gate) = result_task_this.gate.enter() {
1160 0 : result_task_this
1161 0 : .process_results(result_rx, bg_compute_notify_result_rx)
1162 0 : .await
1163 0 : }
1164 0 : });
1165 0 :
1166 0 : tokio::task::spawn({
1167 0 : let this = this.clone();
1168 0 : async move {
1169 : // Block shutdown until we're done (we must respect self.cancel)
1170 0 : if let Ok(_gate) = this.gate.enter() {
1171 0 : this.process_aborts(abort_rx).await
1172 0 : }
1173 0 : }
1174 0 : });
1175 0 :
1176 0 : tokio::task::spawn({
1177 0 : let this = this.clone();
1178 0 : async move {
1179 0 : if let Ok(_gate) = this.gate.enter() {
1180 0 : loop {
1181 0 : tokio::select! {
1182 : _ = this.cancel.cancelled() => {
1183 : break;
1184 : },
1185 : _ = tokio::time::sleep(Duration::from_secs(60)) => {}
1186 0 : };
1187 0 : this.tenant_op_locks.housekeeping();
1188 0 : }
1189 0 : }
1190 0 : }
1191 0 : });
1192 0 :
1193 0 : tokio::task::spawn({
1194 0 : let this = this.clone();
1195 0 : // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
1196 0 : // is done.
1197 0 : let startup_completion = startup_completion.clone();
1198 0 : async move {
1199 : // Block shutdown until we're done (we must respect self.cancel)
1200 0 : let Ok(_gate) = this.gate.enter() else {
1201 0 : return;
1202 : };
1203 :
1204 0 : this.startup_reconcile(bg_compute_notify_result_tx).await;
1205 0 : drop(startup_completion);
1206 0 : }
1207 0 : });
1208 0 :
1209 0 : tokio::task::spawn({
1210 0 : let this = this.clone();
1211 0 : let startup_complete = startup_complete.clone();
1212 0 : async move {
1213 0 : startup_complete.wait().await;
1214 0 : this.background_reconcile().await;
1215 0 : }
1216 0 : });
1217 0 :
1218 0 : tokio::task::spawn({
1219 0 : let this = this.clone();
1220 0 : let startup_complete = startup_complete.clone();
1221 0 : async move {
1222 0 : startup_complete.wait().await;
1223 0 : this.spawn_heartbeat_driver().await;
1224 0 : }
1225 0 : });
1226 0 :
1227 0 : Ok(this)
1228 0 : }
1229 :
1230 0 : pub(crate) async fn attach_hook(
1231 0 : &self,
1232 0 : attach_req: AttachHookRequest,
1233 0 : ) -> anyhow::Result<AttachHookResponse> {
1234 0 : // This is a test hook. To enable using it on tenants that were created directly with
1235 0 : // the pageserver API (not via this service), we will auto-create any missing tenant
1236 0 : // shards with default state.
1237 0 : let insert = {
1238 0 : let locked = self.inner.write().unwrap();
1239 0 : !locked.tenants.contains_key(&attach_req.tenant_shard_id)
1240 0 : };
1241 0 :
1242 0 : if insert {
1243 0 : let tsp = TenantShardPersistence {
1244 0 : tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
1245 0 : shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
1246 0 : shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
1247 0 : shard_stripe_size: 0,
1248 0 : generation: attach_req.generation_override.or(Some(0)),
1249 0 : generation_pageserver: None,
1250 0 : placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
1251 0 : config: serde_json::to_string(&TenantConfig::default()).unwrap(),
1252 0 : splitting: SplitState::default(),
1253 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1254 0 : .unwrap(),
1255 0 : };
1256 0 :
1257 0 : match self.persistence.insert_tenant_shards(vec![tsp]).await {
1258 0 : Err(e) => match e {
1259 : DatabaseError::Query(diesel::result::Error::DatabaseError(
1260 : DatabaseErrorKind::UniqueViolation,
1261 : _,
1262 : )) => {
1263 0 : tracing::info!(
1264 0 : "Raced with another request to insert tenant {}",
1265 : attach_req.tenant_shard_id
1266 : )
1267 : }
1268 0 : _ => return Err(e.into()),
1269 : },
1270 : Ok(()) => {
1271 0 : tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
1272 :
1273 0 : let mut locked = self.inner.write().unwrap();
1274 0 : locked.tenants.insert(
1275 0 : attach_req.tenant_shard_id,
1276 0 : TenantShard::new(
1277 0 : attach_req.tenant_shard_id,
1278 0 : ShardIdentity::unsharded(),
1279 0 : PlacementPolicy::Attached(0),
1280 0 : ),
1281 0 : );
1282 0 : tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
1283 : }
1284 : }
1285 0 : }
1286 :
1287 0 : let new_generation = if let Some(req_node_id) = attach_req.node_id {
1288 0 : let maybe_tenant_conf = {
1289 0 : let locked = self.inner.write().unwrap();
1290 0 : locked
1291 0 : .tenants
1292 0 : .get(&attach_req.tenant_shard_id)
1293 0 : .map(|t| t.config.clone())
1294 0 : };
1295 0 :
1296 0 : match maybe_tenant_conf {
1297 0 : Some(conf) => {
1298 0 : let new_generation = self
1299 0 : .persistence
1300 0 : .increment_generation(attach_req.tenant_shard_id, req_node_id)
1301 0 : .await?;
1302 :
1303 : // Persist the placement policy update. This is required
1304 : // when we reattaching a detached tenant.
1305 0 : self.persistence
1306 0 : .update_tenant_shard(
1307 0 : TenantFilter::Shard(attach_req.tenant_shard_id),
1308 0 : Some(PlacementPolicy::Attached(0)),
1309 0 : Some(conf),
1310 0 : None,
1311 0 : None,
1312 0 : )
1313 0 : .await?;
1314 0 : Some(new_generation)
1315 : }
1316 : None => {
1317 0 : anyhow::bail!("Attach hook handling raced with tenant removal")
1318 : }
1319 : }
1320 : } else {
1321 0 : self.persistence.detach(attach_req.tenant_shard_id).await?;
1322 0 : None
1323 : };
1324 :
1325 0 : let mut locked = self.inner.write().unwrap();
1326 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
1327 0 :
1328 0 : let tenant_shard = tenants
1329 0 : .get_mut(&attach_req.tenant_shard_id)
1330 0 : .expect("Checked for existence above");
1331 :
1332 0 : if let Some(new_generation) = new_generation {
1333 0 : tenant_shard.generation = Some(new_generation);
1334 0 : tenant_shard.policy = PlacementPolicy::Attached(0);
1335 0 : } else {
1336 : // This is a detach notification. We must update placement policy to avoid re-attaching
1337 : // during background scheduling/reconciliation, or during storage controller restart.
1338 0 : assert!(attach_req.node_id.is_none());
1339 0 : tenant_shard.policy = PlacementPolicy::Detached;
1340 : }
1341 :
1342 0 : if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
1343 0 : tracing::info!(
1344 : tenant_id = %attach_req.tenant_shard_id,
1345 : ps_id = %attaching_pageserver,
1346 : generation = ?tenant_shard.generation,
1347 0 : "issuing",
1348 : );
1349 0 : } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
1350 0 : tracing::info!(
1351 : tenant_id = %attach_req.tenant_shard_id,
1352 : %ps_id,
1353 : generation = ?tenant_shard.generation,
1354 0 : "dropping",
1355 : );
1356 : } else {
1357 0 : tracing::info!(
1358 : tenant_id = %attach_req.tenant_shard_id,
1359 0 : "no-op: tenant already has no pageserver");
1360 : }
1361 0 : tenant_shard
1362 0 : .intent
1363 0 : .set_attached(scheduler, attach_req.node_id);
1364 0 :
1365 0 : tracing::info!(
1366 0 : "attach_hook: tenant {} set generation {:?}, pageserver {}",
1367 0 : attach_req.tenant_shard_id,
1368 0 : tenant_shard.generation,
1369 0 : // TODO: this is an odd number of 0xf's
1370 0 : attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff))
1371 : );
1372 :
1373 : // Trick the reconciler into not doing anything for this tenant: this helps
1374 : // tests that manually configure a tenant on the pagesrever, and then call this
1375 : // attach hook: they don't want background reconciliation to modify what they
1376 : // did to the pageserver.
1377 : #[cfg(feature = "testing")]
1378 : {
1379 0 : if let Some(node_id) = attach_req.node_id {
1380 0 : tenant_shard.observed.locations = HashMap::from([(
1381 0 : node_id,
1382 0 : ObservedStateLocation {
1383 0 : conf: Some(attached_location_conf(
1384 0 : tenant_shard.generation.unwrap(),
1385 0 : &tenant_shard.shard,
1386 0 : &tenant_shard.config,
1387 0 : false,
1388 0 : )),
1389 0 : },
1390 0 : )]);
1391 0 : } else {
1392 0 : tenant_shard.observed.locations.clear();
1393 0 : }
1394 : }
1395 :
1396 0 : Ok(AttachHookResponse {
1397 0 : gen: attach_req
1398 0 : .node_id
1399 0 : .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
1400 0 : })
1401 0 : }
1402 :
1403 0 : pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
1404 0 : let locked = self.inner.read().unwrap();
1405 0 :
1406 0 : let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
1407 0 :
1408 0 : InspectResponse {
1409 0 : attachment: tenant_shard.and_then(|s| {
1410 0 : s.intent
1411 0 : .get_attached()
1412 0 : .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
1413 0 : }),
1414 0 : }
1415 0 : }
1416 :
1417 : // When the availability state of a node transitions to active, we must do a full reconciliation
1418 : // of LocationConfigs on that node. This is because while a node was offline:
1419 : // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
1420 : // - aborting a tenant shard split might have left rogue child shards behind on this node.
1421 : //
1422 : // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
1423 : // Reconcilers might communicate with the node, and these must not overlap with the work we do in
1424 : // this function.
1425 : //
1426 : // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
1427 : // for written for a single node rather than as a batch job for all nodes.
1428 0 : #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
1429 : async fn node_activate_reconcile(
1430 : &self,
1431 : mut node: Node,
1432 : _lock: &TracingExclusiveGuard<NodeOperations>,
1433 : ) -> Result<(), ApiError> {
1434 : // This Node is a mutable local copy: we will set it active so that we can use its
1435 : // API client to reconcile with the node. The Node in [`Self::nodes`] will get updated
1436 : // later.
1437 : node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
1438 :
1439 : let configs = match node
1440 : .with_client_retries(
1441 0 : |client| async move { client.list_location_config().await },
1442 : &self.config.jwt_token,
1443 : 1,
1444 : 5,
1445 : SHORT_RECONCILE_TIMEOUT,
1446 : &self.cancel,
1447 : )
1448 : .await
1449 : {
1450 : None => {
1451 : // We're shutting down (the Node's cancellation token can't have fired, because
1452 : // we're the only scope that has a reference to it, and we didn't fire it).
1453 : return Err(ApiError::ShuttingDown);
1454 : }
1455 : Some(Err(e)) => {
1456 : // This node didn't succeed listing its locations: it may not proceed to active state
1457 : // as it is apparently unavailable.
1458 : return Err(ApiError::PreconditionFailed(
1459 : format!("Failed to query node location configs, cannot activate ({e})").into(),
1460 : ));
1461 : }
1462 : Some(Ok(configs)) => configs,
1463 : };
1464 : tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
1465 :
1466 : let mut cleanup = Vec::new();
1467 : {
1468 : let mut locked = self.inner.write().unwrap();
1469 :
1470 : for (tenant_shard_id, observed_loc) in configs.tenant_shards {
1471 : let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
1472 : cleanup.push(tenant_shard_id);
1473 : continue;
1474 : };
1475 : tenant_shard
1476 : .observed
1477 : .locations
1478 : .insert(node.get_id(), ObservedStateLocation { conf: observed_loc });
1479 : }
1480 : }
1481 :
1482 : for tenant_shard_id in cleanup {
1483 : tracing::info!("Detaching {tenant_shard_id}");
1484 : match node
1485 : .with_client_retries(
1486 0 : |client| async move {
1487 0 : let config = LocationConfig {
1488 0 : mode: LocationConfigMode::Detached,
1489 0 : generation: None,
1490 0 : secondary_conf: None,
1491 0 : shard_number: tenant_shard_id.shard_number.0,
1492 0 : shard_count: tenant_shard_id.shard_count.literal(),
1493 0 : shard_stripe_size: 0,
1494 0 : tenant_conf: models::TenantConfig::default(),
1495 0 : };
1496 0 : client
1497 0 : .location_config(tenant_shard_id, config, None, false)
1498 0 : .await
1499 0 : },
1500 : &self.config.jwt_token,
1501 : 1,
1502 : 5,
1503 : SHORT_RECONCILE_TIMEOUT,
1504 : &self.cancel,
1505 : )
1506 : .await
1507 : {
1508 : None => {
1509 : // We're shutting down (the Node's cancellation token can't have fired, because
1510 : // we're the only scope that has a reference to it, and we didn't fire it).
1511 : return Err(ApiError::ShuttingDown);
1512 : }
1513 : Some(Err(e)) => {
1514 : // Do not let the node proceed to Active state if it is not responsive to requests
1515 : // to detach. This could happen if e.g. a shutdown bug in the pageserver is preventing
1516 : // detach completing: we should not let this node back into the set of nodes considered
1517 : // okay for scheduling.
1518 : return Err(ApiError::Conflict(format!(
1519 : "Node {node} failed to detach {tenant_shard_id}: {e}"
1520 : )));
1521 : }
1522 : Some(Ok(_)) => {}
1523 : };
1524 : }
1525 :
1526 : Ok(())
1527 : }
1528 :
1529 0 : pub(crate) async fn re_attach(
1530 0 : &self,
1531 0 : reattach_req: ReAttachRequest,
1532 0 : ) -> Result<ReAttachResponse, ApiError> {
1533 0 : if let Some(register_req) = reattach_req.register {
1534 0 : self.node_register(register_req).await?;
1535 0 : }
1536 :
1537 : // Ordering: we must persist generation number updates before making them visible in the in-memory state
1538 0 : let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
1539 :
1540 0 : tracing::info!(
1541 : node_id=%reattach_req.node_id,
1542 0 : "Incremented {} tenant shards' generations",
1543 0 : incremented_generations.len()
1544 : );
1545 :
1546 : // Apply the updated generation to our in-memory state, and
1547 : // gather discover secondary locations.
1548 0 : let mut locked = self.inner.write().unwrap();
1549 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1550 0 :
1551 0 : let mut response = ReAttachResponse {
1552 0 : tenants: Vec::new(),
1553 0 : };
1554 :
1555 : // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
1556 : // to call location_conf API with an old generation. Wait for cancellation to complete
1557 : // before responding to this request. Requires well implemented CancellationToken logic
1558 : // all the way to where we call location_conf. Even then, there can still be a location_conf
1559 : // request in flight over the network: TODO handle that by making location_conf API refuse
1560 : // to go backward in generations.
1561 :
1562 : // Scan through all shards, applying updates for ones where we updated generation
1563 : // and identifying shards that intend to have a secondary location on this node.
1564 0 : for (tenant_shard_id, shard) in tenants {
1565 0 : if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
1566 0 : let new_gen = *new_gen;
1567 0 : response.tenants.push(ReAttachResponseTenant {
1568 0 : id: *tenant_shard_id,
1569 0 : gen: Some(new_gen.into().unwrap()),
1570 0 : // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
1571 0 : // execution. If a pageserver is restarted during that process, then the reconcile pass will
1572 0 : // fail, and start from scratch, so it doesn't make sense for us to try and preserve
1573 0 : // the stale/multi states at this point.
1574 0 : mode: LocationConfigMode::AttachedSingle,
1575 0 : });
1576 0 :
1577 0 : shard.generation = std::cmp::max(shard.generation, Some(new_gen));
1578 0 : if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
1579 : // Why can we update `observed` even though we're not sure our response will be received
1580 : // by the pageserver? Because the pageserver will not proceed with startup until
1581 : // it has processed response: if it loses it, we'll see another request and increment
1582 : // generation again, avoiding any uncertainty about dirtiness of tenant's state.
1583 0 : if let Some(conf) = observed.conf.as_mut() {
1584 0 : conf.generation = new_gen.into();
1585 0 : }
1586 0 : } else {
1587 0 : // This node has no observed state for the shard: perhaps it was offline
1588 0 : // when the pageserver restarted. Insert a None, so that the Reconciler
1589 0 : // will be prompted to learn the location's state before it makes changes.
1590 0 : shard
1591 0 : .observed
1592 0 : .locations
1593 0 : .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
1594 0 : }
1595 0 : } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
1596 0 : // Ordering: pageserver will not accept /location_config requests until it has
1597 0 : // finished processing the response from re-attach. So we can update our in-memory state
1598 0 : // now, and be confident that we are not stamping on the result of some later location config.
1599 0 : // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
1600 0 : // so we might update observed state here, and then get over-written by some racing
1601 0 : // ReconcileResult. The impact is low however, since we have set state on pageserver something
1602 0 : // that matches intent, so worst case if we race then we end up doing a spurious reconcile.
1603 0 :
1604 0 : response.tenants.push(ReAttachResponseTenant {
1605 0 : id: *tenant_shard_id,
1606 0 : gen: None,
1607 0 : mode: LocationConfigMode::Secondary,
1608 0 : });
1609 0 :
1610 0 : // We must not update observed, because we have no guarantee that our
1611 0 : // response will be received by the pageserver. This could leave it
1612 0 : // falsely dirty, but the resulting reconcile should be idempotent.
1613 0 : }
1614 : }
1615 :
1616 : // We consider a node Active once we have composed a re-attach response, but we
1617 : // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
1618 : // implicitly synchronizes the LocationConfigs on the node.
1619 : //
1620 : // Setting a node active unblocks any Reconcilers that might write to the location config API,
1621 : // but those requests will not be accepted by the node until it has finished processing
1622 : // the re-attach response.
1623 : //
1624 : // Additionally, reset the nodes scheduling policy to match the conditional update done
1625 : // in [`Persistence::re_attach`].
1626 0 : if let Some(node) = nodes.get(&reattach_req.node_id) {
1627 0 : let reset_scheduling = matches!(
1628 0 : node.get_scheduling(),
1629 : NodeSchedulingPolicy::PauseForRestart
1630 : | NodeSchedulingPolicy::Draining
1631 : | NodeSchedulingPolicy::Filling
1632 : );
1633 :
1634 0 : if !node.is_available() || reset_scheduling {
1635 0 : let mut new_nodes = (**nodes).clone();
1636 0 : if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
1637 0 : if !node.is_available() {
1638 0 : node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
1639 0 : }
1640 :
1641 0 : if reset_scheduling {
1642 0 : node.set_scheduling(NodeSchedulingPolicy::Active);
1643 0 : }
1644 :
1645 0 : scheduler.node_upsert(node);
1646 0 : let new_nodes = Arc::new(new_nodes);
1647 0 : *nodes = new_nodes;
1648 0 : }
1649 0 : }
1650 0 : }
1651 :
1652 0 : Ok(response)
1653 0 : }
1654 :
1655 0 : pub(crate) fn validate(&self, validate_req: ValidateRequest) -> ValidateResponse {
1656 0 : let locked = self.inner.read().unwrap();
1657 0 :
1658 0 : let mut response = ValidateResponse {
1659 0 : tenants: Vec::new(),
1660 0 : };
1661 :
1662 0 : for req_tenant in validate_req.tenants {
1663 0 : if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
1664 0 : let valid = tenant_shard.generation == Some(Generation::new(req_tenant.gen));
1665 0 : tracing::info!(
1666 0 : "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
1667 : req_tenant.id,
1668 : req_tenant.gen,
1669 : tenant_shard.generation
1670 : );
1671 0 : response.tenants.push(ValidateResponseTenant {
1672 0 : id: req_tenant.id,
1673 0 : valid,
1674 0 : });
1675 0 : } else {
1676 0 : // After tenant deletion, we may approve any validation. This avoids
1677 0 : // spurious warnings on the pageserver if it has pending LSN updates
1678 0 : // at the point a deletion happens.
1679 0 : response.tenants.push(ValidateResponseTenant {
1680 0 : id: req_tenant.id,
1681 0 : valid: true,
1682 0 : });
1683 0 : }
1684 : }
1685 0 : response
1686 0 : }
1687 :
1688 0 : pub(crate) async fn tenant_create(
1689 0 : &self,
1690 0 : create_req: TenantCreateRequest,
1691 0 : ) -> Result<TenantCreateResponse, ApiError> {
1692 0 : let tenant_id = create_req.new_tenant_id.tenant_id;
1693 :
1694 : // Exclude any concurrent attempts to create/access the same tenant ID
1695 0 : let _tenant_lock = trace_exclusive_lock(
1696 0 : &self.tenant_op_locks,
1697 0 : create_req.new_tenant_id.tenant_id,
1698 0 : TenantOperations::Create,
1699 0 : )
1700 0 : .await;
1701 0 : let (response, waiters) = self.do_tenant_create(create_req).await?;
1702 :
1703 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
1704 : // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
1705 : // accept compute notifications while it is in the process of creating. Reconciliation will
1706 : // be retried in the background.
1707 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
1708 0 : }
1709 0 : Ok(response)
1710 0 : }
1711 :
1712 0 : pub(crate) async fn do_tenant_create(
1713 0 : &self,
1714 0 : create_req: TenantCreateRequest,
1715 0 : ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
1716 0 : let placement_policy = create_req
1717 0 : .placement_policy
1718 0 : .clone()
1719 0 : // As a default, zero secondaries is convenient for tests that don't choose a policy.
1720 0 : .unwrap_or(PlacementPolicy::Attached(0));
1721 :
1722 : // This service expects to handle sharding itself: it is an error to try and directly create
1723 : // a particular shard here.
1724 0 : let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
1725 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
1726 0 : "Attempted to create a specific shard, this API is for creating the whole tenant"
1727 0 : )));
1728 : } else {
1729 0 : create_req.new_tenant_id.tenant_id
1730 0 : };
1731 0 :
1732 0 : tracing::info!(
1733 0 : "Creating tenant {}, shard_count={:?}",
1734 : create_req.new_tenant_id,
1735 : create_req.shard_parameters.count,
1736 : );
1737 :
1738 0 : let create_ids = (0..create_req.shard_parameters.count.count())
1739 0 : .map(|i| TenantShardId {
1740 0 : tenant_id,
1741 0 : shard_number: ShardNumber(i),
1742 0 : shard_count: create_req.shard_parameters.count,
1743 0 : })
1744 0 : .collect::<Vec<_>>();
1745 :
1746 : // If the caller specifies a None generation, it means "start from default". This is different
1747 : // to [`Self::tenant_location_config`], where a None generation is used to represent
1748 : // an incompletely-onboarded tenant.
1749 0 : let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
1750 0 : tracing::info!(
1751 0 : "tenant_create: secondary mode, generation is_some={}",
1752 0 : create_req.generation.is_some()
1753 : );
1754 0 : create_req.generation.map(Generation::new)
1755 : } else {
1756 0 : tracing::info!(
1757 0 : "tenant_create: not secondary mode, generation is_some={}",
1758 0 : create_req.generation.is_some()
1759 : );
1760 0 : Some(
1761 0 : create_req
1762 0 : .generation
1763 0 : .map(Generation::new)
1764 0 : .unwrap_or(INITIAL_GENERATION),
1765 0 : )
1766 : };
1767 :
1768 : // Ordering: we persist tenant shards before creating them on the pageserver. This enables a caller
1769 : // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
1770 : // during the creation, rather than risking leaving orphan objects in S3.
1771 0 : let persist_tenant_shards = create_ids
1772 0 : .iter()
1773 0 : .map(|tenant_shard_id| TenantShardPersistence {
1774 0 : tenant_id: tenant_shard_id.tenant_id.to_string(),
1775 0 : shard_number: tenant_shard_id.shard_number.0 as i32,
1776 0 : shard_count: tenant_shard_id.shard_count.literal() as i32,
1777 0 : shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
1778 0 : generation: initial_generation.map(|g| g.into().unwrap() as i32),
1779 0 : // The pageserver is not known until scheduling happens: we will set this column when
1780 0 : // incrementing the generation the first time we attach to a pageserver.
1781 0 : generation_pageserver: None,
1782 0 : placement_policy: serde_json::to_string(&placement_policy).unwrap(),
1783 0 : config: serde_json::to_string(&create_req.config).unwrap(),
1784 0 : splitting: SplitState::default(),
1785 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1786 0 : .unwrap(),
1787 0 : })
1788 0 : .collect();
1789 0 :
1790 0 : match self
1791 0 : .persistence
1792 0 : .insert_tenant_shards(persist_tenant_shards)
1793 0 : .await
1794 : {
1795 0 : Ok(_) => {}
1796 : Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
1797 : DatabaseErrorKind::UniqueViolation,
1798 : _,
1799 : ))) => {
1800 : // Unique key violation: this is probably a retry. Because the shard count is part of the unique key,
1801 : // if we see a unique key violation it means that the creation request's shard count matches the previous
1802 : // creation's shard count.
1803 0 : tracing::info!("Tenant shards already present in database, proceeding with idempotent creation...");
1804 : }
1805 : // Any other database error is unexpected and a bug.
1806 0 : Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
1807 : };
1808 :
1809 0 : let mut schedule_context = ScheduleContext::default();
1810 :
1811 0 : let (waiters, response_shards) = {
1812 0 : let mut locked = self.inner.write().unwrap();
1813 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1814 0 :
1815 0 : let mut response_shards = Vec::new();
1816 0 : let mut schcedule_error = None;
1817 :
1818 0 : for tenant_shard_id in create_ids {
1819 0 : tracing::info!("Creating shard {tenant_shard_id}...");
1820 :
1821 : use std::collections::btree_map::Entry;
1822 0 : match tenants.entry(tenant_shard_id) {
1823 0 : Entry::Occupied(mut entry) => {
1824 0 : tracing::info!(
1825 0 : "Tenant shard {tenant_shard_id} already exists while creating"
1826 : );
1827 :
1828 : // TODO: schedule() should take an anti-affinity expression that pushes
1829 : // attached and secondary locations (independently) away frorm those
1830 : // pageservers also holding a shard for this tenant.
1831 :
1832 0 : entry
1833 0 : .get_mut()
1834 0 : .schedule(scheduler, &mut schedule_context)
1835 0 : .map_err(|e| {
1836 0 : ApiError::Conflict(format!(
1837 0 : "Failed to schedule shard {tenant_shard_id}: {e}"
1838 0 : ))
1839 0 : })?;
1840 :
1841 0 : if let Some(node_id) = entry.get().intent.get_attached() {
1842 0 : let generation = entry
1843 0 : .get()
1844 0 : .generation
1845 0 : .expect("Generation is set when in attached mode");
1846 0 : response_shards.push(TenantCreateResponseShard {
1847 0 : shard_id: tenant_shard_id,
1848 0 : node_id: *node_id,
1849 0 : generation: generation.into().unwrap(),
1850 0 : });
1851 0 : }
1852 :
1853 0 : continue;
1854 : }
1855 0 : Entry::Vacant(entry) => {
1856 0 : let state = entry.insert(TenantShard::new(
1857 0 : tenant_shard_id,
1858 0 : ShardIdentity::from_params(
1859 0 : tenant_shard_id.shard_number,
1860 0 : &create_req.shard_parameters,
1861 0 : ),
1862 0 : placement_policy.clone(),
1863 0 : ));
1864 0 :
1865 0 : state.generation = initial_generation;
1866 0 : state.config = create_req.config.clone();
1867 0 : if let Err(e) = state.schedule(scheduler, &mut schedule_context) {
1868 0 : schcedule_error = Some(e);
1869 0 : }
1870 :
1871 : // Only include shards in result if we are attaching: the purpose
1872 : // of the response is to tell the caller where the shards are attached.
1873 0 : if let Some(node_id) = state.intent.get_attached() {
1874 0 : let generation = state
1875 0 : .generation
1876 0 : .expect("Generation is set when in attached mode");
1877 0 : response_shards.push(TenantCreateResponseShard {
1878 0 : shard_id: tenant_shard_id,
1879 0 : node_id: *node_id,
1880 0 : generation: generation.into().unwrap(),
1881 0 : });
1882 0 : }
1883 : }
1884 : };
1885 : }
1886 :
1887 : // If we failed to schedule shards, then they are still created in the controller,
1888 : // but we return an error to the requester to avoid a silent failure when someone
1889 : // tries to e.g. create a tenant whose placement policy requires more nodes than
1890 : // are present in the system. We do this here rather than in the above loop, to
1891 : // avoid situations where we only create a subset of shards in the tenant.
1892 0 : if let Some(e) = schcedule_error {
1893 0 : return Err(ApiError::Conflict(format!(
1894 0 : "Failed to schedule shard(s): {e}"
1895 0 : )));
1896 0 : }
1897 0 :
1898 0 : let waiters = tenants
1899 0 : .range_mut(TenantShardId::tenant_range(tenant_id))
1900 0 : .filter_map(|(_shard_id, shard)| self.maybe_reconcile_shard(shard, nodes))
1901 0 : .collect::<Vec<_>>();
1902 0 : (waiters, response_shards)
1903 0 : };
1904 0 :
1905 0 : Ok((
1906 0 : TenantCreateResponse {
1907 0 : shards: response_shards,
1908 0 : },
1909 0 : waiters,
1910 0 : ))
1911 0 : }
1912 :
1913 : /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
1914 : /// wait for reconciliation to complete before responding.
1915 0 : async fn await_waiters(
1916 0 : &self,
1917 0 : waiters: Vec<ReconcilerWaiter>,
1918 0 : timeout: Duration,
1919 0 : ) -> Result<(), ReconcileWaitError> {
1920 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
1921 0 : for waiter in waiters {
1922 0 : let timeout = deadline.duration_since(Instant::now());
1923 0 : waiter.wait_timeout(timeout).await?;
1924 : }
1925 :
1926 0 : Ok(())
1927 0 : }
1928 :
1929 : /// Same as [`Service::await_waiters`], but returns the waiters which are still
1930 : /// in progress
1931 0 : async fn await_waiters_remainder(
1932 0 : &self,
1933 0 : waiters: Vec<ReconcilerWaiter>,
1934 0 : timeout: Duration,
1935 0 : ) -> Vec<ReconcilerWaiter> {
1936 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
1937 0 : for waiter in waiters.iter() {
1938 0 : let timeout = deadline.duration_since(Instant::now());
1939 0 : let _ = waiter.wait_timeout(timeout).await;
1940 : }
1941 :
1942 0 : waiters
1943 0 : .into_iter()
1944 0 : .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
1945 0 : .collect::<Vec<_>>()
1946 0 : }
1947 :
1948 : /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
1949 : /// and transform it into either a tenant creation of a series of shard updates.
1950 : ///
1951 : /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
1952 : /// still be returned.
1953 0 : fn tenant_location_config_prepare(
1954 0 : &self,
1955 0 : tenant_id: TenantId,
1956 0 : req: TenantLocationConfigRequest,
1957 0 : ) -> TenantCreateOrUpdate {
1958 0 : let mut updates = Vec::new();
1959 0 : let mut locked = self.inner.write().unwrap();
1960 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
1961 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1962 :
1963 : // Use location config mode as an indicator of policy.
1964 0 : let placement_policy = match req.config.mode {
1965 0 : LocationConfigMode::Detached => PlacementPolicy::Detached,
1966 0 : LocationConfigMode::Secondary => PlacementPolicy::Secondary,
1967 : LocationConfigMode::AttachedMulti
1968 : | LocationConfigMode::AttachedSingle
1969 : | LocationConfigMode::AttachedStale => {
1970 0 : if nodes.len() > 1 {
1971 0 : PlacementPolicy::Attached(1)
1972 : } else {
1973 : // Convenience for dev/test: if we just have one pageserver, import
1974 : // tenants into non-HA mode so that scheduling will succeed.
1975 0 : PlacementPolicy::Attached(0)
1976 : }
1977 : }
1978 : };
1979 :
1980 0 : let mut create = true;
1981 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
1982 : // Saw an existing shard: this is not a creation
1983 0 : create = false;
1984 :
1985 : // Shards may have initially been created by a Secondary request, where we
1986 : // would have left generation as None.
1987 : //
1988 : // We only update generation the first time we see an attached-mode request,
1989 : // and if there is no existing generation set. The caller is responsible for
1990 : // ensuring that no non-storage-controller pageserver ever uses a higher
1991 : // generation than they passed in here.
1992 : use LocationConfigMode::*;
1993 0 : let set_generation = match req.config.mode {
1994 0 : AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
1995 0 : req.config.generation.map(Generation::new)
1996 : }
1997 0 : _ => None,
1998 : };
1999 :
2000 0 : updates.push(ShardUpdate {
2001 0 : tenant_shard_id: *shard_id,
2002 0 : placement_policy: placement_policy.clone(),
2003 0 : tenant_config: req.config.tenant_conf.clone(),
2004 0 : generation: set_generation,
2005 0 : });
2006 : }
2007 :
2008 0 : if create {
2009 : use LocationConfigMode::*;
2010 0 : let generation = match req.config.mode {
2011 0 : AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
2012 : // If a caller provided a generation in a non-attached request, ignore it
2013 : // and leave our generation as None: this enables a subsequent update to set
2014 : // the generation when setting an attached mode for the first time.
2015 0 : _ => None,
2016 : };
2017 :
2018 0 : TenantCreateOrUpdate::Create(
2019 0 : // Synthesize a creation request
2020 0 : TenantCreateRequest {
2021 0 : new_tenant_id: tenant_shard_id,
2022 0 : generation,
2023 0 : shard_parameters: ShardParameters {
2024 0 : count: tenant_shard_id.shard_count,
2025 0 : // We only import un-sharded or single-sharded tenants, so stripe
2026 0 : // size can be made up arbitrarily here.
2027 0 : stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE,
2028 0 : },
2029 0 : placement_policy: Some(placement_policy),
2030 0 : config: req.config.tenant_conf,
2031 0 : },
2032 0 : )
2033 : } else {
2034 0 : assert!(!updates.is_empty());
2035 0 : TenantCreateOrUpdate::Update(updates)
2036 : }
2037 0 : }
2038 :
2039 : /// This API is used by the cloud control plane to migrate unsharded tenants that it created
2040 : /// directly with pageservers into this service.
2041 : ///
2042 : /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
2043 : /// has attempted to call this API. Failure to oblige to this rule may lead to S3 corruption.
2044 : /// Think of the first attempt to call this API as a transfer of absolute authority over the
2045 : /// tenant's source of generation numbers.
2046 : ///
2047 : /// The mode in this request coarse-grained control of tenants:
2048 : /// - Call with mode Attached* to upsert the tenant.
2049 : /// - Call with mode Secondary to either onboard a tenant without attaching it, or
2050 : /// to set an existing tenant to PolicyMode::Secondary
2051 : /// - Call with mode Detached to switch to PolicyMode::Detached
2052 0 : pub(crate) async fn tenant_location_config(
2053 0 : &self,
2054 0 : tenant_shard_id: TenantShardId,
2055 0 : req: TenantLocationConfigRequest,
2056 0 : ) -> Result<TenantLocationConfigResponse, ApiError> {
2057 : // We require an exclusive lock, because we are updating both persistent and in-memory state
2058 0 : let _tenant_lock = trace_exclusive_lock(
2059 0 : &self.tenant_op_locks,
2060 0 : tenant_shard_id.tenant_id,
2061 0 : TenantOperations::LocationConfig,
2062 0 : )
2063 0 : .await;
2064 :
2065 0 : if !tenant_shard_id.is_unsharded() {
2066 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
2067 0 : "This API is for importing single-sharded or unsharded tenants"
2068 0 : )));
2069 0 : }
2070 0 :
2071 0 : // First check if this is a creation or an update
2072 0 : let create_or_update = self.tenant_location_config_prepare(tenant_shard_id.tenant_id, req);
2073 0 :
2074 0 : let mut result = TenantLocationConfigResponse {
2075 0 : shards: Vec::new(),
2076 0 : stripe_size: None,
2077 0 : };
2078 0 : let waiters = match create_or_update {
2079 0 : TenantCreateOrUpdate::Create(create_req) => {
2080 0 : let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
2081 0 : result.shards = create_resp
2082 0 : .shards
2083 0 : .into_iter()
2084 0 : .map(|s| TenantShardLocation {
2085 0 : node_id: s.node_id,
2086 0 : shard_id: s.shard_id,
2087 0 : })
2088 0 : .collect();
2089 0 : waiters
2090 : }
2091 0 : TenantCreateOrUpdate::Update(updates) => {
2092 0 : // Persist updates
2093 0 : // Ordering: write to the database before applying changes in-memory, so that
2094 0 : // we will not appear time-travel backwards on a restart.
2095 0 : let mut schedule_context = ScheduleContext::default();
2096 : for ShardUpdate {
2097 0 : tenant_shard_id,
2098 0 : placement_policy,
2099 0 : tenant_config,
2100 0 : generation,
2101 0 : } in &updates
2102 : {
2103 0 : self.persistence
2104 0 : .update_tenant_shard(
2105 0 : TenantFilter::Shard(*tenant_shard_id),
2106 0 : Some(placement_policy.clone()),
2107 0 : Some(tenant_config.clone()),
2108 0 : *generation,
2109 0 : None,
2110 0 : )
2111 0 : .await?;
2112 : }
2113 :
2114 : // Apply updates in-memory
2115 0 : let mut waiters = Vec::new();
2116 0 : {
2117 0 : let mut locked = self.inner.write().unwrap();
2118 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2119 :
2120 : for ShardUpdate {
2121 0 : tenant_shard_id,
2122 0 : placement_policy,
2123 0 : tenant_config,
2124 0 : generation: update_generation,
2125 0 : } in updates
2126 : {
2127 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
2128 0 : tracing::warn!("Shard {tenant_shard_id} removed while updating");
2129 0 : continue;
2130 : };
2131 :
2132 : // Update stripe size
2133 0 : if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
2134 0 : result.stripe_size = Some(shard.shard.stripe_size);
2135 0 : }
2136 :
2137 0 : shard.policy = placement_policy;
2138 0 : shard.config = tenant_config;
2139 0 : if let Some(generation) = update_generation {
2140 0 : shard.generation = Some(generation);
2141 0 : }
2142 :
2143 0 : shard.schedule(scheduler, &mut schedule_context)?;
2144 :
2145 0 : let maybe_waiter = self.maybe_reconcile_shard(shard, nodes);
2146 0 : if let Some(waiter) = maybe_waiter {
2147 0 : waiters.push(waiter);
2148 0 : }
2149 :
2150 0 : if let Some(node_id) = shard.intent.get_attached() {
2151 0 : result.shards.push(TenantShardLocation {
2152 0 : shard_id: tenant_shard_id,
2153 0 : node_id: *node_id,
2154 0 : })
2155 0 : }
2156 : }
2157 : }
2158 0 : waiters
2159 : }
2160 : };
2161 :
2162 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2163 : // Do not treat a reconcile error as fatal: we have already applied any requested
2164 : // Intent changes, and the reconcile can fail for external reasons like unavailable
2165 : // compute notification API. In these cases, it is important that we do not
2166 : // cause the cloud control plane to retry forever on this API.
2167 0 : tracing::warn!(
2168 0 : "Failed to reconcile after /location_config: {e}, returning success anyway"
2169 : );
2170 0 : }
2171 :
2172 : // Logging the full result is useful because it lets us cross-check what the cloud control
2173 : // plane's tenant_shards table should contain.
2174 0 : tracing::info!("Complete, returning {result:?}");
2175 :
2176 0 : Ok(result)
2177 0 : }
2178 :
2179 0 : pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
2180 : // We require an exclusive lock, because we are updating persistent and in-memory state
2181 0 : let _tenant_lock = trace_exclusive_lock(
2182 0 : &self.tenant_op_locks,
2183 0 : req.tenant_id,
2184 0 : TenantOperations::ConfigSet,
2185 0 : )
2186 0 : .await;
2187 :
2188 0 : let tenant_id = req.tenant_id;
2189 0 : let config = req.config;
2190 0 :
2191 0 : self.persistence
2192 0 : .update_tenant_shard(
2193 0 : TenantFilter::Tenant(req.tenant_id),
2194 0 : None,
2195 0 : Some(config.clone()),
2196 0 : None,
2197 0 : None,
2198 0 : )
2199 0 : .await?;
2200 :
2201 0 : let waiters = {
2202 0 : let mut waiters = Vec::new();
2203 0 : let mut locked = self.inner.write().unwrap();
2204 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2205 0 : for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2206 0 : shard.config = config.clone();
2207 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2208 0 : waiters.push(waiter);
2209 0 : }
2210 : }
2211 0 : waiters
2212 : };
2213 :
2214 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2215 : // Treat this as success because we have stored the configuration. If e.g.
2216 : // a node was unavailable at this time, it should not stop us accepting a
2217 : // configuration change.
2218 0 : tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
2219 0 : }
2220 :
2221 0 : Ok(())
2222 0 : }
2223 :
2224 0 : pub(crate) fn tenant_config_get(
2225 0 : &self,
2226 0 : tenant_id: TenantId,
2227 0 : ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
2228 0 : let config = {
2229 0 : let locked = self.inner.read().unwrap();
2230 0 :
2231 0 : match locked
2232 0 : .tenants
2233 0 : .range(TenantShardId::tenant_range(tenant_id))
2234 0 : .next()
2235 : {
2236 0 : Some((_tenant_shard_id, shard)) => shard.config.clone(),
2237 : None => {
2238 0 : return Err(ApiError::NotFound(
2239 0 : anyhow::anyhow!("Tenant not found").into(),
2240 0 : ))
2241 : }
2242 : }
2243 : };
2244 :
2245 : // Unlike the pageserver, we do not have a set of global defaults: the config is
2246 : // entirely per-tenant. Therefore the distinction between `tenant_specific_overrides`
2247 : // and `effective_config` in the response is meaningless, but we retain that syntax
2248 : // in order to remain compatible with the pageserver API.
2249 :
2250 0 : let response = HashMap::from([
2251 : (
2252 : "tenant_specific_overrides",
2253 0 : serde_json::to_value(&config)
2254 0 : .context("serializing tenant specific overrides")
2255 0 : .map_err(ApiError::InternalServerError)?,
2256 : ),
2257 : (
2258 0 : "effective_config",
2259 0 : serde_json::to_value(&config)
2260 0 : .context("serializing effective config")
2261 0 : .map_err(ApiError::InternalServerError)?,
2262 : ),
2263 : ]);
2264 :
2265 0 : Ok(response)
2266 0 : }
2267 :
2268 0 : pub(crate) async fn tenant_time_travel_remote_storage(
2269 0 : &self,
2270 0 : time_travel_req: &TenantTimeTravelRequest,
2271 0 : tenant_id: TenantId,
2272 0 : timestamp: Cow<'_, str>,
2273 0 : done_if_after: Cow<'_, str>,
2274 0 : ) -> Result<(), ApiError> {
2275 0 : let _tenant_lock = trace_exclusive_lock(
2276 0 : &self.tenant_op_locks,
2277 0 : tenant_id,
2278 0 : TenantOperations::TimeTravelRemoteStorage,
2279 0 : )
2280 0 : .await;
2281 :
2282 0 : let node = {
2283 0 : let locked = self.inner.read().unwrap();
2284 : // Just a sanity check to prevent misuse: the API expects that the tenant is fully
2285 : // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
2286 : // but only at the start of the process, so it's really just to prevent operator
2287 : // mistakes.
2288 0 : for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
2289 0 : if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
2290 : {
2291 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2292 0 : "We want tenant to be attached in shard with tenant_shard_id={shard_id}"
2293 0 : )));
2294 0 : }
2295 0 : let maybe_attached = shard
2296 0 : .observed
2297 0 : .locations
2298 0 : .iter()
2299 0 : .filter_map(|(node_id, observed_location)| {
2300 0 : observed_location
2301 0 : .conf
2302 0 : .as_ref()
2303 0 : .map(|loc| (node_id, observed_location, loc.mode))
2304 0 : })
2305 0 : .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
2306 0 : if let Some((node_id, _observed_location, mode)) = maybe_attached {
2307 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}")));
2308 0 : }
2309 : }
2310 0 : let scheduler = &locked.scheduler;
2311 : // Right now we only perform the operation on a single node without parallelization
2312 : // TODO fan out the operation to multiple nodes for better performance
2313 0 : let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
2314 0 : let node = locked
2315 0 : .nodes
2316 0 : .get(&node_id)
2317 0 : .expect("Pageservers may not be deleted while lock is active");
2318 0 : node.clone()
2319 0 : };
2320 0 :
2321 0 : // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
2322 0 : let mut counts = time_travel_req
2323 0 : .shard_counts
2324 0 : .iter()
2325 0 : .copied()
2326 0 : .collect::<HashSet<_>>()
2327 0 : .into_iter()
2328 0 : .collect::<Vec<_>>();
2329 0 : counts.sort_unstable();
2330 :
2331 0 : for count in counts {
2332 0 : let shard_ids = (0..count.count())
2333 0 : .map(|i| TenantShardId {
2334 0 : tenant_id,
2335 0 : shard_number: ShardNumber(i),
2336 0 : shard_count: count,
2337 0 : })
2338 0 : .collect::<Vec<_>>();
2339 0 : for tenant_shard_id in shard_ids {
2340 0 : let client = PageserverClient::new(
2341 0 : node.get_id(),
2342 0 : node.base_url(),
2343 0 : self.config.jwt_token.as_deref(),
2344 0 : );
2345 0 :
2346 0 : tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
2347 :
2348 0 : client
2349 0 : .tenant_time_travel_remote_storage(
2350 0 : tenant_shard_id,
2351 0 : ×tamp,
2352 0 : &done_if_after,
2353 0 : )
2354 0 : .await
2355 0 : .map_err(|e| {
2356 0 : ApiError::InternalServerError(anyhow::anyhow!(
2357 0 : "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
2358 0 : node
2359 0 : ))
2360 0 : })?;
2361 : }
2362 : }
2363 0 : Ok(())
2364 0 : }
2365 :
2366 0 : pub(crate) async fn tenant_secondary_download(
2367 0 : &self,
2368 0 : tenant_id: TenantId,
2369 0 : wait: Option<Duration>,
2370 0 : ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
2371 0 : let _tenant_lock = trace_shared_lock(
2372 0 : &self.tenant_op_locks,
2373 0 : tenant_id,
2374 0 : TenantOperations::SecondaryDownload,
2375 0 : )
2376 0 : .await;
2377 :
2378 : // Acquire lock and yield the collection of shard-node tuples which we will send requests onward to
2379 0 : let targets = {
2380 0 : let locked = self.inner.read().unwrap();
2381 0 : let mut targets = Vec::new();
2382 :
2383 0 : for (tenant_shard_id, shard) in
2384 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2385 : {
2386 0 : for node_id in shard.intent.get_secondary() {
2387 0 : let node = locked
2388 0 : .nodes
2389 0 : .get(node_id)
2390 0 : .expect("Pageservers may not be deleted while referenced");
2391 0 :
2392 0 : targets.push((*tenant_shard_id, node.clone()));
2393 0 : }
2394 : }
2395 0 : targets
2396 0 : };
2397 0 :
2398 0 : // Issue concurrent requests to all shards' locations
2399 0 : let mut futs = FuturesUnordered::new();
2400 0 : for (tenant_shard_id, node) in targets {
2401 0 : let client = PageserverClient::new(
2402 0 : node.get_id(),
2403 0 : node.base_url(),
2404 0 : self.config.jwt_token.as_deref(),
2405 0 : );
2406 0 : futs.push(async move {
2407 0 : let result = client
2408 0 : .tenant_secondary_download(tenant_shard_id, wait)
2409 0 : .await;
2410 0 : (result, node, tenant_shard_id)
2411 0 : })
2412 : }
2413 :
2414 : // Handle any errors returned by pageservers. This includes cases like this request racing with
2415 : // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
2416 : // well as more general cases like 503s, 500s, or timeouts.
2417 0 : let mut aggregate_progress = SecondaryProgress::default();
2418 0 : let mut aggregate_status: Option<StatusCode> = None;
2419 0 : let mut error: Option<mgmt_api::Error> = None;
2420 0 : while let Some((result, node, tenant_shard_id)) = futs.next().await {
2421 0 : match result {
2422 0 : Err(e) => {
2423 0 : // Secondary downloads are always advisory: if something fails, we nevertheless report success, so that whoever
2424 0 : // is calling us will proceed with whatever migration they're doing, albeit with a slightly less warm cache
2425 0 : // than they had hoped for.
2426 0 : tracing::warn!("Secondary download error from pageserver {node}: {e}",);
2427 0 : error = Some(e)
2428 : }
2429 0 : Ok((status_code, progress)) => {
2430 0 : tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
2431 0 : aggregate_progress.layers_downloaded += progress.layers_downloaded;
2432 0 : aggregate_progress.layers_total += progress.layers_total;
2433 0 : aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
2434 0 : aggregate_progress.bytes_total += progress.bytes_total;
2435 0 : aggregate_progress.heatmap_mtime =
2436 0 : std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
2437 0 : aggregate_status = match aggregate_status {
2438 0 : None => Some(status_code),
2439 0 : Some(StatusCode::OK) => Some(status_code),
2440 0 : Some(cur) => {
2441 0 : // Other status codes (e.g. 202) -- do not overwrite.
2442 0 : Some(cur)
2443 : }
2444 : };
2445 : }
2446 : }
2447 : }
2448 :
2449 : // If any of the shards return 202, indicate our result as 202.
2450 0 : match aggregate_status {
2451 : None => {
2452 0 : match error {
2453 0 : Some(e) => {
2454 0 : // No successes, and an error: surface it
2455 0 : Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
2456 : }
2457 : None => {
2458 : // No shards found
2459 0 : Err(ApiError::NotFound(
2460 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
2461 0 : ))
2462 : }
2463 : }
2464 : }
2465 0 : Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
2466 : }
2467 0 : }
2468 :
2469 0 : pub(crate) async fn tenant_delete(&self, tenant_id: TenantId) -> Result<StatusCode, ApiError> {
2470 0 : let _tenant_lock =
2471 0 : trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
2472 :
2473 : // Detach all shards
2474 0 : let (detach_waiters, shard_ids, node) = {
2475 0 : let mut shard_ids = Vec::new();
2476 0 : let mut detach_waiters = Vec::new();
2477 0 : let mut locked = self.inner.write().unwrap();
2478 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2479 0 : for (tenant_shard_id, shard) in
2480 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2481 : {
2482 0 : shard_ids.push(*tenant_shard_id);
2483 0 :
2484 0 : // Update the tenant's intent to remove all attachments
2485 0 : shard.policy = PlacementPolicy::Detached;
2486 0 : shard
2487 0 : .schedule(scheduler, &mut ScheduleContext::default())
2488 0 : .expect("De-scheduling is infallible");
2489 0 : debug_assert!(shard.intent.get_attached().is_none());
2490 0 : debug_assert!(shard.intent.get_secondary().is_empty());
2491 :
2492 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2493 0 : detach_waiters.push(waiter);
2494 0 : }
2495 : }
2496 :
2497 : // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
2498 : // was attached, just has to be able to see the S3 content)
2499 0 : let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
2500 0 : let node = nodes
2501 0 : .get(&node_id)
2502 0 : .expect("Pageservers may not be deleted while lock is active");
2503 0 : (detach_waiters, shard_ids, node.clone())
2504 0 : };
2505 0 :
2506 0 : // This reconcile wait can fail in a few ways:
2507 0 : // A there is a very long queue for the reconciler semaphore
2508 0 : // B some pageserver is failing to handle a detach promptly
2509 0 : // C some pageserver goes offline right at the moment we send it a request.
2510 0 : //
2511 0 : // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
2512 0 : // the next attempt to reconcile will silently skip detaches for an offline node and succeed. If B happens,
2513 0 : // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
2514 0 : // deleting the underlying data).
2515 0 : self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
2516 0 : .await?;
2517 :
2518 0 : let locations = shard_ids
2519 0 : .into_iter()
2520 0 : .map(|s| (s, node.clone()))
2521 0 : .collect::<Vec<_>>();
2522 0 : let results = self.tenant_for_shards_api(
2523 0 : locations,
2524 0 : |tenant_shard_id, client| async move { client.tenant_delete(tenant_shard_id).await },
2525 0 : 1,
2526 0 : 3,
2527 0 : RECONCILE_TIMEOUT,
2528 0 : &self.cancel,
2529 0 : )
2530 0 : .await;
2531 0 : for result in results {
2532 0 : match result {
2533 : Ok(StatusCode::ACCEPTED) => {
2534 : // This should never happen: we waited for detaches to finish above
2535 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2536 0 : "Unexpectedly still attached on {}",
2537 0 : node
2538 0 : )));
2539 : }
2540 0 : Ok(_) => {}
2541 : Err(mgmt_api::Error::Cancelled) => {
2542 0 : return Err(ApiError::ShuttingDown);
2543 : }
2544 0 : Err(e) => {
2545 0 : // This is unexpected: remote deletion should be infallible, unless the object store
2546 0 : // at large is unavailable.
2547 0 : tracing::error!("Error deleting via node {}: {e}", node);
2548 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
2549 : }
2550 : }
2551 : }
2552 :
2553 : // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
2554 : // our in-memory state and database state.
2555 :
2556 : // Ordering: we delete persistent state first: if we then
2557 : // crash, we will drop the in-memory state.
2558 :
2559 : // Drop persistent state.
2560 0 : self.persistence.delete_tenant(tenant_id).await?;
2561 :
2562 : // Drop in-memory state
2563 : {
2564 0 : let mut locked = self.inner.write().unwrap();
2565 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
2566 :
2567 : // Dereference Scheduler from shards before dropping them
2568 0 : for (_tenant_shard_id, shard) in
2569 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2570 0 : {
2571 0 : shard.intent.clear(scheduler);
2572 0 : }
2573 :
2574 0 : tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
2575 0 : tracing::info!(
2576 0 : "Deleted tenant {tenant_id}, now have {} tenants",
2577 0 : locked.tenants.len()
2578 : );
2579 : };
2580 :
2581 : // Success is represented as 404, to imitate the existing pageserver deletion API
2582 0 : Ok(StatusCode::NOT_FOUND)
2583 0 : }
2584 :
2585 : /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
2586 : /// for a tenant. The TenantConfig is passed through to pageservers, whereas this function modifies
2587 : /// the tenant's policies (configuration) within the storage controller
2588 0 : pub(crate) async fn tenant_update_policy(
2589 0 : &self,
2590 0 : tenant_id: TenantId,
2591 0 : req: TenantPolicyRequest,
2592 0 : ) -> Result<(), ApiError> {
2593 : // We require an exclusive lock, because we are updating persistent and in-memory state
2594 0 : let _tenant_lock = trace_exclusive_lock(
2595 0 : &self.tenant_op_locks,
2596 0 : tenant_id,
2597 0 : TenantOperations::UpdatePolicy,
2598 0 : )
2599 0 : .await;
2600 :
2601 0 : failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
2602 :
2603 : let TenantPolicyRequest {
2604 0 : placement,
2605 0 : scheduling,
2606 0 : } = req;
2607 0 :
2608 0 : self.persistence
2609 0 : .update_tenant_shard(
2610 0 : TenantFilter::Tenant(tenant_id),
2611 0 : placement.clone(),
2612 0 : None,
2613 0 : None,
2614 0 : scheduling,
2615 0 : )
2616 0 : .await?;
2617 :
2618 0 : let mut schedule_context = ScheduleContext::default();
2619 0 : let mut locked = self.inner.write().unwrap();
2620 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2621 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2622 0 : if let Some(placement) = &placement {
2623 0 : shard.policy = placement.clone();
2624 0 :
2625 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2626 0 : "Updated placement policy to {placement:?}");
2627 0 : }
2628 :
2629 0 : if let Some(scheduling) = &scheduling {
2630 0 : shard.set_scheduling_policy(*scheduling);
2631 0 :
2632 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2633 0 : "Updated scheduling policy to {scheduling:?}");
2634 0 : }
2635 :
2636 : // In case scheduling is being switched back on, try it now.
2637 0 : shard.schedule(scheduler, &mut schedule_context).ok();
2638 0 : self.maybe_reconcile_shard(shard, nodes);
2639 : }
2640 :
2641 0 : Ok(())
2642 0 : }
2643 :
2644 0 : pub(crate) async fn tenant_timeline_create(
2645 0 : &self,
2646 0 : tenant_id: TenantId,
2647 0 : mut create_req: TimelineCreateRequest,
2648 0 : ) -> Result<TimelineInfo, ApiError> {
2649 0 : tracing::info!(
2650 0 : "Creating timeline {}/{}",
2651 : tenant_id,
2652 : create_req.new_timeline_id,
2653 : );
2654 :
2655 0 : let _tenant_lock = trace_shared_lock(
2656 0 : &self.tenant_op_locks,
2657 0 : tenant_id,
2658 0 : TenantOperations::TimelineCreate,
2659 0 : )
2660 0 : .await;
2661 0 : failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
2662 :
2663 0 : self.ensure_attached_wait(tenant_id).await?;
2664 :
2665 0 : let mut targets = {
2666 0 : let locked = self.inner.read().unwrap();
2667 0 : let mut targets = Vec::new();
2668 :
2669 0 : for (tenant_shard_id, shard) in
2670 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2671 0 : {
2672 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2673 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2674 0 : })?;
2675 0 : let node = locked
2676 0 : .nodes
2677 0 : .get(&node_id)
2678 0 : .expect("Pageservers may not be deleted while referenced");
2679 0 :
2680 0 : targets.push((*tenant_shard_id, node.clone()));
2681 : }
2682 0 : targets
2683 0 : };
2684 0 :
2685 0 : if targets.is_empty() {
2686 0 : return Err(ApiError::NotFound(
2687 0 : anyhow::anyhow!("Tenant not found").into(),
2688 0 : ));
2689 0 : };
2690 0 : let shard_zero = targets.remove(0);
2691 :
2692 0 : async fn create_one(
2693 0 : tenant_shard_id: TenantShardId,
2694 0 : node: Node,
2695 0 : jwt: Option<String>,
2696 0 : create_req: TimelineCreateRequest,
2697 0 : ) -> Result<TimelineInfo, ApiError> {
2698 0 : tracing::info!(
2699 0 : "Creating timeline on shard {}/{}, attached to node {node}",
2700 : tenant_shard_id,
2701 : create_req.new_timeline_id,
2702 : );
2703 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2704 0 :
2705 0 : client
2706 0 : .timeline_create(tenant_shard_id, &create_req)
2707 0 : .await
2708 0 : .map_err(|e| passthrough_api_error(&node, e))
2709 0 : }
2710 :
2711 : // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
2712 : // use whatever LSN that shard picked when creating on subsequent shards. We arbitrarily use shard zero as the shard
2713 : // that will get the first creation request, and propagate the LSN to all the >0 shards.
2714 0 : let timeline_info = create_one(
2715 0 : shard_zero.0,
2716 0 : shard_zero.1,
2717 0 : self.config.jwt_token.clone(),
2718 0 : create_req.clone(),
2719 0 : )
2720 0 : .await?;
2721 :
2722 : // Propagate the LSN that shard zero picked, if caller didn't provide one
2723 0 : if create_req.ancestor_timeline_id.is_some() && create_req.ancestor_start_lsn.is_none() {
2724 0 : create_req.ancestor_start_lsn = timeline_info.ancestor_lsn;
2725 0 : }
2726 :
2727 : // Create timeline on remaining shards with number >0
2728 0 : if !targets.is_empty() {
2729 : // If we had multiple shards, issue requests for the remainder now.
2730 0 : let jwt = self.config.jwt_token.clone();
2731 0 : self.tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
2732 0 : let create_req = create_req.clone();
2733 0 : Box::pin(create_one(tenant_shard_id, node, jwt.clone(), create_req))
2734 0 : })
2735 0 : .await?;
2736 0 : }
2737 :
2738 0 : Ok(timeline_info)
2739 0 : }
2740 :
2741 : /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
2742 : ///
2743 : /// On success, the returned vector contains exactly the same number of elements as the input `locations`.
2744 0 : async fn tenant_for_shards<F, R>(
2745 0 : &self,
2746 0 : locations: Vec<(TenantShardId, Node)>,
2747 0 : mut req_fn: F,
2748 0 : ) -> Result<Vec<R>, ApiError>
2749 0 : where
2750 0 : F: FnMut(
2751 0 : TenantShardId,
2752 0 : Node,
2753 0 : )
2754 0 : -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
2755 0 : {
2756 0 : let mut futs = FuturesUnordered::new();
2757 0 : let mut results = Vec::with_capacity(locations.len());
2758 :
2759 0 : for (tenant_shard_id, node) in locations {
2760 0 : futs.push(req_fn(tenant_shard_id, node));
2761 0 : }
2762 :
2763 0 : while let Some(r) = futs.next().await {
2764 0 : results.push(r?);
2765 : }
2766 :
2767 0 : Ok(results)
2768 0 : }
2769 :
2770 : /// Concurrently invoke a pageserver API call on many shards at once
2771 0 : pub(crate) async fn tenant_for_shards_api<T, O, F>(
2772 0 : &self,
2773 0 : locations: Vec<(TenantShardId, Node)>,
2774 0 : op: O,
2775 0 : warn_threshold: u32,
2776 0 : max_retries: u32,
2777 0 : timeout: Duration,
2778 0 : cancel: &CancellationToken,
2779 0 : ) -> Vec<mgmt_api::Result<T>>
2780 0 : where
2781 0 : O: Fn(TenantShardId, PageserverClient) -> F + Copy,
2782 0 : F: std::future::Future<Output = mgmt_api::Result<T>>,
2783 0 : {
2784 0 : let mut futs = FuturesUnordered::new();
2785 0 : let mut results = Vec::with_capacity(locations.len());
2786 :
2787 0 : for (tenant_shard_id, node) in locations {
2788 0 : futs.push(async move {
2789 0 : node.with_client_retries(
2790 0 : |client| op(tenant_shard_id, client),
2791 0 : &self.config.jwt_token,
2792 0 : warn_threshold,
2793 0 : max_retries,
2794 0 : timeout,
2795 0 : cancel,
2796 0 : )
2797 0 : .await
2798 0 : });
2799 0 : }
2800 :
2801 0 : while let Some(r) = futs.next().await {
2802 0 : let r = r.unwrap_or(Err(mgmt_api::Error::Cancelled));
2803 0 : results.push(r);
2804 0 : }
2805 :
2806 0 : results
2807 0 : }
2808 :
2809 0 : pub(crate) async fn tenant_timeline_delete(
2810 0 : &self,
2811 0 : tenant_id: TenantId,
2812 0 : timeline_id: TimelineId,
2813 0 : ) -> Result<StatusCode, ApiError> {
2814 0 : tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
2815 0 : let _tenant_lock = trace_shared_lock(
2816 0 : &self.tenant_op_locks,
2817 0 : tenant_id,
2818 0 : TenantOperations::TimelineDelete,
2819 0 : )
2820 0 : .await;
2821 :
2822 0 : self.ensure_attached_wait(tenant_id).await?;
2823 :
2824 0 : let mut targets = {
2825 0 : let locked = self.inner.read().unwrap();
2826 0 : let mut targets = Vec::new();
2827 :
2828 0 : for (tenant_shard_id, shard) in
2829 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2830 0 : {
2831 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2832 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2833 0 : })?;
2834 0 : let node = locked
2835 0 : .nodes
2836 0 : .get(&node_id)
2837 0 : .expect("Pageservers may not be deleted while referenced");
2838 0 :
2839 0 : targets.push((*tenant_shard_id, node.clone()));
2840 : }
2841 0 : targets
2842 0 : };
2843 0 :
2844 0 : if targets.is_empty() {
2845 0 : return Err(ApiError::NotFound(
2846 0 : anyhow::anyhow!("Tenant not found").into(),
2847 0 : ));
2848 0 : }
2849 0 : let shard_zero = targets.remove(0);
2850 :
2851 0 : async fn delete_one(
2852 0 : tenant_shard_id: TenantShardId,
2853 0 : timeline_id: TimelineId,
2854 0 : node: Node,
2855 0 : jwt: Option<String>,
2856 0 : ) -> Result<StatusCode, ApiError> {
2857 0 : tracing::info!(
2858 0 : "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
2859 : );
2860 :
2861 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2862 0 : client
2863 0 : .timeline_delete(tenant_shard_id, timeline_id)
2864 0 : .await
2865 0 : .map_err(|e| {
2866 0 : ApiError::InternalServerError(anyhow::anyhow!(
2867 0 : "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
2868 0 : ))
2869 0 : })
2870 0 : }
2871 :
2872 0 : let statuses = self
2873 0 : .tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
2874 0 : Box::pin(delete_one(
2875 0 : tenant_shard_id,
2876 0 : timeline_id,
2877 0 : node,
2878 0 : self.config.jwt_token.clone(),
2879 0 : ))
2880 0 : })
2881 0 : .await?;
2882 :
2883 : // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero
2884 0 : if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
2885 0 : return Ok(StatusCode::ACCEPTED);
2886 0 : }
2887 :
2888 : // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
2889 : // to shard zero, it gives a more obvious behavior that a GET returns 404 once the deletion is done.
2890 0 : let shard_zero_status = delete_one(
2891 0 : shard_zero.0,
2892 0 : timeline_id,
2893 0 : shard_zero.1,
2894 0 : self.config.jwt_token.clone(),
2895 0 : )
2896 0 : .await?;
2897 :
2898 0 : Ok(shard_zero_status)
2899 0 : }
2900 :
2901 : /// When you need to send an HTTP request to the pageserver that holds shard0 of a tenant, this
2902 : /// function looks up and returns node. If the tenant isn't found, returns Err(ApiError::NotFound)
2903 0 : pub(crate) fn tenant_shard0_node(
2904 0 : &self,
2905 0 : tenant_id: TenantId,
2906 0 : ) -> Result<(Node, TenantShardId), ApiError> {
2907 0 : let locked = self.inner.read().unwrap();
2908 0 : let Some((tenant_shard_id, shard)) = locked
2909 0 : .tenants
2910 0 : .range(TenantShardId::tenant_range(tenant_id))
2911 0 : .next()
2912 : else {
2913 0 : return Err(ApiError::NotFound(
2914 0 : anyhow::anyhow!("Tenant {tenant_id} not found").into(),
2915 0 : ));
2916 : };
2917 :
2918 : // TODO: should use the ID last published to compute_hook, rather than the intent: the intent might
2919 : // point to somewhere we haven't attached yet.
2920 0 : let Some(node_id) = shard.intent.get_attached() else {
2921 0 : tracing::warn!(
2922 0 : tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
2923 0 : "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
2924 : shard.policy
2925 : );
2926 0 : return Err(ApiError::Conflict(
2927 0 : "Cannot call timeline API on non-attached tenant".to_string(),
2928 0 : ));
2929 : };
2930 :
2931 0 : let Some(node) = locked.nodes.get(node_id) else {
2932 : // This should never happen
2933 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2934 0 : "Shard refers to nonexistent node"
2935 0 : )));
2936 : };
2937 :
2938 0 : Ok((node.clone(), *tenant_shard_id))
2939 0 : }
2940 :
2941 0 : pub(crate) fn tenant_locate(
2942 0 : &self,
2943 0 : tenant_id: TenantId,
2944 0 : ) -> Result<TenantLocateResponse, ApiError> {
2945 0 : let locked = self.inner.read().unwrap();
2946 0 : tracing::info!("Locating shards for tenant {tenant_id}");
2947 :
2948 0 : let mut result = Vec::new();
2949 0 : let mut shard_params: Option<ShardParameters> = None;
2950 :
2951 0 : for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2952 : {
2953 0 : let node_id =
2954 0 : shard
2955 0 : .intent
2956 0 : .get_attached()
2957 0 : .ok_or(ApiError::BadRequest(anyhow::anyhow!(
2958 0 : "Cannot locate a tenant that is not attached"
2959 0 : )))?;
2960 :
2961 0 : let node = locked
2962 0 : .nodes
2963 0 : .get(&node_id)
2964 0 : .expect("Pageservers may not be deleted while referenced");
2965 0 :
2966 0 : result.push(node.shard_location(*tenant_shard_id));
2967 0 :
2968 0 : match &shard_params {
2969 0 : None => {
2970 0 : shard_params = Some(ShardParameters {
2971 0 : stripe_size: shard.shard.stripe_size,
2972 0 : count: shard.shard.count,
2973 0 : });
2974 0 : }
2975 0 : Some(params) => {
2976 0 : if params.stripe_size != shard.shard.stripe_size {
2977 : // This should never happen. We enforce at runtime because it's simpler than
2978 : // adding an extra per-tenant data structure to store the things that should be the same
2979 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2980 0 : "Inconsistent shard stripe size parameters!"
2981 0 : )));
2982 0 : }
2983 : }
2984 : }
2985 : }
2986 :
2987 0 : if result.is_empty() {
2988 0 : return Err(ApiError::NotFound(
2989 0 : anyhow::anyhow!("No shards for this tenant ID found").into(),
2990 0 : ));
2991 0 : }
2992 0 : let shard_params = shard_params.expect("result is non-empty, therefore this is set");
2993 0 : tracing::info!(
2994 0 : "Located tenant {} with params {:?} on shards {}",
2995 0 : tenant_id,
2996 0 : shard_params,
2997 0 : result
2998 0 : .iter()
2999 0 : .map(|s| format!("{:?}", s))
3000 0 : .collect::<Vec<_>>()
3001 0 : .join(",")
3002 : );
3003 :
3004 0 : Ok(TenantLocateResponse {
3005 0 : shards: result,
3006 0 : shard_params,
3007 0 : })
3008 0 : }
3009 :
3010 : /// Returns None if the input iterator of shards does not include a shard with number=0
3011 0 : fn tenant_describe_impl<'a>(
3012 0 : &self,
3013 0 : shards: impl Iterator<Item = &'a TenantShard>,
3014 0 : ) -> Option<TenantDescribeResponse> {
3015 0 : let mut shard_zero = None;
3016 0 : let mut describe_shards = Vec::new();
3017 :
3018 0 : for shard in shards {
3019 0 : if shard.tenant_shard_id.is_shard_zero() {
3020 0 : shard_zero = Some(shard);
3021 0 : }
3022 :
3023 0 : describe_shards.push(TenantDescribeResponseShard {
3024 0 : tenant_shard_id: shard.tenant_shard_id,
3025 0 : node_attached: *shard.intent.get_attached(),
3026 0 : node_secondary: shard.intent.get_secondary().to_vec(),
3027 0 : last_error: shard
3028 0 : .last_error
3029 0 : .lock()
3030 0 : .unwrap()
3031 0 : .as_ref()
3032 0 : .map(|e| format!("{e}"))
3033 0 : .unwrap_or("".to_string())
3034 0 : .clone(),
3035 0 : is_reconciling: shard.reconciler.is_some(),
3036 0 : is_pending_compute_notification: shard.pending_compute_notification,
3037 0 : is_splitting: matches!(shard.splitting, SplitState::Splitting),
3038 0 : scheduling_policy: *shard.get_scheduling_policy(),
3039 : })
3040 : }
3041 :
3042 0 : let shard_zero = shard_zero?;
3043 :
3044 0 : Some(TenantDescribeResponse {
3045 0 : tenant_id: shard_zero.tenant_shard_id.tenant_id,
3046 0 : shards: describe_shards,
3047 0 : stripe_size: shard_zero.shard.stripe_size,
3048 0 : policy: shard_zero.policy.clone(),
3049 0 : config: shard_zero.config.clone(),
3050 0 : })
3051 0 : }
3052 :
3053 0 : pub(crate) fn tenant_describe(
3054 0 : &self,
3055 0 : tenant_id: TenantId,
3056 0 : ) -> Result<TenantDescribeResponse, ApiError> {
3057 0 : let locked = self.inner.read().unwrap();
3058 0 :
3059 0 : self.tenant_describe_impl(
3060 0 : locked
3061 0 : .tenants
3062 0 : .range(TenantShardId::tenant_range(tenant_id))
3063 0 : .map(|(_k, v)| v),
3064 0 : )
3065 0 : .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
3066 0 : }
3067 :
3068 0 : pub(crate) fn tenant_list(&self) -> Vec<TenantDescribeResponse> {
3069 0 : let locked = self.inner.read().unwrap();
3070 0 :
3071 0 : let mut result = Vec::new();
3072 0 : for (_tenant_id, tenant_shards) in
3073 0 : &locked.tenants.iter().group_by(|(id, _shard)| id.tenant_id)
3074 0 : {
3075 0 : result.push(
3076 0 : self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
3077 0 : .expect("Groups are always non-empty"),
3078 0 : );
3079 0 : }
3080 :
3081 0 : result
3082 0 : }
3083 :
3084 0 : #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
3085 : async fn abort_tenant_shard_split(
3086 : &self,
3087 : op: &TenantShardSplitAbort,
3088 : ) -> Result<(), TenantShardSplitAbortError> {
3089 : // Cleaning up a split:
3090 : // - Parent shards are not destroyed during a split, just detached.
3091 : // - Failed pageserver split API calls can leave the remote node with just the parent attached,
3092 : // just the children attached, or both.
3093 : //
3094 : // Therefore our work to do is to:
3095 : // 1. Clean up storage controller's internal state to just refer to parents, no children
3096 : // 2. Call out to pageservers to ensure that children are detached
3097 : // 3. Call out to pageservers to ensure that parents are attached.
3098 : //
3099 : // Crash safety:
3100 : // - If the storage controller stops running during this cleanup *after* clearing the splitting state
3101 : // from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
3102 : // and detach them.
3103 : // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
3104 : // from our database, then we will re-enter this cleanup routine on startup.
3105 :
3106 : let TenantShardSplitAbort {
3107 : tenant_id,
3108 : new_shard_count,
3109 : new_stripe_size,
3110 : ..
3111 : } = op;
3112 :
3113 : // First abort persistent state, if any exists.
3114 : match self
3115 : .persistence
3116 : .abort_shard_split(*tenant_id, *new_shard_count)
3117 : .await?
3118 : {
3119 : AbortShardSplitStatus::Aborted => {
3120 : // Proceed to roll back any child shards created on pageservers
3121 : }
3122 : AbortShardSplitStatus::Complete => {
3123 : // The split completed (we might hit that path if e.g. our database transaction
3124 : // to write the completion landed in the database, but we dropped connection
3125 : // before seeing the result).
3126 : //
3127 : // We must update in-memory state to reflect the successful split.
3128 : self.tenant_shard_split_commit_inmem(
3129 : *tenant_id,
3130 : *new_shard_count,
3131 : *new_stripe_size,
3132 : );
3133 : return Ok(());
3134 : }
3135 : }
3136 :
3137 : // Clean up in-memory state, and accumulate the list of child locations that need detaching
3138 : let detach_locations: Vec<(Node, TenantShardId)> = {
3139 : let mut detach_locations = Vec::new();
3140 : let mut locked = self.inner.write().unwrap();
3141 : let (nodes, tenants, scheduler) = locked.parts_mut();
3142 :
3143 : for (tenant_shard_id, shard) in
3144 : tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
3145 : {
3146 : if shard.shard.count == op.new_shard_count {
3147 : // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
3148 : // is infallible, so if we got an error we shouldn't have got that far.
3149 : tracing::warn!(
3150 : "During split abort, child shard {tenant_shard_id} found in-memory"
3151 : );
3152 : continue;
3153 : }
3154 :
3155 : // Add the children of this shard to this list of things to detach
3156 : if let Some(node_id) = shard.intent.get_attached() {
3157 : for child_id in tenant_shard_id.split(*new_shard_count) {
3158 : detach_locations.push((
3159 : nodes
3160 : .get(node_id)
3161 : .expect("Intent references nonexistent node")
3162 : .clone(),
3163 : child_id,
3164 : ));
3165 : }
3166 : } else {
3167 : tracing::warn!(
3168 : "During split abort, shard {tenant_shard_id} has no attached location"
3169 : );
3170 : }
3171 :
3172 : tracing::info!("Restoring parent shard {tenant_shard_id}");
3173 : shard.splitting = SplitState::Idle;
3174 : if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
3175 : // If this shard can't be scheduled now (perhaps due to offline nodes or
3176 : // capacity issues), that must not prevent us rolling back a split. In this
3177 : // case it should be eventually scheduled in the background.
3178 : tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
3179 : }
3180 :
3181 : self.maybe_reconcile_shard(shard, nodes);
3182 : }
3183 :
3184 : // We don't expect any new_shard_count shards to exist here, but drop them just in case
3185 0 : tenants.retain(|_id, s| s.shard.count != *new_shard_count);
3186 :
3187 : detach_locations
3188 : };
3189 :
3190 : for (node, child_id) in detach_locations {
3191 : if !node.is_available() {
3192 : // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
3193 : // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
3194 : // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
3195 : // them from the node.
3196 : tracing::warn!("Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated.");
3197 : continue;
3198 : }
3199 :
3200 : // Detach the remote child. If the pageserver split API call is still in progress, this call will get
3201 : // a 503 and retry, up to our limit.
3202 : tracing::info!("Detaching {child_id} on {node}...");
3203 : match node
3204 : .with_client_retries(
3205 0 : |client| async move {
3206 0 : let config = LocationConfig {
3207 0 : mode: LocationConfigMode::Detached,
3208 0 : generation: None,
3209 0 : secondary_conf: None,
3210 0 : shard_number: child_id.shard_number.0,
3211 0 : shard_count: child_id.shard_count.literal(),
3212 0 : // Stripe size and tenant config don't matter when detaching
3213 0 : shard_stripe_size: 0,
3214 0 : tenant_conf: TenantConfig::default(),
3215 0 : };
3216 0 :
3217 0 : client.location_config(child_id, config, None, false).await
3218 0 : },
3219 : &self.config.jwt_token,
3220 : 1,
3221 : 10,
3222 : Duration::from_secs(5),
3223 : &self.cancel,
3224 : )
3225 : .await
3226 : {
3227 : Some(Ok(_)) => {}
3228 : Some(Err(e)) => {
3229 : // We failed to communicate with the remote node. This is problematic: we may be
3230 : // leaving it with a rogue child shard.
3231 : tracing::warn!(
3232 : "Failed to detach child {child_id} from node {node} during abort"
3233 : );
3234 : return Err(e.into());
3235 : }
3236 : None => {
3237 : // Cancellation: we were shutdown or the node went offline. Shutdown is fine, we'll
3238 : // clean up on restart. The node going offline requires a retry.
3239 : return Err(TenantShardSplitAbortError::Unavailable);
3240 : }
3241 : };
3242 : }
3243 :
3244 : tracing::info!("Successfully aborted split");
3245 : Ok(())
3246 : }
3247 :
3248 : /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
3249 : /// of the tenant map to reflect the child shards that exist after the split.
3250 0 : fn tenant_shard_split_commit_inmem(
3251 0 : &self,
3252 0 : tenant_id: TenantId,
3253 0 : new_shard_count: ShardCount,
3254 0 : new_stripe_size: Option<ShardStripeSize>,
3255 0 : ) -> (
3256 0 : TenantShardSplitResponse,
3257 0 : Vec<(TenantShardId, NodeId, ShardStripeSize)>,
3258 0 : Vec<ReconcilerWaiter>,
3259 0 : ) {
3260 0 : let mut response = TenantShardSplitResponse {
3261 0 : new_shards: Vec::new(),
3262 0 : };
3263 0 : let mut child_locations = Vec::new();
3264 0 : let mut waiters = Vec::new();
3265 0 :
3266 0 : {
3267 0 : let mut locked = self.inner.write().unwrap();
3268 0 :
3269 0 : let parent_ids = locked
3270 0 : .tenants
3271 0 : .range(TenantShardId::tenant_range(tenant_id))
3272 0 : .map(|(shard_id, _)| *shard_id)
3273 0 : .collect::<Vec<_>>();
3274 0 :
3275 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3276 0 : for parent_id in parent_ids {
3277 0 : let child_ids = parent_id.split(new_shard_count);
3278 :
3279 0 : let (pageserver, generation, policy, parent_ident, config) = {
3280 0 : let mut old_state = tenants
3281 0 : .remove(&parent_id)
3282 0 : .expect("It was present, we just split it");
3283 0 :
3284 0 : // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
3285 0 : // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
3286 0 : // nothing else can clear this.
3287 0 : assert!(matches!(old_state.splitting, SplitState::Splitting));
3288 :
3289 0 : let old_attached = old_state.intent.get_attached().unwrap();
3290 0 : old_state.intent.clear(scheduler);
3291 0 : let generation = old_state.generation.expect("Shard must have been attached");
3292 0 : (
3293 0 : old_attached,
3294 0 : generation,
3295 0 : old_state.policy,
3296 0 : old_state.shard,
3297 0 : old_state.config,
3298 0 : )
3299 0 : };
3300 0 :
3301 0 : let mut schedule_context = ScheduleContext::default();
3302 0 : for child in child_ids {
3303 0 : let mut child_shard = parent_ident;
3304 0 : child_shard.number = child.shard_number;
3305 0 : child_shard.count = child.shard_count;
3306 0 : if let Some(stripe_size) = new_stripe_size {
3307 0 : child_shard.stripe_size = stripe_size;
3308 0 : }
3309 :
3310 0 : let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
3311 0 : child_observed.insert(
3312 0 : pageserver,
3313 0 : ObservedStateLocation {
3314 0 : conf: Some(attached_location_conf(
3315 0 : generation,
3316 0 : &child_shard,
3317 0 : &config,
3318 0 : matches!(policy, PlacementPolicy::Attached(n) if n > 0),
3319 : )),
3320 : },
3321 : );
3322 :
3323 0 : let mut child_state = TenantShard::new(child, child_shard, policy.clone());
3324 0 : child_state.intent = IntentState::single(scheduler, Some(pageserver));
3325 0 : child_state.observed = ObservedState {
3326 0 : locations: child_observed,
3327 0 : };
3328 0 : child_state.generation = Some(generation);
3329 0 : child_state.config = config.clone();
3330 0 :
3331 0 : // The child's TenantShard::splitting is intentionally left at the default value of Idle,
3332 0 : // as at this point in the split process we have succeeded and this part is infallible:
3333 0 : // we will never need to do any special recovery from this state.
3334 0 :
3335 0 : child_locations.push((child, pageserver, child_shard.stripe_size));
3336 :
3337 0 : if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
3338 : // This is not fatal, because we've implicitly already got an attached
3339 : // location for the child shard. Failure here just means we couldn't
3340 : // find a secondary (e.g. because cluster is overloaded).
3341 0 : tracing::warn!("Failed to schedule child shard {child}: {e}");
3342 0 : }
3343 : // In the background, attach secondary locations for the new shards
3344 0 : if let Some(waiter) = self.maybe_reconcile_shard(&mut child_state, nodes) {
3345 0 : waiters.push(waiter);
3346 0 : }
3347 :
3348 0 : tenants.insert(child, child_state);
3349 0 : response.new_shards.push(child);
3350 : }
3351 : }
3352 0 : (response, child_locations, waiters)
3353 0 : }
3354 0 : }
3355 :
3356 0 : async fn tenant_shard_split_start_secondaries(
3357 0 : &self,
3358 0 : tenant_id: TenantId,
3359 0 : waiters: Vec<ReconcilerWaiter>,
3360 0 : ) {
3361 : // Wait for initial reconcile of child shards, this creates the secondary locations
3362 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
3363 : // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
3364 : // their secondaries couldn't be attached.
3365 0 : tracing::warn!("Failed to reconcile after split: {e}");
3366 0 : return;
3367 0 : }
3368 :
3369 : // Take the state lock to discover the attached & secondary intents for all shards
3370 0 : let (attached, secondary) = {
3371 0 : let locked = self.inner.read().unwrap();
3372 0 : let mut attached = Vec::new();
3373 0 : let mut secondary = Vec::new();
3374 :
3375 0 : for (tenant_shard_id, shard) in
3376 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3377 : {
3378 0 : let Some(node_id) = shard.intent.get_attached() else {
3379 : // Unexpected. Race with a PlacementPolicy change?
3380 0 : tracing::warn!(
3381 0 : "No attached node on {tenant_shard_id} immediately after shard split!"
3382 : );
3383 0 : continue;
3384 : };
3385 :
3386 0 : let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
3387 : // No secondary location. Nothing for us to do.
3388 0 : continue;
3389 : };
3390 :
3391 0 : let attached_node = locked
3392 0 : .nodes
3393 0 : .get(node_id)
3394 0 : .expect("Pageservers may not be deleted while referenced");
3395 0 :
3396 0 : let secondary_node = locked
3397 0 : .nodes
3398 0 : .get(secondary_node_id)
3399 0 : .expect("Pageservers may not be deleted while referenced");
3400 0 :
3401 0 : attached.push((*tenant_shard_id, attached_node.clone()));
3402 0 : secondary.push((*tenant_shard_id, secondary_node.clone()));
3403 : }
3404 0 : (attached, secondary)
3405 0 : };
3406 0 :
3407 0 : if secondary.is_empty() {
3408 : // No secondary locations; nothing for us to do
3409 0 : return;
3410 0 : }
3411 :
3412 0 : for result in self
3413 0 : .tenant_for_shards_api(
3414 0 : attached,
3415 0 : |tenant_shard_id, client| async move {
3416 0 : client.tenant_heatmap_upload(tenant_shard_id).await
3417 0 : },
3418 0 : 1,
3419 0 : 1,
3420 0 : SHORT_RECONCILE_TIMEOUT,
3421 0 : &self.cancel,
3422 0 : )
3423 0 : .await
3424 : {
3425 0 : if let Err(e) = result {
3426 0 : tracing::warn!("Error calling heatmap upload after shard split: {e}");
3427 0 : return;
3428 0 : }
3429 : }
3430 :
3431 0 : for result in self
3432 0 : .tenant_for_shards_api(
3433 0 : secondary,
3434 0 : |tenant_shard_id, client| async move {
3435 0 : client
3436 0 : .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
3437 0 : .await
3438 0 : },
3439 0 : 1,
3440 0 : 1,
3441 0 : SHORT_RECONCILE_TIMEOUT,
3442 0 : &self.cancel,
3443 0 : )
3444 0 : .await
3445 : {
3446 0 : if let Err(e) = result {
3447 0 : tracing::warn!("Error calling secondary download after shard split: {e}");
3448 0 : return;
3449 0 : }
3450 : }
3451 0 : }
3452 :
3453 0 : pub(crate) async fn tenant_shard_split(
3454 0 : &self,
3455 0 : tenant_id: TenantId,
3456 0 : split_req: TenantShardSplitRequest,
3457 0 : ) -> Result<TenantShardSplitResponse, ApiError> {
3458 : // TODO: return 503 if we get stuck waiting for this lock
3459 : // (issue https://github.com/neondatabase/neon/issues/7108)
3460 0 : let _tenant_lock = trace_exclusive_lock(
3461 0 : &self.tenant_op_locks,
3462 0 : tenant_id,
3463 0 : TenantOperations::ShardSplit,
3464 0 : )
3465 0 : .await;
3466 :
3467 0 : let new_shard_count = ShardCount::new(split_req.new_shard_count);
3468 0 : let new_stripe_size = split_req.new_stripe_size;
3469 :
3470 : // Validate the request and construct parameters. This phase is fallible, but does not require
3471 : // rollback on errors, as it does no I/O and mutates no state.
3472 0 : let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
3473 0 : ShardSplitAction::NoOp(resp) => return Ok(resp),
3474 0 : ShardSplitAction::Split(params) => params,
3475 : };
3476 :
3477 : // Execute this split: this phase mutates state and does remote I/O on pageservers. If it fails,
3478 : // we must roll back.
3479 0 : let r = self
3480 0 : .do_tenant_shard_split(tenant_id, shard_split_params)
3481 0 : .await;
3482 :
3483 0 : let (response, waiters) = match r {
3484 0 : Ok(r) => r,
3485 0 : Err(e) => {
3486 0 : // Split might be part-done, we must do work to abort it.
3487 0 : tracing::warn!("Enqueuing background abort of split on {tenant_id}");
3488 0 : self.abort_tx
3489 0 : .send(TenantShardSplitAbort {
3490 0 : tenant_id,
3491 0 : new_shard_count,
3492 0 : new_stripe_size,
3493 0 : _tenant_lock,
3494 0 : })
3495 0 : // Ignore error sending: that just means we're shutting down: aborts are ephemeral so it's fine to drop it.
3496 0 : .ok();
3497 0 : return Err(e);
3498 : }
3499 : };
3500 :
3501 : // The split is now complete. As an optimization, we will trigger all the child shards to upload
3502 : // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
3503 : // for the background heatmap/download interval before secondaries get warm enough to migrate shards
3504 : // in [`Self::optimize_all`]
3505 0 : self.tenant_shard_split_start_secondaries(tenant_id, waiters)
3506 0 : .await;
3507 0 : Ok(response)
3508 0 : }
3509 :
3510 0 : fn prepare_tenant_shard_split(
3511 0 : &self,
3512 0 : tenant_id: TenantId,
3513 0 : split_req: TenantShardSplitRequest,
3514 0 : ) -> Result<ShardSplitAction, ApiError> {
3515 0 : fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
3516 0 : anyhow::anyhow!("failpoint")
3517 0 : )));
3518 :
3519 0 : let mut policy = None;
3520 0 : let mut config = None;
3521 0 : let mut shard_ident = None;
3522 : // Validate input, and calculate which shards we will create
3523 0 : let (old_shard_count, targets) =
3524 : {
3525 0 : let locked = self.inner.read().unwrap();
3526 0 :
3527 0 : let pageservers = locked.nodes.clone();
3528 0 :
3529 0 : let mut targets = Vec::new();
3530 0 :
3531 0 : // In case this is a retry, count how many already-split shards we found
3532 0 : let mut children_found = Vec::new();
3533 0 : let mut old_shard_count = None;
3534 :
3535 0 : for (tenant_shard_id, shard) in
3536 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3537 : {
3538 0 : match shard.shard.count.count().cmp(&split_req.new_shard_count) {
3539 : Ordering::Equal => {
3540 : // Already split this
3541 0 : children_found.push(*tenant_shard_id);
3542 0 : continue;
3543 : }
3544 : Ordering::Greater => {
3545 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3546 0 : "Requested count {} but already have shards at count {}",
3547 0 : split_req.new_shard_count,
3548 0 : shard.shard.count.count()
3549 0 : )));
3550 : }
3551 0 : Ordering::Less => {
3552 0 : // Fall through: this shard has lower count than requested,
3553 0 : // is a candidate for splitting.
3554 0 : }
3555 0 : }
3556 0 :
3557 0 : match old_shard_count {
3558 0 : None => old_shard_count = Some(shard.shard.count),
3559 0 : Some(old_shard_count) => {
3560 0 : if old_shard_count != shard.shard.count {
3561 : // We may hit this case if a caller asked for two splits to
3562 : // different sizes, before the first one is complete.
3563 : // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
3564 : // of shard_count=1 and shard_count=2 shards in the map.
3565 0 : return Err(ApiError::Conflict(
3566 0 : "Cannot split, currently mid-split".to_string(),
3567 0 : ));
3568 0 : }
3569 : }
3570 : }
3571 0 : if policy.is_none() {
3572 0 : policy = Some(shard.policy.clone());
3573 0 : }
3574 0 : if shard_ident.is_none() {
3575 0 : shard_ident = Some(shard.shard);
3576 0 : }
3577 0 : if config.is_none() {
3578 0 : config = Some(shard.config.clone());
3579 0 : }
3580 :
3581 0 : if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
3582 0 : tracing::info!(
3583 0 : "Tenant shard {} already has shard count {}",
3584 : tenant_shard_id,
3585 : split_req.new_shard_count
3586 : );
3587 0 : continue;
3588 0 : }
3589 :
3590 0 : let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
3591 0 : anyhow::anyhow!("Cannot split a tenant that is not attached"),
3592 0 : ))?;
3593 :
3594 0 : let node = pageservers
3595 0 : .get(&node_id)
3596 0 : .expect("Pageservers may not be deleted while referenced");
3597 0 :
3598 0 : targets.push(ShardSplitTarget {
3599 0 : parent_id: *tenant_shard_id,
3600 0 : node: node.clone(),
3601 0 : child_ids: tenant_shard_id
3602 0 : .split(ShardCount::new(split_req.new_shard_count)),
3603 0 : });
3604 : }
3605 :
3606 0 : if targets.is_empty() {
3607 0 : if children_found.len() == split_req.new_shard_count as usize {
3608 0 : return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
3609 0 : new_shards: children_found,
3610 0 : }));
3611 : } else {
3612 : // No shards found to split, and no existing children found: the
3613 : // tenant doesn't exist at all.
3614 0 : return Err(ApiError::NotFound(
3615 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
3616 0 : ));
3617 : }
3618 0 : }
3619 0 :
3620 0 : (old_shard_count, targets)
3621 0 : };
3622 0 :
3623 0 : // unwrap safety: we would have returned above if we didn't find at least one shard to split
3624 0 : let old_shard_count = old_shard_count.unwrap();
3625 0 : let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
3626 : // This ShardIdentity will be used as the template for all children, so this implicitly
3627 : // applies the new stripe size to the children.
3628 0 : let mut shard_ident = shard_ident.unwrap();
3629 0 : if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
3630 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards", shard_ident.stripe_size)));
3631 0 : }
3632 0 :
3633 0 : shard_ident.stripe_size = new_stripe_size;
3634 0 : tracing::info!("applied stripe size {}", shard_ident.stripe_size.0);
3635 0 : shard_ident
3636 : } else {
3637 0 : shard_ident.unwrap()
3638 : };
3639 0 : let policy = policy.unwrap();
3640 0 : let config = config.unwrap();
3641 0 :
3642 0 : Ok(ShardSplitAction::Split(ShardSplitParams {
3643 0 : old_shard_count,
3644 0 : new_shard_count: ShardCount::new(split_req.new_shard_count),
3645 0 : new_stripe_size: split_req.new_stripe_size,
3646 0 : targets,
3647 0 : policy,
3648 0 : config,
3649 0 : shard_ident,
3650 0 : }))
3651 0 : }
3652 :
3653 0 : async fn do_tenant_shard_split(
3654 0 : &self,
3655 0 : tenant_id: TenantId,
3656 0 : params: ShardSplitParams,
3657 0 : ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
3658 0 : // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
3659 0 : // request could occur here, deleting or mutating the tenant. begin_shard_split checks that the
3660 0 : // parent shards exist as expected, but it would be neater to do the above pre-checks within the
3661 0 : // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
3662 0 : // (https://github.com/neondatabase/neon/issues/6676)
3663 0 :
3664 0 : let ShardSplitParams {
3665 0 : old_shard_count,
3666 0 : new_shard_count,
3667 0 : new_stripe_size,
3668 0 : mut targets,
3669 0 : policy,
3670 0 : config,
3671 0 : shard_ident,
3672 0 : } = params;
3673 :
3674 : // Drop any secondary locations: pageservers do not support splitting these, and in any case the
3675 : // end-state for a split tenant will usually be to have secondary locations on different nodes.
3676 : // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
3677 : // at the time of split.
3678 0 : let waiters = {
3679 0 : let mut locked = self.inner.write().unwrap();
3680 0 : let mut waiters = Vec::new();
3681 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3682 0 : for target in &mut targets {
3683 0 : let Some(shard) = tenants.get_mut(&target.parent_id) else {
3684 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
3685 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3686 0 : "Shard {} not found",
3687 0 : target.parent_id
3688 0 : )));
3689 : };
3690 :
3691 0 : if shard.intent.get_attached() != &Some(target.node.get_id()) {
3692 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
3693 0 : return Err(ApiError::Conflict(format!(
3694 0 : "Shard {} unexpectedly rescheduled during split",
3695 0 : target.parent_id
3696 0 : )));
3697 0 : }
3698 0 :
3699 0 : // Irrespective of PlacementPolicy, clear secondary locations from intent
3700 0 : shard.intent.clear_secondary(scheduler);
3701 :
3702 : // Run Reconciler to execute detach fo secondary locations.
3703 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
3704 0 : waiters.push(waiter);
3705 0 : }
3706 : }
3707 0 : waiters
3708 0 : };
3709 0 : self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
3710 :
3711 : // Before creating any new child shards in memory or on the pageservers, persist them: this
3712 : // enables us to ensure that we will always be able to clean up if something goes wrong. This also
3713 : // acts as the protection against two concurrent attempts to split: one of them will get a database
3714 : // error trying to insert the child shards.
3715 0 : let mut child_tsps = Vec::new();
3716 0 : for target in &targets {
3717 0 : let mut this_child_tsps = Vec::new();
3718 0 : for child in &target.child_ids {
3719 0 : let mut child_shard = shard_ident;
3720 0 : child_shard.number = child.shard_number;
3721 0 : child_shard.count = child.shard_count;
3722 0 :
3723 0 : tracing::info!(
3724 0 : "Create child shard persistence with stripe size {}",
3725 : shard_ident.stripe_size.0
3726 : );
3727 :
3728 0 : this_child_tsps.push(TenantShardPersistence {
3729 0 : tenant_id: child.tenant_id.to_string(),
3730 0 : shard_number: child.shard_number.0 as i32,
3731 0 : shard_count: child.shard_count.literal() as i32,
3732 0 : shard_stripe_size: shard_ident.stripe_size.0 as i32,
3733 0 : // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
3734 0 : // populate the correct generation as part of its transaction, to protect us
3735 0 : // against racing with changes in the state of the parent.
3736 0 : generation: None,
3737 0 : generation_pageserver: Some(target.node.get_id().0 as i64),
3738 0 : placement_policy: serde_json::to_string(&policy).unwrap(),
3739 0 : config: serde_json::to_string(&config).unwrap(),
3740 0 : splitting: SplitState::Splitting,
3741 0 :
3742 0 : // Scheduling policies do not carry through to children
3743 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
3744 0 : .unwrap(),
3745 0 : });
3746 : }
3747 :
3748 0 : child_tsps.push((target.parent_id, this_child_tsps));
3749 : }
3750 :
3751 0 : if let Err(e) = self
3752 0 : .persistence
3753 0 : .begin_shard_split(old_shard_count, tenant_id, child_tsps)
3754 0 : .await
3755 : {
3756 0 : match e {
3757 : DatabaseError::Query(diesel::result::Error::DatabaseError(
3758 : DatabaseErrorKind::UniqueViolation,
3759 : _,
3760 : )) => {
3761 : // Inserting a child shard violated a unique constraint: we raced with another call to
3762 : // this function
3763 0 : tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
3764 0 : return Err(ApiError::Conflict("Tenant is already splitting".into()));
3765 : }
3766 0 : _ => return Err(ApiError::InternalServerError(e.into())),
3767 : }
3768 0 : }
3769 0 : fail::fail_point!("shard-split-post-begin", |_| Err(
3770 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
3771 0 : ));
3772 :
3773 : // Now that I have persisted the splitting state, apply it in-memory. This is infallible, so
3774 : // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
3775 : // is not set in memory, then it was not persisted.
3776 : {
3777 0 : let mut locked = self.inner.write().unwrap();
3778 0 : for target in &targets {
3779 0 : if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
3780 0 : parent_shard.splitting = SplitState::Splitting;
3781 0 : // Put the observed state to None, to reflect that it is indeterminate once we start the
3782 0 : // split operation.
3783 0 : parent_shard
3784 0 : .observed
3785 0 : .locations
3786 0 : .insert(target.node.get_id(), ObservedStateLocation { conf: None });
3787 0 : }
3788 : }
3789 : }
3790 :
3791 : // TODO: issue split calls concurrently (this only matters once we're splitting
3792 : // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
3793 :
3794 0 : for target in &targets {
3795 : let ShardSplitTarget {
3796 0 : parent_id,
3797 0 : node,
3798 0 : child_ids,
3799 0 : } = target;
3800 0 : let client = PageserverClient::new(
3801 0 : node.get_id(),
3802 0 : node.base_url(),
3803 0 : self.config.jwt_token.as_deref(),
3804 0 : );
3805 0 : let response = client
3806 0 : .tenant_shard_split(
3807 0 : *parent_id,
3808 0 : TenantShardSplitRequest {
3809 0 : new_shard_count: new_shard_count.literal(),
3810 0 : new_stripe_size,
3811 0 : },
3812 0 : )
3813 0 : .await
3814 0 : .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
3815 :
3816 0 : fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
3817 0 : "failpoint".to_string()
3818 0 : )));
3819 :
3820 0 : tracing::info!(
3821 0 : "Split {} into {}",
3822 0 : parent_id,
3823 0 : response
3824 0 : .new_shards
3825 0 : .iter()
3826 0 : .map(|s| format!("{:?}", s))
3827 0 : .collect::<Vec<_>>()
3828 0 : .join(",")
3829 : );
3830 :
3831 0 : if &response.new_shards != child_ids {
3832 : // This should never happen: the pageserver should agree with us on how shard splits work.
3833 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3834 0 : "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
3835 0 : parent_id,
3836 0 : response.new_shards,
3837 0 : child_ids
3838 0 : )));
3839 0 : }
3840 : }
3841 :
3842 : // TODO: if the pageserver restarted concurrently with our split API call,
3843 : // the actual generation of the child shard might differ from the generation
3844 : // we expect it to have. In order for our in-database generation to end up
3845 : // correct, we should carry the child generation back in the response and apply it here
3846 : // in complete_shard_split (and apply the correct generation in memory)
3847 : // (or, we can carry generation in the request and reject the request if
3848 : // it doesn't match, but that requires more retry logic on this side)
3849 :
3850 0 : self.persistence
3851 0 : .complete_shard_split(tenant_id, old_shard_count)
3852 0 : .await?;
3853 :
3854 0 : fail::fail_point!("shard-split-post-complete", |_| Err(
3855 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
3856 0 : ));
3857 :
3858 : // Replace all the shards we just split with their children: this phase is infallible.
3859 0 : let (response, child_locations, waiters) =
3860 0 : self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
3861 0 :
3862 0 : // Send compute notifications for all the new shards
3863 0 : let mut failed_notifications = Vec::new();
3864 0 : for (child_id, child_ps, stripe_size) in child_locations {
3865 0 : if let Err(e) = self
3866 0 : .compute_hook
3867 0 : .notify(child_id, child_ps, stripe_size, &self.cancel)
3868 0 : .await
3869 : {
3870 0 : tracing::warn!("Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
3871 : child_id, child_ps);
3872 0 : failed_notifications.push(child_id);
3873 0 : }
3874 : }
3875 :
3876 : // If we failed any compute notifications, make a note to retry later.
3877 0 : if !failed_notifications.is_empty() {
3878 0 : let mut locked = self.inner.write().unwrap();
3879 0 : for failed in failed_notifications {
3880 0 : if let Some(shard) = locked.tenants.get_mut(&failed) {
3881 0 : shard.pending_compute_notification = true;
3882 0 : }
3883 : }
3884 0 : }
3885 :
3886 0 : Ok((response, waiters))
3887 0 : }
3888 :
3889 0 : pub(crate) async fn tenant_shard_migrate(
3890 0 : &self,
3891 0 : tenant_shard_id: TenantShardId,
3892 0 : migrate_req: TenantShardMigrateRequest,
3893 0 : ) -> Result<TenantShardMigrateResponse, ApiError> {
3894 0 : let waiter = {
3895 0 : let mut locked = self.inner.write().unwrap();
3896 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3897 :
3898 0 : let Some(node) = nodes.get(&migrate_req.node_id) else {
3899 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3900 0 : "Node {} not found",
3901 0 : migrate_req.node_id
3902 0 : )));
3903 : };
3904 :
3905 0 : if !node.is_available() {
3906 : // Warn but proceed: the caller may intend to manually adjust the placement of
3907 : // a shard even if the node is down, e.g. if intervening during an incident.
3908 0 : tracing::warn!("Migrating to unavailable node {node}");
3909 0 : }
3910 :
3911 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
3912 0 : return Err(ApiError::NotFound(
3913 0 : anyhow::anyhow!("Tenant shard not found").into(),
3914 0 : ));
3915 : };
3916 :
3917 0 : if shard.intent.get_attached() == &Some(migrate_req.node_id) {
3918 : // No-op case: we will still proceed to wait for reconciliation in case it is
3919 : // incomplete from an earlier update to the intent.
3920 0 : tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
3921 : } else {
3922 0 : let old_attached = *shard.intent.get_attached();
3923 0 :
3924 0 : match shard.policy {
3925 0 : PlacementPolicy::Attached(n) => {
3926 0 : // If our new attached node was a secondary, it no longer should be.
3927 0 : shard.intent.remove_secondary(scheduler, migrate_req.node_id);
3928 :
3929 : // If we were already attached to something, demote that to a secondary
3930 0 : if let Some(old_attached) = old_attached {
3931 0 : if n > 0 {
3932 : // Remove other secondaries to make room for the location we'll demote
3933 0 : while shard.intent.get_secondary().len() >= n {
3934 0 : shard.intent.pop_secondary(scheduler);
3935 0 : }
3936 :
3937 0 : shard.intent.push_secondary(scheduler, old_attached);
3938 0 : }
3939 0 : }
3940 :
3941 0 : shard.intent.set_attached(scheduler, Some(migrate_req.node_id));
3942 : }
3943 0 : PlacementPolicy::Secondary => {
3944 0 : shard.intent.clear(scheduler);
3945 0 : shard.intent.push_secondary(scheduler, migrate_req.node_id);
3946 0 : }
3947 : PlacementPolicy::Detached => {
3948 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3949 0 : "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
3950 0 : )))
3951 : }
3952 : }
3953 :
3954 0 : tracing::info!("Migrating: new intent {:?}", shard.intent);
3955 0 : shard.sequence = shard.sequence.next();
3956 : }
3957 :
3958 0 : self.maybe_reconcile_shard(shard, nodes)
3959 : };
3960 :
3961 0 : if let Some(waiter) = waiter {
3962 0 : waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
3963 : } else {
3964 0 : tracing::info!("Migration is a no-op");
3965 : }
3966 :
3967 0 : Ok(TenantShardMigrateResponse {})
3968 0 : }
3969 :
3970 : /// This is for debug/support only: we simply drop all state for a tenant, without
3971 : /// detaching or deleting it on pageservers.
3972 0 : pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
3973 0 : self.persistence.delete_tenant(tenant_id).await?;
3974 :
3975 0 : let mut locked = self.inner.write().unwrap();
3976 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
3977 0 : let mut shards = Vec::new();
3978 0 : for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
3979 0 : shards.push(*tenant_shard_id);
3980 0 : }
3981 :
3982 0 : for shard_id in shards {
3983 0 : if let Some(mut shard) = tenants.remove(&shard_id) {
3984 0 : shard.intent.clear(scheduler);
3985 0 : }
3986 : }
3987 :
3988 0 : Ok(())
3989 0 : }
3990 :
3991 : /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
3992 : /// tenant with a very high generation number so that it will see the existing data.
3993 0 : pub(crate) async fn tenant_import(
3994 0 : &self,
3995 0 : tenant_id: TenantId,
3996 0 : ) -> Result<TenantCreateResponse, ApiError> {
3997 0 : // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
3998 0 : let maybe_node = {
3999 0 : self.inner
4000 0 : .read()
4001 0 : .unwrap()
4002 0 : .nodes
4003 0 : .values()
4004 0 : .find(|n| n.is_available())
4005 0 : .cloned()
4006 : };
4007 0 : let Some(node) = maybe_node else {
4008 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
4009 : };
4010 :
4011 0 : let client = PageserverClient::new(
4012 0 : node.get_id(),
4013 0 : node.base_url(),
4014 0 : self.config.jwt_token.as_deref(),
4015 0 : );
4016 :
4017 0 : let scan_result = client
4018 0 : .tenant_scan_remote_storage(tenant_id)
4019 0 : .await
4020 0 : .map_err(|e| passthrough_api_error(&node, e))?;
4021 :
4022 : // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
4023 0 : let Some(shard_count) = scan_result
4024 0 : .shards
4025 0 : .iter()
4026 0 : .map(|s| s.tenant_shard_id.shard_count)
4027 0 : .max()
4028 : else {
4029 0 : return Err(ApiError::NotFound(
4030 0 : anyhow::anyhow!("No shards found").into(),
4031 0 : ));
4032 : };
4033 :
4034 : // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
4035 : // to
4036 0 : let generation = scan_result
4037 0 : .shards
4038 0 : .iter()
4039 0 : .map(|s| s.generation)
4040 0 : .max()
4041 0 : .expect("We already validated >0 shards");
4042 0 :
4043 0 : // FIXME: we have no way to recover the shard stripe size from contents of remote storage: this will
4044 0 : // only work if they were using the default stripe size.
4045 0 : let stripe_size = ShardParameters::DEFAULT_STRIPE_SIZE;
4046 :
4047 0 : let (response, waiters) = self
4048 0 : .do_tenant_create(TenantCreateRequest {
4049 0 : new_tenant_id: TenantShardId::unsharded(tenant_id),
4050 0 : generation,
4051 0 :
4052 0 : shard_parameters: ShardParameters {
4053 0 : count: shard_count,
4054 0 : stripe_size,
4055 0 : },
4056 0 : placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
4057 0 :
4058 0 : // There is no way to know what the tenant's config was: revert to defaults
4059 0 : config: TenantConfig::default(),
4060 0 : })
4061 0 : .await?;
4062 :
4063 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
4064 : // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
4065 : // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
4066 : // reconcile, as reconciliation includes notifying compute.
4067 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
4068 0 : }
4069 :
4070 0 : Ok(response)
4071 0 : }
4072 :
4073 : /// For debug/support: a full JSON dump of TenantShards. Returns a response so that
4074 : /// we don't have to make TenantShard clonable in the return path.
4075 0 : pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
4076 0 : let serialized = {
4077 0 : let locked = self.inner.read().unwrap();
4078 0 : let result = locked.tenants.values().collect::<Vec<_>>();
4079 0 : serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
4080 : };
4081 :
4082 0 : hyper::Response::builder()
4083 0 : .status(hyper::StatusCode::OK)
4084 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
4085 0 : .body(hyper::Body::from(serialized))
4086 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
4087 0 : }
4088 :
4089 : /// Check the consistency of in-memory state vs. persistent state, and check that the
4090 : /// scheduler's statistics are up to date.
4091 : ///
4092 : /// These consistency checks expect an **idle** system. If changes are going on while
4093 : /// we run, then we can falsely indicate a consistency issue. This is sufficient for end-of-test
4094 : /// checks, but not suitable for running continuously in the background in the field.
4095 0 : pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
4096 0 : let (mut expect_nodes, mut expect_shards) = {
4097 0 : let locked = self.inner.read().unwrap();
4098 0 :
4099 0 : locked
4100 0 : .scheduler
4101 0 : .consistency_check(locked.nodes.values(), locked.tenants.values())
4102 0 : .context("Scheduler checks")
4103 0 : .map_err(ApiError::InternalServerError)?;
4104 :
4105 0 : let expect_nodes = locked
4106 0 : .nodes
4107 0 : .values()
4108 0 : .map(|n| n.to_persistent())
4109 0 : .collect::<Vec<_>>();
4110 0 :
4111 0 : let expect_shards = locked
4112 0 : .tenants
4113 0 : .values()
4114 0 : .map(|t| t.to_persistent())
4115 0 : .collect::<Vec<_>>();
4116 :
4117 : // This method can only validate the state of an idle system: if a reconcile is in
4118 : // progress, fail out early to avoid giving false errors on state that won't match
4119 : // between database and memory under a ReconcileResult is processed.
4120 0 : for t in locked.tenants.values() {
4121 0 : if t.reconciler.is_some() {
4122 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4123 0 : "Shard {} reconciliation in progress",
4124 0 : t.tenant_shard_id
4125 0 : )));
4126 0 : }
4127 : }
4128 :
4129 0 : (expect_nodes, expect_shards)
4130 : };
4131 :
4132 0 : let mut nodes = self.persistence.list_nodes().await?;
4133 0 : expect_nodes.sort_by_key(|n| n.node_id);
4134 0 : nodes.sort_by_key(|n| n.node_id);
4135 0 :
4136 0 : if nodes != expect_nodes {
4137 0 : tracing::error!("Consistency check failed on nodes.");
4138 0 : tracing::error!(
4139 0 : "Nodes in memory: {}",
4140 0 : serde_json::to_string(&expect_nodes)
4141 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4142 : );
4143 0 : tracing::error!(
4144 0 : "Nodes in database: {}",
4145 0 : serde_json::to_string(&nodes)
4146 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4147 : );
4148 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4149 0 : "Node consistency failure"
4150 0 : )));
4151 0 : }
4152 :
4153 0 : let mut shards = self.persistence.list_tenant_shards().await?;
4154 0 : shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4155 0 : expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4156 0 :
4157 0 : if shards != expect_shards {
4158 0 : tracing::error!("Consistency check failed on shards.");
4159 0 : tracing::error!(
4160 0 : "Shards in memory: {}",
4161 0 : serde_json::to_string(&expect_shards)
4162 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4163 : );
4164 0 : tracing::error!(
4165 0 : "Shards in database: {}",
4166 0 : serde_json::to_string(&shards)
4167 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4168 : );
4169 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4170 0 : "Shard consistency failure"
4171 0 : )));
4172 0 : }
4173 0 :
4174 0 : Ok(())
4175 0 : }
4176 :
4177 : /// For debug/support: a JSON dump of the [`Scheduler`]. Returns a response so that
4178 : /// we don't have to make TenantShard clonable in the return path.
4179 0 : pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
4180 0 : let serialized = {
4181 0 : let locked = self.inner.read().unwrap();
4182 0 : serde_json::to_string(&locked.scheduler)
4183 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4184 : };
4185 :
4186 0 : hyper::Response::builder()
4187 0 : .status(hyper::StatusCode::OK)
4188 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
4189 0 : .body(hyper::Body::from(serialized))
4190 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
4191 0 : }
4192 :
4193 : /// This is for debug/support only: we simply drop all state for a tenant, without
4194 : /// detaching or deleting it on pageservers. We do not try and re-schedule any
4195 : /// tenants that were on this node.
4196 : ///
4197 : /// TODO: proper node deletion API that unhooks things more gracefully
4198 0 : pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
4199 0 : self.persistence.delete_node(node_id).await?;
4200 :
4201 0 : let mut locked = self.inner.write().unwrap();
4202 :
4203 0 : for shard in locked.tenants.values_mut() {
4204 0 : shard.deref_node(node_id);
4205 0 : }
4206 :
4207 0 : let mut nodes = (*locked.nodes).clone();
4208 0 : nodes.remove(&node_id);
4209 0 : locked.nodes = Arc::new(nodes);
4210 0 :
4211 0 : locked.scheduler.node_remove(node_id);
4212 0 :
4213 0 : Ok(())
4214 0 : }
4215 :
4216 0 : pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
4217 0 : let nodes = {
4218 0 : self.inner
4219 0 : .read()
4220 0 : .unwrap()
4221 0 : .nodes
4222 0 : .values()
4223 0 : .cloned()
4224 0 : .collect::<Vec<_>>()
4225 0 : };
4226 0 :
4227 0 : Ok(nodes)
4228 0 : }
4229 :
4230 0 : pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
4231 0 : self.inner
4232 0 : .read()
4233 0 : .unwrap()
4234 0 : .nodes
4235 0 : .get(&node_id)
4236 0 : .cloned()
4237 0 : .ok_or(ApiError::NotFound(
4238 0 : format!("Node {node_id} not registered").into(),
4239 0 : ))
4240 0 : }
4241 :
4242 0 : pub(crate) async fn node_register(
4243 0 : &self,
4244 0 : register_req: NodeRegisterRequest,
4245 0 : ) -> Result<(), ApiError> {
4246 0 : let _node_lock = trace_exclusive_lock(
4247 0 : &self.node_op_locks,
4248 0 : register_req.node_id,
4249 0 : NodeOperations::Register,
4250 0 : )
4251 0 : .await;
4252 :
4253 : {
4254 0 : let locked = self.inner.read().unwrap();
4255 0 : if let Some(node) = locked.nodes.get(®ister_req.node_id) {
4256 : // Note that we do not do a total equality of the struct, because we don't require
4257 : // the availability/scheduling states to agree for a POST to be idempotent.
4258 0 : if node.registration_match(®ister_req) {
4259 0 : tracing::info!(
4260 0 : "Node {} re-registered with matching address",
4261 : register_req.node_id
4262 : );
4263 0 : return Ok(());
4264 : } else {
4265 : // TODO: decide if we want to allow modifying node addresses without removing and re-adding
4266 : // the node. Safest/simplest thing is to refuse it, and usually we deploy with
4267 : // a fixed address through the lifetime of a node.
4268 0 : tracing::warn!(
4269 0 : "Node {} tried to register with different address",
4270 : register_req.node_id
4271 : );
4272 0 : return Err(ApiError::Conflict(
4273 0 : "Node is already registered with different address".to_string(),
4274 0 : ));
4275 : }
4276 0 : }
4277 0 : }
4278 0 :
4279 0 : // We do not require that a node is actually online when registered (it will start life
4280 0 : // with it's availability set to Offline), but we _do_ require that its DNS record exists. We're
4281 0 : // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
4282 0 : // that register themselves with a broken DNS config. We check only the HTTP hostname, because
4283 0 : // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
4284 0 : if tokio::net::lookup_host(format!(
4285 0 : "{}:{}",
4286 0 : register_req.listen_http_addr, register_req.listen_http_port
4287 0 : ))
4288 0 : .await
4289 0 : .is_err()
4290 : {
4291 : // If we have a transient DNS issue, it's up to the caller to retry their registration. Because
4292 : // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
4293 : // we return a soft 503 error, to encourage callers to retry past transient issues.
4294 0 : return Err(ApiError::ResourceUnavailable(
4295 0 : format!(
4296 0 : "Node {} tried to register with unknown DNS name '{}'",
4297 0 : register_req.node_id, register_req.listen_http_addr
4298 0 : )
4299 0 : .into(),
4300 0 : ));
4301 0 : }
4302 0 :
4303 0 : // Ordering: we must persist the new node _before_ adding it to in-memory state.
4304 0 : // This ensures that before we use it for anything or expose it via any external
4305 0 : // API, it is guaranteed to be available after a restart.
4306 0 : let new_node = Node::new(
4307 0 : register_req.node_id,
4308 0 : register_req.listen_http_addr,
4309 0 : register_req.listen_http_port,
4310 0 : register_req.listen_pg_addr,
4311 0 : register_req.listen_pg_port,
4312 0 : );
4313 0 :
4314 0 : // TODO: idempotency if the node already exists in the database
4315 0 : self.persistence.insert_node(&new_node).await?;
4316 :
4317 0 : let mut locked = self.inner.write().unwrap();
4318 0 : let mut new_nodes = (*locked.nodes).clone();
4319 0 :
4320 0 : locked.scheduler.node_upsert(&new_node);
4321 0 : new_nodes.insert(register_req.node_id, new_node);
4322 0 :
4323 0 : locked.nodes = Arc::new(new_nodes);
4324 0 :
4325 0 : tracing::info!(
4326 0 : "Registered pageserver {}, now have {} pageservers",
4327 0 : register_req.node_id,
4328 0 : locked.nodes.len()
4329 : );
4330 0 : Ok(())
4331 0 : }
4332 :
4333 0 : pub(crate) async fn node_configure(
4334 0 : &self,
4335 0 : node_id: NodeId,
4336 0 : availability: Option<NodeAvailability>,
4337 0 : scheduling: Option<NodeSchedulingPolicy>,
4338 0 : ) -> Result<(), ApiError> {
4339 0 : let _node_lock =
4340 0 : trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
4341 :
4342 0 : if let Some(scheduling) = scheduling {
4343 : // Scheduling is a persistent part of Node: we must write updates to the database before
4344 : // applying them in memory
4345 0 : self.persistence.update_node(node_id, scheduling).await?;
4346 0 : }
4347 :
4348 : // If we're activating a node, then before setting it active we must reconcile any shard locations
4349 : // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
4350 : // by calling [`Self::node_activate_reconcile`]
4351 : //
4352 : // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
4353 : // nothing else can mutate its availability while we run.
4354 0 : let availability_transition = if let Some(input_availability) = availability {
4355 0 : let (activate_node, availability_transition) = {
4356 0 : let locked = self.inner.read().unwrap();
4357 0 : let Some(node) = locked.nodes.get(&node_id) else {
4358 0 : return Err(ApiError::NotFound(
4359 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4360 0 : ));
4361 : };
4362 :
4363 0 : (
4364 0 : node.clone(),
4365 0 : node.get_availability_transition(input_availability),
4366 0 : )
4367 : };
4368 :
4369 0 : if matches!(availability_transition, AvailabilityTransition::ToActive) {
4370 0 : self.node_activate_reconcile(activate_node, &_node_lock)
4371 0 : .await?;
4372 0 : }
4373 0 : availability_transition
4374 : } else {
4375 0 : AvailabilityTransition::Unchanged
4376 : };
4377 :
4378 : // Apply changes from the request to our in-memory state for the Node
4379 0 : let mut locked = self.inner.write().unwrap();
4380 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4381 0 :
4382 0 : let mut new_nodes = (**nodes).clone();
4383 :
4384 0 : let Some(node) = new_nodes.get_mut(&node_id) else {
4385 0 : return Err(ApiError::NotFound(
4386 0 : anyhow::anyhow!("Node not registered").into(),
4387 0 : ));
4388 : };
4389 :
4390 0 : if let Some(availability) = &availability {
4391 0 : node.set_availability(*availability);
4392 0 : }
4393 :
4394 0 : if let Some(scheduling) = scheduling {
4395 0 : node.set_scheduling(scheduling);
4396 0 : }
4397 :
4398 : // Update the scheduler, in case the elegibility of the node for new shards has changed
4399 0 : scheduler.node_upsert(node);
4400 0 :
4401 0 : let new_nodes = Arc::new(new_nodes);
4402 0 :
4403 0 : // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
4404 0 : match availability_transition {
4405 : AvailabilityTransition::ToOffline => {
4406 0 : tracing::info!("Node {} transition to offline", node_id);
4407 0 : let mut tenants_affected: usize = 0;
4408 :
4409 0 : for (tenant_shard_id, tenant_shard) in tenants {
4410 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
4411 0 : // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
4412 0 : // not assume our knowledge of the node's configuration is accurate until it comes back online
4413 0 : observed_loc.conf = None;
4414 0 : }
4415 :
4416 0 : if new_nodes.len() == 1 {
4417 : // Special case for single-node cluster: there is no point trying to reschedule
4418 : // any tenant shards: avoid doing so, in order to avoid spewing warnings about
4419 : // failures to schedule them.
4420 0 : continue;
4421 0 : }
4422 0 :
4423 0 : if !new_nodes
4424 0 : .values()
4425 0 : .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
4426 : {
4427 : // Special case for when all nodes are unavailable and/or unschedulable: there is no point
4428 : // trying to reschedule since there's nowhere else to go. Without this
4429 : // branch we incorrectly detach tenants in response to node unavailability.
4430 0 : continue;
4431 0 : }
4432 0 :
4433 0 : if tenant_shard.intent.demote_attached(scheduler, node_id) {
4434 0 : tenant_shard.sequence = tenant_shard.sequence.next();
4435 0 :
4436 0 : // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters
4437 0 : // for tenants without secondary locations: if they have a secondary location, then this
4438 0 : // schedule() call is just promoting an existing secondary)
4439 0 : let mut schedule_context = ScheduleContext::default();
4440 0 :
4441 0 : match tenant_shard.schedule(scheduler, &mut schedule_context) {
4442 0 : Err(e) => {
4443 0 : // It is possible that some tenants will become unschedulable when too many pageservers
4444 0 : // go offline: in this case there isn't much we can do other than make the issue observable.
4445 0 : // TODO: give TenantShard a scheduling error attribute to be queried later.
4446 0 : tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
4447 : }
4448 : Ok(()) => {
4449 0 : if self
4450 0 : .maybe_reconcile_shard(tenant_shard, &new_nodes)
4451 0 : .is_some()
4452 0 : {
4453 0 : tenants_affected += 1;
4454 0 : };
4455 : }
4456 : }
4457 0 : }
4458 : }
4459 0 : tracing::info!(
4460 0 : "Launched {} reconciler tasks for tenants affected by node {} going offline",
4461 : tenants_affected,
4462 : node_id
4463 : )
4464 : }
4465 : AvailabilityTransition::ToActive => {
4466 0 : tracing::info!("Node {} transition to active", node_id);
4467 : // When a node comes back online, we must reconcile any tenant that has a None observed
4468 : // location on the node.
4469 0 : for tenant_shard in locked.tenants.values_mut() {
4470 : // If a reconciliation is already in progress, rely on the previous scheduling
4471 : // decision and skip triggering a new reconciliation.
4472 0 : if tenant_shard.reconciler.is_some() {
4473 0 : continue;
4474 0 : }
4475 :
4476 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
4477 0 : if observed_loc.conf.is_none() {
4478 0 : self.maybe_reconcile_shard(tenant_shard, &new_nodes);
4479 0 : }
4480 0 : }
4481 : }
4482 :
4483 : // TODO: in the background, we should balance work back onto this pageserver
4484 : }
4485 : AvailabilityTransition::Unchanged => {
4486 0 : tracing::debug!("Node {} no availability change during config", node_id);
4487 : }
4488 : }
4489 :
4490 0 : locked.nodes = new_nodes;
4491 0 :
4492 0 : Ok(())
4493 0 : }
4494 :
4495 0 : pub(crate) async fn start_node_drain(
4496 0 : self: &Arc<Self>,
4497 0 : node_id: NodeId,
4498 0 : ) -> Result<(), ApiError> {
4499 0 : let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
4500 0 : let locked = self.inner.read().unwrap();
4501 0 : let nodes = &locked.nodes;
4502 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
4503 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4504 0 : ))?;
4505 0 : let schedulable_nodes_count = nodes
4506 0 : .iter()
4507 0 : .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
4508 0 : .count();
4509 0 :
4510 0 : (
4511 0 : locked
4512 0 : .ongoing_operation
4513 0 : .as_ref()
4514 0 : .map(|ongoing| ongoing.operation),
4515 0 : node.is_available(),
4516 0 : node.get_scheduling(),
4517 0 : schedulable_nodes_count,
4518 0 : )
4519 : };
4520 :
4521 0 : if let Some(ongoing) = ongoing_op {
4522 0 : return Err(ApiError::PreconditionFailed(
4523 0 : format!("Background operation already ongoing for node: {}", ongoing).into(),
4524 0 : ));
4525 0 : }
4526 0 :
4527 0 : if !node_available {
4528 0 : return Err(ApiError::ResourceUnavailable(
4529 0 : format!("Node {node_id} is currently unavailable").into(),
4530 0 : ));
4531 0 : }
4532 0 :
4533 0 : if schedulable_nodes_count == 0 {
4534 0 : return Err(ApiError::PreconditionFailed(
4535 0 : "No other schedulable nodes to drain to".into(),
4536 0 : ));
4537 0 : }
4538 0 :
4539 0 : match node_policy {
4540 : NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => {
4541 0 : self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
4542 0 : .await?;
4543 :
4544 0 : let cancel = self.cancel.child_token();
4545 0 : let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
4546 :
4547 0 : self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
4548 0 : operation: Operation::Drain(Drain { node_id }),
4549 0 : cancel: cancel.clone(),
4550 0 : });
4551 0 :
4552 0 : tokio::task::spawn({
4553 0 : let service = self.clone();
4554 0 : let cancel = cancel.clone();
4555 0 : async move {
4556 0 : let _gate_guard = gate_guard;
4557 :
4558 : scopeguard::defer! {
4559 : let prev = service.inner.write().unwrap().ongoing_operation.take();
4560 :
4561 0 : if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
4562 : assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
4563 : } else {
4564 : panic!("We always remove the same operation")
4565 : }
4566 : }
4567 :
4568 0 : tracing::info!(%node_id, "Drain background operation starting");
4569 0 : let res = service.drain_node(node_id, cancel).await;
4570 0 : match res {
4571 : Ok(()) => {
4572 0 : tracing::info!(%node_id, "Drain background operation completed successfully");
4573 : }
4574 : Err(OperationError::Cancelled) => {
4575 0 : tracing::info!(%node_id, "Drain background operation was cancelled");
4576 : }
4577 0 : Err(err) => {
4578 0 : tracing::error!(%node_id, "Drain background operation encountered: {err}")
4579 : }
4580 : }
4581 0 : }
4582 0 : });
4583 : }
4584 : NodeSchedulingPolicy::Draining => {
4585 0 : return Err(ApiError::Conflict(format!(
4586 0 : "Node {node_id} has drain in progress"
4587 0 : )));
4588 : }
4589 0 : policy => {
4590 0 : return Err(ApiError::PreconditionFailed(
4591 0 : format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
4592 0 : ));
4593 : }
4594 : }
4595 :
4596 0 : Ok(())
4597 0 : }
4598 :
4599 0 : pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
4600 0 : let (node_available, node_policy) = {
4601 0 : let locked = self.inner.read().unwrap();
4602 0 : let nodes = &locked.nodes;
4603 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
4604 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4605 0 : ))?;
4606 :
4607 0 : (node.is_available(), node.get_scheduling())
4608 0 : };
4609 0 :
4610 0 : if !node_available {
4611 0 : return Err(ApiError::ResourceUnavailable(
4612 0 : format!("Node {node_id} is currently unavailable").into(),
4613 0 : ));
4614 0 : }
4615 :
4616 0 : if !matches!(node_policy, NodeSchedulingPolicy::Draining) {
4617 0 : return Err(ApiError::PreconditionFailed(
4618 0 : format!("Node {node_id} has no drain in progress").into(),
4619 0 : ));
4620 0 : }
4621 :
4622 0 : if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
4623 0 : if let Operation::Drain(drain) = op_handler.operation {
4624 0 : if drain.node_id == node_id {
4625 0 : tracing::info!("Cancelling background drain operation for node {node_id}");
4626 0 : op_handler.cancel.cancel();
4627 0 : return Ok(());
4628 0 : }
4629 0 : }
4630 0 : }
4631 :
4632 0 : Err(ApiError::PreconditionFailed(
4633 0 : format!("Node {node_id} has no drain in progress").into(),
4634 0 : ))
4635 0 : }
4636 :
4637 0 : pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
4638 0 : let (ongoing_op, node_available, node_policy, total_nodes_count) = {
4639 0 : let locked = self.inner.read().unwrap();
4640 0 : let nodes = &locked.nodes;
4641 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
4642 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4643 0 : ))?;
4644 :
4645 0 : (
4646 0 : locked
4647 0 : .ongoing_operation
4648 0 : .as_ref()
4649 0 : .map(|ongoing| ongoing.operation),
4650 0 : node.is_available(),
4651 0 : node.get_scheduling(),
4652 0 : nodes.len(),
4653 0 : )
4654 : };
4655 :
4656 0 : if let Some(ongoing) = ongoing_op {
4657 0 : return Err(ApiError::PreconditionFailed(
4658 0 : format!("Background operation already ongoing for node: {}", ongoing).into(),
4659 0 : ));
4660 0 : }
4661 0 :
4662 0 : if !node_available {
4663 0 : return Err(ApiError::ResourceUnavailable(
4664 0 : format!("Node {node_id} is currently unavailable").into(),
4665 0 : ));
4666 0 : }
4667 0 :
4668 0 : if total_nodes_count <= 1 {
4669 0 : return Err(ApiError::PreconditionFailed(
4670 0 : "No other nodes to fill from".into(),
4671 0 : ));
4672 0 : }
4673 0 :
4674 0 : match node_policy {
4675 : NodeSchedulingPolicy::Active => {
4676 0 : self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
4677 0 : .await?;
4678 :
4679 0 : let cancel = self.cancel.child_token();
4680 0 : let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
4681 :
4682 0 : self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
4683 0 : operation: Operation::Fill(Fill { node_id }),
4684 0 : cancel: cancel.clone(),
4685 0 : });
4686 0 :
4687 0 : tokio::task::spawn({
4688 0 : let service = self.clone();
4689 0 : let cancel = cancel.clone();
4690 0 : async move {
4691 0 : let _gate_guard = gate_guard;
4692 :
4693 : scopeguard::defer! {
4694 : let prev = service.inner.write().unwrap().ongoing_operation.take();
4695 :
4696 0 : if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
4697 : assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
4698 : } else {
4699 : panic!("We always remove the same operation")
4700 : }
4701 : }
4702 :
4703 0 : tracing::info!(%node_id, "Fill background operation starting");
4704 0 : let res = service.fill_node(node_id, cancel).await;
4705 0 : match res {
4706 : Ok(()) => {
4707 0 : tracing::info!(%node_id, "Fill background operation completed successfully");
4708 : }
4709 : Err(OperationError::Cancelled) => {
4710 0 : tracing::info!(%node_id, "Fill background operation was cancelled");
4711 : }
4712 0 : Err(err) => {
4713 0 : tracing::error!(%node_id, "Fill background operation encountered: {err}")
4714 : }
4715 : }
4716 0 : }
4717 0 : });
4718 : }
4719 : NodeSchedulingPolicy::Filling => {
4720 0 : return Err(ApiError::Conflict(format!(
4721 0 : "Node {node_id} has fill in progress"
4722 0 : )));
4723 : }
4724 0 : policy => {
4725 0 : return Err(ApiError::PreconditionFailed(
4726 0 : format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
4727 0 : ));
4728 : }
4729 : }
4730 :
4731 0 : Ok(())
4732 0 : }
4733 :
4734 0 : pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
4735 0 : let (node_available, node_policy) = {
4736 0 : let locked = self.inner.read().unwrap();
4737 0 : let nodes = &locked.nodes;
4738 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
4739 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4740 0 : ))?;
4741 :
4742 0 : (node.is_available(), node.get_scheduling())
4743 0 : };
4744 0 :
4745 0 : if !node_available {
4746 0 : return Err(ApiError::ResourceUnavailable(
4747 0 : format!("Node {node_id} is currently unavailable").into(),
4748 0 : ));
4749 0 : }
4750 :
4751 0 : if !matches!(node_policy, NodeSchedulingPolicy::Filling) {
4752 0 : return Err(ApiError::PreconditionFailed(
4753 0 : format!("Node {node_id} has no fill in progress").into(),
4754 0 : ));
4755 0 : }
4756 :
4757 0 : if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
4758 0 : if let Operation::Fill(fill) = op_handler.operation {
4759 0 : if fill.node_id == node_id {
4760 0 : tracing::info!("Cancelling background drain operation for node {node_id}");
4761 0 : op_handler.cancel.cancel();
4762 0 : return Ok(());
4763 0 : }
4764 0 : }
4765 0 : }
4766 :
4767 0 : Err(ApiError::PreconditionFailed(
4768 0 : format!("Node {node_id} has no fill in progress").into(),
4769 0 : ))
4770 0 : }
4771 :
4772 : /// Helper for methods that will try and call pageserver APIs for
4773 : /// a tenant, such as timeline CRUD: they cannot proceed unless the tenant
4774 : /// is attached somewhere.
4775 0 : fn ensure_attached_schedule(
4776 0 : &self,
4777 0 : mut locked: std::sync::RwLockWriteGuard<'_, ServiceState>,
4778 0 : tenant_id: TenantId,
4779 0 : ) -> Result<Vec<ReconcilerWaiter>, anyhow::Error> {
4780 0 : let mut waiters = Vec::new();
4781 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4782 0 :
4783 0 : let mut schedule_context = ScheduleContext::default();
4784 0 : for (tenant_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
4785 0 : shard.schedule(scheduler, &mut schedule_context)?;
4786 :
4787 : // The shard's policies may not result in an attached location being scheduled: this
4788 : // is an error because our caller needs it attached somewhere.
4789 0 : if shard.intent.get_attached().is_none() {
4790 0 : return Err(anyhow::anyhow!(
4791 0 : "Tenant {tenant_id} not scheduled to be attached"
4792 0 : ));
4793 0 : };
4794 0 :
4795 0 : if shard.stably_attached().is_some() {
4796 : // We do not require the shard to be totally up to date on reconciliation: we just require
4797 : // that it has been attached on the intended node. Other dirty state such as unattached secondary
4798 : // locations, or compute hook notifications can be ignored.
4799 0 : continue;
4800 0 : }
4801 :
4802 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
4803 0 : tracing::info!("Waiting for shard {tenant_shard_id} to reconcile, in order to ensure it is attached");
4804 0 : waiters.push(waiter);
4805 0 : }
4806 : }
4807 0 : Ok(waiters)
4808 0 : }
4809 :
4810 0 : async fn ensure_attached_wait(&self, tenant_id: TenantId) -> Result<(), ApiError> {
4811 0 : let ensure_waiters = {
4812 0 : let locked = self.inner.write().unwrap();
4813 :
4814 : // Check if the tenant is splitting: in this case, even if it is attached,
4815 : // we must act as if it is not: this blocks e.g. timeline creation/deletion
4816 : // operations during the split.
4817 0 : for (_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
4818 0 : if !matches!(shard.splitting, SplitState::Idle) {
4819 0 : return Err(ApiError::ResourceUnavailable(
4820 0 : "Tenant shards are currently splitting".into(),
4821 0 : ));
4822 0 : }
4823 : }
4824 :
4825 0 : self.ensure_attached_schedule(locked, tenant_id)
4826 0 : .map_err(ApiError::InternalServerError)?
4827 : };
4828 :
4829 0 : let deadline = Instant::now().checked_add(Duration::from_secs(5)).unwrap();
4830 0 : for waiter in ensure_waiters {
4831 0 : let timeout = deadline.duration_since(Instant::now());
4832 0 : waiter.wait_timeout(timeout).await?;
4833 : }
4834 :
4835 0 : Ok(())
4836 0 : }
4837 :
4838 : /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`],
4839 0 : fn maybe_reconcile_shard(
4840 0 : &self,
4841 0 : shard: &mut TenantShard,
4842 0 : nodes: &Arc<HashMap<NodeId, Node>>,
4843 0 : ) -> Option<ReconcilerWaiter> {
4844 0 : let reconcile_needed = shard.get_reconcile_needed(nodes);
4845 0 :
4846 0 : match reconcile_needed {
4847 0 : ReconcileNeeded::No => return None,
4848 0 : ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
4849 0 : ReconcileNeeded::Yes => {
4850 0 : // Fall through to try and acquire units for spawning reconciler
4851 0 : }
4852 : };
4853 :
4854 0 : let units = match self.reconciler_concurrency.clone().try_acquire_owned() {
4855 0 : Ok(u) => ReconcileUnits::new(u),
4856 : Err(_) => {
4857 0 : tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
4858 0 : "Concurrency limited: enqueued for reconcile later");
4859 0 : if !shard.delayed_reconcile {
4860 0 : match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
4861 0 : Err(TrySendError::Closed(_)) => {
4862 0 : // Weird mid-shutdown case?
4863 0 : }
4864 : Err(TrySendError::Full(_)) => {
4865 : // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
4866 0 : tracing::warn!(
4867 0 : "Many shards are waiting to reconcile: delayed_reconcile queue is full"
4868 : );
4869 : }
4870 0 : Ok(()) => {
4871 0 : shard.delayed_reconcile = true;
4872 0 : }
4873 : }
4874 0 : }
4875 :
4876 : // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
4877 : // number to advance. When this function is eventually called again and succeeds in getting units,
4878 : // it will spawn a reconciler that makes this waiter complete.
4879 0 : return Some(shard.future_reconcile_waiter());
4880 : }
4881 : };
4882 :
4883 0 : let Ok(gate_guard) = self.gate.enter() else {
4884 : // Gate closed: we're shutting down, drop out.
4885 0 : return None;
4886 : };
4887 :
4888 0 : shard.spawn_reconciler(
4889 0 : &self.result_tx,
4890 0 : nodes,
4891 0 : &self.compute_hook,
4892 0 : &self.config,
4893 0 : &self.persistence,
4894 0 : units,
4895 0 : gate_guard,
4896 0 : &self.cancel,
4897 0 : )
4898 0 : }
4899 :
4900 : /// Check all tenants for pending reconciliation work, and reconcile those in need.
4901 : /// Additionally, reschedule tenants that require it.
4902 : ///
4903 : /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
4904 : /// spawned but some _would_ have been spawned if `reconciler_concurrency` units where
4905 : /// available. A return value of 0 indicates that everything is fully reconciled already.
4906 0 : fn reconcile_all(&self) -> usize {
4907 0 : let mut locked = self.inner.write().unwrap();
4908 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
4909 0 : let pageservers = nodes.clone();
4910 0 :
4911 0 : let mut schedule_context = ScheduleContext::default();
4912 0 :
4913 0 : let mut reconciles_spawned = 0;
4914 0 : for (tenant_shard_id, shard) in tenants.iter_mut() {
4915 0 : if tenant_shard_id.is_shard_zero() {
4916 0 : schedule_context = ScheduleContext::default();
4917 0 : }
4918 :
4919 : // Skip checking if this shard is already enqueued for reconciliation
4920 0 : if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
4921 : // If there is something delayed, then return a nonzero count so that
4922 : // callers like reconcile_all_now do not incorrectly get the impression
4923 : // that the system is in a quiescent state.
4924 0 : reconciles_spawned = std::cmp::max(1, reconciles_spawned);
4925 0 : continue;
4926 0 : }
4927 0 :
4928 0 : // Eventual consistency: if an earlier reconcile job failed, and the shard is still
4929 0 : // dirty, spawn another rone
4930 0 : if self.maybe_reconcile_shard(shard, &pageservers).is_some() {
4931 0 : reconciles_spawned += 1;
4932 0 : }
4933 :
4934 0 : schedule_context.avoid(&shard.intent.all_pageservers());
4935 : }
4936 :
4937 0 : reconciles_spawned
4938 0 : }
4939 :
4940 : /// `optimize` in this context means identifying shards which have valid scheduled locations, but
4941 : /// could be scheduled somewhere better:
4942 : /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
4943 : /// * e.g. after a node fails then recovers, to move some work back to it
4944 : /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
4945 : /// * e.g. after a shard split, the initial attached locations will all be on the node where
4946 : /// we did the split, but are probably better placed elsewhere.
4947 : /// - Creating new secondary locations if it improves the spreading of a sharded tenant
4948 : /// * e.g. after a shard split, some locations will be on the same node (where the split
4949 : /// happened), and will probably be better placed elsewhere.
4950 : ///
4951 : /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
4952 : /// the time of scheduling, this function looks for cases where a better-scoring location is available
4953 : /// according to those same soft constraints.
4954 0 : async fn optimize_all(&self) -> usize {
4955 0 : // Limit on how many shards' optmizations each call to this function will execute. Combined
4956 0 : // with the frequency of background calls, this acts as an implicit rate limit that runs a small
4957 0 : // trickle of optimizations in the background, rather than executing a large number in parallel
4958 0 : // when a change occurs.
4959 0 : const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 2;
4960 0 :
4961 0 : // Synchronous prepare: scan shards for possible scheduling optimizations
4962 0 : let candidate_work = self.optimize_all_plan();
4963 0 : let candidate_work_len = candidate_work.len();
4964 :
4965 : // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply validation
4966 0 : let validated_work = self.optimize_all_validate(candidate_work).await;
4967 :
4968 0 : let was_work_filtered = validated_work.len() != candidate_work_len;
4969 0 :
4970 0 : // Synchronous apply: update the shards' intent states according to validated optimisations
4971 0 : let mut reconciles_spawned = 0;
4972 0 : let mut optimizations_applied = 0;
4973 0 : let mut locked = self.inner.write().unwrap();
4974 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4975 0 : for (tenant_shard_id, optimization) in validated_work {
4976 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
4977 : // Shard was dropped between planning and execution;
4978 0 : continue;
4979 : };
4980 0 : if shard.apply_optimization(scheduler, optimization) {
4981 0 : optimizations_applied += 1;
4982 0 : if self.maybe_reconcile_shard(shard, nodes).is_some() {
4983 0 : reconciles_spawned += 1;
4984 0 : }
4985 0 : }
4986 :
4987 0 : if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
4988 0 : break;
4989 0 : }
4990 : }
4991 :
4992 0 : if was_work_filtered {
4993 0 : // If we filtered any work out during validation, ensure we return a nonzero value to indicate
4994 0 : // to callers that the system is not in a truly quiet state, it's going to do some work as soon
4995 0 : // as these validations start passing.
4996 0 : reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
4997 0 : }
4998 :
4999 0 : reconciles_spawned
5000 0 : }
5001 :
5002 0 : fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
5003 0 : let mut schedule_context = ScheduleContext::default();
5004 0 :
5005 0 : let mut tenant_shards: Vec<&TenantShard> = Vec::new();
5006 0 :
5007 0 : // How many candidate optimizations we will generate, before evaluating them for readniess: setting
5008 0 : // this higher than the execution limit gives us a chance to execute some work even if the first
5009 0 : // few optimizations we find are not ready.
5010 0 : const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 8;
5011 0 :
5012 0 : let mut work = Vec::new();
5013 0 :
5014 0 : let mut locked = self.inner.write().unwrap();
5015 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5016 0 : for (tenant_shard_id, shard) in tenants.iter() {
5017 0 : if tenant_shard_id.is_shard_zero() {
5018 0 : // Reset accumulators on the first shard in a tenant
5019 0 : schedule_context = ScheduleContext::default();
5020 0 : schedule_context.mode = ScheduleMode::Speculative;
5021 0 : tenant_shards.clear();
5022 0 : }
5023 :
5024 0 : if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
5025 0 : break;
5026 0 : }
5027 0 :
5028 0 : match shard.get_scheduling_policy() {
5029 0 : ShardSchedulingPolicy::Active => {
5030 0 : // Ok to do optimization
5031 0 : }
5032 : ShardSchedulingPolicy::Essential
5033 : | ShardSchedulingPolicy::Pause
5034 : | ShardSchedulingPolicy::Stop => {
5035 : // Policy prevents optimizing this shard.
5036 0 : continue;
5037 : }
5038 : }
5039 :
5040 : // Accumulate the schedule context for all the shards in a tenant: we must have
5041 : // the total view of all shards before we can try to optimize any of them.
5042 0 : schedule_context.avoid(&shard.intent.all_pageservers());
5043 0 : if let Some(attached) = shard.intent.get_attached() {
5044 0 : schedule_context.push_attached(*attached);
5045 0 : }
5046 0 : tenant_shards.push(shard);
5047 0 :
5048 0 : // Once we have seen the last shard in the tenant, proceed to search across all shards
5049 0 : // in the tenant for optimizations
5050 0 : if shard.shard.number.0 == shard.shard.count.count() - 1 {
5051 0 : if tenant_shards.iter().any(|s| s.reconciler.is_some()) {
5052 : // Do not start any optimizations while another change to the tenant is ongoing: this
5053 : // is not necessary for correctness, but simplifies operations and implicitly throttles
5054 : // optimization changes to happen in a "trickle" over time.
5055 0 : continue;
5056 0 : }
5057 0 :
5058 0 : if tenant_shards.iter().any(|s| {
5059 0 : !matches!(s.splitting, SplitState::Idle)
5060 0 : || matches!(s.policy, PlacementPolicy::Detached)
5061 0 : }) {
5062 : // Never attempt to optimize a tenant that is currently being split, or
5063 : // a tenant that is meant to be detached
5064 0 : continue;
5065 0 : }
5066 :
5067 : // TODO: optimization calculations are relatively expensive: create some fast-path for
5068 : // the common idle case (avoiding the search on tenants that we have recently checked)
5069 :
5070 0 : for shard in &tenant_shards {
5071 0 : if let Some(optimization) =
5072 : // If idle, maybe ptimize attachments: if a shard has a secondary location that is preferable to
5073 : // its primary location based on soft constraints, cut it over.
5074 0 : shard.optimize_attachment(nodes, &schedule_context)
5075 : {
5076 0 : work.push((shard.tenant_shard_id, optimization));
5077 0 : break;
5078 0 : } else if let Some(optimization) =
5079 : // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
5080 : // better placed on another node, based on ScheduleContext, then adjust it. This
5081 : // covers cases like after a shard split, where we might have too many shards
5082 : // in the same tenant with secondary locations on the node where they originally split.
5083 0 : shard.optimize_secondary(scheduler, &schedule_context)
5084 : {
5085 0 : work.push((shard.tenant_shard_id, optimization));
5086 0 : break;
5087 0 : }
5088 :
5089 : // TODO: extend this mechanism to prefer attaching on nodes with fewer attached
5090 : // tenants (i.e. extend schedule state to distinguish attached from secondary counts),
5091 : // for the total number of attachments on a node (not just within a tenant.)
5092 : }
5093 0 : }
5094 : }
5095 :
5096 0 : work
5097 0 : }
5098 :
5099 0 : async fn optimize_all_validate(
5100 0 : &self,
5101 0 : candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
5102 0 : ) -> Vec<(TenantShardId, ScheduleOptimization)> {
5103 0 : // Take a clone of the node map to use outside the lock in async validation phase
5104 0 : let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
5105 0 :
5106 0 : let mut want_secondary_status = Vec::new();
5107 0 :
5108 0 : // Validate our plans: this is an async phase where we may do I/O to pageservers to
5109 0 : // check that the state of locations is acceptable to run the optimization, such as
5110 0 : // checking that a secondary location is sufficiently warmed-up to cleanly cut over
5111 0 : // in a live migration.
5112 0 : let mut validated_work = Vec::new();
5113 0 : for (tenant_shard_id, optimization) in candidate_work {
5114 0 : match optimization.action {
5115 : ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
5116 : old_attached_node_id: _,
5117 0 : new_attached_node_id,
5118 0 : }) => {
5119 0 : match validation_nodes.get(&new_attached_node_id) {
5120 0 : None => {
5121 0 : // Node was dropped between planning and validation
5122 0 : }
5123 0 : Some(node) => {
5124 0 : if !node.is_available() {
5125 0 : tracing::info!("Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable");
5126 0 : } else {
5127 0 : // Accumulate optimizations that require fetching secondary status, so that we can execute these
5128 0 : // remote API requests concurrently.
5129 0 : want_secondary_status.push((
5130 0 : tenant_shard_id,
5131 0 : node.clone(),
5132 0 : optimization,
5133 0 : ));
5134 0 : }
5135 : }
5136 : }
5137 : }
5138 : ScheduleOptimizationAction::ReplaceSecondary(_) => {
5139 : // No extra checks needed to replace a secondary: this does not interrupt client access
5140 0 : validated_work.push((tenant_shard_id, optimization))
5141 : }
5142 : };
5143 : }
5144 :
5145 : // Call into pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth migration: we
5146 : // do this so that we avoid spawning a Reconciler that would have to wait minutes/hours for a destination to warm up: that reconciler
5147 : // would hold a precious reconcile semaphore unit the whole time it was waiting for the destination to warm up.
5148 0 : let results = self
5149 0 : .tenant_for_shards_api(
5150 0 : want_secondary_status
5151 0 : .iter()
5152 0 : .map(|i| (i.0, i.1.clone()))
5153 0 : .collect(),
5154 0 : |tenant_shard_id, client| async move {
5155 0 : client.tenant_secondary_status(tenant_shard_id).await
5156 0 : },
5157 0 : 1,
5158 0 : 1,
5159 0 : SHORT_RECONCILE_TIMEOUT,
5160 0 : &self.cancel,
5161 0 : )
5162 0 : .await;
5163 :
5164 0 : for ((tenant_shard_id, node, optimization), secondary_status) in
5165 0 : want_secondary_status.into_iter().zip(results.into_iter())
5166 : {
5167 0 : match secondary_status {
5168 0 : Err(e) => {
5169 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}");
5170 : }
5171 0 : Ok(progress) => {
5172 0 : // We require secondary locations to have less than 10GiB of downloads pending before we will use
5173 0 : // them in an optimization
5174 0 : const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
5175 0 :
5176 0 : if progress.heatmap_mtime.is_none()
5177 0 : || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
5178 0 : && progress.bytes_downloaded != progress.bytes_total
5179 0 : || progress.bytes_total - progress.bytes_downloaded
5180 0 : > DOWNLOAD_FRESHNESS_THRESHOLD
5181 : {
5182 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}");
5183 : } else {
5184 : // Location looks ready: proceed
5185 0 : tracing::info!(
5186 0 : "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
5187 : );
5188 0 : validated_work.push((tenant_shard_id, optimization))
5189 : }
5190 : }
5191 : }
5192 : }
5193 :
5194 0 : validated_work
5195 0 : }
5196 :
5197 : /// Look for shards which are oversized and in need of splitting
5198 0 : async fn autosplit_tenants(self: &Arc<Self>) {
5199 0 : let Some(split_threshold) = self.config.split_threshold else {
5200 : // Auto-splitting is disabled
5201 0 : return;
5202 : };
5203 :
5204 0 : let nodes = self.inner.read().unwrap().nodes.clone();
5205 0 :
5206 0 : const SPLIT_TO_MAX: ShardCount = ShardCount::new(8);
5207 0 :
5208 0 : let mut top_n = Vec::new();
5209 0 :
5210 0 : // Call into each node to look for big tenants
5211 0 : let top_n_request = TopTenantShardsRequest {
5212 0 : // We currently split based on logical size, for simplicity: logical size is a signal of
5213 0 : // the user's intent to run a large database, whereas physical/resident size can be symptoms
5214 0 : // of compaction issues. Eventually we should switch to using resident size to bound the
5215 0 : // disk space impact of one shard.
5216 0 : order_by: models::TenantSorting::MaxLogicalSize,
5217 0 : limit: 10,
5218 0 : where_shards_lt: Some(SPLIT_TO_MAX),
5219 0 : where_gt: Some(split_threshold),
5220 0 : };
5221 0 : for node in nodes.values() {
5222 0 : let request_ref = &top_n_request;
5223 0 : match node
5224 0 : .with_client_retries(
5225 0 : |client| async move {
5226 0 : let request = request_ref.clone();
5227 0 : client.top_tenant_shards(request.clone()).await
5228 0 : },
5229 0 : &self.config.jwt_token,
5230 0 : 3,
5231 0 : 3,
5232 0 : Duration::from_secs(5),
5233 0 : &self.cancel,
5234 0 : )
5235 0 : .await
5236 : {
5237 0 : Some(Ok(node_top_n)) => {
5238 0 : top_n.extend(node_top_n.shards.into_iter());
5239 0 : }
5240 : Some(Err(mgmt_api::Error::Cancelled)) => {
5241 0 : continue;
5242 : }
5243 0 : Some(Err(e)) => {
5244 0 : tracing::warn!("Failed to fetch top N tenants from {node}: {e}");
5245 0 : continue;
5246 : }
5247 : None => {
5248 : // Node is shutting down
5249 0 : continue;
5250 : }
5251 : };
5252 : }
5253 :
5254 : // Pick the biggest tenant to split first
5255 0 : top_n.sort_by_key(|i| i.resident_size);
5256 0 : let Some(split_candidate) = top_n.into_iter().next() else {
5257 0 : tracing::debug!("No split-elegible shards found");
5258 0 : return;
5259 : };
5260 :
5261 : // We spawn a task to run this, so it's exactly like some external API client requesting it. We don't
5262 : // want to block the background reconcile loop on this.
5263 0 : tracing::info!("Auto-splitting tenant for size threshold {split_threshold}: current size {split_candidate:?}");
5264 :
5265 0 : let this = self.clone();
5266 0 : tokio::spawn(
5267 0 : async move {
5268 0 : match this
5269 0 : .tenant_shard_split(
5270 0 : split_candidate.id.tenant_id,
5271 0 : TenantShardSplitRequest {
5272 0 : // Always split to the max number of shards: this avoids stepping through
5273 0 : // intervening shard counts and encountering the overrhead of a split+cleanup
5274 0 : // each time as a tenant grows, and is not too expensive because our max shard
5275 0 : // count is relatively low anyway.
5276 0 : // This policy will be adjusted in future once we support higher shard count.
5277 0 : new_shard_count: SPLIT_TO_MAX.literal(),
5278 0 : new_stripe_size: Some(ShardParameters::DEFAULT_STRIPE_SIZE),
5279 0 : },
5280 0 : )
5281 0 : .await
5282 : {
5283 : Ok(_) => {
5284 0 : tracing::info!("Successful auto-split");
5285 : }
5286 0 : Err(e) => {
5287 0 : tracing::error!("Auto-split failed: {e}");
5288 : }
5289 : }
5290 0 : }
5291 0 : .instrument(tracing::info_span!("auto_split", tenant_id=%split_candidate.id.tenant_id)),
5292 : );
5293 0 : }
5294 :
5295 : /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
5296 : /// also wait for any generated Reconcilers to complete. Calling this until it returns zero should
5297 : /// put the system into a quiescent state where future background reconciliations won't do anything.
5298 0 : pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
5299 0 : let reconciles_spawned = self.reconcile_all();
5300 0 : let reconciles_spawned = if reconciles_spawned == 0 {
5301 : // Only optimize when we are otherwise idle
5302 0 : self.optimize_all().await
5303 : } else {
5304 0 : reconciles_spawned
5305 : };
5306 :
5307 0 : let waiters = {
5308 0 : let mut waiters = Vec::new();
5309 0 : let locked = self.inner.read().unwrap();
5310 0 : for (_tenant_shard_id, shard) in locked.tenants.iter() {
5311 0 : if let Some(waiter) = shard.get_waiter() {
5312 0 : waiters.push(waiter);
5313 0 : }
5314 : }
5315 0 : waiters
5316 0 : };
5317 0 :
5318 0 : let waiter_count = waiters.len();
5319 0 : match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
5320 0 : Ok(()) => {}
5321 0 : Err(ReconcileWaitError::Failed(_, reconcile_error))
5322 0 : if matches!(*reconcile_error, ReconcileError::Cancel) =>
5323 0 : {
5324 0 : // Ignore reconciler cancel errors: this reconciler might have shut down
5325 0 : // because some other change superceded it. We will return a nonzero number,
5326 0 : // so the caller knows they might have to call again to quiesce the system.
5327 0 : }
5328 0 : Err(e) => {
5329 0 : return Err(e);
5330 : }
5331 : };
5332 :
5333 0 : tracing::info!(
5334 0 : "{} reconciles in reconcile_all, {} waiters",
5335 : reconciles_spawned,
5336 : waiter_count
5337 : );
5338 :
5339 0 : Ok(std::cmp::max(waiter_count, reconciles_spawned))
5340 0 : }
5341 :
5342 0 : pub async fn shutdown(&self) {
5343 0 : // Note that this already stops processing any results from reconciles: so
5344 0 : // we do not expect that our [`TenantShard`] objects will reach a neat
5345 0 : // final state.
5346 0 : self.cancel.cancel();
5347 0 :
5348 0 : // The cancellation tokens in [`crate::reconciler::Reconciler`] are children
5349 0 : // of our cancellation token, so we do not need to explicitly cancel each of
5350 0 : // them.
5351 0 :
5352 0 : // Background tasks and reconcilers hold gate guards: this waits for them all
5353 0 : // to complete.
5354 0 : self.gate.close().await;
5355 0 : }
5356 :
5357 : /// Drain a node by moving the shards attached to it as primaries.
5358 : /// This is a long running operation and it should run as a separate Tokio task.
5359 0 : pub(crate) async fn drain_node(
5360 0 : &self,
5361 0 : node_id: NodeId,
5362 0 : cancel: CancellationToken,
5363 0 : ) -> Result<(), OperationError> {
5364 0 : let mut last_inspected_shard: Option<TenantShardId> = None;
5365 0 : let mut inspected_all_shards = false;
5366 0 : let mut waiters = Vec::new();
5367 :
5368 0 : while !inspected_all_shards {
5369 0 : if cancel.is_cancelled() {
5370 0 : match self
5371 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
5372 0 : .await
5373 : {
5374 0 : Ok(()) => return Err(OperationError::Cancelled),
5375 0 : Err(err) => {
5376 0 : return Err(OperationError::FinalizeError(
5377 0 : format!(
5378 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
5379 0 : node_id, err
5380 0 : )
5381 0 : .into(),
5382 0 : ));
5383 : }
5384 : }
5385 0 : }
5386 0 :
5387 0 : {
5388 0 : let mut locked = self.inner.write().unwrap();
5389 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5390 :
5391 0 : let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
5392 0 : format!("node {node_id} was removed").into(),
5393 0 : ))?;
5394 :
5395 0 : let current_policy = node.get_scheduling();
5396 0 : if !matches!(current_policy, NodeSchedulingPolicy::Draining) {
5397 : // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
5398 : // about it
5399 0 : return Err(OperationError::NodeStateChanged(
5400 0 : format!("node {node_id} changed state to {current_policy:?}").into(),
5401 0 : ));
5402 0 : }
5403 0 :
5404 0 : let mut cursor = tenants.iter_mut().skip_while({
5405 0 : let skip_past = last_inspected_shard;
5406 0 : move |(tid, _)| match skip_past {
5407 0 : Some(last) => **tid != last,
5408 0 : None => false,
5409 0 : }
5410 0 : });
5411 :
5412 0 : while waiters.len() < MAX_RECONCILES_PER_OPERATION {
5413 0 : let (tid, tenant_shard) = match cursor.next() {
5414 0 : Some(some) => some,
5415 : None => {
5416 0 : inspected_all_shards = true;
5417 0 : break;
5418 : }
5419 : };
5420 :
5421 : // If the shard is not attached to the node being drained, skip it.
5422 0 : if *tenant_shard.intent.get_attached() != Some(node_id) {
5423 0 : last_inspected_shard = Some(*tid);
5424 0 : continue;
5425 0 : }
5426 0 :
5427 0 : match tenant_shard.reschedule_to_secondary(None, scheduler) {
5428 0 : Err(e) => {
5429 0 : tracing::warn!(
5430 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
5431 0 : "Scheduling error when draining pageserver {} : {e}", node_id
5432 : );
5433 : }
5434 : Ok(()) => {
5435 0 : let scheduled_to = tenant_shard.intent.get_attached();
5436 0 : tracing::info!(
5437 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
5438 0 : "Rescheduled shard while draining node {}: {} -> {:?}",
5439 : node_id,
5440 : node_id,
5441 : scheduled_to
5442 : );
5443 :
5444 0 : let waiter = self.maybe_reconcile_shard(tenant_shard, nodes);
5445 0 : if let Some(some) = waiter {
5446 0 : waiters.push(some);
5447 0 : }
5448 : }
5449 : }
5450 :
5451 0 : last_inspected_shard = Some(*tid);
5452 : }
5453 : }
5454 :
5455 0 : waiters = self
5456 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
5457 0 : .await;
5458 :
5459 0 : failpoint_support::sleep_millis_async!("sleepy-drain-loop");
5460 : }
5461 :
5462 0 : while !waiters.is_empty() {
5463 0 : if cancel.is_cancelled() {
5464 0 : match self
5465 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
5466 0 : .await
5467 : {
5468 0 : Ok(()) => return Err(OperationError::Cancelled),
5469 0 : Err(err) => {
5470 0 : return Err(OperationError::FinalizeError(
5471 0 : format!(
5472 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
5473 0 : node_id, err
5474 0 : )
5475 0 : .into(),
5476 0 : ));
5477 : }
5478 : }
5479 0 : }
5480 0 :
5481 0 : tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
5482 :
5483 0 : waiters = self
5484 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
5485 0 : .await;
5486 : }
5487 :
5488 : // At this point we have done the best we could to drain shards from this node.
5489 : // Set the node scheduling policy to `[NodeSchedulingPolicy::PauseForRestart]`
5490 : // to complete the drain.
5491 0 : if let Err(err) = self
5492 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
5493 0 : .await
5494 : {
5495 : // This is not fatal. Anything that is polling the node scheduling policy to detect
5496 : // the end of the drain operations will hang, but all such places should enforce an
5497 : // overall timeout. The scheduling policy will be updated upon node re-attach and/or
5498 : // by the counterpart fill operation.
5499 0 : return Err(OperationError::FinalizeError(
5500 0 : format!(
5501 0 : "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
5502 0 : )
5503 0 : .into(),
5504 0 : ));
5505 0 : }
5506 0 :
5507 0 : Ok(())
5508 0 : }
5509 :
5510 : /// Create a node fill plan (pick secondaries to promote) that meets the following requirements:
5511 : /// 1. The node should be filled until it reaches the expected cluster average of
5512 : /// attached shards. If there are not enough secondaries on the node, the plan stops early.
5513 : /// 2. Select tenant shards to promote such that the number of attached shards is balanced
5514 : /// throughout the cluster. We achieve this by picking tenant shards from each node,
5515 : /// starting from the ones with the largest number of attached shards, until the node
5516 : /// reaches the expected cluster average.
5517 : /// 3. Avoid promoting more shards of the same tenant than required. The upper bound
5518 : /// for the number of tenants from the same shard promoted to the node being filled is:
5519 : /// shard count for the tenant divided by the number of nodes in the cluster.
5520 0 : fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
5521 0 : let mut locked = self.inner.write().unwrap();
5522 0 : let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
5523 0 :
5524 0 : let mut tids_by_node = locked
5525 0 : .tenants
5526 0 : .iter_mut()
5527 0 : .filter_map(|(tid, tenant_shard)| {
5528 0 : if tenant_shard.intent.get_secondary().contains(&node_id) {
5529 0 : if let Some(primary) = tenant_shard.intent.get_attached() {
5530 0 : return Some((*primary, *tid));
5531 0 : }
5532 0 : }
5533 :
5534 0 : None
5535 0 : })
5536 0 : .into_group_map();
5537 0 :
5538 0 : let expected_attached = locked.scheduler.expected_attached_shard_count();
5539 0 : let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
5540 0 :
5541 0 : let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
5542 0 : let mut plan = Vec::new();
5543 :
5544 0 : for (node_id, attached) in nodes_by_load {
5545 0 : let available = locked
5546 0 : .nodes
5547 0 : .get(&node_id)
5548 0 : .map_or(false, |n| n.is_available());
5549 0 : if !available {
5550 0 : continue;
5551 0 : }
5552 0 :
5553 0 : if plan.len() >= fill_requirement
5554 0 : || tids_by_node.is_empty()
5555 0 : || attached <= expected_attached
5556 : {
5557 0 : break;
5558 0 : }
5559 0 :
5560 0 : let mut can_take = attached - expected_attached;
5561 0 : let mut remove_node = false;
5562 0 : while can_take > 0 {
5563 0 : match tids_by_node.get_mut(&node_id) {
5564 0 : Some(tids) => match tids.pop() {
5565 0 : Some(tid) => {
5566 0 : let max_promote_for_tenant = std::cmp::max(
5567 0 : tid.shard_count.count() as usize / locked.nodes.len(),
5568 0 : 1,
5569 0 : );
5570 0 : let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
5571 0 : if *promoted < max_promote_for_tenant {
5572 0 : plan.push(tid);
5573 0 : *promoted += 1;
5574 0 : can_take -= 1;
5575 0 : }
5576 : }
5577 : None => {
5578 0 : remove_node = true;
5579 0 : break;
5580 : }
5581 : },
5582 : None => {
5583 0 : break;
5584 : }
5585 : }
5586 : }
5587 :
5588 0 : if remove_node {
5589 0 : tids_by_node.remove(&node_id);
5590 0 : }
5591 : }
5592 :
5593 0 : plan
5594 0 : }
5595 :
5596 : /// Fill a node by promoting its secondaries until the cluster is balanced
5597 : /// with regards to attached shard counts. Note that this operation only
5598 : /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
5599 : /// This is a long running operation and it should run as a separate Tokio task.
5600 0 : pub(crate) async fn fill_node(
5601 0 : &self,
5602 0 : node_id: NodeId,
5603 0 : cancel: CancellationToken,
5604 0 : ) -> Result<(), OperationError> {
5605 0 : // TODO(vlad): Currently this operates on the assumption that all
5606 0 : // secondaries are warm. This is not always true (e.g. we just migrated the
5607 0 : // tenant). Take that into consideration by checking the secondary status.
5608 0 : let mut tids_to_promote = self.fill_node_plan(node_id);
5609 0 : let mut waiters = Vec::new();
5610 :
5611 : // Execute the plan we've composed above. Before aplying each move from the plan,
5612 : // we validate to ensure that it has not gone stale in the meantime.
5613 0 : while !tids_to_promote.is_empty() {
5614 0 : if cancel.is_cancelled() {
5615 0 : match self
5616 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
5617 0 : .await
5618 : {
5619 0 : Ok(()) => return Err(OperationError::Cancelled),
5620 0 : Err(err) => {
5621 0 : return Err(OperationError::FinalizeError(
5622 0 : format!(
5623 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
5624 0 : node_id, err
5625 0 : )
5626 0 : .into(),
5627 0 : ));
5628 : }
5629 : }
5630 0 : }
5631 0 :
5632 0 : {
5633 0 : let mut locked = self.inner.write().unwrap();
5634 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5635 :
5636 0 : let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
5637 0 : format!("node {node_id} was removed").into(),
5638 0 : ))?;
5639 :
5640 0 : let current_policy = node.get_scheduling();
5641 0 : if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
5642 : // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
5643 : // about it
5644 0 : return Err(OperationError::NodeStateChanged(
5645 0 : format!("node {node_id} changed state to {current_policy:?}").into(),
5646 0 : ));
5647 0 : }
5648 :
5649 0 : while waiters.len() < MAX_RECONCILES_PER_OPERATION {
5650 0 : if let Some(tid) = tids_to_promote.pop() {
5651 0 : if let Some(tenant_shard) = tenants.get_mut(&tid) {
5652 : // If the node being filled is not a secondary anymore,
5653 : // skip the promotion.
5654 0 : if !tenant_shard.intent.get_secondary().contains(&node_id) {
5655 0 : continue;
5656 0 : }
5657 0 :
5658 0 : let previously_attached_to = *tenant_shard.intent.get_attached();
5659 0 : match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
5660 0 : Err(e) => {
5661 0 : tracing::warn!(
5662 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
5663 0 : "Scheduling error when filling pageserver {} : {e}", node_id
5664 : );
5665 : }
5666 : Ok(()) => {
5667 0 : tracing::info!(
5668 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
5669 0 : "Rescheduled shard while filling node {}: {:?} -> {}",
5670 : node_id,
5671 : previously_attached_to,
5672 : node_id
5673 : );
5674 :
5675 0 : if let Some(waiter) =
5676 0 : self.maybe_reconcile_shard(tenant_shard, nodes)
5677 0 : {
5678 0 : waiters.push(waiter);
5679 0 : }
5680 : }
5681 : }
5682 0 : }
5683 : } else {
5684 0 : break;
5685 : }
5686 : }
5687 : }
5688 :
5689 0 : waiters = self
5690 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
5691 0 : .await;
5692 : }
5693 :
5694 0 : while !waiters.is_empty() {
5695 0 : if cancel.is_cancelled() {
5696 0 : match self
5697 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
5698 0 : .await
5699 : {
5700 0 : Ok(()) => return Err(OperationError::Cancelled),
5701 0 : Err(err) => {
5702 0 : return Err(OperationError::FinalizeError(
5703 0 : format!(
5704 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
5705 0 : node_id, err
5706 0 : )
5707 0 : .into(),
5708 0 : ));
5709 : }
5710 : }
5711 0 : }
5712 0 :
5713 0 : tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
5714 :
5715 0 : waiters = self
5716 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
5717 0 : .await;
5718 : }
5719 :
5720 0 : if let Err(err) = self
5721 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
5722 0 : .await
5723 : {
5724 : // This isn't a huge issue since the filling process starts upon request. However, it
5725 : // will prevent the next drain from starting. The only case in which this can fail
5726 : // is database unavailability. Such a case will require manual intervention.
5727 0 : return Err(OperationError::FinalizeError(
5728 0 : format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
5729 0 : .into(),
5730 0 : ));
5731 0 : }
5732 0 :
5733 0 : Ok(())
5734 0 : }
5735 : }
|