Line data Source code
1 : use hyper::Uri;
2 : use std::{
3 : borrow::Cow,
4 : cmp::Ordering,
5 : collections::{BTreeMap, HashMap, HashSet},
6 : ops::Deref,
7 : path::PathBuf,
8 : str::FromStr,
9 : sync::Arc,
10 : time::{Duration, Instant},
11 : };
12 :
13 : use crate::{
14 : background_node_operations::{
15 : Drain, Fill, Operation, OperationError, OperationHandler, MAX_RECONCILES_PER_OPERATION,
16 : },
17 : compute_hook::NotifyError,
18 : drain_utils::{self, TenantShardDrain, TenantShardIterator},
19 : id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, TracingExclusiveGuard},
20 : leadership::Leadership,
21 : metrics,
22 : peer_client::GlobalObservedState,
23 : persistence::{
24 : AbortShardSplitStatus, ControllerPersistence, DatabaseResult, MetadataHealthPersistence,
25 : TenantFilter,
26 : },
27 : reconciler::{ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder},
28 : scheduler::{MaySchedule, ScheduleContext, ScheduleMode},
29 : tenant_shard::{
30 : MigrateAttachment, ReconcileNeeded, ReconcilerStatus, ScheduleOptimization,
31 : ScheduleOptimizationAction,
32 : },
33 : };
34 : use anyhow::Context;
35 : use control_plane::storage_controller::{
36 : AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
37 : };
38 : use diesel::result::DatabaseErrorKind;
39 : use futures::{stream::FuturesUnordered, StreamExt};
40 : use itertools::Itertools;
41 : use pageserver_api::{
42 : controller_api::{
43 : MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability, NodeRegisterRequest,
44 : NodeSchedulingPolicy, PlacementPolicy, ShardSchedulingPolicy, TenantCreateRequest,
45 : TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse,
46 : TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
47 : TenantShardMigrateRequest, TenantShardMigrateResponse, UtilizationScore,
48 : },
49 : models::{SecondaryProgress, TenantConfigRequest, TopTenantShardsRequest},
50 : };
51 : use reqwest::StatusCode;
52 : use tracing::{instrument, Instrument};
53 :
54 : use crate::pageserver_client::PageserverClient;
55 : use pageserver_api::{
56 : models::{
57 : self, LocationConfig, LocationConfigListResponse, LocationConfigMode,
58 : PageserverUtilization, ShardParameters, TenantConfig, TenantLocationConfigRequest,
59 : TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
60 : TenantShardSplitResponse, TenantTimeTravelRequest, TimelineCreateRequest, TimelineInfo,
61 : },
62 : shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId},
63 : upcall_api::{
64 : ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest,
65 : ValidateResponse, ValidateResponseTenant,
66 : },
67 : };
68 : use pageserver_client::mgmt_api;
69 : use tokio::sync::mpsc::error::TrySendError;
70 : use tokio_util::sync::CancellationToken;
71 : use utils::{
72 : completion::Barrier,
73 : failpoint_support,
74 : generation::Generation,
75 : http::error::ApiError,
76 : id::{NodeId, TenantId, TimelineId},
77 : sync::gate::Gate,
78 : };
79 :
80 : use crate::{
81 : compute_hook::ComputeHook,
82 : heartbeater::{Heartbeater, PageserverState},
83 : node::{AvailabilityTransition, Node},
84 : persistence::{split_state::SplitState, DatabaseError, Persistence, TenantShardPersistence},
85 : reconciler::attached_location_conf,
86 : scheduler::Scheduler,
87 : tenant_shard::{
88 : IntentState, ObservedState, ObservedStateLocation, ReconcileResult, ReconcileWaitError,
89 : ReconcilerWaiter, TenantShard,
90 : },
91 : };
92 :
93 : pub mod chaos_injector;
94 :
95 : // For operations that should be quick, like attaching a new tenant
96 : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
97 :
98 : // For operations that might be slow, like migrating a tenant with
99 : // some data in it.
100 : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
101 :
102 : // If we receive a call using Secondary mode initially, it will omit generation. We will initialize
103 : // tenant shards into this generation, and as long as it remains in this generation, we will accept
104 : // input generation from future requests as authoritative.
105 : const INITIAL_GENERATION: Generation = Generation::new(0);
106 :
107 : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
108 : /// up on unresponsive pageservers and proceed.
109 : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
110 :
111 : /// How long a node may be unresponsive to heartbeats before we declare it offline.
112 : /// This must be long enough to cover node restarts as well as normal operations.
113 : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
114 :
115 : /// How long a node may be unresponsive to heartbeats during start up before we declare it
116 : /// offline. This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
117 : /// handling of the re-attach response may take a long time and blocks heartbeats from
118 : /// being handled on the pageserver side.
119 : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
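// A minimal sketch (not the production heartbeater) of how the two intervals
// above are meant to interact, mirroring the logic in `spawn_heartbeat_driver`
// further down: a node that is warming up gets the longer grace period before
// being declared offline. `unresponsive_for` and `warming_up` are illustrative
// parameter names, not part of the real API.
#[allow(dead_code)]
fn should_mark_offline_sketch(unresponsive_for: Duration, warming_up: bool) -> bool {
    let grace = if warming_up {
        MAX_WARMING_UP_INTERVAL_DEFAULT
    } else {
        MAX_OFFLINE_INTERVAL_DEFAULT
    };
    unresponsive_for >= grace
}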
120 :
121 0 : #[derive(Clone, strum_macros::Display)]
122 : enum TenantOperations {
123 : Create,
124 : LocationConfig,
125 : ConfigSet,
126 : TimeTravelRemoteStorage,
127 : Delete,
128 : UpdatePolicy,
129 : ShardSplit,
130 : SecondaryDownload,
131 : TimelineCreate,
132 : TimelineDelete,
133 : AttachHook,
134 : TimelineDetachAncestor,
135 : }
136 :
137 0 : #[derive(Clone, strum_macros::Display)]
138 : enum NodeOperations {
139 : Register,
140 : Configure,
141 : Delete,
142 : }
143 :
144 : /// The leadership status for the storage controller process.
145 : /// Allowed transitions are:
146 : /// 1. Leader -> SteppedDown
147 : /// 2. Candidate -> Leader
148 : #[derive(
149 : Eq,
150 : PartialEq,
151 : Copy,
152 : Clone,
153 0 : strum_macros::Display,
154 0 : strum_macros::EnumIter,
155 : measured::FixedCardinalityLabel,
156 : )]
157 : #[strum(serialize_all = "snake_case")]
158 : pub(crate) enum LeadershipStatus {
159 : /// This is the steady state where the storage controller can produce
160 : /// side effects in the cluster.
161 : Leader,
162 : /// We've been notified to step down by another candidate. No reconciliations
163 : /// take place in this state.
164 : SteppedDown,
165 : /// Initial state for a new storage controller instance. Will attempt to assume leadership.
166 : #[allow(unused)]
167 : Candidate,
168 : }
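// Sketch: the two allowed transitions listed in the doc comment above, encoded
// as a predicate. This helper is illustrative only; the real transitions happen
// via ServiceState::step_down and ServiceState::become_leader.
#[allow(dead_code)]
fn leadership_transition_allowed(from: LeadershipStatus, to: LeadershipStatus) -> bool {
    use LeadershipStatus::*;
    matches!((from, to), (Leader, SteppedDown) | (Candidate, Leader))
}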
169 :
170 : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
171 :
172 : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
173 : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
174 : // than they're being pushed onto the queue.
175 : const MAX_DELAYED_RECONCILES: usize = 10000;
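// Sketch of the backpressure this bound provides: `try_send` on the bounded
// channel fails fast when the queue is full instead of growing memory without
// limit, and the shard is simply picked up by a later background_reconcile
// pass. This helper is illustrative, not part of the service API.
#[allow(dead_code)]
fn try_enqueue_delayed_sketch(
    tx: &tokio::sync::mpsc::Sender<TenantShardId>,
    tenant_shard_id: TenantShardId,
) -> bool {
    match tx.try_send(tenant_shard_id) {
        Ok(()) => true,
        // Queue full: drop the request rather than buffering unboundedly.
        Err(TrySendError::Full(_)) => false,
        // Receiver is gone: we are shutting down.
        Err(TrySendError::Closed(_)) => false,
    }
}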
176 :
177 : // Top level state available to all HTTP handlers
178 : struct ServiceState {
179 : leadership_status: LeadershipStatus,
180 :
181 : tenants: BTreeMap<TenantShardId, TenantShard>,
182 :
183 : nodes: Arc<HashMap<NodeId, Node>>,
184 :
185 : scheduler: Scheduler,
186 :
187 : /// Ongoing background operation on the cluster if any is running.
188 : /// Note that only one such operation may run at any given time,
189 : /// hence the type choice.
190 : ongoing_operation: Option<OperationHandler>,
191 :
192 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
193 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
194 : }
195 :
196 : /// Transform an error from a pageserver into an error to return to callers of a storage
197 : /// controller API.
198 0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
199 0 : match e {
200 0 : mgmt_api::Error::SendRequest(e) => {
201 0 : // Presume errors sending requests are connectivity/availability issues
202 0 : ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
203 : }
204 0 : mgmt_api::Error::ReceiveErrorBody(str) => {
205 0 : // Presume errors receiving body are connectivity/availability issues
206 0 : ApiError::ResourceUnavailable(
207 0 : format!("{node} error receiving error body: {str}").into(),
208 0 : )
209 : }
210 0 : mgmt_api::Error::ReceiveBody(str) => {
211 0 : // Presume errors receiving body are connectivity/availability issues
212 0 : ApiError::ResourceUnavailable(format!("{node} error receiving body: {str}").into())
213 : }
214 0 : mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
215 0 : ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
216 : }
217 0 : mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
218 0 : ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
219 : }
220 0 : mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
221 0 : | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
222 : // Auth errors talking to a pageserver are not auth errors for the caller: they are
223 : // internal server errors, showing that something is wrong with the pageserver or
224 : // storage controller's auth configuration.
225 0 : ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
226 : }
227 0 : mgmt_api::Error::ApiError(status, msg) => {
228 0 : // Presume general case of pageserver API errors is that we tried to do something
229 0 : // that can't be done right now.
230 0 : ApiError::Conflict(format!("{node} {status}: {msg}"))
231 : }
232 0 : mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
233 : }
234 0 : }
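// A sketch test of the mapping above, assuming `mgmt_api::Error::ApiError`
// carries a (StatusCode, String) pair and that `Node::new` is callable here as
// in the test-only scheduler setup later in this file: a pageserver 404 should
// surface to the caller as NotFound, not as an internal error.
#[cfg(test)]
mod passthrough_api_error_sketch {
    use super::*;

    #[test]
    fn not_found_passes_through() {
        let node = Node::new(NodeId(1), "".to_string(), 123, "".to_string(), 123);
        let err = passthrough_api_error(
            &node,
            mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, "no such tenant".to_string()),
        );
        assert!(matches!(err, ApiError::NotFound(_)));
    }
}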
235 :
236 : impl ServiceState {
237 0 : fn new(
238 0 : nodes: HashMap<NodeId, Node>,
239 0 : tenants: BTreeMap<TenantShardId, TenantShard>,
240 0 : scheduler: Scheduler,
241 0 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
242 0 : initial_leadership_status: LeadershipStatus,
243 0 : ) -> Self {
244 0 : metrics::update_leadership_status(initial_leadership_status);
245 0 :
246 0 : Self {
247 0 : leadership_status: initial_leadership_status,
248 0 : tenants,
249 0 : nodes: Arc::new(nodes),
250 0 : scheduler,
251 0 : ongoing_operation: None,
252 0 : delayed_reconcile_rx,
253 0 : }
254 0 : }
255 :
256 0 : fn parts_mut(
257 0 : &mut self,
258 0 : ) -> (
259 0 : &mut Arc<HashMap<NodeId, Node>>,
260 0 : &mut BTreeMap<TenantShardId, TenantShard>,
261 0 : &mut Scheduler,
262 0 : ) {
263 0 : (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
264 0 : }
265 :
266 0 : fn get_leadership_status(&self) -> LeadershipStatus {
267 0 : self.leadership_status
268 0 : }
269 :
270 0 : fn step_down(&mut self) {
271 0 : self.leadership_status = LeadershipStatus::SteppedDown;
272 0 : metrics::update_leadership_status(self.leadership_status);
273 0 : }
274 :
275 0 : fn become_leader(&mut self) {
276 0 : self.leadership_status = LeadershipStatus::Leader;
277 0 : metrics::update_leadership_status(self.leadership_status);
278 0 : }
279 : }
280 :
281 : #[derive(Clone)]
282 : pub struct Config {
283 : // All pageservers managed by one instance of this service must have
284 : // the same public key. This JWT token will be used to authenticate
285 : // this service to the pageservers it manages.
286 : pub jwt_token: Option<String>,
287 :
288 : // This JWT token will be used to authenticate this service to the control plane.
289 : pub control_plane_jwt_token: Option<String>,
290 :
291 : // This JWT token will be used to authenticate with other storage controller instances
292 : pub peer_jwt_token: Option<String>,
293 :
294 : /// Where the compute hook should send notifications of pageserver attachment locations
295 : /// (this URL points to the control plane in prod). If this is None, the compute hook will
296 : /// assume it is running in a test environment and try to update neon_local.
297 : pub compute_hook_url: Option<String>,
298 :
299 : /// Grace period within which a pageserver does not respond to heartbeats, but is still
300 : /// considered active. Once the grace period elapses, the next heartbeat failure will
301 : /// mark the pageserver offline.
302 : pub max_offline_interval: Duration,
303 :
304 : /// Extended grace period within which pageserver may not respond to heartbeats.
305 : /// This extended grace period kicks in after the node has been drained for restart
306 : /// and/or upon handling the re-attach request from a node.
307 : pub max_warming_up_interval: Duration,
308 :
309 : /// How many Reconcilers may be spawned concurrently
310 : pub reconciler_concurrency: usize,
311 :
312 : /// How large must a shard grow in bytes before we split it?
313 : /// None disables auto-splitting.
314 : pub split_threshold: Option<u64>,
315 :
316 : // TODO: make this cfg(feature = "testing")
317 : pub neon_local_repo_dir: Option<PathBuf>,
318 :
319 : // Maximum acceptable download lag for the secondary location
320 : // while draining a node. If the secondary location is lagging
321 : // by more than the configured amount, then the secondary is not
322 : // upgraded to primary.
323 : pub max_secondary_lag_bytes: Option<u64>,
324 :
325 : pub address_for_peers: Option<Uri>,
326 :
327 : pub start_as_candidate: bool,
328 :
329 : pub http_service_port: i32,
330 : }
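// A hedged example of a test-environment Config using only the fields declared
// above; every concrete value here is illustrative, not a recommendation.
#[allow(dead_code)]
fn example_test_config() -> Config {
    Config {
        jwt_token: None,
        control_plane_jwt_token: None,
        peer_jwt_token: None,
        // None => the compute hook assumes a test environment (neon_local).
        compute_hook_url: None,
        max_offline_interval: MAX_OFFLINE_INTERVAL_DEFAULT,
        max_warming_up_interval: MAX_WARMING_UP_INTERVAL_DEFAULT,
        reconciler_concurrency: RECONCILER_CONCURRENCY_DEFAULT,
        // None disables auto-splitting.
        split_threshold: None,
        neon_local_repo_dir: None,
        max_secondary_lag_bytes: None,
        address_for_peers: None,
        start_as_candidate: false,
        http_service_port: 1234,
    }
}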
331 :
332 : impl From<DatabaseError> for ApiError {
333 0 : fn from(err: DatabaseError) -> ApiError {
334 0 : match err {
335 0 : DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
336 : // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
337 : DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
338 0 : ApiError::ShuttingDown
339 : }
340 0 : DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => {
341 0 : ApiError::InternalServerError(anyhow::anyhow!(reason))
342 : }
343 : }
344 0 : }
345 : }
346 :
347 : pub struct Service {
348 : inner: Arc<std::sync::RwLock<ServiceState>>,
349 : config: Config,
350 : persistence: Arc<Persistence>,
351 : compute_hook: Arc<ComputeHook>,
352 : result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
353 :
354 : heartbeater: Heartbeater,
355 :
356 : // Channel for background cleanup from failed operations that require cleanup, such as shard split
357 : abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
358 :
359 : // Locking on a tenant granularity (covers all shards in the tenant):
360 : // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
361 : // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
362 : tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
363 :
364 : // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
365 : // that transition it to/from Active.
366 : node_op_locks: IdLockMap<NodeId, NodeOperations>,
367 :
368 : // Limit how many Reconcilers we will spawn concurrently
369 : reconciler_concurrency: Arc<tokio::sync::Semaphore>,
370 :
371 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
372 : /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
373 : ///
374 : /// Note that this state logically lives inside ServiceState, but carrying the Sender here makes the code simpler
375 : /// by avoiding needing a &mut ref to something inside ServiceState. This could be optimized to
376 : /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
377 : delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
378 :
379 : // Process shutdown will fire this token
380 : cancel: CancellationToken,
381 :
382 : // Child token of [`Service::cancel`] used by reconcilers
383 : reconcilers_cancel: CancellationToken,
384 :
385 : // Background tasks will hold this gate
386 : gate: Gate,
387 :
388 : // Reconcilers background tasks will hold this gate
389 : reconcilers_gate: Gate,
390 :
391 : /// This waits for initial reconciliation with pageservers to complete. Until this barrier
392 : /// passes, it isn't safe to do any actions that mutate tenants.
393 : pub(crate) startup_complete: Barrier,
394 : }
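// Sketch of the tenant_op_locks convention documented above, patterned on real
// call sites such as attach_hook further down: take the lock exclusively for
// operations that mutate tenant-wide persistent state, and in shared mode where
// the shard set must merely stay stable. Illustrative only.
#[allow(dead_code)]
async fn tenant_lock_usage_sketch(service: &Service, tenant_id: TenantId) {
    // Exclusive: e.g. create/delete/split.
    let _exclusive = trace_exclusive_lock(
        &service.tenant_op_locks,
        tenant_id,
        TenantOperations::Create,
    )
    .await;
    drop(_exclusive);

    // Shared: e.g. timeline CRUD, which needs the shard count to stay fixed.
    let _shared = trace_shared_lock(
        &service.tenant_op_locks,
        tenant_id,
        TenantOperations::TimelineCreate,
    )
    .await;
}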
395 :
396 : impl From<ReconcileWaitError> for ApiError {
397 0 : fn from(value: ReconcileWaitError) -> Self {
398 0 : match value {
399 0 : ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
400 0 : e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
401 0 : e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
402 : }
403 0 : }
404 : }
405 :
406 : impl From<OperationError> for ApiError {
407 0 : fn from(value: OperationError) -> Self {
408 0 : match value {
409 0 : OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => {
410 0 : ApiError::InternalServerError(anyhow::anyhow!(err))
411 : }
412 0 : OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
413 : }
414 0 : }
415 : }
416 :
417 : #[allow(clippy::large_enum_variant)]
418 : enum TenantCreateOrUpdate {
419 : Create(TenantCreateRequest),
420 : Update(Vec<ShardUpdate>),
421 : }
422 :
423 : struct ShardSplitParams {
424 : old_shard_count: ShardCount,
425 : new_shard_count: ShardCount,
426 : new_stripe_size: Option<ShardStripeSize>,
427 : targets: Vec<ShardSplitTarget>,
428 : policy: PlacementPolicy,
429 : config: TenantConfig,
430 : shard_ident: ShardIdentity,
431 : }
432 :
433 : // When preparing for a shard split, we may either choose to proceed with the split,
434 : // or find that the work is already done and return NoOp.
435 : enum ShardSplitAction {
436 : Split(ShardSplitParams),
437 : NoOp(TenantShardSplitResponse),
438 : }
439 :
440 : // A parent shard which will be split
441 : struct ShardSplitTarget {
442 : parent_id: TenantShardId,
443 : node: Node,
444 : child_ids: Vec<TenantShardId>,
445 : }
446 :
447 : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
448 : /// might not be available. We therefore use a queue of abort operations processed in the background.
449 : struct TenantShardSplitAbort {
450 : tenant_id: TenantId,
451 : /// The target values from the request that failed
452 : new_shard_count: ShardCount,
453 : new_stripe_size: Option<ShardStripeSize>,
454 : /// Until this abort op is complete, no other operations may be done on the tenant
455 : _tenant_lock: TracingExclusiveGuard<TenantOperations>,
456 : }
457 :
458 0 : #[derive(thiserror::Error, Debug)]
459 : enum TenantShardSplitAbortError {
460 : #[error(transparent)]
461 : Database(#[from] DatabaseError),
462 : #[error(transparent)]
463 : Remote(#[from] mgmt_api::Error),
464 : #[error("Unavailable")]
465 : Unavailable,
466 : }
467 :
468 : struct ShardUpdate {
469 : tenant_shard_id: TenantShardId,
470 : placement_policy: PlacementPolicy,
471 : tenant_config: TenantConfig,
472 :
473 : /// If this is None, generation is not updated.
474 : generation: Option<Generation>,
475 : }
476 :
477 : enum StopReconciliationsReason {
478 : ShuttingDown,
479 : SteppingDown,
480 : }
481 :
482 : impl std::fmt::Display for StopReconciliationsReason {
483 0 : fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
484 0 : let s = match self {
485 0 : Self::ShuttingDown => "Shutting down",
486 0 : Self::SteppingDown => "Stepping down",
487 : };
488 0 : write!(writer, "{}", s)
489 0 : }
490 : }
491 :
492 : pub(crate) enum ReconcileResultRequest {
493 : ReconcileResult(ReconcileResult),
494 : Stop,
495 : }
496 :
497 : impl Service {
498 0 : pub fn get_config(&self) -> &Config {
499 0 : &self.config
500 0 : }
501 :
502 : /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
503 : /// view of the world, and determine which pageservers are responsive.
504 0 : #[instrument(skip_all)]
505 : async fn startup_reconcile(
506 : self: &Arc<Service>,
507 : current_leader: Option<ControllerPersistence>,
508 : leader_step_down_state: Option<GlobalObservedState>,
509 : bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
510 : Result<(), (TenantShardId, NotifyError)>,
511 : >,
512 : ) {
513 : // Startup reconciliation does I/O to other services: whether they
514 : // are responsive or not, we should aim to finish within our deadline, because:
515 : // - If we don't, a k8s readiness hook watching /ready will kill us.
516 : // - While we're waiting for startup reconciliation, we are not fully
517 : // available for end user operations like creating/deleting tenants and timelines.
518 : //
519 : // We set multiple deadlines to break up the time available between the phases of work: this is
520 : // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
521 : let start_at = Instant::now();
522 : let node_scan_deadline = start_at
523 : .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
524 : .expect("Reconcile timeout is a modest constant");
525 :
526 : let observed = if let Some(state) = leader_step_down_state {
527 : tracing::info!(
528 : "Using observed state received from leader at {}",
529 : current_leader.as_ref().unwrap().address
530 : );
531 :
532 : state
533 : } else {
534 : self.build_global_observed_state(node_scan_deadline).await
535 : };
536 :
537 : // Accumulate a list of any tenant locations that ought to be detached
538 : let mut cleanup = Vec::new();
539 :
540 : // Send initial heartbeat requests to all nodes loaded from the database
541 : let all_nodes = {
542 : let locked = self.inner.read().unwrap();
543 : locked.nodes.clone()
544 : };
545 : let nodes_online = self.initial_heartbeat_round(all_nodes.keys()).await;
546 :
547 : // List of tenants for which we will attempt to notify compute of their location at startup
548 : let mut compute_notifications = Vec::new();
549 :
550 : // Populate intent and observed states for all tenants, based on reported state on pageservers
551 : tracing::info!("Populating tenant shards' states from initial pageserver scan...");
552 : let shard_count = {
553 : let mut locked = self.inner.write().unwrap();
554 : let (nodes, tenants, scheduler) = locked.parts_mut();
555 :
556 : // Mark nodes online if they responded to us: nodes are offline by default after a restart.
557 : let mut new_nodes = (**nodes).clone();
558 : for (node_id, node) in new_nodes.iter_mut() {
559 : if let Some(utilization) = nodes_online.get(node_id) {
560 : node.set_availability(NodeAvailability::Active(UtilizationScore(
561 : utilization.utilization_score,
562 : )));
563 : scheduler.node_upsert(node);
564 : }
565 : }
566 : *nodes = Arc::new(new_nodes);
567 :
568 : for (tenant_shard_id, observed_state) in observed.0 {
569 : let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
570 : for node_id in observed_state.locations.keys() {
571 : cleanup.push((tenant_shard_id, *node_id));
572 : }
573 :
574 : continue;
575 : };
576 :
577 : tenant_shard.observed = observed_state;
578 : }
579 :
580 : // Populate each tenant's intent state
581 : let mut schedule_context = ScheduleContext::default();
582 : for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
583 : if tenant_shard_id.shard_number == ShardNumber(0) {
584 : // Reset scheduling context each time we advance to the next Tenant
585 : schedule_context = ScheduleContext::default();
586 : }
587 :
588 : tenant_shard.intent_from_observed(scheduler);
589 : if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
590 : // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
591 : // not enough pageservers are available. The tenant may well still be available
592 : // to clients.
593 : tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
594 : } else {
595 : // If we're both intending and observed to be attached at a particular node, we will
596 : // emit a compute notification for this. In the case where our observed state does not
597 : // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
598 : if let Some(attached_at) = tenant_shard.stably_attached() {
599 : compute_notifications.push((
600 : *tenant_shard_id,
601 : attached_at,
602 : tenant_shard.shard.stripe_size,
603 : ));
604 : }
605 : }
606 : }
607 :
608 : tenants.len()
609 : };
610 :
611 : // Before making any observable changes to the cluster, persist self
612 : // as leader in database and memory.
613 : let leadership = Leadership::new(
614 : self.persistence.clone(),
615 : self.config.clone(),
616 : self.cancel.child_token(),
617 : );
618 :
619 : if let Err(e) = leadership.become_leader(current_leader).await {
620 : tracing::error!("Failed to persist self as leader: {e}. Aborting start-up ...");
621 : std::process::exit(1);
622 : }
623 :
624 : self.inner.write().unwrap().become_leader();
625 :
626 : // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
627 : // generation_pageserver in the database.
628 :
629 : // Emit compute hook notifications for all tenants which are already stably attached. Other tenants
630 : // will emit compute hook notifications when they reconcile.
631 : //
632 : // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later
633 : // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
634 : // calls will be correctly ordered wrt these.
635 : //
636 : // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them
637 : // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore
638 : // unit and start doing I/O.
639 : tracing::info!(
640 : "Sending {} compute notifications",
641 : compute_notifications.len()
642 : );
643 : self.compute_hook.notify_background(
644 : compute_notifications,
645 : bg_compute_notify_result_tx.clone(),
646 : &self.cancel,
647 : );
648 :
649 : // Finally, now that the service is up and running, launch reconcile operations for any tenants
650 : // which require it: under normal circumstances this should only include tenants that were in some
651 : // transient state before we restarted, or any tenants whose compute hooks failed above.
652 : tracing::info!("Checking for shards in need of reconciliation...");
653 : let reconcile_tasks = self.reconcile_all();
654 : // We will not wait for these reconciliation tasks to run here: we're now done with startup and
655 : // normal operations may proceed.
656 :
657 : // Clean up any tenants that were found on pageservers but are not known to us. Do this in the
658 : // background because it does not need to complete in order to proceed with other work.
659 : if !cleanup.is_empty() {
660 : tracing::info!("Cleaning up {} locations in the background", cleanup.len());
661 : tokio::task::spawn({
662 : let cleanup_self = self.clone();
663 0 : async move { cleanup_self.cleanup_locations(cleanup).await }
664 : });
665 : }
666 :
667 : tracing::info!("Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)");
668 : }
669 :
670 0 : async fn initial_heartbeat_round<'a>(
671 0 : &self,
672 0 : node_ids: impl Iterator<Item = &'a NodeId>,
673 0 : ) -> HashMap<NodeId, PageserverUtilization> {
674 0 : assert!(!self.startup_complete.is_ready());
675 :
676 0 : let all_nodes = {
677 0 : let locked = self.inner.read().unwrap();
678 0 : locked.nodes.clone()
679 0 : };
680 0 :
681 0 : let mut nodes_to_heartbeat = HashMap::new();
682 0 : for node_id in node_ids {
683 0 : match all_nodes.get(node_id) {
684 0 : Some(node) => {
685 0 : nodes_to_heartbeat.insert(*node_id, node.clone());
686 0 : }
687 : None => {
688 0 : tracing::warn!("Node {node_id} was removed during start-up");
689 : }
690 : }
691 : }
692 :
693 0 : tracing::info!("Sending initial heartbeats...");
694 0 : let res = self
695 0 : .heartbeater
696 0 : .heartbeat(Arc::new(nodes_to_heartbeat))
697 0 : .await;
698 :
699 0 : let mut online_nodes = HashMap::new();
700 0 : if let Ok(deltas) = res {
701 0 : for (node_id, status) in deltas.0 {
702 0 : match status {
703 0 : PageserverState::Available { utilization, .. } => {
704 0 : online_nodes.insert(node_id, utilization);
705 0 : }
706 0 : PageserverState::Offline => {}
707 : PageserverState::WarmingUp { .. } => {
708 0 : unreachable!("Nodes are never marked warming-up during startup reconcile")
709 : }
710 : }
711 : }
712 0 : }
713 :
714 0 : online_nodes
715 0 : }
716 :
717 : /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
718 : ///
719 : /// The result includes only nodes which responded within the deadline
720 0 : async fn scan_node_locations(
721 0 : &self,
722 0 : deadline: Instant,
723 0 : ) -> HashMap<NodeId, LocationConfigListResponse> {
724 0 : let nodes = {
725 0 : let locked = self.inner.read().unwrap();
726 0 : locked.nodes.clone()
727 0 : };
728 0 :
729 0 : let mut node_results = HashMap::new();
730 0 :
731 0 : let mut node_list_futs = FuturesUnordered::new();
732 0 :
733 0 : tracing::info!("Scanning shards on {} nodes...", nodes.len());
734 0 : for node in nodes.values() {
735 0 : node_list_futs.push({
736 0 : async move {
737 0 : tracing::info!("Scanning shards on node {node}...");
738 0 : let timeout = Duration::from_secs(1);
739 0 : let response = node
740 0 : .with_client_retries(
741 0 : |client| async move { client.list_location_config().await },
742 0 : &self.config.jwt_token,
743 0 : 1,
744 0 : 5,
745 0 : timeout,
746 0 : &self.cancel,
747 0 : )
748 0 : .await;
749 0 : (node.get_id(), response)
750 0 : }
751 0 : });
752 0 : }
753 :
754 : loop {
755 0 : let (node_id, result) = tokio::select! {
756 : next = node_list_futs.next() => {
757 : match next {
758 : Some(result) => result,
759 : None =>{
760 : // We got results for all our nodes
761 : break;
762 : }
763 :
764 : }
765 : },
766 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
767 : // Give up waiting for anyone who hasn't responded: we will yield the results that we have
768 : tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
769 : break;
770 : }
771 : };
772 :
773 0 : let Some(list_response) = result else {
774 0 : tracing::info!("Shutdown during startup_reconcile");
775 0 : break;
776 : };
777 :
778 0 : match list_response {
779 0 : Err(e) => {
780 0 : tracing::warn!("Could not scan node {} ({e})", node_id);
781 : }
782 0 : Ok(listing) => {
783 0 : node_results.insert(node_id, listing);
784 0 : }
785 : }
786 : }
787 :
788 0 : node_results
789 0 : }
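// The select-with-deadline pattern above in generic form: a sketch that keeps
// whatever completed before the deadline and abandons stragglers. It relies
// only on the tokio and futures imports already present in this file.
#[allow(dead_code)]
async fn collect_until_deadline_sketch<T>(
    mut futs: FuturesUnordered<std::pin::Pin<Box<dyn std::future::Future<Output = T> + Send>>>,
    deadline: Instant,
) -> Vec<T> {
    let mut results = Vec::new();
    loop {
        tokio::select! {
            next = futs.next() => match next {
                Some(r) => results.push(r),
                None => break, // every future completed in time
            },
            _ = tokio::time::sleep(deadline.saturating_duration_since(Instant::now())) => {
                break; // deadline reached: yield what we have
            }
        }
    }
    results
}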
790 :
791 0 : async fn build_global_observed_state(&self, deadline: Instant) -> GlobalObservedState {
792 0 : let node_listings = self.scan_node_locations(deadline).await;
793 0 : let mut observed = GlobalObservedState::default();
794 :
795 0 : for (node_id, location_confs) in node_listings {
796 0 : tracing::info!(
797 0 : "Received {} shard statuses from pageserver {}",
798 0 : location_confs.tenant_shards.len(),
799 : node_id
800 : );
801 :
802 0 : for (tid, location_conf) in location_confs.tenant_shards {
803 0 : let entry = observed.0.entry(tid).or_default();
804 0 : entry.locations.insert(
805 0 : node_id,
806 0 : ObservedStateLocation {
807 0 : conf: location_conf,
808 0 : },
809 0 : );
810 0 : }
811 : }
812 :
813 0 : observed
814 0 : }
815 :
816 : /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
817 : ///
818 : /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
819 : /// tenants, then it is probably something incompletely deleted before: we will not fight with any
820 : /// other task trying to attach it.
821 0 : #[instrument(skip_all)]
822 : async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
823 : let nodes = self.inner.read().unwrap().nodes.clone();
824 :
825 : for (tenant_shard_id, node_id) in cleanup {
826 : // A node reported a tenant_shard_id which is unknown to us: detach it.
827 : let Some(node) = nodes.get(&node_id) else {
828 : // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
829 : // a location to clean up on a node that has since been removed.
830 : tracing::info!(
831 : "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
832 : );
833 : continue;
834 : };
835 :
836 : if self.cancel.is_cancelled() {
837 : break;
838 : }
839 :
840 : let client = PageserverClient::new(
841 : node.get_id(),
842 : node.base_url(),
843 : self.config.jwt_token.as_deref(),
844 : );
845 : match client
846 : .location_config(
847 : tenant_shard_id,
848 : LocationConfig {
849 : mode: LocationConfigMode::Detached,
850 : generation: None,
851 : secondary_conf: None,
852 : shard_number: tenant_shard_id.shard_number.0,
853 : shard_count: tenant_shard_id.shard_count.literal(),
854 : shard_stripe_size: 0,
855 : tenant_conf: models::TenantConfig::default(),
856 : },
857 : None,
858 : false,
859 : )
860 : .await
861 : {
862 : Ok(()) => {
863 : tracing::info!(
864 : "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
865 : );
866 : }
867 : Err(e) => {
868 : // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
869 : // break anything.
870 : tracing::error!(
871 : "Failed to detach unknkown shard {tenant_shard_id} on pageserver {node_id}: {e}"
872 : );
873 : }
874 : }
875 : }
876 : }
877 :
878 : /// Long running background task that periodically wakes up and looks for shards that need
879 : /// reconciliation. Reconciliation is fallible, so any reconciliation tasks that fail during
880 : /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
881 : /// for those retries.
882 0 : #[instrument(skip_all)]
883 : async fn background_reconcile(self: &Arc<Self>) {
884 : self.startup_complete.clone().wait().await;
885 :
886 : const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
887 :
888 : let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
889 : while !self.reconcilers_cancel.is_cancelled() {
890 : tokio::select! {
891 : _ = interval.tick() => {
892 : let reconciles_spawned = self.reconcile_all();
893 : if reconciles_spawned == 0 {
894 : // Run optimizer only when we didn't find any other work to do
895 : let optimizations = self.optimize_all().await;
896 : if optimizations == 0 {
897 : // Run new splits only when no optimizations are pending
898 : self.autosplit_tenants().await;
899 : }
900 : }
901 : }
902 : _ = self.reconcilers_cancel.cancelled() => return
903 : }
904 : }
905 : }
906 0 : #[instrument(skip_all)]
907 : async fn spawn_heartbeat_driver(&self) {
908 : self.startup_complete.clone().wait().await;
909 :
910 : const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);
911 :
912 : let mut interval = tokio::time::interval(HEARTBEAT_INTERVAL);
913 : while !self.cancel.is_cancelled() {
914 : tokio::select! {
915 : _ = interval.tick() => { }
916 : _ = self.cancel.cancelled() => return
917 : };
918 :
919 : let nodes = {
920 : let locked = self.inner.read().unwrap();
921 : locked.nodes.clone()
922 : };
923 :
924 : let res = self.heartbeater.heartbeat(nodes).await;
925 : if let Ok(deltas) = res {
926 : for (node_id, state) in deltas.0 {
927 : let new_availability = match state {
928 : PageserverState::Available { utilization, .. } => NodeAvailability::Active(
929 : UtilizationScore(utilization.utilization_score),
930 : ),
931 : PageserverState::WarmingUp { started_at } => {
932 : NodeAvailability::WarmingUp(started_at)
933 : }
934 : PageserverState::Offline => {
935 : // The node might have been placed in the WarmingUp state
936 : // while the heartbeat round was ongoing. Hence, filter out
937 : // offline transitions for WarmingUp nodes that are still within
938 : // their grace period.
939 : if let Ok(NodeAvailability::WarmingUp(started_at)) =
940 0 : self.get_node(node_id).await.map(|n| n.get_availability())
941 : {
942 : let now = Instant::now();
943 : if now - started_at >= self.config.max_warming_up_interval {
944 : NodeAvailability::Offline
945 : } else {
946 : NodeAvailability::WarmingUp(started_at)
947 : }
948 : } else {
949 : NodeAvailability::Offline
950 : }
951 : }
952 : };
953 :
954 : // This is the code path for genuine availability transitions (i.e. node
955 : // goes unavailable and/or comes back online).
956 : let res = self
957 : .node_configure(node_id, Some(new_availability), None)
958 : .await;
959 :
960 : match res {
961 : Ok(()) => {}
962 : Err(ApiError::NotFound(_)) => {
963 : // This should be rare, but legitimate since the heartbeats are done
964 : // on a snapshot of the nodes.
965 : tracing::info!("Node {} was not found after heartbeat round", node_id);
966 : }
967 : Err(err) => {
968 : // Transition to active involves reconciling: if a node responds to a heartbeat then
969 : // becomes unavailable again, we may get an error here.
970 : tracing::error!(
971 : "Failed to update node {} after heartbeat round: {}",
972 : node_id,
973 : err
974 : );
975 : }
976 : }
977 : }
978 : }
979 : }
980 : }
981 :
982 : /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
983 : /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
984 : /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
985 : /// will indicate that reconciliation is not needed.
986 0 : #[instrument(skip_all, fields(
987 : tenant_id=%result.tenant_shard_id.tenant_id, shard_id=%result.tenant_shard_id.shard_slug(),
988 : sequence=%result.sequence
989 0 : ))]
990 : fn process_result(&self, mut result: ReconcileResult) {
991 : let mut locked = self.inner.write().unwrap();
992 : let (nodes, tenants, _scheduler) = locked.parts_mut();
993 : let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
994 : // A reconciliation result might race with removing a tenant: drop results for
995 : // tenants that aren't in our map.
996 : return;
997 : };
998 :
999 : // Usually generation should only be updated via this path, so the max() isn't
1000 : // needed, but it is used to handle out-of-band updates via e.g. the test hook.
1001 : tenant.generation = std::cmp::max(tenant.generation, result.generation);
1002 :
1003 : // If the reconciler signals that it failed to notify compute, set this state on
1004 : // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
1005 : tenant.pending_compute_notification = result.pending_compute_notification;
1006 :
1007 : // Let the TenantShard know it is idle.
1008 : tenant.reconcile_complete(result.sequence);
1009 :
1010 : // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
1011 : // make to the tenant
1012 : result
1013 : .observed
1014 : .locations
1015 0 : .retain(|node_id, _loc| nodes.contains_key(node_id));
1016 :
1017 : match result.result {
1018 : Ok(()) => {
1019 : for (node_id, loc) in &result.observed.locations {
1020 : if let Some(conf) = &loc.conf {
1021 : tracing::info!("Updating observed location {}: {:?}", node_id, conf);
1022 : } else {
1023 : tracing::info!("Setting observed location {} to None", node_id,)
1024 : }
1025 : }
1026 :
1027 : tenant.observed = result.observed;
1028 : tenant.waiter.advance(result.sequence);
1029 : }
1030 : Err(e) => {
1031 : match e {
1032 : ReconcileError::Cancel => {
1033 : tracing::info!("Reconciler was cancelled");
1034 : }
1035 : ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
1036 : // This might be due to the reconciler getting cancelled, or it might
1037 : // be due to the `Node` being marked offline.
1038 : tracing::info!("Reconciler cancelled during pageserver API call");
1039 : }
1040 : _ => {
1041 : tracing::warn!("Reconcile error: {}", e);
1042 : }
1043 : }
1044 :
1045 : // Ordering: populate last_error before advancing error_seq,
1046 : // so that waiters will see the correct error after waiting.
1047 : tenant.set_last_error(result.sequence, e);
1048 :
1049 : for (node_id, o) in result.observed.locations {
1050 : tenant.observed.locations.insert(node_id, o);
1051 : }
1052 : }
1053 : }
1054 :
1055 : // Maybe some other work can proceed now that this job finished.
1056 : if self.reconciler_concurrency.available_permits() > 0 {
1057 : while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
1058 : let (nodes, tenants, _scheduler) = locked.parts_mut();
1059 : if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
1060 : shard.delayed_reconcile = false;
1061 : self.maybe_reconcile_shard(shard, nodes);
1062 : }
1063 :
1064 : if self.reconciler_concurrency.available_permits() == 0 {
1065 : break;
1066 : }
1067 : }
1068 : }
1069 : }
1070 :
1071 0 : async fn process_results(
1072 0 : &self,
1073 0 : mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
1074 0 : mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
1075 0 : Result<(), (TenantShardId, NotifyError)>,
1076 0 : >,
1077 0 : ) {
1078 0 : loop {
1079 0 : // Wait for the next result, or for cancellation
1080 0 : tokio::select! {
1081 : r = result_rx.recv() => {
1082 : match r {
1083 : Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
1084 : None | Some(ReconcileResultRequest::Stop) => {break;}
1085 : }
1086 : }
1087 0 : _ = async{
1088 0 : match bg_compute_hook_result_rx.recv().await {
1089 0 : Some(result) => {
1090 0 : if let Err((tenant_shard_id, notify_error)) = result {
1091 0 : tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
1092 0 : let mut locked = self.inner.write().unwrap();
1093 0 : if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
1094 0 : shard.pending_compute_notification = true;
1095 0 : }
1096 :
1097 0 : }
1098 : },
1099 : None => {
1100 : // This channel is dead, but we don't want to terminate the outer loop{}: just wait for shutdown
1101 0 : self.cancel.cancelled().await;
1102 : }
1103 : }
1104 0 : } => {},
1105 : _ = self.cancel.cancelled() => {
1106 : break;
1107 : }
1108 0 : };
1109 0 : }
1110 0 : }
1111 :
1112 0 : async fn process_aborts(
1113 0 : &self,
1114 0 : mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
1115 0 : ) {
1116 : loop {
1117 : // Wait for the next result, or for cancellation
1118 0 : let op = tokio::select! {
1119 : r = abort_rx.recv() => {
1120 : match r {
1121 : Some(op) => {op},
1122 : None => {break;}
1123 : }
1124 : }
1125 : _ = self.cancel.cancelled() => {
1126 : break;
1127 : }
1128 : };
1129 :
1130 : // Retry until shutdown: we must keep this request object alive until it is properly
1131 : // processed, as it holds a lock guard that prevents other operations from acting on
1132 : // the tenant while it is in a partially-split state.
1133 0 : while !self.cancel.is_cancelled() {
1134 0 : match self.abort_tenant_shard_split(&op).await {
1135 0 : Ok(_) => break,
1136 0 : Err(e) => {
1137 0 : tracing::warn!(
1138 0 : "Failed to abort shard split on {}, will retry: {e}",
1139 : op.tenant_id
1140 : );
1141 :
1142 : // If a node is unavailable, we hope that it has been properly marked Offline
1143 : // when we retry, so that the abort op will succeed. If the abort op is failing
1144 : // for some other reason, we will keep retrying forever, or until a human notices
1145 : // and does something about it (either fixing a pageserver or restarting the controller).
1146 0 : tokio::time::timeout(Duration::from_secs(5), self.cancel.cancelled())
1147 0 : .await
1148 0 : .ok();
1149 : }
1150 : }
1151 : }
1152 : }
1153 0 : }
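// The abort loop's retry discipline in generic form: a sketch of retrying an
// operation with a fixed pause while aborting promptly on shutdown. The helper
// name and the 5s pause are illustrative.
#[allow(dead_code)]
async fn retry_until_cancelled_sketch<F, Fut, T, E>(
    cancel: &CancellationToken,
    mut op: F,
) -> Option<T>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
    E: std::fmt::Display,
{
    while !cancel.is_cancelled() {
        match op().await {
            Ok(v) => return Some(v),
            Err(e) => {
                tracing::warn!("Operation failed, will retry: {e}");
                // Sleep between attempts, but wake immediately on shutdown.
                tokio::time::timeout(Duration::from_secs(5), cancel.cancelled())
                    .await
                    .ok();
            }
        }
    }
    None
}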
1154 :
1155 0 : pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
1156 0 : let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
1157 0 : let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
1158 0 :
1159 0 : let leadership_cancel = CancellationToken::new();
1160 0 : let leadership = Leadership::new(persistence.clone(), config.clone(), leadership_cancel);
1161 0 : let (leader, leader_step_down_state) = leadership.step_down_current_leader().await?;
1162 :
1163 : // Apply the migrations **after** the current leader has stepped down
1164 : // (or we've given up waiting for it), but **before** reading from the
1165 : // database. The only exception is reading the current leader before
1166 : // migrating.
1167 0 : persistence.migration_run().await?;
1168 :
1169 0 : tracing::info!("Loading nodes from database...");
1170 0 : let nodes = persistence
1171 0 : .list_nodes()
1172 0 : .await?
1173 0 : .into_iter()
1174 0 : .map(Node::from_persistent)
1175 0 : .collect::<Vec<_>>();
1176 0 : let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
1177 0 : tracing::info!("Loaded {} nodes from database.", nodes.len());
1178 :
1179 0 : tracing::info!("Loading shards from database...");
1180 0 : let mut tenant_shard_persistence = persistence.list_tenant_shards().await?;
1181 0 : tracing::info!(
1182 0 : "Loaded {} shards from database.",
1183 0 : tenant_shard_persistence.len()
1184 : );
1185 :
1186 : // If any shard splits were in progress, reset the database state to abort them
1187 0 : let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
1188 0 : HashMap::new();
1189 0 : for tsp in &mut tenant_shard_persistence {
1190 0 : let shard = tsp.get_shard_identity()?;
1191 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
1192 0 : let entry = tenant_shard_count_min_max
1193 0 : .entry(tenant_shard_id.tenant_id)
1194 0 : .or_insert_with(|| (shard.count, shard.count));
1195 0 : entry.0 = std::cmp::min(entry.0, shard.count);
1196 0 : entry.1 = std::cmp::max(entry.1, shard.count);
1197 0 : }
1198 :
1199 0 : for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
1200 0 : if count_min != count_max {
1201 : // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
1202 : // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
1203 : // be dropped later in [`Self::node_activate_reconcile`] if the node isn't available right now.
1204 0 : tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
1205 0 : let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
1206 :
1207 : // We may never see the Complete status here: if the split was complete, we wouldn't have
1208 : // identified this tenant has having mismatching min/max counts.
1209 0 : assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
1210 :
1211 : // Clear the splitting status in-memory, to reflect that we just aborted in the database
1212 0 : tenant_shard_persistence.iter_mut().for_each(|tsp| {
1213 0 : // Set idle split state on those shards that we will retain.
1214 0 : let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
1215 0 : if tsp_tenant_id == tenant_id
1216 0 : && tsp.get_shard_identity().unwrap().count == count_min
1217 0 : {
1218 0 : tsp.splitting = SplitState::Idle;
1219 0 : } else if tsp_tenant_id == tenant_id {
1220 : // Leave the splitting state on the child shards: this will be used next to
1221 : // drop them.
1222 0 : tracing::info!(
1223 0 : "Shard {tsp_tenant_id} will be dropped after shard split abort",
1224 : );
1225 0 : }
1226 0 : });
1227 0 :
1228 0 : // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
1229 0 : tenant_shard_persistence.retain(|tsp| {
1230 0 : TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
1231 0 : || tsp.splitting == SplitState::Idle
1232 0 : });
1233 0 : }
1234 : }
1235 :
1236 0 : let mut tenants = BTreeMap::new();
1237 0 :
1238 0 : let mut scheduler = Scheduler::new(nodes.values());
1239 0 :
1240 0 : #[cfg(feature = "testing")]
1241 0 : {
1242 0 : // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
1243 0 : // tests only store the shards, not the nodes. The nodes will be loaded shortly
1244 0 : // after when pageservers start up and register.
1245 0 : let mut node_ids = HashSet::new();
1246 0 : for tsp in &tenant_shard_persistence {
1247 0 : if let Some(node_id) = tsp.generation_pageserver {
1248 0 : node_ids.insert(node_id);
1249 0 : }
1250 : }
1251 0 : for node_id in node_ids {
1252 0 : tracing::info!("Creating node {} in scheduler for tests", node_id);
1253 0 : let node = Node::new(
1254 0 : NodeId(node_id as u64),
1255 0 : "".to_string(),
1256 0 : 123,
1257 0 : "".to_string(),
1258 0 : 123,
1259 0 : );
1260 0 :
1261 0 : scheduler.node_upsert(&node);
1262 : }
1263 : }
1264 0 : for tsp in tenant_shard_persistence {
1265 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
1266 :
1267 : // We will populate intent properly later in [`Self::startup_reconcile`], initially populate
1268 : // it with what we can infer: the node for which a generation was most recently issued.
1269 0 : let mut intent = IntentState::new();
1270 0 : if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
1271 : {
1272 0 : if nodes.contains_key(&generation_pageserver) {
1273 0 : intent.set_attached(&mut scheduler, Some(generation_pageserver));
1274 0 : } else {
1275 : // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
1276 : // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
1277 : // on different pageservers.
1278 0 : tracing::warn!("Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled");
1279 : }
1280 0 : }
1281 0 : let new_tenant = TenantShard::from_persistent(tsp, intent)?;
1282 :
1283 0 : tenants.insert(tenant_shard_id, new_tenant);
1284 : }
1285 :
1286 0 : let (startup_completion, startup_complete) = utils::completion::channel();
1287 0 :
1288 0 : // This channel is continuously consumed by process_results, so doesn't need to be very large.
1289 0 : let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
1290 0 : tokio::sync::mpsc::channel(512);
1291 0 :
1292 0 : let (delayed_reconcile_tx, delayed_reconcile_rx) =
1293 0 : tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
1294 0 :
1295 0 : let cancel = CancellationToken::new();
1296 0 : let reconcilers_cancel = cancel.child_token();
1297 0 :
1298 0 : let heartbeater = Heartbeater::new(
1299 0 : config.jwt_token.clone(),
1300 0 : config.max_offline_interval,
1301 0 : config.max_warming_up_interval,
1302 0 : cancel.clone(),
1303 0 : );
1304 :
1305 0 : let initial_leadership_status = if config.start_as_candidate {
1306 0 : LeadershipStatus::Candidate
1307 : } else {
1308 0 : LeadershipStatus::Leader
1309 : };
1310 :
1311 0 : let this = Arc::new(Self {
1312 0 : inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
1313 0 : nodes,
1314 0 : tenants,
1315 0 : scheduler,
1316 0 : delayed_reconcile_rx,
1317 0 : initial_leadership_status,
1318 0 : ))),
1319 0 : config: config.clone(),
1320 0 : persistence,
1321 0 : compute_hook: Arc::new(ComputeHook::new(config.clone())),
1322 0 : result_tx,
1323 0 : heartbeater,
1324 0 : reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
1325 0 : config.reconciler_concurrency,
1326 0 : )),
1327 0 : delayed_reconcile_tx,
1328 0 : abort_tx,
1329 0 : startup_complete: startup_complete.clone(),
1330 0 : cancel,
1331 0 : reconcilers_cancel,
1332 0 : gate: Gate::default(),
1333 0 : reconcilers_gate: Gate::default(),
1334 0 : tenant_op_locks: Default::default(),
1335 0 : node_op_locks: Default::default(),
1336 0 : });
1337 0 :
1338 0 : let result_task_this = this.clone();
1339 0 : tokio::task::spawn(async move {
1340 : // Block shutdown until we're done (we must respect self.cancel)
1341 0 : if let Ok(_gate) = result_task_this.gate.enter() {
1342 0 : result_task_this
1343 0 : .process_results(result_rx, bg_compute_notify_result_rx)
1344 0 : .await
1345 0 : }
1346 0 : });
1347 0 :
1348 0 : tokio::task::spawn({
1349 0 : let this = this.clone();
1350 0 : async move {
1351 : // Block shutdown until we're done (we must respect self.cancel)
1352 0 : if let Ok(_gate) = this.gate.enter() {
1353 0 : this.process_aborts(abort_rx).await
1354 0 : }
1355 0 : }
1356 0 : });
1357 0 :
1358 0 : tokio::task::spawn({
1359 0 : let this = this.clone();
1360 0 : async move {
1361 0 : if let Ok(_gate) = this.gate.enter() {
1362 0 : loop {
1363 0 : tokio::select! {
1364 : _ = this.cancel.cancelled() => {
1365 : break;
1366 : },
1367 : _ = tokio::time::sleep(Duration::from_secs(60)) => {}
1368 : };
1369 0 : this.tenant_op_locks.housekeeping();
1370 : }
1371 0 : }
1372 0 : }
1373 0 : });
1374 0 :
1375 0 : tokio::task::spawn({
1376 0 : let this = this.clone();
1377 0 : // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
1378 0 : // is done.
1379 0 : let startup_completion = startup_completion.clone();
1380 0 : async move {
1381 : // Block shutdown until we're done (we must respect self.cancel)
1382 0 : let Ok(_gate) = this.gate.enter() else {
1383 0 : return;
1384 : };
1385 :
1386 0 : this.startup_reconcile(leader, leader_step_down_state, bg_compute_notify_result_tx)
1387 0 : .await;
1388 :
1389 0 : drop(startup_completion);
1390 0 : }
1391 0 : });
1392 0 :
1393 0 : tokio::task::spawn({
1394 0 : let this = this.clone();
1395 0 : let startup_complete = startup_complete.clone();
1396 0 : async move {
1397 0 : startup_complete.wait().await;
1398 0 : this.background_reconcile().await;
1399 0 : }
1400 0 : });
1401 0 :
1402 0 : tokio::task::spawn({
1403 0 : let this = this.clone();
1404 0 : let startup_complete = startup_complete.clone();
1405 0 : async move {
1406 0 : startup_complete.wait().await;
1407 0 : this.spawn_heartbeat_driver().await;
1408 0 : }
1409 0 : });
1410 0 :
1411 0 : Ok(this)
1412 0 : }
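// Hedged usage sketch for spawn: construct the service, then gate any mutating
// work on startup reconciliation, as the startup_complete doc comment requires.
// Construction of `Persistence` is out of scope here.
#[allow(dead_code)]
async fn spawn_usage_sketch(
    config: Config,
    persistence: Arc<Persistence>,
) -> anyhow::Result<()> {
    let service = Self::spawn(config, persistence).await?;
    // Until this barrier passes, it isn't safe to mutate tenants.
    service.startup_complete.clone().wait().await;
    Ok(())
}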
1413 :
1414 0 : pub(crate) async fn attach_hook(
1415 0 : &self,
1416 0 : attach_req: AttachHookRequest,
1417 0 : ) -> anyhow::Result<AttachHookResponse> {
1418 0 : let _tenant_lock = trace_exclusive_lock(
1419 0 : &self.tenant_op_locks,
1420 0 : attach_req.tenant_shard_id.tenant_id,
1421 0 : TenantOperations::AttachHook,
1422 0 : )
1423 0 : .await;
1424 :
1425 : // This is a test hook. To enable using it on tenants that were created directly with
1426 : // the pageserver API (not via this service), we will auto-create any missing tenant
1427 : // shards with default state.
1428 0 : let insert = {
1429 0 : let locked = self.inner.write().unwrap();
1430 0 : !locked.tenants.contains_key(&attach_req.tenant_shard_id)
1431 0 : };
1432 0 :
1433 0 : if insert {
1434 0 : let tsp = TenantShardPersistence {
1435 0 : tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
1436 0 : shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
1437 0 : shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
1438 0 : shard_stripe_size: 0,
1439 0 : generation: attach_req.generation_override.or(Some(0)),
1440 0 : generation_pageserver: None,
1441 0 : placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
1442 0 : config: serde_json::to_string(&TenantConfig::default()).unwrap(),
1443 0 : splitting: SplitState::default(),
1444 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1445 0 : .unwrap(),
1446 0 : };
1447 0 :
1448 0 : match self.persistence.insert_tenant_shards(vec![tsp]).await {
1449 0 : Err(e) => match e {
1450 : DatabaseError::Query(diesel::result::Error::DatabaseError(
1451 : DatabaseErrorKind::UniqueViolation,
1452 : _,
1453 : )) => {
1454 0 : tracing::info!(
1455 0 : "Raced with another request to insert tenant {}",
1456 : attach_req.tenant_shard_id
1457 : )
1458 : }
1459 0 : _ => return Err(e.into()),
1460 : },
1461 : Ok(()) => {
1462 0 : tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
1463 :
1464 0 : let mut locked = self.inner.write().unwrap();
1465 0 : locked.tenants.insert(
1466 0 : attach_req.tenant_shard_id,
1467 0 : TenantShard::new(
1468 0 : attach_req.tenant_shard_id,
1469 0 : ShardIdentity::unsharded(),
1470 0 : PlacementPolicy::Attached(0),
1471 0 : ),
1472 0 : );
1473 0 : tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
1474 : }
1475 : }
1476 0 : }
1477 :
1478 0 : let new_generation = if let Some(req_node_id) = attach_req.node_id {
1479 0 : let maybe_tenant_conf = {
1480 0 : let locked = self.inner.write().unwrap();
1481 0 : locked
1482 0 : .tenants
1483 0 : .get(&attach_req.tenant_shard_id)
1484 0 : .map(|t| t.config.clone())
1485 0 : };
1486 0 :
1487 0 : match maybe_tenant_conf {
1488 0 : Some(conf) => {
1489 0 : let new_generation = self
1490 0 : .persistence
1491 0 : .increment_generation(attach_req.tenant_shard_id, req_node_id)
1492 0 : .await?;
1493 :
1494 : // Persist the placement policy update. This is required
1495 : // when we are reattaching a detached tenant.
1496 0 : self.persistence
1497 0 : .update_tenant_shard(
1498 0 : TenantFilter::Shard(attach_req.tenant_shard_id),
1499 0 : Some(PlacementPolicy::Attached(0)),
1500 0 : Some(conf),
1501 0 : None,
1502 0 : None,
1503 0 : )
1504 0 : .await?;
1505 0 : Some(new_generation)
1506 : }
1507 : None => {
1508 0 : anyhow::bail!("Attach hook handling raced with tenant removal")
1509 : }
1510 : }
1511 : } else {
1512 0 : self.persistence.detach(attach_req.tenant_shard_id).await?;
1513 0 : None
1514 : };
1515 :
1516 0 : let mut locked = self.inner.write().unwrap();
1517 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
1518 0 :
1519 0 : let tenant_shard = tenants
1520 0 : .get_mut(&attach_req.tenant_shard_id)
1521 0 : .expect("Checked for existence above");
1522 :
1523 0 : if let Some(new_generation) = new_generation {
1524 0 : tenant_shard.generation = Some(new_generation);
1525 0 : tenant_shard.policy = PlacementPolicy::Attached(0);
1526 0 : } else {
1527 : // This is a detach notification. We must update placement policy to avoid re-attaching
1528 : // during background scheduling/reconciliation, or during storage controller restart.
1529 0 : assert!(attach_req.node_id.is_none());
1530 0 : tenant_shard.policy = PlacementPolicy::Detached;
1531 : }
1532 :
1533 0 : if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
1534 0 : tracing::info!(
1535 : tenant_id = %attach_req.tenant_shard_id,
1536 : ps_id = %attaching_pageserver,
1537 : generation = ?tenant_shard.generation,
1538 0 : "issuing",
1539 : );
1540 0 : } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
1541 0 : tracing::info!(
1542 : tenant_id = %attach_req.tenant_shard_id,
1543 : %ps_id,
1544 : generation = ?tenant_shard.generation,
1545 0 : "dropping",
1546 : );
1547 : } else {
1548 0 : tracing::info!(
1549 : tenant_id = %attach_req.tenant_shard_id,
1550 0 : "no-op: tenant already has no pageserver");
1551 : }
1552 0 : tenant_shard
1553 0 : .intent
1554 0 : .set_attached(scheduler, attach_req.node_id);
1555 0 :
1556 0 : tracing::info!(
1557 0 : "attach_hook: tenant {} set generation {:?}, pageserver {}",
1558 0 : attach_req.tenant_shard_id,
1559 0 : tenant_shard.generation,
1560 0 : // TODO: this is an odd number of 0xf's
1561 0 : attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff))
1562 : );
1563 :
1564 : // Trick the reconciler into not doing anything for this tenant: this helps
1565 : // tests that manually configure a tenant on the pageserver, and then call this
1566 : // attach hook: they don't want background reconciliation to modify what they
1567 : // did to the pageserver.
1568 : #[cfg(feature = "testing")]
1569 : {
1570 0 : if let Some(node_id) = attach_req.node_id {
1571 0 : tenant_shard.observed.locations = HashMap::from([(
1572 0 : node_id,
1573 0 : ObservedStateLocation {
1574 0 : conf: Some(attached_location_conf(
1575 0 : tenant_shard.generation.unwrap(),
1576 0 : &tenant_shard.shard,
1577 0 : &tenant_shard.config,
1578 0 : &PlacementPolicy::Attached(0),
1579 0 : )),
1580 0 : },
1581 0 : )]);
1582 0 : } else {
1583 0 : tenant_shard.observed.locations.clear();
1584 0 : }
1585 : }
1586 :
1587 0 : Ok(AttachHookResponse {
1588 0 : gen: attach_req
1589 0 : .node_id
1590 0 : .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
1591 0 : })
1592 0 : }
1593 :
1594 0 : pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
1595 0 : let locked = self.inner.read().unwrap();
1596 0 :
1597 0 : let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
1598 0 :
1599 0 : InspectResponse {
1600 0 : attachment: tenant_shard.and_then(|s| {
1601 0 : s.intent
1602 0 : .get_attached()
1603 0 : .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
1604 0 : }),
1605 0 : }
1606 0 : }
1607 :
1608 : // When the availability state of a node transitions to active, we must do a full reconciliation
1609 : // of LocationConfigs on that node. This is because while a node was offline:
1610 : // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
1611 : // - aborting a tenant shard split might have left rogue child shards behind on this node.
1612 : //
1613 : // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
1614 : // Reconcilers might communicate with the node, and these must not overlap with the work we do in
1615 : // this function.
1616 : //
1617 : // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
1618 : // is written for a single node rather than as a batch job for all nodes.
1619 0 : #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
1620 : async fn node_activate_reconcile(
1621 : &self,
1622 : mut node: Node,
1623 : _lock: &TracingExclusiveGuard<NodeOperations>,
1624 : ) -> Result<(), ApiError> {
1625 : // This Node is a mutable local copy: we will set it active so that we can use its
1626 : // API client to reconcile with the node. The Node in [`Self::nodes`] will get updated
1627 : // later.
1628 : node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
1629 :
1630 : let configs = match node
1631 : .with_client_retries(
1632 0 : |client| async move { client.list_location_config().await },
1633 : &self.config.jwt_token,
1634 : 1,
1635 : 5,
1636 : SHORT_RECONCILE_TIMEOUT,
1637 : &self.cancel,
1638 : )
1639 : .await
1640 : {
1641 : None => {
1642 : // We're shutting down (the Node's cancellation token can't have fired, because
1643 : // we're the only scope that has a reference to it, and we didn't fire it).
1644 : return Err(ApiError::ShuttingDown);
1645 : }
1646 : Some(Err(e)) => {
1647 : // This node didn't succeed listing its locations: it may not proceed to active state
1648 : // as it is apparently unavailable.
1649 : return Err(ApiError::PreconditionFailed(
1650 : format!("Failed to query node location configs, cannot activate ({e})").into(),
1651 : ));
1652 : }
1653 : Some(Ok(configs)) => configs,
1654 : };
1655 : tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
1656 :
1657 : let mut cleanup = Vec::new();
1658 : {
1659 : let mut locked = self.inner.write().unwrap();
1660 :
1661 : for (tenant_shard_id, observed_loc) in configs.tenant_shards {
1662 : let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
1663 : cleanup.push(tenant_shard_id);
1664 : continue;
1665 : };
1666 : tenant_shard
1667 : .observed
1668 : .locations
1669 : .insert(node.get_id(), ObservedStateLocation { conf: observed_loc });
1670 : }
1671 : }
1672 :
1673 : for tenant_shard_id in cleanup {
1674 : tracing::info!("Detaching {tenant_shard_id}");
1675 : match node
1676 : .with_client_retries(
1677 0 : |client| async move {
1678 0 : let config = LocationConfig {
1679 0 : mode: LocationConfigMode::Detached,
1680 0 : generation: None,
1681 0 : secondary_conf: None,
1682 0 : shard_number: tenant_shard_id.shard_number.0,
1683 0 : shard_count: tenant_shard_id.shard_count.literal(),
1684 0 : shard_stripe_size: 0,
1685 0 : tenant_conf: models::TenantConfig::default(),
1686 0 : };
1687 0 : client
1688 0 : .location_config(tenant_shard_id, config, None, false)
1689 0 : .await
1690 0 : },
1691 : &self.config.jwt_token,
1692 : 1,
1693 : 5,
1694 : SHORT_RECONCILE_TIMEOUT,
1695 : &self.cancel,
1696 : )
1697 : .await
1698 : {
1699 : None => {
1700 : // We're shutting down (the Node's cancellation token can't have fired, because
1701 : // we're the only scope that has a reference to it, and we didn't fire it).
1702 : return Err(ApiError::ShuttingDown);
1703 : }
1704 : Some(Err(e)) => {
1705 : // Do not let the node proceed to Active state if it is not responsive to requests
1706 : // to detach. This could happen if e.g. a shutdown bug in the pageserver is preventing
1707 : // detach completing: we should not let this node back into the set of nodes considered
1708 : // okay for scheduling.
1709 : return Err(ApiError::Conflict(format!(
1710 : "Node {node} failed to detach {tenant_shard_id}: {e}"
1711 : )));
1712 : }
1713 : Some(Ok(_)) => {}
1714 : };
1715 : }
1716 :
1717 : Ok(())
1718 : }
1719 :
1720 0 : pub(crate) async fn re_attach(
1721 0 : &self,
1722 0 : reattach_req: ReAttachRequest,
1723 0 : ) -> Result<ReAttachResponse, ApiError> {
1724 0 : if let Some(register_req) = reattach_req.register {
1725 0 : self.node_register(register_req).await?;
1726 0 : }
1727 :
1728 : // Ordering: we must persist generation number updates before making them visible in the in-memory state
1729 0 : let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
1730 :
1731 0 : tracing::info!(
1732 : node_id=%reattach_req.node_id,
1733 0 : "Incremented {} tenant shards' generations",
1734 0 : incremented_generations.len()
1735 : );
1736 :
1737 : // Apply the updated generation to our in-memory state, and
1738 : // discover secondary locations.
1739 0 : let mut locked = self.inner.write().unwrap();
1740 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1741 0 :
1742 0 : let mut response = ReAttachResponse {
1743 0 : tenants: Vec::new(),
1744 0 : };
1745 :
1746 : // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
1747 : // to call location_conf API with an old generation. Wait for cancellation to complete
1748 : // before responding to this request. Requires well implemented CancellationToken logic
1749 : // all the way to where we call location_conf. Even then, there can still be a location_conf
1750 : // request in flight over the network: TODO handle that by making location_conf API refuse
1751 : // to go backward in generations.
1752 :
1753 : // Scan through all shards, applying updates for ones where we updated generation
1754 : // and identifying shards that intend to have a secondary location on this node.
1755 0 : for (tenant_shard_id, shard) in tenants {
1756 0 : if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
1757 0 : let new_gen = *new_gen;
1758 0 : response.tenants.push(ReAttachResponseTenant {
1759 0 : id: *tenant_shard_id,
1760 0 : gen: Some(new_gen.into().unwrap()),
1761 0 : // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
1762 0 : // execution. If a pageserver is restarted during that process, then the reconcile pass will
1763 0 : // fail, and start from scratch, so it doesn't make sense for us to try and preserve
1764 0 : // the stale/multi states at this point.
1765 0 : mode: LocationConfigMode::AttachedSingle,
1766 0 : });
1767 0 :
1768 0 : shard.generation = std::cmp::max(shard.generation, Some(new_gen));
1769 0 : if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
1770 : // Why can we update `observed` even though we're not sure our response will be received
1771 : // by the pageserver? Because the pageserver will not proceed with startup until
1772 : // it has processed the response: if it loses it, we'll see another request and increment
1773 : // generation again, avoiding any uncertainty about dirtiness of tenant's state.
1774 0 : if let Some(conf) = observed.conf.as_mut() {
1775 0 : conf.generation = new_gen.into();
1776 0 : }
1777 0 : } else {
1778 0 : // This node has no observed state for the shard: perhaps it was offline
1779 0 : // when the pageserver restarted. Insert a None, so that the Reconciler
1780 0 : // will be prompted to learn the location's state before it makes changes.
1781 0 : shard
1782 0 : .observed
1783 0 : .locations
1784 0 : .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
1785 0 : }
1786 0 : } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
1787 0 : // Ordering: pageserver will not accept /location_config requests until it has
1788 0 : // finished processing the response from re-attach. So we can update our in-memory state
1789 0 : // now, and be confident that we are not stamping on the result of some later location config.
1790 0 : // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
1791 0 : // so we might update observed state here, and then get over-written by some racing
1792 0 : // ReconcileResult. The impact is low, however, since the state we set on the pageserver
1793 0 : // matches intent, so the worst case if we race is a spurious reconcile.
1794 0 :
1795 0 : response.tenants.push(ReAttachResponseTenant {
1796 0 : id: *tenant_shard_id,
1797 0 : gen: None,
1798 0 : mode: LocationConfigMode::Secondary,
1799 0 : });
1800 0 :
1801 0 : // We must not update observed, because we have no guarantee that our
1802 0 : // response will be received by the pageserver. This could leave it
1803 0 : // falsely dirty, but the resulting reconcile should be idempotent.
1804 0 : }
1805 : }
1806 :
1807 : // We consider a node Active once we have composed a re-attach response, but we
1808 : // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
1809 : // implicitly synchronizes the LocationConfigs on the node.
1810 : //
1811 : // Setting a node active unblocks any Reconcilers that might write to the location config API,
1812 : // but those requests will not be accepted by the node until it has finished processing
1813 : // the re-attach response.
1814 : //
1815 : // Additionally, reset the node's scheduling policy to match the conditional update done
1816 : // in [`Persistence::re_attach`].
1817 0 : if let Some(node) = nodes.get(&reattach_req.node_id) {
1818 0 : let reset_scheduling = matches!(
1819 0 : node.get_scheduling(),
1820 : NodeSchedulingPolicy::PauseForRestart
1821 : | NodeSchedulingPolicy::Draining
1822 : | NodeSchedulingPolicy::Filling
1823 : );
1824 :
1825 0 : let mut new_nodes = (**nodes).clone();
1826 0 : if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
1827 0 : if reset_scheduling {
1828 0 : node.set_scheduling(NodeSchedulingPolicy::Active);
1829 0 : }
1830 :
1831 0 : tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
1832 0 : node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
1833 0 :
1834 0 : scheduler.node_upsert(node);
1835 0 : let new_nodes = Arc::new(new_nodes);
1836 0 : *nodes = new_nodes;
1837 : } else {
1838 0 : tracing::error!(
1839 0 : "Reattaching node {} was removed while processing the request",
1840 : reattach_req.node_id
1841 : );
1842 : }
1843 0 : }
1844 :
1845 0 : Ok(response)
1846 0 : }
1847 :
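/// Upcall from pageservers: check whether the generations they hold are still current,
/// before they act on deletion-safety decisions. A generation is valid iff it equals the
/// latest generation we issued for that shard; shards we no longer know about (e.g. after
/// tenant deletion) validate as true to avoid spurious warnings.
///
/// Condensed restatement of the rule implemented in the body below:
/// ```ignore
/// let valid = match tenants.get(&req_tenant.id) {
///     Some(shard) => shard.generation == Some(Generation::new(req_tenant.gen)),
///     None => true, // tenant deleted: approve any validation
/// };
/// ```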
1848 0 : pub(crate) fn validate(&self, validate_req: ValidateRequest) -> ValidateResponse {
1849 0 : let locked = self.inner.read().unwrap();
1850 0 :
1851 0 : let mut response = ValidateResponse {
1852 0 : tenants: Vec::new(),
1853 0 : };
1854 :
1855 0 : for req_tenant in validate_req.tenants {
1856 0 : if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
1857 0 : let valid = tenant_shard.generation == Some(Generation::new(req_tenant.gen));
1858 0 : tracing::info!(
1859 0 : "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
1860 : req_tenant.id,
1861 : req_tenant.gen,
1862 : tenant_shard.generation
1863 : );
1864 0 : response.tenants.push(ValidateResponseTenant {
1865 0 : id: req_tenant.id,
1866 0 : valid,
1867 0 : });
1868 0 : } else {
1869 0 : // After tenant deletion, we may approve any validation. This avoids
1870 0 : // spurious warnings on the pageserver if it has pending LSN updates
1871 0 : // at the point a deletion happens.
1872 0 : response.tenants.push(ValidateResponseTenant {
1873 0 : id: req_tenant.id,
1874 0 : valid: true,
1875 0 : });
1876 0 : }
1877 : }
1878 0 : response
1879 0 : }
1880 :
1881 0 : pub(crate) async fn tenant_create(
1882 0 : &self,
1883 0 : create_req: TenantCreateRequest,
1884 0 : ) -> Result<TenantCreateResponse, ApiError> {
1885 0 : let tenant_id = create_req.new_tenant_id.tenant_id;
1886 :
1887 : // Exclude any concurrent attempts to create/access the same tenant ID
1888 0 : let _tenant_lock = trace_exclusive_lock(
1889 0 : &self.tenant_op_locks,
1890 0 : create_req.new_tenant_id.tenant_id,
1891 0 : TenantOperations::Create,
1892 0 : )
1893 0 : .await;
1894 0 : let (response, waiters) = self.do_tenant_create(create_req).await?;
1895 :
1896 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
1897 : // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
1898 : // accept compute notifications while the tenant is still being created. Reconciliation will
1899 : // be retried in the background.
1900 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
1901 0 : }
1902 0 : Ok(response)
1903 0 : }
1904 :
1905 0 : pub(crate) async fn do_tenant_create(
1906 0 : &self,
1907 0 : create_req: TenantCreateRequest,
1908 0 : ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
1909 0 : let placement_policy = create_req
1910 0 : .placement_policy
1911 0 : .clone()
1912 0 : // As a default, zero secondaries is convenient for tests that don't choose a policy.
1913 0 : .unwrap_or(PlacementPolicy::Attached(0));
1914 :
1915 : // This service expects to handle sharding itself: it is an error to try and directly create
1916 : // a particular shard here.
1917 0 : let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
1918 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
1919 0 : "Attempted to create a specific shard, this API is for creating the whole tenant"
1920 0 : )));
1921 : } else {
1922 0 : create_req.new_tenant_id.tenant_id
1923 0 : };
1924 0 :
1925 0 : tracing::info!(
1926 0 : "Creating tenant {}, shard_count={:?}",
1927 : create_req.new_tenant_id,
1928 : create_req.shard_parameters.count,
1929 : );
1930 :
1931 0 : let create_ids = (0..create_req.shard_parameters.count.count())
1932 0 : .map(|i| TenantShardId {
1933 0 : tenant_id,
1934 0 : shard_number: ShardNumber(i),
1935 0 : shard_count: create_req.shard_parameters.count,
1936 0 : })
1937 0 : .collect::<Vec<_>>();
1938 :
1939 : // If the caller specifies a None generation, it means "start from default". This is different
1940 : // to [`Self::tenant_location_config`], where a None generation is used to represent
1941 : // an incompletely-onboarded tenant.
1942 0 : let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
1943 0 : tracing::info!(
1944 0 : "tenant_create: secondary mode, generation is_some={}",
1945 0 : create_req.generation.is_some()
1946 : );
1947 0 : create_req.generation.map(Generation::new)
1948 : } else {
1949 0 : tracing::info!(
1950 0 : "tenant_create: not secondary mode, generation is_some={}",
1951 0 : create_req.generation.is_some()
1952 : );
1953 0 : Some(
1954 0 : create_req
1955 0 : .generation
1956 0 : .map(Generation::new)
1957 0 : .unwrap_or(INITIAL_GENERATION),
1958 0 : )
1959 : };
1960 :
1961 : // Ordering: we persist tenant shards before creating them on the pageserver. This enables a caller
1962 : // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
1963 : // during the creation, rather than risking leaving orphan objects in S3.
1964 0 : let persist_tenant_shards = create_ids
1965 0 : .iter()
1966 0 : .map(|tenant_shard_id| TenantShardPersistence {
1967 0 : tenant_id: tenant_shard_id.tenant_id.to_string(),
1968 0 : shard_number: tenant_shard_id.shard_number.0 as i32,
1969 0 : shard_count: tenant_shard_id.shard_count.literal() as i32,
1970 0 : shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
1971 0 : generation: initial_generation.map(|g| g.into().unwrap() as i32),
1972 0 : // The pageserver is not known until scheduling happens: we will set this column when
1973 0 : // incrementing the generation the first time we attach to a pageserver.
1974 0 : generation_pageserver: None,
1975 0 : placement_policy: serde_json::to_string(&placement_policy).unwrap(),
1976 0 : config: serde_json::to_string(&create_req.config).unwrap(),
1977 0 : splitting: SplitState::default(),
1978 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1979 0 : .unwrap(),
1980 0 : })
1981 0 : .collect();
1982 0 :
1983 0 : match self
1984 0 : .persistence
1985 0 : .insert_tenant_shards(persist_tenant_shards)
1986 0 : .await
1987 : {
1988 0 : Ok(_) => {}
1989 : Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
1990 : DatabaseErrorKind::UniqueViolation,
1991 : _,
1992 : ))) => {
1993 : // Unique key violation: this is probably a retry. Because the shard count is part of the unique key,
1994 : // if we see a unique key violation it means that the creation request's shard count matches the previous
1995 : // creation's shard count.
1996 0 : tracing::info!("Tenant shards already present in database, proceeding with idempotent creation...");
1997 : }
1998 : // Any other database error is unexpected and a bug.
1999 0 : Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
2000 : };
2001 :
2002 0 : let mut schedule_context = ScheduleContext::default();
2003 :
2004 0 : let (waiters, response_shards) = {
2005 0 : let mut locked = self.inner.write().unwrap();
2006 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2007 0 :
2008 0 : let mut response_shards = Vec::new();
2009 0 : let mut schedule_error = None;
2010 :
2011 0 : for tenant_shard_id in create_ids {
2012 0 : tracing::info!("Creating shard {tenant_shard_id}...");
2013 :
2014 : use std::collections::btree_map::Entry;
2015 0 : match tenants.entry(tenant_shard_id) {
2016 0 : Entry::Occupied(mut entry) => {
2017 0 : tracing::info!(
2018 0 : "Tenant shard {tenant_shard_id} already exists while creating"
2019 : );
2020 :
2021 : // TODO: schedule() should take an anti-affinity expression that pushes
2022 : // attached and secondary locations (independently) away from those
2023 : // pageservers also holding a shard for this tenant.
2024 :
2025 0 : entry
2026 0 : .get_mut()
2027 0 : .schedule(scheduler, &mut schedule_context)
2028 0 : .map_err(|e| {
2029 0 : ApiError::Conflict(format!(
2030 0 : "Failed to schedule shard {tenant_shard_id}: {e}"
2031 0 : ))
2032 0 : })?;
2033 :
2034 0 : if let Some(node_id) = entry.get().intent.get_attached() {
2035 0 : let generation = entry
2036 0 : .get()
2037 0 : .generation
2038 0 : .expect("Generation is set when in attached mode");
2039 0 : response_shards.push(TenantCreateResponseShard {
2040 0 : shard_id: tenant_shard_id,
2041 0 : node_id: *node_id,
2042 0 : generation: generation.into().unwrap(),
2043 0 : });
2044 0 : }
2045 :
2046 0 : continue;
2047 : }
2048 0 : Entry::Vacant(entry) => {
2049 0 : let state = entry.insert(TenantShard::new(
2050 0 : tenant_shard_id,
2051 0 : ShardIdentity::from_params(
2052 0 : tenant_shard_id.shard_number,
2053 0 : &create_req.shard_parameters,
2054 0 : ),
2055 0 : placement_policy.clone(),
2056 0 : ));
2057 0 :
2058 0 : state.generation = initial_generation;
2059 0 : state.config = create_req.config.clone();
2060 0 : if let Err(e) = state.schedule(scheduler, &mut schedule_context) {
2061 0 : schedule_error = Some(e);
2062 0 : }
2063 :
2064 : // Only include shards in result if we are attaching: the purpose
2065 : // of the response is to tell the caller where the shards are attached.
2066 0 : if let Some(node_id) = state.intent.get_attached() {
2067 0 : let generation = state
2068 0 : .generation
2069 0 : .expect("Generation is set when in attached mode");
2070 0 : response_shards.push(TenantCreateResponseShard {
2071 0 : shard_id: tenant_shard_id,
2072 0 : node_id: *node_id,
2073 0 : generation: generation.into().unwrap(),
2074 0 : });
2075 0 : }
2076 : }
2077 : };
2078 : }
2079 :
2080 : // If we failed to schedule shards, then they are still created in the controller,
2081 : // but we return an error to the requester to avoid a silent failure when someone
2082 : // tries to e.g. create a tenant whose placement policy requires more nodes than
2083 : // are present in the system. We do this here rather than in the above loop, to
2084 : // avoid situations where we only create a subset of shards in the tenant.
2085 0 : if let Some(e) = schedule_error {
2086 0 : return Err(ApiError::Conflict(format!(
2087 0 : "Failed to schedule shard(s): {e}"
2088 0 : )));
2089 0 : }
2090 0 :
2091 0 : let waiters = tenants
2092 0 : .range_mut(TenantShardId::tenant_range(tenant_id))
2093 0 : .filter_map(|(_shard_id, shard)| self.maybe_reconcile_shard(shard, nodes))
2094 0 : .collect::<Vec<_>>();
2095 0 : (waiters, response_shards)
2096 0 : };
2097 0 :
2098 0 : Ok((
2099 0 : TenantCreateResponse {
2100 0 : shards: response_shards,
2101 0 : },
2102 0 : waiters,
2103 0 : ))
2104 0 : }
2105 :
2106 : /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
2107 : /// wait for reconciliation to complete before responding.
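///
/// Usage sketch, following the pattern used by the creation path above (`waiters` being
/// the results of `maybe_reconcile_shard` calls):
/// ```ignore
/// if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
///     // Not necessarily fatal: reconciliation is retried in the background.
///     tracing::warn!("Reconcile not done yet ({e})");
/// }
/// ```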
2108 0 : async fn await_waiters(
2109 0 : &self,
2110 0 : waiters: Vec<ReconcilerWaiter>,
2111 0 : timeout: Duration,
2112 0 : ) -> Result<(), ReconcileWaitError> {
2113 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
2114 0 : for waiter in waiters {
2115 0 : let timeout = deadline.duration_since(Instant::now());
2116 0 : waiter.wait_timeout(timeout).await?;
2117 : }
2118 :
2119 0 : Ok(())
2120 0 : }
2121 :
2122 : /// Same as [`Service::await_waiters`], but returns the waiters which are still
2123 : /// in progress
2124 0 : async fn await_waiters_remainder(
2125 0 : &self,
2126 0 : waiters: Vec<ReconcilerWaiter>,
2127 0 : timeout: Duration,
2128 0 : ) -> Vec<ReconcilerWaiter> {
2129 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
2130 0 : for waiter in waiters.iter() {
2131 0 : let timeout = deadline.duration_since(Instant::now());
2132 0 : let _ = waiter.wait_timeout(timeout).await;
2133 : }
2134 :
2135 0 : waiters
2136 0 : .into_iter()
2137 0 : .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
2138 0 : .collect::<Vec<_>>()
2139 0 : }
2140 :
2141 : /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
2142 : /// and transform it into either a tenant creation of a series of shard updates.
2143 : /// and transform it into either a tenant creation or a series of shard updates.
2144 : /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
2145 : /// still be returned.
2146 0 : fn tenant_location_config_prepare(
2147 0 : &self,
2148 0 : tenant_id: TenantId,
2149 0 : req: TenantLocationConfigRequest,
2150 0 : ) -> TenantCreateOrUpdate {
2151 0 : let mut updates = Vec::new();
2152 0 : let mut locked = self.inner.write().unwrap();
2153 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2154 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
2155 :
2156 : // Use location config mode as an indicator of policy.
2157 0 : let placement_policy = match req.config.mode {
2158 0 : LocationConfigMode::Detached => PlacementPolicy::Detached,
2159 0 : LocationConfigMode::Secondary => PlacementPolicy::Secondary,
2160 : LocationConfigMode::AttachedMulti
2161 : | LocationConfigMode::AttachedSingle
2162 : | LocationConfigMode::AttachedStale => {
2163 0 : if nodes.len() > 1 {
2164 0 : PlacementPolicy::Attached(1)
2165 : } else {
2166 : // Convenience for dev/test: if we just have one pageserver, import
2167 : // tenants into non-HA mode so that scheduling will succeed.
2168 0 : PlacementPolicy::Attached(0)
2169 : }
2170 : }
2171 : };
2172 :
2173 0 : let mut create = true;
2174 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2175 : // Saw an existing shard: this is not a creation
2176 0 : create = false;
2177 :
2178 : // Shards may have initially been created by a Secondary request, where we
2179 : // would have left generation as None.
2180 : //
2181 : // We only update generation the first time we see an attached-mode request,
2182 : // and if there is no existing generation set. The caller is responsible for
2183 : // ensuring that no non-storage-controller pageserver ever uses a higher
2184 : // generation than they passed in here.
2185 : use LocationConfigMode::*;
2186 0 : let set_generation = match req.config.mode {
2187 0 : AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
2188 0 : req.config.generation.map(Generation::new)
2189 : }
2190 0 : _ => None,
2191 : };
2192 :
2193 0 : updates.push(ShardUpdate {
2194 0 : tenant_shard_id: *shard_id,
2195 0 : placement_policy: placement_policy.clone(),
2196 0 : tenant_config: req.config.tenant_conf.clone(),
2197 0 : generation: set_generation,
2198 0 : });
2199 : }
2200 :
2201 0 : if create {
2202 : use LocationConfigMode::*;
2203 0 : let generation = match req.config.mode {
2204 0 : AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
2205 : // If a caller provided a generation in a non-attached request, ignore it
2206 : // and leave our generation as None: this enables a subsequent update to set
2207 : // the generation when setting an attached mode for the first time.
2208 0 : _ => None,
2209 : };
2210 :
2211 0 : TenantCreateOrUpdate::Create(
2212 0 : // Synthesize a creation request
2213 0 : TenantCreateRequest {
2214 0 : new_tenant_id: tenant_shard_id,
2215 0 : generation,
2216 0 : shard_parameters: ShardParameters {
2217 0 : count: tenant_shard_id.shard_count,
2218 0 : // We only import un-sharded or single-sharded tenants, so stripe
2219 0 : // size can be made up arbitrarily here.
2220 0 : stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE,
2221 0 : },
2222 0 : placement_policy: Some(placement_policy),
2223 0 : config: req.config.tenant_conf,
2224 0 : },
2225 0 : )
2226 : } else {
2227 0 : assert!(!updates.is_empty());
2228 0 : TenantCreateOrUpdate::Update(updates)
2229 : }
2230 0 : }
2231 :
2232 : /// This API is used by the cloud control plane to migrate unsharded tenants that it created
2233 : /// directly with pageservers into this service.
2234 : ///
2235 : /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
2236 : /// has attempted to call this API. Failure to oblige to this rule may lead to S3 corruption.
2237 : /// has attempted to call this API. Failure to abide by this rule may lead to S3 corruption.
2238 : /// tenant's source of generation numbers.
2239 : ///
2240 : /// The mode in this request coarse-grained control of tenants:
2241 : /// The mode in this request gives coarse-grained control of tenants:
2242 : /// - Call with mode Secondary to either onboard a tenant without attaching it, or
2243 : /// to set an existing tenant to PolicyMode::Secondary
2244 : /// - Call with mode Detached to switch to PolicyMode::Detached
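///
/// Condensed sketch of the mode-to-policy mapping performed by
/// [`Self::tenant_location_config_prepare`] (the single-pageserver dev/test special case
/// is handled there):
/// ```ignore
/// let placement_policy = match req.config.mode {
///     LocationConfigMode::Detached => PlacementPolicy::Detached,
///     LocationConfigMode::Secondary => PlacementPolicy::Secondary,
///     // Any Attached* mode: request one secondary for HA when multiple nodes exist
///     _attached => PlacementPolicy::Attached(1),
/// };
/// ```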
2245 0 : pub(crate) async fn tenant_location_config(
2246 0 : &self,
2247 0 : tenant_shard_id: TenantShardId,
2248 0 : req: TenantLocationConfigRequest,
2249 0 : ) -> Result<TenantLocationConfigResponse, ApiError> {
2250 : // We require an exclusive lock, because we are updating both persistent and in-memory state
2251 0 : let _tenant_lock = trace_exclusive_lock(
2252 0 : &self.tenant_op_locks,
2253 0 : tenant_shard_id.tenant_id,
2254 0 : TenantOperations::LocationConfig,
2255 0 : )
2256 0 : .await;
2257 :
2258 0 : if !tenant_shard_id.is_unsharded() {
2259 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
2260 0 : "This API is for importing single-sharded or unsharded tenants"
2261 0 : )));
2262 0 : }
2263 0 :
2264 0 : // First check if this is a creation or an update
2265 0 : let create_or_update = self.tenant_location_config_prepare(tenant_shard_id.tenant_id, req);
2266 0 :
2267 0 : let mut result = TenantLocationConfigResponse {
2268 0 : shards: Vec::new(),
2269 0 : stripe_size: None,
2270 0 : };
2271 0 : let waiters = match create_or_update {
2272 0 : TenantCreateOrUpdate::Create(create_req) => {
2273 0 : let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
2274 0 : result.shards = create_resp
2275 0 : .shards
2276 0 : .into_iter()
2277 0 : .map(|s| TenantShardLocation {
2278 0 : node_id: s.node_id,
2279 0 : shard_id: s.shard_id,
2280 0 : })
2281 0 : .collect();
2282 0 : waiters
2283 : }
2284 0 : TenantCreateOrUpdate::Update(updates) => {
2285 0 : // Persist updates
2286 0 : // Ordering: write to the database before applying changes in-memory, so that
2287 0 : // we will not appear to time-travel backwards on a restart.
2288 0 : let mut schedule_context = ScheduleContext::default();
2289 : for ShardUpdate {
2290 0 : tenant_shard_id,
2291 0 : placement_policy,
2292 0 : tenant_config,
2293 0 : generation,
2294 0 : } in &updates
2295 : {
2296 0 : self.persistence
2297 0 : .update_tenant_shard(
2298 0 : TenantFilter::Shard(*tenant_shard_id),
2299 0 : Some(placement_policy.clone()),
2300 0 : Some(tenant_config.clone()),
2301 0 : *generation,
2302 0 : None,
2303 0 : )
2304 0 : .await?;
2305 : }
2306 :
2307 : // Apply updates in-memory
2308 0 : let mut waiters = Vec::new();
2309 0 : {
2310 0 : let mut locked = self.inner.write().unwrap();
2311 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2312 :
2313 : for ShardUpdate {
2314 0 : tenant_shard_id,
2315 0 : placement_policy,
2316 0 : tenant_config,
2317 0 : generation: update_generation,
2318 0 : } in updates
2319 : {
2320 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
2321 0 : tracing::warn!("Shard {tenant_shard_id} removed while updating");
2322 0 : continue;
2323 : };
2324 :
2325 : // Update stripe size
2326 0 : if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
2327 0 : result.stripe_size = Some(shard.shard.stripe_size);
2328 0 : }
2329 :
2330 0 : shard.policy = placement_policy;
2331 0 : shard.config = tenant_config;
2332 0 : if let Some(generation) = update_generation {
2333 0 : shard.generation = Some(generation);
2334 0 : }
2335 :
2336 0 : shard.schedule(scheduler, &mut schedule_context)?;
2337 :
2338 0 : let maybe_waiter = self.maybe_reconcile_shard(shard, nodes);
2339 0 : if let Some(waiter) = maybe_waiter {
2340 0 : waiters.push(waiter);
2341 0 : }
2342 :
2343 0 : if let Some(node_id) = shard.intent.get_attached() {
2344 0 : result.shards.push(TenantShardLocation {
2345 0 : shard_id: tenant_shard_id,
2346 0 : node_id: *node_id,
2347 0 : })
2348 0 : }
2349 : }
2350 : }
2351 0 : waiters
2352 : }
2353 : };
2354 :
2355 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2356 : // Do not treat a reconcile error as fatal: we have already applied any requested
2357 : // Intent changes, and the reconcile can fail for external reasons like unavailable
2358 : // compute notification API. In these cases, it is important that we do not
2359 : // cause the cloud control plane to retry forever on this API.
2360 0 : tracing::warn!(
2361 0 : "Failed to reconcile after /location_config: {e}, returning success anyway"
2362 : );
2363 0 : }
2364 :
2365 : // Logging the full result is useful because it lets us cross-check what the cloud control
2366 : // plane's tenant_shards table should contain.
2367 0 : tracing::info!("Complete, returning {result:?}");
2368 :
2369 0 : Ok(result)
2370 0 : }
2371 :
2372 0 : pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
2373 : // We require an exclusive lock, because we are updating persistent and in-memory state
2374 0 : let _tenant_lock = trace_exclusive_lock(
2375 0 : &self.tenant_op_locks,
2376 0 : req.tenant_id,
2377 0 : TenantOperations::ConfigSet,
2378 0 : )
2379 0 : .await;
2380 :
2381 0 : let tenant_id = req.tenant_id;
2382 0 : let config = req.config;
2383 0 :
2384 0 : self.persistence
2385 0 : .update_tenant_shard(
2386 0 : TenantFilter::Tenant(req.tenant_id),
2387 0 : None,
2388 0 : Some(config.clone()),
2389 0 : None,
2390 0 : None,
2391 0 : )
2392 0 : .await?;
2393 :
2394 0 : let waiters = {
2395 0 : let mut waiters = Vec::new();
2396 0 : let mut locked = self.inner.write().unwrap();
2397 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2398 0 : for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2399 0 : shard.config = config.clone();
2400 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2401 0 : waiters.push(waiter);
2402 0 : }
2403 : }
2404 0 : waiters
2405 : };
2406 :
2407 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2408 : // Treat this as success because we have stored the configuration. If e.g.
2409 : // a node was unavailable at this time, it should not stop us accepting a
2410 : // configuration change.
2411 0 : tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
2412 0 : }
2413 :
2414 0 : Ok(())
2415 0 : }
2416 :
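/// Return a tenant's TenantConfig in a shape compatible with the pageserver's config API.
/// Because this service has no global defaults, `tenant_specific_overrides` and
/// `effective_config` carry identical values; both keys are retained for compatibility.
///
/// Illustrative response shape (contents depend on the tenant's configuration):
/// ```ignore
/// {
///     "tenant_specific_overrides": { /* TenantConfig serialized as JSON */ },
///     "effective_config": { /* identical to the above */ }
/// }
/// ```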
2417 0 : pub(crate) fn tenant_config_get(
2418 0 : &self,
2419 0 : tenant_id: TenantId,
2420 0 : ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
2421 0 : let config = {
2422 0 : let locked = self.inner.read().unwrap();
2423 0 :
2424 0 : match locked
2425 0 : .tenants
2426 0 : .range(TenantShardId::tenant_range(tenant_id))
2427 0 : .next()
2428 : {
2429 0 : Some((_tenant_shard_id, shard)) => shard.config.clone(),
2430 : None => {
2431 0 : return Err(ApiError::NotFound(
2432 0 : anyhow::anyhow!("Tenant not found").into(),
2433 0 : ))
2434 : }
2435 : }
2436 : };
2437 :
2438 : // Unlike the pageserver, we do not have a set of global defaults: the config is
2439 : // entirely per-tenant. Therefore the distinction between `tenant_specific_overrides`
2440 : // and `effective_config` in the response is meaningless, but we retain that syntax
2441 : // in order to remain compatible with the pageserver API.
2442 :
2443 0 : let response = HashMap::from([
2444 : (
2445 : "tenant_specific_overrides",
2446 0 : serde_json::to_value(&config)
2447 0 : .context("serializing tenant specific overrides")
2448 0 : .map_err(ApiError::InternalServerError)?,
2449 : ),
2450 : (
2451 0 : "effective_config",
2452 0 : serde_json::to_value(&config)
2453 0 : .context("serializing effective config")
2454 0 : .map_err(ApiError::InternalServerError)?,
2455 : ),
2456 : ]);
2457 :
2458 0 : Ok(response)
2459 0 : }
2460 :
2461 0 : pub(crate) async fn tenant_time_travel_remote_storage(
2462 0 : &self,
2463 0 : time_travel_req: &TenantTimeTravelRequest,
2464 0 : tenant_id: TenantId,
2465 0 : timestamp: Cow<'_, str>,
2466 0 : done_if_after: Cow<'_, str>,
2467 0 : ) -> Result<(), ApiError> {
2468 0 : let _tenant_lock = trace_exclusive_lock(
2469 0 : &self.tenant_op_locks,
2470 0 : tenant_id,
2471 0 : TenantOperations::TimeTravelRemoteStorage,
2472 0 : )
2473 0 : .await;
2474 :
2475 0 : let node = {
2476 0 : let locked = self.inner.read().unwrap();
2477 : // Just a sanity check to prevent misuse: the API expects that the tenant is fully
2478 : // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
2479 : // but only at the start of the process, so it's really just to prevent operator
2480 : // mistakes.
2481 0 : for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
2482 0 : if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
2483 : {
2484 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2485 0 : "We want tenant to be detached in shard with tenant_shard_id={shard_id}"
2486 0 : )));
2487 0 : }
2488 0 : let maybe_attached = shard
2489 0 : .observed
2490 0 : .locations
2491 0 : .iter()
2492 0 : .filter_map(|(node_id, observed_location)| {
2493 0 : observed_location
2494 0 : .conf
2495 0 : .as_ref()
2496 0 : .map(|loc| (node_id, observed_location, loc.mode))
2497 0 : })
2498 0 : .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
2499 0 : if let Some((node_id, _observed_location, mode)) = maybe_attached {
2500 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}")));
2501 0 : }
2502 : }
2503 0 : let scheduler = &locked.scheduler;
2504 : // Right now we only perform the operation on a single node without parallelization
2505 : // TODO fan out the operation to multiple nodes for better performance
2506 0 : let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
2507 0 : let node = locked
2508 0 : .nodes
2509 0 : .get(&node_id)
2510 0 : .expect("Pageservers may not be deleted while lock is active");
2511 0 : node.clone()
2512 0 : };
2513 0 :
2514 0 : // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
2515 0 : let mut counts = time_travel_req
2516 0 : .shard_counts
2517 0 : .iter()
2518 0 : .copied()
2519 0 : .collect::<HashSet<_>>()
2520 0 : .into_iter()
2521 0 : .collect::<Vec<_>>();
2522 0 : counts.sort_unstable();
2523 :
2524 0 : for count in counts {
2525 0 : let shard_ids = (0..count.count())
2526 0 : .map(|i| TenantShardId {
2527 0 : tenant_id,
2528 0 : shard_number: ShardNumber(i),
2529 0 : shard_count: count,
2530 0 : })
2531 0 : .collect::<Vec<_>>();
2532 0 : for tenant_shard_id in shard_ids {
2533 0 : let client = PageserverClient::new(
2534 0 : node.get_id(),
2535 0 : node.base_url(),
2536 0 : self.config.jwt_token.as_deref(),
2537 0 : );
2538 0 :
2539 0 : tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
2540 :
2541 0 : client
2542 0 : .tenant_time_travel_remote_storage(
2543 0 : tenant_shard_id,
2544 0 : ×tamp,
2545 0 : &done_if_after,
2546 0 : )
2547 0 : .await
2548 0 : .map_err(|e| {
2549 0 : ApiError::InternalServerError(anyhow::anyhow!(
2550 0 : "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
2551 0 : node
2552 0 : ))
2553 0 : })?;
2554 : }
2555 : }
2556 0 : Ok(())
2557 0 : }
2558 :
2559 0 : pub(crate) async fn tenant_secondary_download(
2560 0 : &self,
2561 0 : tenant_id: TenantId,
2562 0 : wait: Option<Duration>,
2563 0 : ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
2564 0 : let _tenant_lock = trace_shared_lock(
2565 0 : &self.tenant_op_locks,
2566 0 : tenant_id,
2567 0 : TenantOperations::SecondaryDownload,
2568 0 : )
2569 0 : .await;
2570 :
2571 : // Acquire the lock and collect the shard-node tuples to which we will send requests
2572 0 : let targets = {
2573 0 : let locked = self.inner.read().unwrap();
2574 0 : let mut targets = Vec::new();
2575 :
2576 0 : for (tenant_shard_id, shard) in
2577 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2578 : {
2579 0 : for node_id in shard.intent.get_secondary() {
2580 0 : let node = locked
2581 0 : .nodes
2582 0 : .get(node_id)
2583 0 : .expect("Pageservers may not be deleted while referenced");
2584 0 :
2585 0 : targets.push((*tenant_shard_id, node.clone()));
2586 0 : }
2587 : }
2588 0 : targets
2589 0 : };
2590 0 :
2591 0 : // Issue concurrent requests to all shards' locations
2592 0 : let mut futs = FuturesUnordered::new();
2593 0 : for (tenant_shard_id, node) in targets {
2594 0 : let client = PageserverClient::new(
2595 0 : node.get_id(),
2596 0 : node.base_url(),
2597 0 : self.config.jwt_token.as_deref(),
2598 0 : );
2599 0 : futs.push(async move {
2600 0 : let result = client
2601 0 : .tenant_secondary_download(tenant_shard_id, wait)
2602 0 : .await;
2603 0 : (result, node, tenant_shard_id)
2604 0 : })
2605 : }
2606 :
2607 : // Handle any errors returned by pageservers. This includes cases like this request racing with
2608 : // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
2609 : // well as more general cases like 503s, 500s, or timeouts.
2610 0 : let mut aggregate_progress = SecondaryProgress::default();
2611 0 : let mut aggregate_status: Option<StatusCode> = None;
2612 0 : let mut error: Option<mgmt_api::Error> = None;
2613 0 : while let Some((result, node, tenant_shard_id)) = futs.next().await {
2614 0 : match result {
2615 0 : Err(e) => {
2616 0 : // Secondary downloads are always advisory: if something fails, we nevertheless report success, so that whoever
2617 0 : // is calling us will proceed with whatever migration they're doing, albeit with a slightly less warm cache
2618 0 : // than they had hoped for.
2619 0 : tracing::warn!("Secondary download error from pageserver {node}: {e}",);
2620 0 : error = Some(e)
2621 : }
2622 0 : Ok((status_code, progress)) => {
2623 0 : tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
2624 0 : aggregate_progress.layers_downloaded += progress.layers_downloaded;
2625 0 : aggregate_progress.layers_total += progress.layers_total;
2626 0 : aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
2627 0 : aggregate_progress.bytes_total += progress.bytes_total;
2628 0 : aggregate_progress.heatmap_mtime =
2629 0 : std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
2630 0 : aggregate_status = match aggregate_status {
2631 0 : None => Some(status_code),
2632 0 : Some(StatusCode::OK) => Some(status_code),
2633 0 : Some(cur) => {
2634 0 : // Other status codes (e.g. 202) -- do not overwrite.
2635 0 : Some(cur)
2636 : }
2637 : };
2638 : }
2639 : }
2640 : }
2641 :
2642 : // If any of the shards return 202, indicate our result as 202.
2643 0 : match aggregate_status {
2644 : None => {
2645 0 : match error {
2646 0 : Some(e) => {
2647 0 : // No successes, and an error: surface it
2648 0 : Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
2649 : }
2650 : None => {
2651 : // No shards found
2652 0 : Err(ApiError::NotFound(
2653 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
2654 0 : ))
2655 : }
2656 : }
2657 : }
2658 0 : Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
2659 : }
2660 0 : }
2661 :
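/// Delete a tenant: detach all shards, delete remote content via an arbitrary node, then
/// drop persistent state followed by in-memory state. Success is reported as 404 NOT
/// FOUND, imitating the pageserver's deletion API.
///
/// Hypothetical caller sketch:
/// ```ignore
/// let status = service.tenant_delete(tenant_id).await?;
/// assert_eq!(status, StatusCode::NOT_FOUND); // 404 signals successful deletion
/// ```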
2662 0 : pub(crate) async fn tenant_delete(&self, tenant_id: TenantId) -> Result<StatusCode, ApiError> {
2663 0 : let _tenant_lock =
2664 0 : trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
2665 :
2666 : // Detach all shards
2667 0 : let (detach_waiters, shard_ids, node) = {
2668 0 : let mut shard_ids = Vec::new();
2669 0 : let mut detach_waiters = Vec::new();
2670 0 : let mut locked = self.inner.write().unwrap();
2671 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2672 0 : for (tenant_shard_id, shard) in
2673 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2674 : {
2675 0 : shard_ids.push(*tenant_shard_id);
2676 0 :
2677 0 : // Update the tenant's intent to remove all attachments
2678 0 : shard.policy = PlacementPolicy::Detached;
2679 0 : shard
2680 0 : .schedule(scheduler, &mut ScheduleContext::default())
2681 0 : .expect("De-scheduling is infallible");
2682 0 : debug_assert!(shard.intent.get_attached().is_none());
2683 0 : debug_assert!(shard.intent.get_secondary().is_empty());
2684 :
2685 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2686 0 : detach_waiters.push(waiter);
2687 0 : }
2688 : }
2689 :
2690 : // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
2691 : // was attached, just has to be able to see the S3 content)
2692 0 : let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
2693 0 : let node = nodes
2694 0 : .get(&node_id)
2695 0 : .expect("Pageservers may not be deleted while lock is active");
2696 0 : (detach_waiters, shard_ids, node.clone())
2697 0 : };
2698 0 :
2699 0 : // This reconcile wait can fail in a few ways:
2700 0 : // A: there is a very long queue for the reconciler semaphore
2701 0 : // B: some pageserver is failing to handle a detach promptly
2702 0 : // C: some pageserver goes offline right at the moment we send it a request.
2703 0 : //
2704 0 : // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
2705 0 : // the next attempt to reconcile will silently skip detaches for an offline node and succeed. If B happens,
2706 0 : // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
2707 0 : // deleting the underlying data).
2708 0 : self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
2709 0 : .await?;
2710 :
2711 0 : let locations = shard_ids
2712 0 : .into_iter()
2713 0 : .map(|s| (s, node.clone()))
2714 0 : .collect::<Vec<_>>();
2715 0 : let results = self.tenant_for_shards_api(
2716 0 : locations,
2717 0 : |tenant_shard_id, client| async move { client.tenant_delete(tenant_shard_id).await },
2718 0 : 1,
2719 0 : 3,
2720 0 : RECONCILE_TIMEOUT,
2721 0 : &self.cancel,
2722 0 : )
2723 0 : .await;
2724 0 : for result in results {
2725 0 : match result {
2726 : Ok(StatusCode::ACCEPTED) => {
2727 : // This should never happen: we waited for detaches to finish above
2728 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2729 0 : "Unexpectedly still attached on {}",
2730 0 : node
2731 0 : )));
2732 : }
2733 0 : Ok(_) => {}
2734 : Err(mgmt_api::Error::Cancelled) => {
2735 0 : return Err(ApiError::ShuttingDown);
2736 : }
2737 0 : Err(e) => {
2738 0 : // This is unexpected: remote deletion should be infallible, unless the object store
2739 0 : // at large is unavailable.
2740 0 : tracing::error!("Error deleting via node {}: {e}", node);
2741 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
2742 : }
2743 : }
2744 : }
2745 :
2746 : // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
2747 : // our in-memory state and database state.
2748 :
2749 : // Ordering: we delete persistent state first: if we then
2750 : // crash, we will drop the in-memory state.
2751 :
2752 : // Drop persistent state.
2753 0 : self.persistence.delete_tenant(tenant_id).await?;
2754 :
2755 : // Drop in-memory state
2756 : {
2757 0 : let mut locked = self.inner.write().unwrap();
2758 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
2759 :
2760 : // Dereference Scheduler from shards before dropping them
2761 0 : for (_tenant_shard_id, shard) in
2762 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2763 0 : {
2764 0 : shard.intent.clear(scheduler);
2765 0 : }
2766 :
2767 0 : tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
2768 0 : tracing::info!(
2769 0 : "Deleted tenant {tenant_id}, now have {} tenants",
2770 0 : locked.tenants.len()
2771 : );
2772 : };
2773 :
2774 : // Success is represented as 404, to imitate the existing pageserver deletion API
2775 0 : Ok(StatusCode::NOT_FOUND)
2776 0 : }
2777 :
2778 : /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
2779 : /// for a tenant. The TenantConfig is passed through to pageservers, whereas this function modifies
2780 : /// the tenant's policies (configuration) within the storage controller
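///
/// Usage sketch (request construction is illustrative; both fields are optional and
/// applied independently):
/// ```ignore
/// service
///     .tenant_update_policy(tenant_id, TenantPolicyRequest {
///         placement: Some(PlacementPolicy::Attached(1)), // keep one secondary for HA
///         scheduling: None,                              // leave scheduling policy unchanged
///     })
///     .await?;
/// ```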
2781 0 : pub(crate) async fn tenant_update_policy(
2782 0 : &self,
2783 0 : tenant_id: TenantId,
2784 0 : req: TenantPolicyRequest,
2785 0 : ) -> Result<(), ApiError> {
2786 : // We require an exclusive lock, because we are updating persistent and in-memory state
2787 0 : let _tenant_lock = trace_exclusive_lock(
2788 0 : &self.tenant_op_locks,
2789 0 : tenant_id,
2790 0 : TenantOperations::UpdatePolicy,
2791 0 : )
2792 0 : .await;
2793 :
2794 0 : failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
2795 :
2796 : let TenantPolicyRequest {
2797 0 : placement,
2798 0 : scheduling,
2799 0 : } = req;
2800 0 :
2801 0 : self.persistence
2802 0 : .update_tenant_shard(
2803 0 : TenantFilter::Tenant(tenant_id),
2804 0 : placement.clone(),
2805 0 : None,
2806 0 : None,
2807 0 : scheduling,
2808 0 : )
2809 0 : .await?;
2810 :
2811 0 : let mut schedule_context = ScheduleContext::default();
2812 0 : let mut locked = self.inner.write().unwrap();
2813 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2814 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2815 0 : if let Some(placement) = &placement {
2816 0 : shard.policy = placement.clone();
2817 0 :
2818 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2819 0 : "Updated placement policy to {placement:?}");
2820 0 : }
2821 :
2822 0 : if let Some(scheduling) = &scheduling {
2823 0 : shard.set_scheduling_policy(*scheduling);
2824 0 :
2825 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2826 0 : "Updated scheduling policy to {scheduling:?}");
2827 0 : }
2828 :
2829 : // In case scheduling is being switched back on, try it now.
2830 0 : shard.schedule(scheduler, &mut schedule_context).ok();
2831 0 : self.maybe_reconcile_shard(shard, nodes);
2832 : }
2833 :
2834 0 : Ok(())
2835 0 : }
2836 :
2837 0 : pub(crate) async fn tenant_timeline_create(
2838 0 : &self,
2839 0 : tenant_id: TenantId,
2840 0 : mut create_req: TimelineCreateRequest,
2841 0 : ) -> Result<TimelineInfo, ApiError> {
2842 0 : tracing::info!(
2843 0 : "Creating timeline {}/{}",
2844 : tenant_id,
2845 : create_req.new_timeline_id,
2846 : );
2847 :
2848 0 : let _tenant_lock = trace_shared_lock(
2849 0 : &self.tenant_op_locks,
2850 0 : tenant_id,
2851 0 : TenantOperations::TimelineCreate,
2852 0 : )
2853 0 : .await;
2854 0 : failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
2855 :
2856 0 : self.ensure_attached_wait(tenant_id).await?;
2857 :
2858 0 : let mut targets = {
2859 0 : let locked = self.inner.read().unwrap();
2860 0 : let mut targets = Vec::new();
2861 :
2862 0 : for (tenant_shard_id, shard) in
2863 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2864 0 : {
2865 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2866 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2867 0 : })?;
2868 0 : let node = locked
2869 0 : .nodes
2870 0 : .get(&node_id)
2871 0 : .expect("Pageservers may not be deleted while referenced");
2872 0 :
2873 0 : targets.push((*tenant_shard_id, node.clone()));
2874 : }
2875 0 : targets
2876 0 : };
2877 0 :
2878 0 : if targets.is_empty() {
2879 0 : return Err(ApiError::NotFound(
2880 0 : anyhow::anyhow!("Tenant not found").into(),
2881 0 : ));
2882 0 : };
2883 0 : let shard_zero = targets.remove(0);
2884 :
2885 0 : async fn create_one(
2886 0 : tenant_shard_id: TenantShardId,
2887 0 : node: Node,
2888 0 : jwt: Option<String>,
2889 0 : create_req: TimelineCreateRequest,
2890 0 : ) -> Result<TimelineInfo, ApiError> {
2891 0 : tracing::info!(
2892 0 : "Creating timeline on shard {}/{}, attached to node {node}",
2893 : tenant_shard_id,
2894 : create_req.new_timeline_id,
2895 : );
2896 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2897 0 :
2898 0 : client
2899 0 : .timeline_create(tenant_shard_id, &create_req)
2900 0 : .await
2901 0 : .map_err(|e| passthrough_api_error(&node, e))
2902 0 : }
2903 :
2904 : // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
2905 : // use whatever LSN that shard picked when creating on subsequent shards. We arbitrarily use shard zero as the shard
2906 : // that will get the first creation request, and propagate the LSN to all the >0 shards.
2907 0 : let timeline_info = create_one(
2908 0 : shard_zero.0,
2909 0 : shard_zero.1,
2910 0 : self.config.jwt_token.clone(),
2911 0 : create_req.clone(),
2912 0 : )
2913 0 : .await?;
2914 :
2915 : // Propagate the LSN that shard zero picked, if caller didn't provide one
2916 0 : if create_req.ancestor_timeline_id.is_some() && create_req.ancestor_start_lsn.is_none() {
2917 0 : create_req.ancestor_start_lsn = timeline_info.ancestor_lsn;
2918 0 : }
2919 :
2920 : // Create timeline on remaining shards with number >0
2921 0 : if !targets.is_empty() {
2922 : // If we had multiple shards, issue requests for the remainder now.
2923 0 : let jwt = &self.config.jwt_token;
2924 0 : self.tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
2925 0 : let create_req = create_req.clone();
2926 0 : Box::pin(create_one(tenant_shard_id, node, jwt.clone(), create_req))
2927 0 : })
2928 0 : .await?;
2929 0 : }
2930 :
2931 0 : Ok(timeline_info)
2932 0 : }
2933 :
2934 0 : pub(crate) async fn tenant_timeline_detach_ancestor(
2935 0 : &self,
2936 0 : tenant_id: TenantId,
2937 0 : timeline_id: TimelineId,
2938 0 : ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
2939 0 : tracing::info!("Detaching timeline {tenant_id}/{timeline_id}",);
2940 :
2941 0 : let _tenant_lock = trace_shared_lock(
2942 0 : &self.tenant_op_locks,
2943 0 : tenant_id,
2944 0 : TenantOperations::TimelineDetachAncestor,
2945 0 : )
2946 0 : .await;
2947 :
2948 0 : self.ensure_attached_wait(tenant_id).await?;
2949 :
2950 0 : let targets = {
2951 0 : let locked = self.inner.read().unwrap();
2952 0 : let mut targets = Vec::new();
2953 :
2954 0 : for (tenant_shard_id, shard) in
2955 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2956 0 : {
2957 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2958 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2959 0 : })?;
2960 0 : let node = locked
2961 0 : .nodes
2962 0 : .get(&node_id)
2963 0 : .expect("Pageservers may not be deleted while referenced");
2964 0 :
2965 0 : targets.push((*tenant_shard_id, node.clone()));
2966 : }
2967 0 : targets
2968 0 : };
2969 0 :
2970 0 : if targets.is_empty() {
2971 0 : return Err(ApiError::NotFound(
2972 0 : anyhow::anyhow!("Tenant not found").into(),
2973 0 : ));
2974 0 : }
2975 :
2976 0 : async fn detach_one(
2977 0 : tenant_shard_id: TenantShardId,
2978 0 : timeline_id: TimelineId,
2979 0 : node: Node,
2980 0 : jwt: Option<String>,
2981 0 : ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
2982 0 : tracing::info!(
2983 0 : "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
2984 : );
2985 :
2986 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2987 0 :
2988 0 : client
2989 0 : .timeline_detach_ancestor(tenant_shard_id, timeline_id)
2990 0 : .await
2991 0 : .map_err(|e| {
2992 0 : use mgmt_api::Error;
2993 0 :
2994 0 : match e {
2995 : // no ancestor (ever)
2996 0 : Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
2997 0 : "{node}: {}",
2998 0 : msg.strip_prefix("Conflict: ").unwrap_or(&msg)
2999 0 : )),
3000 : // too many ancestors
3001 0 : Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
3002 0 : ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
3003 : }
3004 0 : Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, msg) => {
3005 0 : // Avoid turning these into conflicts, to remain compatible with
3006 0 : // pageservers: 500 errors are sadly retryable for timeline ancestor
3007 0 : // detach.
3008 0 : ApiError::InternalServerError(anyhow::anyhow!("{node}: {msg}"))
3009 : }
3010 : // rest can be mapped as usual
3011 0 : other => passthrough_api_error(&node, other),
3012 : }
3013 0 : })
3014 0 : .map(|res| (tenant_shard_id.shard_number, res))
3015 0 : }
3016 :
3017 : // no shard needs to go first/last; the operation should be idempotent
3018 0 : let mut results = self
3019 0 : .tenant_for_shards(targets, |tenant_shard_id, node| {
3020 0 : futures::FutureExt::boxed(detach_one(
3021 0 : tenant_shard_id,
3022 0 : timeline_id,
3023 0 : node,
3024 0 : self.config.jwt_token.clone(),
3025 0 : ))
3026 0 : })
3027 0 : .await?;
3028 :
3029 0 : let any = results.pop().expect("we must have at least one response");
3030 0 :
3031 0 : let mismatching = results
3032 0 : .iter()
3033 0 : .filter(|(_, res)| res != &any.1)
3034 0 : .collect::<Vec<_>>();
3035 0 : if !mismatching.is_empty() {
3036 : // This can be hit by races, which should not happen because of the operation lock on the control plane
3037 0 : let matching = results.len() - mismatching.len();
3038 0 : tracing::error!(
3039 : matching,
3040 : compared_against=?any,
3041 : ?mismatching,
3042 0 : "shards returned different results"
3043 : );
3044 :
3045 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("pageservers returned mixed results for ancestor detach; manual intervention is required.")));
3046 0 : }
3047 0 :
3048 0 : Ok(any.1)
3049 0 : }
3050 :
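The mismatch check above reduces to a small, reusable shape: take one response as the reference and require every other shard's response to equal it. A standalone sketch with simplified types (shard IDs are plain `u8` here):

```rust
// Minimal sketch of the "all shards must agree" check: one response is the
// reference, every other response must match it exactly.
fn all_shards_agree<T: PartialEq + std::fmt::Debug>(
    mut results: Vec<(u8, T)>,
) -> Result<T, String> {
    let reference = results.pop().expect("at least one response");
    let mismatching: Vec<_> = results
        .iter()
        .filter(|(_, res)| res != &reference.1)
        .collect();
    if !mismatching.is_empty() {
        return Err(format!(
            "{} shard(s) disagreed with {:?}",
            mismatching.len(),
            reference
        ));
    }
    Ok(reference.1)
}

fn main() {
    assert!(all_shards_agree(vec![(0, true), (1, true)]).is_ok());
    assert!(all_shards_agree(vec![(0, true), (1, false)]).is_err());
}
```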
3051 : /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
3052 : ///
3053 : /// On success, the returned vector contains exactly the same number of elements as the input `locations`.
3054 0 : async fn tenant_for_shards<F, R>(
3055 0 : &self,
3056 0 : locations: Vec<(TenantShardId, Node)>,
3057 0 : mut req_fn: F,
3058 0 : ) -> Result<Vec<R>, ApiError>
3059 0 : where
3060 0 : F: FnMut(
3061 0 : TenantShardId,
3062 0 : Node,
3063 0 : )
3064 0 : -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
3065 0 : {
3066 0 : let mut futs = FuturesUnordered::new();
3067 0 : let mut results = Vec::with_capacity(locations.len());
3068 :
3069 0 : for (tenant_shard_id, node) in locations {
3070 0 : futs.push(req_fn(tenant_shard_id, node));
3071 0 : }
3072 :
3073 0 : while let Some(r) = futs.next().await {
3074 0 : results.push(r?);
3075 : }
3076 :
3077 0 : Ok(results)
3078 0 : }
3079 :
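`tenant_for_shards` is a textbook `FuturesUnordered` fan-out. A reduced sketch of the same shape, generic over boxed futures: results are drained in completion order rather than input order, the first error aborts the batch, and on success the output count matches the input count:

```rust
// Minimal sketch of the fan-out used by `tenant_for_shards`: push one future
// per shard, drain them as they complete, fail fast on the first error.
use futures::{stream::FuturesUnordered, StreamExt};
use std::{future::Future, pin::Pin};

async fn fan_out<T, E>(
    futs_in: Vec<Pin<Box<dyn Future<Output = Result<T, E>> + Send>>>,
) -> Result<Vec<T>, E> {
    let mut futs = FuturesUnordered::new();
    let mut results = Vec::with_capacity(futs_in.len());
    for f in futs_in {
        futs.push(f);
    }
    while let Some(r) = futs.next().await {
        results.push(r?); // fail fast: first error aborts the whole batch
    }
    Ok(results)
}
```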
3080 : /// Concurrently invoke a pageserver API call on many shards at once
3081 0 : pub(crate) async fn tenant_for_shards_api<T, O, F>(
3082 0 : &self,
3083 0 : locations: Vec<(TenantShardId, Node)>,
3084 0 : op: O,
3085 0 : warn_threshold: u32,
3086 0 : max_retries: u32,
3087 0 : timeout: Duration,
3088 0 : cancel: &CancellationToken,
3089 0 : ) -> Vec<mgmt_api::Result<T>>
3090 0 : where
3091 0 : O: Fn(TenantShardId, PageserverClient) -> F + Copy,
3092 0 : F: std::future::Future<Output = mgmt_api::Result<T>>,
3093 0 : {
3094 0 : let mut futs = FuturesUnordered::new();
3095 0 : let mut results = Vec::with_capacity(locations.len());
3096 :
3097 0 : for (tenant_shard_id, node) in locations {
3098 0 : futs.push(async move {
3099 0 : node.with_client_retries(
3100 0 : |client| op(tenant_shard_id, client),
3101 0 : &self.config.jwt_token,
3102 0 : warn_threshold,
3103 0 : max_retries,
3104 0 : timeout,
3105 0 : cancel,
3106 0 : )
3107 0 : .await
3108 0 : });
3109 0 : }
3110 :
3111 0 : while let Some(r) = futs.next().await {
3112 0 : let r = r.unwrap_or(Err(mgmt_api::Error::Cancelled));
3113 0 : results.push(r);
3114 0 : }
3115 :
3116 0 : results
3117 0 : }
3118 :
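A hedged sketch of the retry shape that `with_client_retries` appears to provide (its exact semantics live elsewhere in the codebase): retry a fallible async operation up to `max_retries` times, start warning once `warn_threshold` attempts have failed, and yield `None` on cancellation, mirroring the `None => Cancelled` mapping above:

```rust
// Illustrative only: the real `with_client_retries` may differ in detail.
use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn with_retries<T, E: std::fmt::Display, F, Fut>(
    mut op: F,
    warn_threshold: u32,
    max_retries: u32,
    backoff: Duration,
    cancel: &CancellationToken,
) -> Option<Result<T, E>>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    for attempt in 0..=max_retries {
        if cancel.is_cancelled() {
            return None; // mirrors the `None => Cancelled` mapping above
        }
        match op().await {
            Ok(v) => return Some(Ok(v)),
            Err(e) if attempt == max_retries => return Some(Err(e)),
            Err(e) => {
                if attempt >= warn_threshold {
                    eprintln!("attempt {attempt} failed: {e}, retrying");
                }
                tokio::select! {
                    _ = cancel.cancelled() => return None,
                    _ = tokio::time::sleep(backoff) => {}
                }
            }
        }
    }
    unreachable!("loop always returns on the final attempt")
}
```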
3119 0 : pub(crate) async fn tenant_timeline_delete(
3120 0 : &self,
3121 0 : tenant_id: TenantId,
3122 0 : timeline_id: TimelineId,
3123 0 : ) -> Result<StatusCode, ApiError> {
3124 0 : tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
3125 0 : let _tenant_lock = trace_shared_lock(
3126 0 : &self.tenant_op_locks,
3127 0 : tenant_id,
3128 0 : TenantOperations::TimelineDelete,
3129 0 : )
3130 0 : .await;
3131 :
3132 0 : self.ensure_attached_wait(tenant_id).await?;
3133 :
3134 0 : let mut targets = {
3135 0 : let locked = self.inner.read().unwrap();
3136 0 : let mut targets = Vec::new();
3137 :
3138 0 : for (tenant_shard_id, shard) in
3139 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3140 0 : {
3141 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
3142 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
3143 0 : })?;
3144 0 : let node = locked
3145 0 : .nodes
3146 0 : .get(&node_id)
3147 0 : .expect("Pageservers may not be deleted while referenced");
3148 0 :
3149 0 : targets.push((*tenant_shard_id, node.clone()));
3150 : }
3151 0 : targets
3152 0 : };
3153 0 :
3154 0 : if targets.is_empty() {
3155 0 : return Err(ApiError::NotFound(
3156 0 : anyhow::anyhow!("Tenant not found").into(),
3157 0 : ));
3158 0 : }
3159 0 : let shard_zero = targets.remove(0);
3160 :
3161 0 : async fn delete_one(
3162 0 : tenant_shard_id: TenantShardId,
3163 0 : timeline_id: TimelineId,
3164 0 : node: Node,
3165 0 : jwt: Option<String>,
3166 0 : ) -> Result<StatusCode, ApiError> {
3167 0 : tracing::info!(
3168 0 : "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
3169 : );
3170 :
3171 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
3172 0 : client
3173 0 : .timeline_delete(tenant_shard_id, timeline_id)
3174 0 : .await
3175 0 : .map_err(|e| {
3176 0 : ApiError::InternalServerError(anyhow::anyhow!(
3177 0 : "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
3178 0 : ))
3179 0 : })
3180 0 : }
3181 :
3182 0 : let statuses = self
3183 0 : .tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
3184 0 : Box::pin(delete_one(
3185 0 : tenant_shard_id,
3186 0 : timeline_id,
3187 0 : node,
3188 0 : self.config.jwt_token.clone(),
3189 0 : ))
3190 0 : })
3191 0 : .await?;
3192 :
3193 : // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero
3194 0 : if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
3195 0 : return Ok(StatusCode::ACCEPTED);
3196 0 : }
3197 :
3198 : // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
3199 : // to shard zero, it gives a more obvious behavior that a GET returns 404 once the deletion is done.
3200 0 : let shard_zero_status = delete_one(
3201 0 : shard_zero.0,
3202 0 : timeline_id,
3203 0 : shard_zero.1,
3204 0 : self.config.jwt_token.clone(),
3205 0 : )
3206 0 : .await?;
3207 :
3208 0 : Ok(shard_zero_status)
3209 0 : }
3210 :
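The ordering rule above can be distilled: shard zero is only deleted once every shard >0 reports 404 (fully deleted); otherwise the overall status is 202 Accepted and shard zero is left untouched, so GETs routed to it keep resolving. A sketch with statuses simplified to an enum and a hypothetical `shard_zero_delete` closure standing in for the final call:

```rust
#[derive(PartialEq, Clone, Copy, Debug)]
enum DelStatus {
    Accepted, // deletion started or still in progress
    NotFound, // already fully deleted
}

// `shard_zero_delete` is a stand-in for the real delete call on shard zero.
fn overall_status(
    child_statuses: &[DelStatus],
    shard_zero_delete: impl FnOnce() -> DelStatus,
) -> DelStatus {
    if child_statuses.iter().any(|s| *s != DelStatus::NotFound) {
        // Some child is still deleting: don't touch shard zero yet, so a GET
        // routed to shard zero keeps working until deletion is really done.
        return DelStatus::Accepted;
    }
    shard_zero_delete()
}

fn main() {
    let done = [DelStatus::NotFound, DelStatus::NotFound];
    assert_eq!(overall_status(&done, || DelStatus::NotFound), DelStatus::NotFound);
    let pending = [DelStatus::NotFound, DelStatus::Accepted];
    assert_eq!(overall_status(&pending, || unreachable!()), DelStatus::Accepted);
}
```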
3211 : /// When you need to send an HTTP request to the pageserver that holds shard0 of a tenant, this
3212 : /// function looks up and returns the node. If the tenant isn't found, returns Err(ApiError::NotFound)
3213 0 : pub(crate) fn tenant_shard0_node(
3214 0 : &self,
3215 0 : tenant_id: TenantId,
3216 0 : ) -> Result<(Node, TenantShardId), ApiError> {
3217 0 : let locked = self.inner.read().unwrap();
3218 0 : let Some((tenant_shard_id, shard)) = locked
3219 0 : .tenants
3220 0 : .range(TenantShardId::tenant_range(tenant_id))
3221 0 : .next()
3222 : else {
3223 0 : return Err(ApiError::NotFound(
3224 0 : anyhow::anyhow!("Tenant {tenant_id} not found").into(),
3225 0 : ));
3226 : };
3227 :
3228 : // TODO: should use the ID last published to compute_hook, rather than the intent: the intent might
3229 : // point to somewhere we haven't attached yet.
3230 0 : let Some(node_id) = shard.intent.get_attached() else {
3231 0 : tracing::warn!(
3232 0 : tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
3233 0 : "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
3234 : shard.policy
3235 : );
3236 0 : return Err(ApiError::Conflict(
3237 0 : "Cannot call timeline API on non-attached tenant".to_string(),
3238 0 : ));
3239 : };
3240 :
3241 0 : let Some(node) = locked.nodes.get(node_id) else {
3242 : // This should never happen
3243 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3244 0 : "Shard refers to nonexistent node"
3245 0 : )));
3246 : };
3247 :
3248 0 : Ok((node.clone(), *tenant_shard_id))
3249 0 : }
3250 :
3251 0 : pub(crate) fn tenant_locate(
3252 0 : &self,
3253 0 : tenant_id: TenantId,
3254 0 : ) -> Result<TenantLocateResponse, ApiError> {
3255 0 : let locked = self.inner.read().unwrap();
3256 0 : tracing::info!("Locating shards for tenant {tenant_id}");
3257 :
3258 0 : let mut result = Vec::new();
3259 0 : let mut shard_params: Option<ShardParameters> = None;
3260 :
3261 0 : for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3262 : {
3263 0 : let node_id =
3264 0 : shard
3265 0 : .intent
3266 0 : .get_attached()
3267 0 : .ok_or(ApiError::BadRequest(anyhow::anyhow!(
3268 0 : "Cannot locate a tenant that is not attached"
3269 0 : )))?;
3270 :
3271 0 : let node = locked
3272 0 : .nodes
3273 0 : .get(&node_id)
3274 0 : .expect("Pageservers may not be deleted while referenced");
3275 0 :
3276 0 : result.push(node.shard_location(*tenant_shard_id));
3277 0 :
3278 0 : match &shard_params {
3279 0 : None => {
3280 0 : shard_params = Some(ShardParameters {
3281 0 : stripe_size: shard.shard.stripe_size,
3282 0 : count: shard.shard.count,
3283 0 : });
3284 0 : }
3285 0 : Some(params) => {
3286 0 : if params.stripe_size != shard.shard.stripe_size {
3287 : // This should never happen. We enforce at runtime because it's simpler than
3288 : // adding an extra per-tenant data structure to store the parameters that must be identical across shards
3289 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3290 0 : "Inconsistent shard stripe size parameters!"
3291 0 : )));
3292 0 : }
3293 : }
3294 : }
3295 : }
3296 :
3297 0 : if result.is_empty() {
3298 0 : return Err(ApiError::NotFound(
3299 0 : anyhow::anyhow!("No shards for this tenant ID found").into(),
3300 0 : ));
3301 0 : }
3302 0 : let shard_params = shard_params.expect("result is non-empty, therefore this is set");
3303 0 : tracing::info!(
3304 0 : "Located tenant {} with params {:?} on shards {}",
3305 0 : tenant_id,
3306 0 : shard_params,
3307 0 : result
3308 0 : .iter()
3309 0 : .map(|s| format!("{:?}", s))
3310 0 : .collect::<Vec<_>>()
3311 0 : .join(",")
3312 : );
3313 :
3314 0 : Ok(TenantLocateResponse {
3315 0 : shards: result,
3316 0 : shard_params,
3317 0 : })
3318 0 : }
3319 :
3320 : /// Returns None if the input iterator of shards does not include a shard with number=0
3321 0 : fn tenant_describe_impl<'a>(
3322 0 : &self,
3323 0 : shards: impl Iterator<Item = &'a TenantShard>,
3324 0 : ) -> Option<TenantDescribeResponse> {
3325 0 : let mut shard_zero = None;
3326 0 : let mut describe_shards = Vec::new();
3327 :
3328 0 : for shard in shards {
3329 0 : if shard.tenant_shard_id.is_shard_zero() {
3330 0 : shard_zero = Some(shard);
3331 0 : }
3332 :
3333 0 : describe_shards.push(TenantDescribeResponseShard {
3334 0 : tenant_shard_id: shard.tenant_shard_id,
3335 0 : node_attached: *shard.intent.get_attached(),
3336 0 : node_secondary: shard.intent.get_secondary().to_vec(),
3337 0 : last_error: shard
3338 0 : .last_error
3339 0 : .lock()
3340 0 : .unwrap()
3341 0 : .as_ref()
3342 0 : .map(|e| format!("{e}"))
3343 0 : .unwrap_or("".to_string())
3344 0 : .clone(),
3345 0 : is_reconciling: shard.reconciler.is_some(),
3346 0 : is_pending_compute_notification: shard.pending_compute_notification,
3347 0 : is_splitting: matches!(shard.splitting, SplitState::Splitting),
3348 0 : scheduling_policy: *shard.get_scheduling_policy(),
3349 : })
3350 : }
3351 :
3352 0 : let shard_zero = shard_zero?;
3353 :
3354 0 : Some(TenantDescribeResponse {
3355 0 : tenant_id: shard_zero.tenant_shard_id.tenant_id,
3356 0 : shards: describe_shards,
3357 0 : stripe_size: shard_zero.shard.stripe_size,
3358 0 : policy: shard_zero.policy.clone(),
3359 0 : config: shard_zero.config.clone(),
3360 0 : })
3361 0 : }
3362 :
3363 0 : pub(crate) fn tenant_describe(
3364 0 : &self,
3365 0 : tenant_id: TenantId,
3366 0 : ) -> Result<TenantDescribeResponse, ApiError> {
3367 0 : let locked = self.inner.read().unwrap();
3368 0 :
3369 0 : self.tenant_describe_impl(
3370 0 : locked
3371 0 : .tenants
3372 0 : .range(TenantShardId::tenant_range(tenant_id))
3373 0 : .map(|(_k, v)| v),
3374 0 : )
3375 0 : .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
3376 0 : }
3377 :
3378 0 : pub(crate) fn tenant_list(&self) -> Vec<TenantDescribeResponse> {
3379 0 : let locked = self.inner.read().unwrap();
3380 0 :
3381 0 : let mut result = Vec::new();
3382 0 : for (_tenant_id, tenant_shards) in
3383 0 : &locked.tenants.iter().group_by(|(id, _shard)| id.tenant_id)
3384 0 : {
3385 0 : result.push(
3386 0 : self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
3387 0 : .expect("Groups are always non-empty"),
3388 0 : );
3389 0 : }
3390 :
3391 0 : result
3392 0 : }
3393 :
3394 0 : #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
3395 : async fn abort_tenant_shard_split(
3396 : &self,
3397 : op: &TenantShardSplitAbort,
3398 : ) -> Result<(), TenantShardSplitAbortError> {
3399 : // Cleaning up a split:
3400 : // - Parent shards are not destroyed during a split, just detached.
3401 : // - Failed pageserver split API calls can leave the remote node with just the parent attached,
3402 : // just the children attached, or both.
3403 : //
3404 : // Therefore, the work to do is to:
3405 : // 1. Clean up storage controller's internal state to just refer to parents, no children
3406 : // 2. Call out to pageservers to ensure that children are detached
3407 : // 3. Call out to pageservers to ensure that parents are attached.
3408 : //
3409 : // Crash safety:
3410 : // - If the storage controller stops running during this cleanup *after* clearing the splitting state
3411 : // from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
3412 : // and detach them.
3413 : // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
3414 : // from our database, then we will re-enter this cleanup routine on startup.
3415 :
3416 : let TenantShardSplitAbort {
3417 : tenant_id,
3418 : new_shard_count,
3419 : new_stripe_size,
3420 : ..
3421 : } = op;
3422 :
3423 : // First abort persistent state, if any exists.
3424 : match self
3425 : .persistence
3426 : .abort_shard_split(*tenant_id, *new_shard_count)
3427 : .await?
3428 : {
3429 : AbortShardSplitStatus::Aborted => {
3430 : // Proceed to roll back any child shards created on pageservers
3431 : }
3432 : AbortShardSplitStatus::Complete => {
3433 : // The split completed (we might hit that path if e.g. our database transaction
3434 : // to write the completion landed in the database, but we dropped the connection
3435 : // before seeing the result).
3436 : //
3437 : // We must update in-memory state to reflect the successful split.
3438 : self.tenant_shard_split_commit_inmem(
3439 : *tenant_id,
3440 : *new_shard_count,
3441 : *new_stripe_size,
3442 : );
3443 : return Ok(());
3444 : }
3445 : }
3446 :
3447 : // Clean up in-memory state, and accumulate the list of child locations that need detaching
3448 : let detach_locations: Vec<(Node, TenantShardId)> = {
3449 : let mut detach_locations = Vec::new();
3450 : let mut locked = self.inner.write().unwrap();
3451 : let (nodes, tenants, scheduler) = locked.parts_mut();
3452 :
3453 : for (tenant_shard_id, shard) in
3454 : tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
3455 : {
3456 : if shard.shard.count == op.new_shard_count {
3457 : // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
3458 : // is infallible, so if we got an error we shouldn't have got that far.
3459 : tracing::warn!(
3460 : "During split abort, child shard {tenant_shard_id} found in-memory"
3461 : );
3462 : continue;
3463 : }
3464 :
3465 : // Add the children of this shard to this list of things to detach
3466 : if let Some(node_id) = shard.intent.get_attached() {
3467 : for child_id in tenant_shard_id.split(*new_shard_count) {
3468 : detach_locations.push((
3469 : nodes
3470 : .get(node_id)
3471 : .expect("Intent references nonexistent node")
3472 : .clone(),
3473 : child_id,
3474 : ));
3475 : }
3476 : } else {
3477 : tracing::warn!(
3478 : "During split abort, shard {tenant_shard_id} has no attached location"
3479 : );
3480 : }
3481 :
3482 : tracing::info!("Restoring parent shard {tenant_shard_id}");
3483 : shard.splitting = SplitState::Idle;
3484 : if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
3485 : // If this shard can't be scheduled now (perhaps due to offline nodes or
3486 : // capacity issues), that must not prevent us rolling back a split. In this
3487 : // case it should be eventually scheduled in the background.
3488 : tracing::warn!("Failed to schedule {tenant_shard_id} during split abort: {e}")
3489 : }
3490 :
3491 : self.maybe_reconcile_shard(shard, nodes);
3492 : }
3493 :
3494 : // We don't expect any new_shard_count shards to exist here, but drop them just in case
3495 0 : tenants.retain(|_id, s| s.shard.count != *new_shard_count);
3496 :
3497 : detach_locations
3498 : };
3499 :
3500 : for (node, child_id) in detach_locations {
3501 : if !node.is_available() {
3502 : // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
3503 : // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
3504 : // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
3505 : // them from the node.
3506 : tracing::warn!("Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated.");
3507 : continue;
3508 : }
3509 :
3510 : // Detach the remote child. If the pageserver split API call is still in progress, this call will get
3511 : // a 503 and retry, up to our limit.
3512 : tracing::info!("Detaching {child_id} on {node}...");
3513 : match node
3514 : .with_client_retries(
3515 0 : |client| async move {
3516 0 : let config = LocationConfig {
3517 0 : mode: LocationConfigMode::Detached,
3518 0 : generation: None,
3519 0 : secondary_conf: None,
3520 0 : shard_number: child_id.shard_number.0,
3521 0 : shard_count: child_id.shard_count.literal(),
3522 0 : // Stripe size and tenant config don't matter when detaching
3523 0 : shard_stripe_size: 0,
3524 0 : tenant_conf: TenantConfig::default(),
3525 0 : };
3526 0 :
3527 0 : client.location_config(child_id, config, None, false).await
3528 0 : },
3529 : &self.config.jwt_token,
3530 : 1,
3531 : 10,
3532 : Duration::from_secs(5),
3533 : &self.cancel,
3534 : )
3535 : .await
3536 : {
3537 : Some(Ok(_)) => {}
3538 : Some(Err(e)) => {
3539 : // We failed to communicate with the remote node. This is problematic: we may be
3540 : // leaving it with a rogue child shard.
3541 : tracing::warn!(
3542 : "Failed to detach child {child_id} from node {node} during abort"
3543 : );
3544 : return Err(e.into());
3545 : }
3546 : None => {
3547 : // Cancellation: we were shutdown or the node went offline. Shutdown is fine, we'll
3548 : // clean up on restart. The node going offline requires a retry.
3549 : return Err(TenantShardSplitAbortError::Unavailable);
3550 : }
3551 : };
3552 : }
3553 :
3554 : tracing::info!("Successfully aborted split");
3555 : Ok(())
3556 : }
3557 :
3558 : /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
3559 : /// of the tenant map to reflect the child shards that exist after the split.
3560 0 : fn tenant_shard_split_commit_inmem(
3561 0 : &self,
3562 0 : tenant_id: TenantId,
3563 0 : new_shard_count: ShardCount,
3564 0 : new_stripe_size: Option<ShardStripeSize>,
3565 0 : ) -> (
3566 0 : TenantShardSplitResponse,
3567 0 : Vec<(TenantShardId, NodeId, ShardStripeSize)>,
3568 0 : Vec<ReconcilerWaiter>,
3569 0 : ) {
3570 0 : let mut response = TenantShardSplitResponse {
3571 0 : new_shards: Vec::new(),
3572 0 : };
3573 0 : let mut child_locations = Vec::new();
3574 0 : let mut waiters = Vec::new();
3575 0 :
3576 0 : {
3577 0 : let mut locked = self.inner.write().unwrap();
3578 0 :
3579 0 : let parent_ids = locked
3580 0 : .tenants
3581 0 : .range(TenantShardId::tenant_range(tenant_id))
3582 0 : .map(|(shard_id, _)| *shard_id)
3583 0 : .collect::<Vec<_>>();
3584 0 :
3585 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3586 0 : for parent_id in parent_ids {
3587 0 : let child_ids = parent_id.split(new_shard_count);
3588 :
3589 0 : let (pageserver, generation, policy, parent_ident, config) = {
3590 0 : let mut old_state = tenants
3591 0 : .remove(&parent_id)
3592 0 : .expect("It was present, we just split it");
3593 0 :
3594 0 : // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
3595 0 : // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
3596 0 : // nothing else can clear this.
3597 0 : assert!(matches!(old_state.splitting, SplitState::Splitting));
3598 :
3599 0 : let old_attached = old_state.intent.get_attached().unwrap();
3600 0 : old_state.intent.clear(scheduler);
3601 0 : let generation = old_state.generation.expect("Shard must have been attached");
3602 0 : (
3603 0 : old_attached,
3604 0 : generation,
3605 0 : old_state.policy,
3606 0 : old_state.shard,
3607 0 : old_state.config,
3608 0 : )
3609 0 : };
3610 0 :
3611 0 : let mut schedule_context = ScheduleContext::default();
3612 0 : for child in child_ids {
3613 0 : let mut child_shard = parent_ident;
3614 0 : child_shard.number = child.shard_number;
3615 0 : child_shard.count = child.shard_count;
3616 0 : if let Some(stripe_size) = new_stripe_size {
3617 0 : child_shard.stripe_size = stripe_size;
3618 0 : }
3619 :
3620 0 : let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
3621 0 : child_observed.insert(
3622 0 : pageserver,
3623 0 : ObservedStateLocation {
3624 0 : conf: Some(attached_location_conf(
3625 0 : generation,
3626 0 : &child_shard,
3627 0 : &config,
3628 0 : &policy,
3629 0 : )),
3630 0 : },
3631 0 : );
3632 0 :
3633 0 : let mut child_state = TenantShard::new(child, child_shard, policy.clone());
3634 0 : child_state.intent = IntentState::single(scheduler, Some(pageserver));
3635 0 : child_state.observed = ObservedState {
3636 0 : locations: child_observed,
3637 0 : };
3638 0 : child_state.generation = Some(generation);
3639 0 : child_state.config = config.clone();
3640 0 :
3641 0 : // The child's TenantShard::splitting is intentionally left at the default value of Idle,
3642 0 : // as at this point in the split process we have succeeded and this part is infallible:
3643 0 : // we will never need to do any special recovery from this state.
3644 0 :
3645 0 : child_locations.push((child, pageserver, child_shard.stripe_size));
3646 :
3647 0 : if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
3648 : // This is not fatal, because we've implicitly already got an attached
3649 : // location for the child shard. Failure here just means we couldn't
3650 : // find a secondary (e.g. because cluster is overloaded).
3651 0 : tracing::warn!("Failed to schedule child shard {child}: {e}");
3652 0 : }
3653 : // In the background, attach secondary locations for the new shards
3654 0 : if let Some(waiter) = self.maybe_reconcile_shard(&mut child_state, nodes) {
3655 0 : waiters.push(waiter);
3656 0 : }
3657 :
3658 0 : tenants.insert(child, child_state);
3659 0 : response.new_shards.push(child);
3660 : }
3661 : }
3662 0 : (response, child_locations, waiters)
3663 0 : }
3664 0 : }
3665 :
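Each child's identity above is derived by copying the parent's `ShardIdentity` and overriding just the number, count and (optionally) stripe size. A toy sketch of that derivation, with `Ident` standing in for the real type:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Ident {
    number: u8,
    count: u8,
    stripe_size: u32,
}

// Copy the parent's identity, then override number, count and (optionally) stripe size.
fn child_ident(parent: Ident, number: u8, count: u8, new_stripe: Option<u32>) -> Ident {
    let mut child = parent;
    child.number = number;
    child.count = count;
    if let Some(stripe) = new_stripe {
        child.stripe_size = stripe;
    }
    child
}

fn main() {
    let parent = Ident { number: 0, count: 1, stripe_size: 256 };
    let child = child_ident(parent, 2, 4, None);
    assert_eq!(child, Ident { number: 2, count: 4, stripe_size: 256 });
}
```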
3666 0 : async fn tenant_shard_split_start_secondaries(
3667 0 : &self,
3668 0 : tenant_id: TenantId,
3669 0 : waiters: Vec<ReconcilerWaiter>,
3670 0 : ) {
3671 : // Wait for initial reconcile of child shards, this creates the secondary locations
3672 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
3673 : // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
3674 : // their secondaries couldn't be attached.
3675 0 : tracing::warn!("Failed to reconcile after split: {e}");
3676 0 : return;
3677 0 : }
3678 :
3679 : // Take the state lock to discover the attached & secondary intents for all shards
3680 0 : let (attached, secondary) = {
3681 0 : let locked = self.inner.read().unwrap();
3682 0 : let mut attached = Vec::new();
3683 0 : let mut secondary = Vec::new();
3684 :
3685 0 : for (tenant_shard_id, shard) in
3686 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3687 : {
3688 0 : let Some(node_id) = shard.intent.get_attached() else {
3689 : // Unexpected. Race with a PlacementPolicy change?
3690 0 : tracing::warn!(
3691 0 : "No attached node on {tenant_shard_id} immediately after shard split!"
3692 : );
3693 0 : continue;
3694 : };
3695 :
3696 0 : let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
3697 : // No secondary location. Nothing for us to do.
3698 0 : continue;
3699 : };
3700 :
3701 0 : let attached_node = locked
3702 0 : .nodes
3703 0 : .get(node_id)
3704 0 : .expect("Pageservers may not be deleted while referenced");
3705 0 :
3706 0 : let secondary_node = locked
3707 0 : .nodes
3708 0 : .get(secondary_node_id)
3709 0 : .expect("Pageservers may not be deleted while referenced");
3710 0 :
3711 0 : attached.push((*tenant_shard_id, attached_node.clone()));
3712 0 : secondary.push((*tenant_shard_id, secondary_node.clone()));
3713 : }
3714 0 : (attached, secondary)
3715 0 : };
3716 0 :
3717 0 : if secondary.is_empty() {
3718 : // No secondary locations; nothing for us to do
3719 0 : return;
3720 0 : }
3721 :
3722 0 : for result in self
3723 0 : .tenant_for_shards_api(
3724 0 : attached,
3725 0 : |tenant_shard_id, client| async move {
3726 0 : client.tenant_heatmap_upload(tenant_shard_id).await
3727 0 : },
3728 0 : 1,
3729 0 : 1,
3730 0 : SHORT_RECONCILE_TIMEOUT,
3731 0 : &self.cancel,
3732 0 : )
3733 0 : .await
3734 : {
3735 0 : if let Err(e) = result {
3736 0 : tracing::warn!("Error calling heatmap upload after shard split: {e}");
3737 0 : return;
3738 0 : }
3739 : }
3740 :
3741 0 : for result in self
3742 0 : .tenant_for_shards_api(
3743 0 : secondary,
3744 0 : |tenant_shard_id, client| async move {
3745 0 : client
3746 0 : .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
3747 0 : .await
3748 0 : },
3749 0 : 1,
3750 0 : 1,
3751 0 : SHORT_RECONCILE_TIMEOUT,
3752 0 : &self.cancel,
3753 0 : )
3754 0 : .await
3755 : {
3756 0 : if let Err(e) = result {
3757 0 : tracing::warn!("Error calling secondary download after shard split: {e}");
3758 0 : return;
3759 0 : }
3760 : }
3761 0 : }
3762 :
3763 0 : pub(crate) async fn tenant_shard_split(
3764 0 : &self,
3765 0 : tenant_id: TenantId,
3766 0 : split_req: TenantShardSplitRequest,
3767 0 : ) -> Result<TenantShardSplitResponse, ApiError> {
3768 : // TODO: return 503 if we get stuck waiting for this lock
3769 : // (issue https://github.com/neondatabase/neon/issues/7108)
3770 0 : let _tenant_lock = trace_exclusive_lock(
3771 0 : &self.tenant_op_locks,
3772 0 : tenant_id,
3773 0 : TenantOperations::ShardSplit,
3774 0 : )
3775 0 : .await;
3776 :
3777 0 : let new_shard_count = ShardCount::new(split_req.new_shard_count);
3778 0 : let new_stripe_size = split_req.new_stripe_size;
3779 :
3780 : // Validate the request and construct parameters. This phase is fallible, but does not require
3781 : // rollback on errors, as it does no I/O and mutates no state.
3782 0 : let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
3783 0 : ShardSplitAction::NoOp(resp) => return Ok(resp),
3784 0 : ShardSplitAction::Split(params) => params,
3785 : };
3786 :
3787 : // Execute this split: this phase mutates state and does remote I/O on pageservers. If it fails,
3788 : // we must roll back.
3789 0 : let r = self
3790 0 : .do_tenant_shard_split(tenant_id, shard_split_params)
3791 0 : .await;
3792 :
3793 0 : let (response, waiters) = match r {
3794 0 : Ok(r) => r,
3795 0 : Err(e) => {
3796 0 : // Split might be part-done, we must do work to abort it.
3797 0 : tracing::warn!("Enqueuing background abort of split on {tenant_id}");
3798 0 : self.abort_tx
3799 0 : .send(TenantShardSplitAbort {
3800 0 : tenant_id,
3801 0 : new_shard_count,
3802 0 : new_stripe_size,
3803 0 : _tenant_lock,
3804 0 : })
3805 0 : // Ignore error sending: that just means we're shutting down: aborts are ephemeral so it's fine to drop it.
3806 0 : .ok();
3807 0 : return Err(e);
3808 : }
3809 : };
3810 :
3811 : // The split is now complete. As an optimization, we will trigger all the child shards to upload
3812 : // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
3813 : // for the background heatmap/download interval before secondaries get warm enough to migrate shards
3814 : // in [`Self::optimize_all`]
3815 0 : self.tenant_shard_split_start_secondaries(tenant_id, waiters)
3816 0 : .await;
3817 0 : Ok(response)
3818 0 : }
3819 :
3820 0 : fn prepare_tenant_shard_split(
3821 0 : &self,
3822 0 : tenant_id: TenantId,
3823 0 : split_req: TenantShardSplitRequest,
3824 0 : ) -> Result<ShardSplitAction, ApiError> {
3825 0 : fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
3826 0 : anyhow::anyhow!("failpoint")
3827 0 : )));
3828 :
3829 0 : let mut policy = None;
3830 0 : let mut config = None;
3831 0 : let mut shard_ident = None;
3832 : // Validate input, and calculate which shards we will create
3833 0 : let (old_shard_count, targets) =
3834 : {
3835 0 : let locked = self.inner.read().unwrap();
3836 0 :
3837 0 : let pageservers = locked.nodes.clone();
3838 0 :
3839 0 : let mut targets = Vec::new();
3840 0 :
3841 0 : // In case this is a retry, count how many already-split shards we found
3842 0 : let mut children_found = Vec::new();
3843 0 : let mut old_shard_count = None;
3844 :
3845 0 : for (tenant_shard_id, shard) in
3846 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3847 : {
3848 0 : match shard.shard.count.count().cmp(&split_req.new_shard_count) {
3849 : Ordering::Equal => {
3850 : // Already split this
3851 0 : children_found.push(*tenant_shard_id);
3852 0 : continue;
3853 : }
3854 : Ordering::Greater => {
3855 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3856 0 : "Requested count {} but already have shards at count {}",
3857 0 : split_req.new_shard_count,
3858 0 : shard.shard.count.count()
3859 0 : )));
3860 : }
3861 0 : Ordering::Less => {
3862 0 : // Fall through: this shard has lower count than requested,
3863 0 : // is a candidate for splitting.
3864 0 : }
3865 0 : }
3866 0 :
3867 0 : match old_shard_count {
3868 0 : None => old_shard_count = Some(shard.shard.count),
3869 0 : Some(old_shard_count) => {
3870 0 : if old_shard_count != shard.shard.count {
3871 : // We may hit this case if a caller asked for two splits to
3872 : // different sizes, before the first one is complete.
3873 : // e.g. 1->2, 2->4, where the 2->4 call arrives while we have a mixture
3874 : // of shard_count=1 and shard_count=2 shards in the map.
3875 0 : return Err(ApiError::Conflict(
3876 0 : "Cannot split, currently mid-split".to_string(),
3877 0 : ));
3878 0 : }
3879 : }
3880 : }
3881 0 : if policy.is_none() {
3882 0 : policy = Some(shard.policy.clone());
3883 0 : }
3884 0 : if shard_ident.is_none() {
3885 0 : shard_ident = Some(shard.shard);
3886 0 : }
3887 0 : if config.is_none() {
3888 0 : config = Some(shard.config.clone());
3889 0 : }
3890 :
3891 0 : if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
3892 0 : tracing::info!(
3893 0 : "Tenant shard {} already has shard count {}",
3894 : tenant_shard_id,
3895 : split_req.new_shard_count
3896 : );
3897 0 : continue;
3898 0 : }
3899 :
3900 0 : let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
3901 0 : anyhow::anyhow!("Cannot split a tenant that is not attached"),
3902 0 : ))?;
3903 :
3904 0 : let node = pageservers
3905 0 : .get(&node_id)
3906 0 : .expect("Pageservers may not be deleted while referenced");
3907 0 :
3908 0 : targets.push(ShardSplitTarget {
3909 0 : parent_id: *tenant_shard_id,
3910 0 : node: node.clone(),
3911 0 : child_ids: tenant_shard_id
3912 0 : .split(ShardCount::new(split_req.new_shard_count)),
3913 0 : });
3914 : }
3915 :
3916 0 : if targets.is_empty() {
3917 0 : if children_found.len() == split_req.new_shard_count as usize {
3918 0 : return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
3919 0 : new_shards: children_found,
3920 0 : }));
3921 : } else {
3922 : // No shards found to split, and no existing children found: the
3923 : // tenant doesn't exist at all.
3924 0 : return Err(ApiError::NotFound(
3925 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
3926 0 : ));
3927 : }
3928 0 : }
3929 0 :
3930 0 : (old_shard_count, targets)
3931 0 : };
3932 0 :
3933 0 : // unwrap safety: we would have returned above if we didn't find at least one shard to split
3934 0 : let old_shard_count = old_shard_count.unwrap();
3935 0 : let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
3936 : // This ShardIdentity will be used as the template for all children, so this implicitly
3937 : // applies the new stripe size to the children.
3938 0 : let mut shard_ident = shard_ident.unwrap();
3939 0 : if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
3940 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards", shard_ident.stripe_size)));
3941 0 : }
3942 0 :
3943 0 : shard_ident.stripe_size = new_stripe_size;
3944 0 : tracing::info!("applied stripe size {}", shard_ident.stripe_size.0);
3945 0 : shard_ident
3946 : } else {
3947 0 : shard_ident.unwrap()
3948 : };
3949 0 : let policy = policy.unwrap();
3950 0 : let config = config.unwrap();
3951 0 :
3952 0 : Ok(ShardSplitAction::Split(ShardSplitParams {
3953 0 : old_shard_count,
3954 0 : new_shard_count: ShardCount::new(split_req.new_shard_count),
3955 0 : new_stripe_size: split_req.new_stripe_size,
3956 0 : targets,
3957 0 : policy,
3958 0 : config,
3959 0 : shard_ident,
3960 0 : }))
3961 0 : }
3962 :
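The validation loop above classifies each existing shard by comparing its count against the requested one. A compact sketch of that three-way classification (counts simplified to `u8`):

```rust
use std::cmp::Ordering;

enum SplitCheck {
    AlreadySplit, // equal count: a child from a previous (possibly interrupted) split
    Candidate,    // lower count: this shard should be split
    Invalid,      // higher count: the request is going backwards
}

fn check_shard(existing_count: u8, requested_count: u8) -> SplitCheck {
    match existing_count.cmp(&requested_count) {
        Ordering::Equal => SplitCheck::AlreadySplit,
        Ordering::Greater => SplitCheck::Invalid,
        Ordering::Less => SplitCheck::Candidate,
    }
}

fn main() {
    assert!(matches!(check_shard(1, 4), SplitCheck::Candidate));
    assert!(matches!(check_shard(4, 4), SplitCheck::AlreadySplit));
    assert!(matches!(check_shard(8, 4), SplitCheck::Invalid));
}
```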
3963 0 : async fn do_tenant_shard_split(
3964 0 : &self,
3965 0 : tenant_id: TenantId,
3966 0 : params: ShardSplitParams,
3967 0 : ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
3968 0 : // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
3969 0 : // request could occur here, deleting or mutating the tenant. begin_shard_split checks that the
3970 0 : // parent shards exist as expected, but it would be neater to do the above pre-checks within the
3971 0 : // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
3972 0 : // (https://github.com/neondatabase/neon/issues/6676)
3973 0 :
3974 0 : let ShardSplitParams {
3975 0 : old_shard_count,
3976 0 : new_shard_count,
3977 0 : new_stripe_size,
3978 0 : mut targets,
3979 0 : policy,
3980 0 : config,
3981 0 : shard_ident,
3982 0 : } = params;
3983 :
3984 : // Drop any secondary locations: pageservers do not support splitting these, and in any case the
3985 : // end-state for a split tenant will usually be to have secondary locations on different nodes.
3986 : // The reconciliation calls in this block also implicitly cancel+barrier with respect to any ongoing reconciliation
3987 : // at the time of split.
3988 0 : let waiters = {
3989 0 : let mut locked = self.inner.write().unwrap();
3990 0 : let mut waiters = Vec::new();
3991 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3992 0 : for target in &mut targets {
3993 0 : let Some(shard) = tenants.get_mut(&target.parent_id) else {
3994 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
3995 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3996 0 : "Shard {} not found",
3997 0 : target.parent_id
3998 0 : )));
3999 : };
4000 :
4001 0 : if shard.intent.get_attached() != &Some(target.node.get_id()) {
4002 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
4003 0 : return Err(ApiError::Conflict(format!(
4004 0 : "Shard {} unexpectedly rescheduled during split",
4005 0 : target.parent_id
4006 0 : )));
4007 0 : }
4008 0 :
4009 0 : // Irrespective of PlacementPolicy, clear secondary locations from intent
4010 0 : shard.intent.clear_secondary(scheduler);
4011 :
4012 : // Run Reconciler to execute the detach of secondary locations.
4013 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
4014 0 : waiters.push(waiter);
4015 0 : }
4016 : }
4017 0 : waiters
4018 0 : };
4019 0 : self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
4020 :
4021 : // Before creating any new child shards in memory or on the pageservers, persist them: this
4022 : // enables us to ensure that we will always be able to clean up if something goes wrong. This also
4023 : // acts as the protection against two concurrent attempts to split: one of them will get a database
4024 : // error trying to insert the child shards.
4025 0 : let mut child_tsps = Vec::new();
4026 0 : for target in &targets {
4027 0 : let mut this_child_tsps = Vec::new();
4028 0 : for child in &target.child_ids {
4029 0 : let mut child_shard = shard_ident;
4030 0 : child_shard.number = child.shard_number;
4031 0 : child_shard.count = child.shard_count;
4032 0 :
4033 0 : tracing::info!(
4034 0 : "Create child shard persistence with stripe size {}",
4035 : shard_ident.stripe_size.0
4036 : );
4037 :
4038 0 : this_child_tsps.push(TenantShardPersistence {
4039 0 : tenant_id: child.tenant_id.to_string(),
4040 0 : shard_number: child.shard_number.0 as i32,
4041 0 : shard_count: child.shard_count.literal() as i32,
4042 0 : shard_stripe_size: shard_ident.stripe_size.0 as i32,
4043 0 : // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
4044 0 : // populate the correct generation as part of its transaction, to protect us
4045 0 : // against racing with changes in the state of the parent.
4046 0 : generation: None,
4047 0 : generation_pageserver: Some(target.node.get_id().0 as i64),
4048 0 : placement_policy: serde_json::to_string(&policy).unwrap(),
4049 0 : config: serde_json::to_string(&config).unwrap(),
4050 0 : splitting: SplitState::Splitting,
4051 0 :
4052 0 : // Scheduling policies do not carry through to children
4053 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
4054 0 : .unwrap(),
4055 0 : });
4056 : }
4057 :
4058 0 : child_tsps.push((target.parent_id, this_child_tsps));
4059 : }
4060 :
4061 0 : if let Err(e) = self
4062 0 : .persistence
4063 0 : .begin_shard_split(old_shard_count, tenant_id, child_tsps)
4064 0 : .await
4065 : {
4066 0 : match e {
4067 : DatabaseError::Query(diesel::result::Error::DatabaseError(
4068 : DatabaseErrorKind::UniqueViolation,
4069 : _,
4070 : )) => {
4071 : // Inserting a child shard violated a unique constraint: we raced with another call to
4072 : // this function
4073 0 : tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
4074 0 : return Err(ApiError::Conflict("Tenant is already splitting".into()));
4075 : }
4076 0 : _ => return Err(ApiError::InternalServerError(e.into())),
4077 : }
4078 0 : }
4079 0 : fail::fail_point!("shard-split-post-begin", |_| Err(
4080 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
4081 0 : ));
4082 :
4083 : // Now that I have persisted the splitting state, apply it in-memory. This is infallible, so
4084 : // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
4085 : // is not set in memory, then it was not persisted.
4086 : {
4087 0 : let mut locked = self.inner.write().unwrap();
4088 0 : for target in &targets {
4089 0 : if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
4090 0 : parent_shard.splitting = SplitState::Splitting;
4091 0 : // Put the observed state to None, to reflect that it is indeterminate once we start the
4092 0 : // split operation.
4093 0 : parent_shard
4094 0 : .observed
4095 0 : .locations
4096 0 : .insert(target.node.get_id(), ObservedStateLocation { conf: None });
4097 0 : }
4098 : }
4099 : }
4100 :
4101 : // TODO: issue split calls concurrently (this only matters once we're splitting
4102 : // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
4103 :
4104 0 : for target in &targets {
4105 : let ShardSplitTarget {
4106 0 : parent_id,
4107 0 : node,
4108 0 : child_ids,
4109 0 : } = target;
4110 0 : let client = PageserverClient::new(
4111 0 : node.get_id(),
4112 0 : node.base_url(),
4113 0 : self.config.jwt_token.as_deref(),
4114 0 : );
4115 0 : let response = client
4116 0 : .tenant_shard_split(
4117 0 : *parent_id,
4118 0 : TenantShardSplitRequest {
4119 0 : new_shard_count: new_shard_count.literal(),
4120 0 : new_stripe_size,
4121 0 : },
4122 0 : )
4123 0 : .await
4124 0 : .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
4125 :
4126 0 : fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
4127 0 : "failpoint".to_string()
4128 0 : )));
4129 :
4130 0 : failpoint_support::sleep_millis_async!("shard-split-post-remote-sleep", &self.cancel);
4131 :
4132 0 : tracing::info!(
4133 0 : "Split {} into {}",
4134 0 : parent_id,
4135 0 : response
4136 0 : .new_shards
4137 0 : .iter()
4138 0 : .map(|s| format!("{:?}", s))
4139 0 : .collect::<Vec<_>>()
4140 0 : .join(",")
4141 : );
4142 :
4143 0 : if &response.new_shards != child_ids {
4144 : // This should never happen: the pageserver should agree with us on how shard splits work.
4145 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4146 0 : "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
4147 0 : parent_id,
4148 0 : response.new_shards,
4149 0 : child_ids
4150 0 : )));
4151 0 : }
4152 : }
4153 :
4154 : // TODO: if the pageserver restarted concurrently with our split API call,
4155 : // the actual generation of the child shard might differ from the generation
4156 : // we expect it to have. In order for our in-database generation to end up
4157 : // correct, we should carry the child generation back in the response and apply it here
4158 : // in complete_shard_split (and apply the correct generation in memory)
4159 : // (or, we can carry generation in the request and reject the request if
4160 : // it doesn't match, but that requires more retry logic on this side)
4161 :
4162 0 : self.persistence
4163 0 : .complete_shard_split(tenant_id, old_shard_count)
4164 0 : .await?;
4165 :
4166 0 : fail::fail_point!("shard-split-post-complete", |_| Err(
4167 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
4168 0 : ));
4169 :
4170 : // Replace all the shards we just split with their children: this phase is infallible.
4171 0 : let (response, child_locations, waiters) =
4172 0 : self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
4173 0 :
4174 0 : // Send compute notifications for all the new shards
4175 0 : let mut failed_notifications = Vec::new();
4176 0 : for (child_id, child_ps, stripe_size) in child_locations {
4177 0 : if let Err(e) = self
4178 0 : .compute_hook
4179 0 : .notify(child_id, child_ps, stripe_size, &self.cancel)
4180 0 : .await
4181 : {
4182 0 : tracing::warn!("Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
4183 : child_id, child_ps);
4184 0 : failed_notifications.push(child_id);
4185 0 : }
4186 : }
4187 :
4188 : // If we failed any compute notifications, make a note to retry later.
4189 0 : if !failed_notifications.is_empty() {
4190 0 : let mut locked = self.inner.write().unwrap();
4191 0 : for failed in failed_notifications {
4192 0 : if let Some(shard) = locked.tenants.get_mut(&failed) {
4193 0 : shard.pending_compute_notification = true;
4194 0 : }
4195 : }
4196 0 : }
4197 :
4198 0 : Ok((response, waiters))
4199 0 : }
4200 :
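The persist-before-act step above doubles as a concurrency guard: inserting child shards trips a unique constraint if another split races us, and that database error is surfaced as HTTP 409. A hedged sketch of the mapping, with `DbError` and `HttpError` as illustrative stand-ins for the diesel and `ApiError` types:

```rust
#[derive(Debug)]
enum DbError {
    UniqueViolation,
    Other(String),
}

#[derive(Debug)]
enum HttpError {
    Conflict(String),
    Internal(String),
}

fn map_split_insert_error(e: DbError) -> HttpError {
    match e {
        // Raced with another split attempt: the constraint is the concurrency guard.
        DbError::UniqueViolation => HttpError::Conflict("Tenant is already splitting".into()),
        DbError::Other(msg) => HttpError::Internal(msg),
    }
}

fn main() {
    let mapped = map_split_insert_error(DbError::UniqueViolation);
    assert!(matches!(mapped, HttpError::Conflict(_)));
}
```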
4201 0 : pub(crate) async fn tenant_shard_migrate(
4202 0 : &self,
4203 0 : tenant_shard_id: TenantShardId,
4204 0 : migrate_req: TenantShardMigrateRequest,
4205 0 : ) -> Result<TenantShardMigrateResponse, ApiError> {
4206 0 : let waiter = {
4207 0 : let mut locked = self.inner.write().unwrap();
4208 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4209 :
4210 0 : let Some(node) = nodes.get(&migrate_req.node_id) else {
4211 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
4212 0 : "Node {} not found",
4213 0 : migrate_req.node_id
4214 0 : )));
4215 : };
4216 :
4217 0 : if !node.is_available() {
4218 : // Warn but proceed: the caller may intend to manually adjust the placement of
4219 : // a shard even if the node is down, e.g. if intervening during an incident.
4220 0 : tracing::warn!("Migrating to unavailable node {node}");
4221 0 : }
4222 :
4223 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
4224 0 : return Err(ApiError::NotFound(
4225 0 : anyhow::anyhow!("Tenant shard not found").into(),
4226 0 : ));
4227 : };
4228 :
4229 0 : if shard.intent.get_attached() == &Some(migrate_req.node_id) {
4230 : // No-op case: we will still proceed to wait for reconciliation in case it is
4231 : // incomplete from an earlier update to the intent.
4232 0 : tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
4233 : } else {
4234 0 : let old_attached = *shard.intent.get_attached();
4235 0 :
4236 0 : match shard.policy {
4237 0 : PlacementPolicy::Attached(n) => {
4238 0 : // If our new attached node was a secondary, it no longer should be.
4239 0 : shard.intent.remove_secondary(scheduler, migrate_req.node_id);
4240 :
4241 : // If we were already attached to something, demote that to a secondary
4242 0 : if let Some(old_attached) = old_attached {
4243 0 : if n > 0 {
4244 : // Remove other secondaries to make room for the location we'll demote
4245 0 : while shard.intent.get_secondary().len() >= n {
4246 0 : shard.intent.pop_secondary(scheduler);
4247 0 : }
4248 :
4249 0 : shard.intent.push_secondary(scheduler, old_attached);
4250 0 : }
4251 0 : }
4252 :
4253 0 : shard.intent.set_attached(scheduler, Some(migrate_req.node_id));
4254 : }
4255 0 : PlacementPolicy::Secondary => {
4256 0 : shard.intent.clear(scheduler);
4257 0 : shard.intent.push_secondary(scheduler, migrate_req.node_id);
4258 0 : }
4259 : PlacementPolicy::Detached => {
4260 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
4261 0 : "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
4262 0 : )))
4263 : }
4264 : }
4265 :
4266 0 : tracing::info!("Migrating: new intent {:?}", shard.intent);
4267 0 : shard.sequence = shard.sequence.next();
4268 : }
4269 :
4270 0 : self.maybe_reconcile_shard(shard, nodes)
4271 : };
4272 :
4273 0 : if let Some(waiter) = waiter {
4274 0 : waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
4275 : } else {
4276 0 : tracing::info!("Migration is a no-op");
4277 : }
4278 :
4279 0 : Ok(TenantShardMigrateResponse {})
4280 0 : }
4281 :
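For `PlacementPolicy::Attached(n)`, the intent update above follows a fixed recipe: the destination stops being a secondary, the old attached location is demoted to a secondary (evicting extras so at most `n` remain), and the attachment moves. A toy sketch with a simplified `Intent` (no scheduler bookkeeping, and the no-op case is omitted):

```rust
#[derive(Debug, PartialEq)]
struct Intent {
    attached: Option<u64>,
    secondary: Vec<u64>,
}

fn migrate(intent: &mut Intent, new_node: u64, n_secondaries: usize) {
    // If the destination was a secondary, it no longer should be.
    intent.secondary.retain(|s| *s != new_node);
    // Demote the old attached location, making room for it first.
    if let Some(old) = intent.attached {
        if n_secondaries > 0 {
            while intent.secondary.len() >= n_secondaries {
                intent.secondary.pop();
            }
            intent.secondary.push(old);
        }
    }
    intent.attached = Some(new_node);
}

fn main() {
    let mut intent = Intent { attached: Some(1), secondary: vec![2] };
    migrate(&mut intent, 2, 1);
    assert_eq!(intent.attached, Some(2));
    assert_eq!(intent.secondary, vec![1]); // old attachment demoted
}
```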
4282 : /// This is for debug/support only: we simply drop all state for a tenant, without
4283 : /// detaching or deleting it on pageservers.
4284 0 : pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
4285 0 : self.persistence.delete_tenant(tenant_id).await?;
4286 :
4287 0 : let mut locked = self.inner.write().unwrap();
4288 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
4289 0 : let mut shards = Vec::new();
4290 0 : for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
4291 0 : shards.push(*tenant_shard_id);
4292 0 : }
4293 :
4294 0 : for shard_id in shards {
4295 0 : if let Some(mut shard) = tenants.remove(&shard_id) {
4296 0 : shard.intent.clear(scheduler);
4297 0 : }
4298 : }
4299 :
4300 0 : Ok(())
4301 0 : }
4302 :
4303 : /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
4304 : /// tenant with a generation high enough (the maximum seen in remote storage) that it will see the existing data.
4305 0 : pub(crate) async fn tenant_import(
4306 0 : &self,
4307 0 : tenant_id: TenantId,
4308 0 : ) -> Result<TenantCreateResponse, ApiError> {
4309 0 : // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
4310 0 : let maybe_node = {
4311 0 : self.inner
4312 0 : .read()
4313 0 : .unwrap()
4314 0 : .nodes
4315 0 : .values()
4316 0 : .find(|n| n.is_available())
4317 0 : .cloned()
4318 : };
4319 0 : let Some(node) = maybe_node else {
4320 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
4321 : };
4322 :
4323 0 : let client = PageserverClient::new(
4324 0 : node.get_id(),
4325 0 : node.base_url(),
4326 0 : self.config.jwt_token.as_deref(),
4327 0 : );
4328 :
4329 0 : let scan_result = client
4330 0 : .tenant_scan_remote_storage(tenant_id)
4331 0 : .await
4332 0 : .map_err(|e| passthrough_api_error(&node, e))?;
4333 :
4334 : // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
4335 0 : let Some(shard_count) = scan_result
4336 0 : .shards
4337 0 : .iter()
4338 0 : .map(|s| s.tenant_shard_id.shard_count)
4339 0 : .max()
4340 : else {
4341 0 : return Err(ApiError::NotFound(
4342 0 : anyhow::anyhow!("No shards found").into(),
4343 0 : ));
4344 : };
4345 :
4346 : // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
4347 : // to use the highest generation found among the scanned shards.
4348 0 : let generation = scan_result
4349 0 : .shards
4350 0 : .iter()
4351 0 : .map(|s| s.generation)
4352 0 : .max()
4353 0 : .expect("We already validated >0 shards");
4354 0 :
4355 0 : // FIXME: we have no way to recover the shard stripe size from contents of remote storage: this will
4356 0 : // only work if they were using the default stripe size.
4357 0 : let stripe_size = ShardParameters::DEFAULT_STRIPE_SIZE;
4358 :
4359 0 : let (response, waiters) = self
4360 0 : .do_tenant_create(TenantCreateRequest {
4361 0 : new_tenant_id: TenantShardId::unsharded(tenant_id),
4362 0 : generation,
4363 0 :
4364 0 : shard_parameters: ShardParameters {
4365 0 : count: shard_count,
4366 0 : stripe_size,
4367 0 : },
4368 0 : placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
4369 0 :
4370 0 : // There is no way to know what the tenant's config was: revert to defaults
4371 0 : //
4372 0 : // TODO: remove `switch_aux_file_policy` once we finish auxv2 migration
4373 0 : //
4374 0 : // we write to both v1+v2 storage, so that the test case can use either storage format for testing
4375 0 : config: TenantConfig {
4376 0 : switch_aux_file_policy: Some(models::AuxFilePolicy::CrossValidation),
4377 0 : ..TenantConfig::default()
4378 0 : },
4379 0 : })
4380 0 : .await?;
4381 :
4382 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
4383 : // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
4384 : // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
4385 : // reconcile, as reconciliation includes notifying compute.
4386 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
4387 0 : }
4388 :
4389 0 : Ok(response)
4390 0 : }
4391 :
4392 : /// For debug/support: a full JSON dump of TenantShards. Returns a response so that
4393 : /// we don't have to make TenantShard clonable in the return path.
4394 0 : pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
4395 0 : let serialized = {
4396 0 : let locked = self.inner.read().unwrap();
4397 0 : let result = locked.tenants.values().collect::<Vec<_>>();
4398 0 : serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
4399 : };
4400 :
4401 0 : hyper::Response::builder()
4402 0 : .status(hyper::StatusCode::OK)
4403 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
4404 0 : .body(hyper::Body::from(serialized))
4405 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
4406 0 : }
4407 :
4408 : /// Check the consistency of in-memory state vs. persistent state, and check that the
4409 : /// scheduler's statistics are up to date.
4410 : ///
4411 : /// These consistency checks expect an **idle** system. If changes are going on while
4412 : /// we run, then we can falsely indicate a consistency issue. This is sufficient for end-of-test
4413 : /// checks, but not suitable for running continuously in the background in the field.
4414 0 : pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
4415 0 : let (mut expect_nodes, mut expect_shards) = {
4416 0 : let locked = self.inner.read().unwrap();
4417 0 :
4418 0 : locked
4419 0 : .scheduler
4420 0 : .consistency_check(locked.nodes.values(), locked.tenants.values())
4421 0 : .context("Scheduler checks")
4422 0 : .map_err(ApiError::InternalServerError)?;
4423 :
4424 0 : let expect_nodes = locked
4425 0 : .nodes
4426 0 : .values()
4427 0 : .map(|n| n.to_persistent())
4428 0 : .collect::<Vec<_>>();
4429 0 :
4430 0 : let expect_shards = locked
4431 0 : .tenants
4432 0 : .values()
4433 0 : .map(|t| t.to_persistent())
4434 0 : .collect::<Vec<_>>();
4435 :
4436 : // This method can only validate the state of an idle system: if a reconcile is in
4437 : // progress, fail out early to avoid giving false errors on state that won't match
4438 : // between database and memory until the ReconcileResult is processed.
4439 0 : for t in locked.tenants.values() {
4440 0 : if t.reconciler.is_some() {
4441 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4442 0 : "Shard {} reconciliation in progress",
4443 0 : t.tenant_shard_id
4444 0 : )));
4445 0 : }
4446 : }
4447 :
4448 0 : (expect_nodes, expect_shards)
4449 : };
4450 :
4451 0 : let mut nodes = self.persistence.list_nodes().await?;
4452 0 : expect_nodes.sort_by_key(|n| n.node_id);
4453 0 : nodes.sort_by_key(|n| n.node_id);
4454 0 :
4455 0 : if nodes != expect_nodes {
4456 0 : tracing::error!("Consistency check failed on nodes.");
4457 0 : tracing::error!(
4458 0 : "Nodes in memory: {}",
4459 0 : serde_json::to_string(&expect_nodes)
4460 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4461 : );
4462 0 : tracing::error!(
4463 0 : "Nodes in database: {}",
4464 0 : serde_json::to_string(&nodes)
4465 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4466 : );
4467 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4468 0 : "Node consistency failure"
4469 0 : )));
4470 0 : }
4471 :
4472 0 : let mut shards = self.persistence.list_tenant_shards().await?;
4473 0 : shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4474 0 : expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4475 0 :
4476 0 : if shards != expect_shards {
4477 0 : tracing::error!("Consistency check failed on shards.");
4478 0 : tracing::error!(
4479 0 : "Shards in memory: {}",
4480 0 : serde_json::to_string(&expect_shards)
4481 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4482 : );
4483 0 : tracing::error!(
4484 0 : "Shards in database: {}",
4485 0 : serde_json::to_string(&shards)
4486 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4487 : );
4488 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4489 0 : "Shard consistency failure"
4490 0 : )));
4491 0 : }
4492 0 :
4493 0 : Ok(())
4494 0 : }
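// Usage sketch (hypothetical, illustration only): end-of-test checks would first
// quiesce reconciliation, e.g. via [`Self::reconcile_all_now`], and only then call
// this, since an idle system is a precondition:
//
//     while service.reconcile_all_now().await? > 0 {}
//     service.consistency_check().await?;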
4495 :
4496 : /// For debug/support: a JSON dump of the [`Scheduler`]. Returns a response so that
4497 : /// we don't have to make TenantShard clonable in the return path.
4498 0 : pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
4499 0 : let serialized = {
4500 0 : let locked = self.inner.read().unwrap();
4501 0 : serde_json::to_string(&locked.scheduler)
4502 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4503 : };
4504 :
4505 0 : hyper::Response::builder()
4506 0 : .status(hyper::StatusCode::OK)
4507 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
4508 0 : .body(hyper::Body::from(serialized))
4509 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
4510 0 : }
4511 :
4512 : /// This is for debug/support only: we simply drop all state for a tenant, without
4513 : /// detaching or deleting it on pageservers. We do not try and re-schedule any
4514 : /// tenants that were on this node.
4515 0 : pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
4516 0 : self.persistence.delete_node(node_id).await?;
4517 :
4518 0 : let mut locked = self.inner.write().unwrap();
4519 :
4520 0 : for shard in locked.tenants.values_mut() {
4521 0 : shard.deref_node(node_id);
4522 0 : shard.observed.locations.remove(&node_id);
4523 0 : }
4524 :
4525 0 : let mut nodes = (*locked.nodes).clone();
4526 0 : nodes.remove(&node_id);
4527 0 : locked.nodes = Arc::new(nodes);
4528 0 :
4529 0 : locked.scheduler.node_remove(node_id);
4530 0 :
4531 0 : Ok(())
4532 0 : }
4533 :
4534 : /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense
4535 : /// that we don't leave any bad state behind in the storage controller, but unclean
4536 : /// in the sense that we are not carefully draining the node.
4537 0 : pub(crate) async fn node_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
4538 0 : let _node_lock =
4539 0 : trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
4540 :
4541 : // 1. Atomically update in-memory state:
4542 : // - set the scheduling state to Pause to make subsequent scheduling ops skip it
4543 : // - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
4544 : // - drop the node from the main nodes map, so that when running reconciles complete they do not
4545 : // re-insert references to this node into the ObservedState of shards
4546 : // - drop the node from the scheduler
4547 : {
4548 0 : let mut locked = self.inner.write().unwrap();
4549 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4550 0 :
4551 0 : {
4552 0 : let mut nodes_mut = (*nodes).deref().clone();
4553 0 : match nodes_mut.get_mut(&node_id) {
4554 0 : Some(node) => {
4555 0 : // We do not bother setting this in the database, because we're about to delete the row anyway, and
4556 0 : // if we crash it would not be desirable to leave the node paused after a restart.
4557 0 : node.set_scheduling(NodeSchedulingPolicy::Pause);
4558 0 : }
4559 : None => {
4560 0 : tracing::info!(
4561 0 : "Node not found: presuming this is a retry and returning success"
4562 : );
4563 0 : return Ok(());
4564 : }
4565 : }
4566 :
4567 0 : *nodes = Arc::new(nodes_mut);
4568 : }
4569 :
4570 0 : for (tenant_shard_id, shard) in tenants {
4571 0 : if shard.deref_node(node_id) {
4572 : // FIXME: we need to build a ScheduleContext that reflects this shard's peers, otherwise
4573 : // it won't properly do anti-affinity.
4574 0 : let mut schedule_context = ScheduleContext::default();
4575 :
4576 0 : if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
4577 : // TODO: implement force flag to remove a node even if we can't reschedule
4578 : // a tenant
4579 0 : tracing::error!("Refusing to delete node, shard {tenant_shard_id} can't be rescheduled: {e}");
4580 0 : return Err(e.into());
4581 : } else {
4582 0 : tracing::info!(
4583 0 : "Rescheduled shard {tenant_shard_id} away from node during deletion"
4584 : )
4585 : }
4586 :
4587 0 : self.maybe_reconcile_shard(shard, nodes);
4588 0 : }
4589 :
4590 : // Here we remove an existing observed location for the node we're removing, and it will
4591 : // not be re-added by a reconciler's completion because we filter out removed nodes in
4592 : // process_result.
4593 : //
4594 : // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
4595 : // means any reconciles we spawned will know about the node we're deleting, enabling them
4596 : // to do live migrations if it's still online.
4597 0 : shard.observed.locations.remove(&node_id);
4598 : }
4599 :
4600 0 : scheduler.node_remove(node_id);
4601 0 :
4602 0 : {
4603 0 : let mut nodes_mut = (**nodes).clone();
4604 0 : nodes_mut.remove(&node_id);
4605 0 : *nodes = Arc::new(nodes_mut);
4606 0 : }
4607 0 : }
4608 0 :
4609 0 : // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
4610 0 : // the removed node, as this column means "The pageserver to which this generation was issued", and
4611 0 : // their generations won't get updated until the reconcilers moving them away from this node complete.
4612 0 : // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
4613 0 : // that exists.
4614 0 :
4615 0 : // 2. Actually delete the node from the database and from in-memory state
4616 0 : tracing::info!("Deleting node from database");
4617 0 : self.persistence.delete_node(node_id).await?;
4618 :
4619 0 : Ok(())
4620 0 : }
4621 :
4622 0 : pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
4623 0 : let nodes = {
4624 0 : self.inner
4625 0 : .read()
4626 0 : .unwrap()
4627 0 : .nodes
4628 0 : .values()
4629 0 : .cloned()
4630 0 : .collect::<Vec<_>>()
4631 0 : };
4632 0 :
4633 0 : Ok(nodes)
4634 0 : }
4635 :
4636 0 : pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
4637 0 : self.inner
4638 0 : .read()
4639 0 : .unwrap()
4640 0 : .nodes
4641 0 : .get(&node_id)
4642 0 : .cloned()
4643 0 : .ok_or(ApiError::NotFound(
4644 0 : format!("Node {node_id} not registered").into(),
4645 0 : ))
4646 0 : }
4647 :
4648 0 : pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
4649 0 : self.persistence.get_leader().await
4650 0 : }
4651 :
4652 0 : pub(crate) async fn node_register(
4653 0 : &self,
4654 0 : register_req: NodeRegisterRequest,
4655 0 : ) -> Result<(), ApiError> {
4656 0 : let _node_lock = trace_exclusive_lock(
4657 0 : &self.node_op_locks,
4658 0 : register_req.node_id,
4659 0 : NodeOperations::Register,
4660 0 : )
4661 0 : .await;
4662 :
4663 : {
4664 0 : if let Some(node) = locked.nodes.get(&register_req.node_id) {
4665 0 : if let Some(node) = locked.nodes.get(®ister_req.node_id) {
4666 : // Note that we do not do a total equality of the struct, because we don't require
4667 : // the availability/scheduling states to agree for a POST to be idempotent.
4668 0 : if node.registration_match(&register_req) {
4669 0 : tracing::info!(
4670 0 : "Node {} re-registered with matching address",
4671 : register_req.node_id
4672 : );
4673 0 : return Ok(());
4674 : } else {
4675 : // TODO: decide if we want to allow modifying node addresses without removing and re-adding
4676 : // the node. Safest/simplest thing is to refuse it, and usually we deploy with
4677 : // a fixed address through the lifetime of a node.
4678 0 : tracing::warn!(
4679 0 : "Node {} tried to register with different address",
4680 : register_req.node_id
4681 : );
4682 0 : return Err(ApiError::Conflict(
4683 0 : "Node is already registered with different address".to_string(),
4684 0 : ));
4685 : }
4686 0 : }
4687 0 : }
4688 0 :
4689 0 : // We do not require that a node is actually online when registered (it will start life
4690 0 : // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
4691 0 : // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
4692 0 : // that register themselves with a broken DNS config. We check only the HTTP hostname, because
4693 0 : // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
4694 0 : if tokio::net::lookup_host(format!(
4695 0 : "{}:{}",
4696 0 : register_req.listen_http_addr, register_req.listen_http_port
4697 0 : ))
4698 0 : .await
4699 0 : .is_err()
4700 : {
4701 : // If we have a transient DNS issue, it's up to the caller to retry their registration. Because
4702 : // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
4703 : // we return a soft 503 error, to encourage callers to retry past transient issues.
4704 0 : return Err(ApiError::ResourceUnavailable(
4705 0 : format!(
4706 0 : "Node {} tried to register with unknown DNS name '{}'",
4707 0 : register_req.node_id, register_req.listen_http_addr
4708 0 : )
4709 0 : .into(),
4710 0 : ));
4711 0 : }
4712 0 :
4713 0 : // Ordering: we must persist the new node _before_ adding it to in-memory state.
4714 0 : // This ensures that before we use it for anything or expose it via any external
4715 0 : // API, it is guaranteed to be available after a restart.
4716 0 : let new_node = Node::new(
4717 0 : register_req.node_id,
4718 0 : register_req.listen_http_addr,
4719 0 : register_req.listen_http_port,
4720 0 : register_req.listen_pg_addr,
4721 0 : register_req.listen_pg_port,
4722 0 : );
4723 0 :
4724 0 : // TODO: idempotency if the node already exists in the database
4725 0 : self.persistence.insert_node(&new_node).await?;
4726 :
4727 0 : let mut locked = self.inner.write().unwrap();
4728 0 : let mut new_nodes = (*locked.nodes).clone();
4729 0 :
4730 0 : locked.scheduler.node_upsert(&new_node);
4731 0 : new_nodes.insert(register_req.node_id, new_node);
4732 0 :
4733 0 : locked.nodes = Arc::new(new_nodes);
4734 0 :
4735 0 : tracing::info!(
4736 0 : "Registered pageserver {}, now have {} pageservers",
4737 0 : register_req.node_id,
4738 0 : locked.nodes.len()
4739 : );
4740 0 : Ok(())
4741 0 : }
4742 :
4743 0 : pub(crate) async fn node_configure(
4744 0 : &self,
4745 0 : node_id: NodeId,
4746 0 : availability: Option<NodeAvailability>,
4747 0 : scheduling: Option<NodeSchedulingPolicy>,
4748 0 : ) -> Result<(), ApiError> {
4749 0 : let _node_lock =
4750 0 : trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
4751 :
4752 0 : if let Some(scheduling) = scheduling {
4753 : // Scheduling is a persistent part of Node: we must write updates to the database before
4754 : // applying them in memory
4755 0 : self.persistence.update_node(node_id, scheduling).await?;
4756 0 : }
4757 :
4758 : // If we're activating a node, then before setting it active we must reconcile any shard locations
4759 : // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
4760 : // by calling [`Self::node_activate_reconcile`]
4761 : //
4762 : // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
4763 : // nothing else can mutate its availability while we run.
4764 0 : let availability_transition = if let Some(input_availability) = availability {
4765 0 : let (activate_node, availability_transition) = {
4766 0 : let locked = self.inner.read().unwrap();
4767 0 : let Some(node) = locked.nodes.get(&node_id) else {
4768 0 : return Err(ApiError::NotFound(
4769 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4770 0 : ));
4771 : };
4772 :
4773 0 : (
4774 0 : node.clone(),
4775 0 : node.get_availability_transition(input_availability),
4776 0 : )
4777 : };
4778 :
4779 0 : if matches!(availability_transition, AvailabilityTransition::ToActive) {
4780 0 : self.node_activate_reconcile(activate_node, &_node_lock)
4781 0 : .await?;
4782 0 : }
4783 0 : availability_transition
4784 : } else {
4785 0 : AvailabilityTransition::Unchanged
4786 : };
4787 :
4788 : // Apply changes from the request to our in-memory state for the Node
4789 0 : let mut locked = self.inner.write().unwrap();
4790 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4791 0 :
4792 0 : let mut new_nodes = (**nodes).clone();
4793 :
4794 0 : let Some(node) = new_nodes.get_mut(&node_id) else {
4795 0 : return Err(ApiError::NotFound(
4796 0 : anyhow::anyhow!("Node not registered").into(),
4797 0 : ));
4798 : };
4799 :
4800 0 : if let Some(availability) = &availability {
4801 0 : node.set_availability(*availability);
4802 0 : }
4803 :
4804 0 : if let Some(scheduling) = scheduling {
4805 0 : node.set_scheduling(scheduling);
4806 0 : }
4807 :
4808 : // Update the scheduler, in case the eligibility of the node for new shards has changed
4809 0 : scheduler.node_upsert(node);
4810 0 :
4811 0 : let new_nodes = Arc::new(new_nodes);
4812 0 :
4813 0 : // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
4814 0 : match availability_transition {
4815 : AvailabilityTransition::ToOffline => {
4816 0 : tracing::info!("Node {} transition to offline", node_id);
4817 0 : let mut tenants_affected: usize = 0;
4818 :
4819 0 : for (tenant_shard_id, tenant_shard) in tenants {
4820 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
4821 0 : // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
4822 0 : // not assume our knowledge of the node's configuration is accurate until it comes back online
4823 0 : observed_loc.conf = None;
4824 0 : }
4825 :
4826 0 : if new_nodes.len() == 1 {
4827 : // Special case for single-node cluster: there is no point trying to reschedule
4828 : // any tenant shards: avoid doing so, in order to avoid spewing warnings about
4829 : // failures to schedule them.
4830 0 : continue;
4831 0 : }
4832 0 :
4833 0 : if !new_nodes
4834 0 : .values()
4835 0 : .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
4836 : {
4837 : // Special case for when all nodes are unavailable and/or unschedulable: there is no point
4838 : // trying to reschedule since there's nowhere else to go. Without this
4839 : // branch we incorrectly detach tenants in response to node unavailability.
4840 0 : continue;
4841 0 : }
4842 0 :
4843 0 : if tenant_shard.intent.demote_attached(scheduler, node_id) {
4844 0 : tenant_shard.sequence = tenant_shard.sequence.next();
4845 0 :
4846 0 : // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters
4847 0 : // for tenants without secondary locations: if they have a secondary location, then this
4848 0 : // schedule() call is just promoting an existing secondary)
4849 0 : let mut schedule_context = ScheduleContext::default();
4850 0 :
4851 0 : match tenant_shard.schedule(scheduler, &mut schedule_context) {
4852 0 : Err(e) => {
4853 0 : // It is possible that some tenants will become unschedulable when too many pageservers
4854 0 : // go offline: in this case there isn't much we can do other than make the issue observable.
4855 0 : // TODO: give TenantShard a scheduling error attribute to be queried later.
4856 0 : tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
4857 : }
4858 : Ok(()) => {
4859 0 : if self
4860 0 : .maybe_reconcile_shard(tenant_shard, &new_nodes)
4861 0 : .is_some()
4862 0 : {
4863 0 : tenants_affected += 1;
4864 0 : };
4865 : }
4866 : }
4867 0 : }
4868 : }
4869 0 : tracing::info!(
4870 0 : "Launched {} reconciler tasks for tenants affected by node {} going offline",
4871 : tenants_affected,
4872 : node_id
4873 : )
4874 : }
4875 : AvailabilityTransition::ToActive => {
4876 0 : tracing::info!("Node {} transition to active", node_id);
4877 : // When a node comes back online, we must reconcile any tenant that has a None observed
4878 : // location on the node.
4879 0 : for tenant_shard in locked.tenants.values_mut() {
4880 : // If a reconciliation is already in progress, rely on the previous scheduling
4881 : // decision and skip triggering a new reconciliation.
4882 0 : if tenant_shard.reconciler.is_some() {
4883 0 : continue;
4884 0 : }
4885 :
4886 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
4887 0 : if observed_loc.conf.is_none() {
4888 0 : self.maybe_reconcile_shard(tenant_shard, &new_nodes);
4889 0 : }
4890 0 : }
4891 : }
4892 :
4893 : // TODO: in the background, we should balance work back onto this pageserver
4894 : }
4895 : // No action required for the intermediate unavailable state.
4896 : // When we transition into active or offline from the unavailable state,
4897 : // the correct handling above will kick in.
4898 : AvailabilityTransition::ToWarmingUpFromActive => {
4899 0 : tracing::info!("Node {} transition to unavailable from active", node_id);
4900 : }
4901 : AvailabilityTransition::ToWarmingUpFromOffline => {
4902 0 : tracing::info!("Node {} transition to unavailable from offline", node_id);
4903 : }
4904 : AvailabilityTransition::Unchanged => {
4905 0 : tracing::debug!("Node {} no availability change during config", node_id);
4906 : }
4907 : }
4908 :
4909 0 : locked.nodes = new_nodes;
4910 0 :
4911 0 : Ok(())
4912 0 : }
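// Usage sketch (hypothetical values): availability and scheduling can be updated
// independently; passing `None` leaves that property untouched, e.g.:
//
//     service.node_configure(node_id, None, Some(NodeSchedulingPolicy::Pause)).await?;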
4913 :
4914 : /// Wrapper around [`Self::node_configure`] which only allows changes while there is no ongoing
4915 : /// background operation; intended for use by the HTTP API.
4916 0 : pub(crate) async fn external_node_configure(
4917 0 : &self,
4918 0 : node_id: NodeId,
4919 0 : availability: Option<NodeAvailability>,
4920 0 : scheduling: Option<NodeSchedulingPolicy>,
4921 0 : ) -> Result<(), ApiError> {
4922 0 : {
4923 0 : let locked = self.inner.read().unwrap();
4924 0 : if let Some(op) = locked.ongoing_operation.as_ref().map(|op| op.operation) {
4925 0 : return Err(ApiError::PreconditionFailed(
4926 0 : format!("Ongoing background operation forbids configuring: {op}").into(),
4927 0 : ));
4928 0 : }
4929 0 : }
4930 0 :
4931 0 : self.node_configure(node_id, availability, scheduling).await
4932 0 : }
4933 :
4934 0 : pub(crate) async fn start_node_drain(
4935 0 : self: &Arc<Self>,
4936 0 : node_id: NodeId,
4937 0 : ) -> Result<(), ApiError> {
4938 0 : let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
4939 0 : let locked = self.inner.read().unwrap();
4940 0 : let nodes = &locked.nodes;
4941 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
4942 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4943 0 : ))?;
4944 0 : let schedulable_nodes_count = nodes
4945 0 : .iter()
4946 0 : .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
4947 0 : .count();
4948 0 :
4949 0 : (
4950 0 : locked
4951 0 : .ongoing_operation
4952 0 : .as_ref()
4953 0 : .map(|ongoing| ongoing.operation),
4954 0 : node.is_available(),
4955 0 : node.get_scheduling(),
4956 0 : schedulable_nodes_count,
4957 0 : )
4958 0 : };
4959 :
4960 0 : if let Some(ongoing) = ongoing_op {
4961 0 : return Err(ApiError::PreconditionFailed(
4962 0 : format!("Background operation already ongoing for node: {}", ongoing).into(),
4963 0 : ));
4964 0 : }
4965 0 :
4966 0 : if !node_available {
4967 0 : return Err(ApiError::ResourceUnavailable(
4968 0 : format!("Node {node_id} is currently unavailable").into(),
4969 0 : ));
4970 0 : }
4971 0 :
4972 0 : if schedulable_nodes_count == 0 {
4973 0 : return Err(ApiError::PreconditionFailed(
4974 0 : "No other schedulable nodes to drain to".into(),
4975 0 : ));
4976 0 : }
4977 0 :
4978 0 : match node_policy {
4979 : NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => {
4980 0 : self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
4981 0 : .await?;
4982 :
4983 0 : let cancel = self.cancel.child_token();
4984 0 : let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
4985 :
4986 0 : self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
4987 0 : operation: Operation::Drain(Drain { node_id }),
4988 0 : cancel: cancel.clone(),
4989 0 : });
4990 :
4991 0 : let span = tracing::info_span!(parent: None, "drain_node", %node_id);
4992 :
4993 0 : tokio::task::spawn({
4994 0 : let service = self.clone();
4995 0 : let cancel = cancel.clone();
4996 0 : async move {
4997 0 : let _gate_guard = gate_guard;
4998 :
4999 : scopeguard::defer! {
5000 : let prev = service.inner.write().unwrap().ongoing_operation.take();
5001 :
5002 0 : if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
5003 : assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
5004 : } else {
5005 : panic!("We always remove the same operation")
5006 : }
5007 : }
5008 :
5009 0 : tracing::info!("Drain background operation starting");
5010 0 : let res = service.drain_node(node_id, cancel).await;
5011 0 : match res {
5012 : Ok(()) => {
5013 0 : tracing::info!("Drain background operation completed successfully");
5014 : }
5015 : Err(OperationError::Cancelled) => {
5016 0 : tracing::info!("Drain background operation was cancelled");
5017 : }
5018 0 : Err(err) => {
5019 0 : tracing::error!("Drain background operation encountered an error: {err}")
5020 : }
5021 : }
5022 0 : }
5023 0 : }.instrument(span));
5024 0 : }
5025 : NodeSchedulingPolicy::Draining => {
5026 0 : return Err(ApiError::Conflict(format!(
5027 0 : "Node {node_id} has drain in progress"
5028 0 : )));
5029 : }
5030 0 : policy => {
5031 0 : return Err(ApiError::PreconditionFailed(
5032 0 : format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
5033 0 : ));
5034 : }
5035 : }
5036 :
5037 0 : Ok(())
5038 0 : }
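// Lifecycle sketch (hypothetical call sequence): a drain flips the node's policy
// to Draining and runs as a background task; it is stopped via the stored
// CancellationToken rather than by awaiting the task:
//
//     service.start_node_drain(node_id).await?;  // policy -> Draining, task spawned
//     service.cancel_node_drain(node_id).await?; // cancels the background task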
5039 :
5040 0 : pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
5041 0 : let node_available = {
5042 0 : let locked = self.inner.read().unwrap();
5043 0 : let nodes = &locked.nodes;
5044 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
5045 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5046 0 : ))?;
5047 :
5048 0 : node.is_available()
5049 0 : };
5050 0 :
5051 0 : if !node_available {
5052 0 : return Err(ApiError::ResourceUnavailable(
5053 0 : format!("Node {node_id} is currently unavailable").into(),
5054 0 : ));
5055 0 : }
5056 :
5057 0 : if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
5058 0 : if let Operation::Drain(drain) = op_handler.operation {
5059 0 : if drain.node_id == node_id {
5060 0 : tracing::info!("Cancelling background drain operation for node {node_id}");
5061 0 : op_handler.cancel.cancel();
5062 0 : return Ok(());
5063 0 : }
5064 0 : }
5065 0 : }
5066 :
5067 0 : Err(ApiError::PreconditionFailed(
5068 0 : format!("Node {node_id} has no drain in progress").into(),
5069 0 : ))
5070 0 : }
5071 :
5072 0 : pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
5073 0 : let (ongoing_op, node_available, node_policy, total_nodes_count) = {
5074 0 : let locked = self.inner.read().unwrap();
5075 0 : let nodes = &locked.nodes;
5076 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
5077 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5078 0 : ))?;
5079 :
5080 0 : (
5081 0 : locked
5082 0 : .ongoing_operation
5083 0 : .as_ref()
5084 0 : .map(|ongoing| ongoing.operation),
5085 0 : node.is_available(),
5086 0 : node.get_scheduling(),
5087 0 : nodes.len(),
5088 0 : )
5089 0 : };
5090 :
5091 0 : if let Some(ongoing) = ongoing_op {
5092 0 : return Err(ApiError::PreconditionFailed(
5093 0 : format!("Background operation already ongoing for node: {}", ongoing).into(),
5094 0 : ));
5095 0 : }
5096 0 :
5097 0 : if !node_available {
5098 0 : return Err(ApiError::ResourceUnavailable(
5099 0 : format!("Node {node_id} is currently unavailable").into(),
5100 0 : ));
5101 0 : }
5102 0 :
5103 0 : if total_nodes_count <= 1 {
5104 0 : return Err(ApiError::PreconditionFailed(
5105 0 : "No other nodes to fill from".into(),
5106 0 : ));
5107 0 : }
5108 0 :
5109 0 : match node_policy {
5110 : NodeSchedulingPolicy::Active => {
5111 0 : self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
5112 0 : .await?;
5113 :
5114 0 : let cancel = self.cancel.child_token();
5115 0 : let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
5116 :
5117 0 : self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
5118 0 : operation: Operation::Fill(Fill { node_id }),
5119 0 : cancel: cancel.clone(),
5120 0 : });
5121 :
5122 0 : let span = tracing::info_span!(parent: None, "fill_node", %node_id);
5123 :
5124 0 : tokio::task::spawn({
5125 0 : let service = self.clone();
5126 0 : let cancel = cancel.clone();
5127 0 : async move {
5128 0 : let _gate_guard = gate_guard;
5129 :
5130 : scopeguard::defer! {
5131 : let prev = service.inner.write().unwrap().ongoing_operation.take();
5132 :
5133 0 : if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
5134 : assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
5135 : } else {
5136 : panic!("We always remove the same operation")
5137 : }
5138 : }
5139 :
5140 0 : tracing::info!("Fill background operation starting");
5141 0 : let res = service.fill_node(node_id, cancel).await;
5142 0 : match res {
5143 : Ok(()) => {
5144 0 : tracing::info!("Fill background operation completed successfully");
5145 : }
5146 : Err(OperationError::Cancelled) => {
5147 0 : tracing::info!("Fill background operation was cancelled");
5148 : }
5149 0 : Err(err) => {
5150 0 : tracing::error!("Fill background operation encountered an error: {err}")
5151 : }
5152 : }
5153 0 : }
5154 0 : }.instrument(span));
5155 0 : }
5156 : NodeSchedulingPolicy::Filling => {
5157 0 : return Err(ApiError::Conflict(format!(
5158 0 : "Node {node_id} has fill in progress"
5159 0 : )));
5160 : }
5161 0 : policy => {
5162 0 : return Err(ApiError::PreconditionFailed(
5163 0 : format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
5164 0 : ));
5165 : }
5166 : }
5167 :
5168 0 : Ok(())
5169 0 : }
5170 :
5171 0 : pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
5172 0 : let node_available = {
5173 0 : let locked = self.inner.read().unwrap();
5174 0 : let nodes = &locked.nodes;
5175 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
5176 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5177 0 : ))?;
5178 :
5179 0 : node.is_available()
5180 0 : };
5181 0 :
5182 0 : if !node_available {
5183 0 : return Err(ApiError::ResourceUnavailable(
5184 0 : format!("Node {node_id} is currently unavailable").into(),
5185 0 : ));
5186 0 : }
5187 :
5188 0 : if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
5189 0 : if let Operation::Fill(fill) = op_handler.operation {
5190 0 : if fill.node_id == node_id {
5191 0 : tracing::info!("Cancelling background fill operation for node {node_id}");
5192 0 : op_handler.cancel.cancel();
5193 0 : return Ok(());
5194 0 : }
5195 0 : }
5196 0 : }
5197 :
5198 0 : Err(ApiError::PreconditionFailed(
5199 0 : format!("Node {node_id} has no fill in progress").into(),
5200 0 : ))
5201 0 : }
5202 :
5203 : /// Helper for methods that will try and call pageserver APIs for
5204 : /// a tenant, such as timeline CRUD: they cannot proceed unless the tenant
5205 : /// is attached somewhere.
5206 0 : fn ensure_attached_schedule(
5207 0 : &self,
5208 0 : mut locked: std::sync::RwLockWriteGuard<'_, ServiceState>,
5209 0 : tenant_id: TenantId,
5210 0 : ) -> Result<Vec<ReconcilerWaiter>, anyhow::Error> {
5211 0 : let mut waiters = Vec::new();
5212 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5213 0 :
5214 0 : let mut schedule_context = ScheduleContext::default();
5215 0 : for (tenant_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
5216 0 : shard.schedule(scheduler, &mut schedule_context)?;
5217 :
5218 : // The shard's policies may not result in an attached location being scheduled: this
5219 : // is an error because our caller needs it attached somewhere.
5220 0 : if shard.intent.get_attached().is_none() {
5221 0 : return Err(anyhow::anyhow!(
5222 0 : "Tenant {tenant_id} not scheduled to be attached"
5223 0 : ));
5224 0 : };
5225 0 :
5226 0 : if shard.stably_attached().is_some() {
5227 : // We do not require the shard to be totally up to date on reconciliation: we just require
5228 : // that it has been attached on the intended node. Other dirty state such as unattached secondary
5229 : // locations, or compute hook notifications can be ignored.
5230 0 : continue;
5231 0 : }
5232 :
5233 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
5234 0 : tracing::info!("Waiting for shard {tenant_shard_id} to reconcile, in order to ensure it is attached");
5235 0 : waiters.push(waiter);
5236 0 : }
5237 : }
5238 0 : Ok(waiters)
5239 0 : }
5240 :
5241 0 : async fn ensure_attached_wait(&self, tenant_id: TenantId) -> Result<(), ApiError> {
5242 0 : let ensure_waiters = {
5243 0 : let locked = self.inner.write().unwrap();
5244 :
5245 : // Check if the tenant is splitting: in this case, even if it is attached,
5246 : // we must act as if it is not: this blocks e.g. timeline creation/deletion
5247 : // operations during the split.
5248 0 : for (_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
5249 0 : if !matches!(shard.splitting, SplitState::Idle) {
5250 0 : return Err(ApiError::ResourceUnavailable(
5251 0 : "Tenant shards are currently splitting".into(),
5252 0 : ));
5253 0 : }
5254 : }
5255 :
5256 0 : self.ensure_attached_schedule(locked, tenant_id)
5257 0 : .map_err(ApiError::InternalServerError)?
5258 : };
5259 :
5260 0 : let deadline = Instant::now().checked_add(Duration::from_secs(5)).unwrap();
5261 0 : for waiter in ensure_waiters {
5262 0 : let timeout = deadline.duration_since(Instant::now());
5263 0 : waiter.wait_timeout(timeout).await?;
5264 : }
5265 :
5266 0 : Ok(())
5267 0 : }
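// Usage sketch (hypothetical): a timeline CRUD handler would gate on attachment
// before issuing pageserver API calls:
//
//     self.ensure_attached_wait(tenant_id).await?;
//     // ...safe to call timeline create/delete APIs on the attached location...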
5268 :
5269 : /// Like [`Self::maybe_configured_reconcile_shard`], but uses the default reconciler
5270 : /// configuration
5271 0 : fn maybe_reconcile_shard(
5272 0 : &self,
5273 0 : shard: &mut TenantShard,
5274 0 : nodes: &Arc<HashMap<NodeId, Node>>,
5275 0 : ) -> Option<ReconcilerWaiter> {
5276 0 : self.maybe_configured_reconcile_shard(shard, nodes, ReconcilerConfig::default())
5277 0 : }
5278 :
5279 : /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`].
5280 0 : fn maybe_configured_reconcile_shard(
5281 0 : &self,
5282 0 : shard: &mut TenantShard,
5283 0 : nodes: &Arc<HashMap<NodeId, Node>>,
5284 0 : reconciler_config: ReconcilerConfig,
5285 0 : ) -> Option<ReconcilerWaiter> {
5286 0 : let reconcile_needed = shard.get_reconcile_needed(nodes);
5287 0 :
5288 0 : match reconcile_needed {
5289 0 : ReconcileNeeded::No => return None,
5290 0 : ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
5291 0 : ReconcileNeeded::Yes => {
5292 0 : // Fall through to try and acquire units for spawning reconciler
5293 0 : }
5294 : };
5295 :
5296 0 : let units = match self.reconciler_concurrency.clone().try_acquire_owned() {
5297 0 : Ok(u) => ReconcileUnits::new(u),
5298 : Err(_) => {
5299 0 : tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
5300 0 : "Concurrency limited: enqueued for reconcile later");
5301 0 : if !shard.delayed_reconcile {
5302 0 : match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
5303 0 : Err(TrySendError::Closed(_)) => {
5304 0 : // Weird mid-shutdown case?
5305 0 : }
5306 : Err(TrySendError::Full(_)) => {
5307 : // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
5308 0 : tracing::warn!(
5309 0 : "Many shards are waiting to reconcile: delayed_reconcile queue is full"
5310 : );
5311 : }
5312 0 : Ok(()) => {
5313 0 : shard.delayed_reconcile = true;
5314 0 : }
5315 : }
5316 0 : }
5317 :
5318 : // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
5319 : // number to advance. When this function is eventually called again and succeeds in getting units,
5320 : // it will spawn a reconciler that makes this waiter complete.
5321 0 : return Some(shard.future_reconcile_waiter());
5322 : }
5323 : };
5324 :
5325 0 : let Ok(gate_guard) = self.reconcilers_gate.enter() else {
5326 : // Gate closed: we're shutting down, drop out.
5327 0 : return None;
5328 : };
5329 :
5330 0 : shard.spawn_reconciler(
5331 0 : &self.result_tx,
5332 0 : nodes,
5333 0 : &self.compute_hook,
5334 0 : reconciler_config,
5335 0 : &self.config,
5336 0 : &self.persistence,
5337 0 : units,
5338 0 : gate_guard,
5339 0 : &self.reconcilers_cancel,
5340 0 : )
5341 0 : }
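// Usage sketch (pattern appearing elsewhere in this file): callers that must see
// the reconcile finish block on the returned waiter with a deadline:
//
//     if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
//         waiter.wait_timeout(timeout).await?;
//     }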
5342 :
5343 : /// Check all tenants for pending reconciliation work, and reconcile those in need.
5344 : /// Additionally, reschedule tenants that require it.
5345 : ///
5346 : /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
5347 : /// spawned but some _would_ have been spawned if `reconciler_concurrency` units were
5348 : /// available. A return value of 0 indicates that everything is fully reconciled already.
5349 0 : fn reconcile_all(&self) -> usize {
5350 0 : let mut locked = self.inner.write().unwrap();
5351 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
5352 0 : let pageservers = nodes.clone();
5353 0 :
5354 0 : let mut schedule_context = ScheduleContext::default();
5355 0 :
5356 0 : let mut reconciles_spawned = 0;
5357 0 : for (tenant_shard_id, shard) in tenants.iter_mut() {
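// Note: `tenants` is a BTreeMap keyed by TenantShardId, so a tenant's shards are
// iterated contiguously; resetting on shard zero therefore yields a fresh
// per-tenant ScheduleContext.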
5358 0 : if tenant_shard_id.is_shard_zero() {
5359 0 : schedule_context = ScheduleContext::default();
5360 0 : }
5361 :
5362 : // Skip checking if this shard is already enqueued for reconciliation
5363 0 : if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
5364 : // If there is something delayed, then return a nonzero count so that
5365 : // callers like reconcile_all_now do not incorrectly get the impression
5366 : // that the system is in a quiescent state.
5367 0 : reconciles_spawned = std::cmp::max(1, reconciles_spawned);
5368 0 : continue;
5369 0 : }
5370 0 :
5371 0 : // Eventual consistency: if an earlier reconcile job failed, and the shard is still
5372 0 : // dirty, spawn another one
5373 0 : if self.maybe_reconcile_shard(shard, &pageservers).is_some() {
5374 0 : reconciles_spawned += 1;
5375 0 : }
5376 :
5377 0 : schedule_context.avoid(&shard.intent.all_pageservers());
5378 : }
5379 :
5380 0 : reconciles_spawned
5381 0 : }
5382 :
5383 : /// `optimize` in this context means identifying shards which have valid scheduled locations, but
5384 : /// could be scheduled somewhere better:
5385 : /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
5386 : /// * e.g. after a node fails then recovers, to move some work back to it
5387 : /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
5388 : /// * e.g. after a shard split, the initial attached locations will all be on the node where
5389 : /// we did the split, but are probably better placed elsewhere.
5390 : /// - Creating new secondary locations if it improves the spreading of a sharded tenant
5391 : /// * e.g. after a shard split, some locations will be on the same node (where the split
5392 : /// happened), and will probably be better placed elsewhere.
5393 : ///
5394 : /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
5395 : /// the time of scheduling, this function looks for cases where a better-scoring location is available
5396 : /// according to those same soft constraints.
5397 0 : async fn optimize_all(&self) -> usize {
5398 0 : // Limit on how many shards' optimizations each call to this function will execute. Combined
5399 0 : // with the frequency of background calls, this acts as an implicit rate limit that runs a small
5400 0 : // trickle of optimizations in the background, rather than executing a large number in parallel
5401 0 : // when a change occurs.
5402 0 : const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 2;
5403 0 :
5404 0 : // Synchronous prepare: scan shards for possible scheduling optimizations
5405 0 : let candidate_work = self.optimize_all_plan();
5406 0 : let candidate_work_len = candidate_work.len();
5407 :
5408 : // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply validation
5409 0 : let validated_work = self.optimize_all_validate(candidate_work).await;
5410 :
5411 0 : let was_work_filtered = validated_work.len() != candidate_work_len;
5412 0 :
5413 0 : // Synchronous apply: update the shards' intent states according to validated optimizations
5414 0 : let mut reconciles_spawned = 0;
5415 0 : let mut optimizations_applied = 0;
5416 0 : let mut locked = self.inner.write().unwrap();
5417 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5418 0 : for (tenant_shard_id, optimization) in validated_work {
5419 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
5420 : // Shard was dropped between planning and execution;
5421 0 : continue;
5422 : };
5423 0 : if shard.apply_optimization(scheduler, optimization) {
5424 0 : optimizations_applied += 1;
5425 0 : if self.maybe_reconcile_shard(shard, nodes).is_some() {
5426 0 : reconciles_spawned += 1;
5427 0 : }
5428 0 : }
5429 :
5430 0 : if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
5431 0 : break;
5432 0 : }
5433 : }
5434 :
5435 0 : if was_work_filtered {
5436 0 : // If we filtered any work out during validation, ensure we return a nonzero value to indicate
5437 0 : // to callers that the system is not in a truly quiet state, it's going to do some work as soon
5438 0 : // as these validations start passing.
5439 0 : reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
5440 0 : }
5441 :
5442 0 : reconciles_spawned
5443 0 : }
5444 :
5445 0 : fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
5446 0 : let mut schedule_context = ScheduleContext::default();
5447 0 :
5448 0 : let mut tenant_shards: Vec<&TenantShard> = Vec::new();
5449 0 :
5450 0 : // How many candidate optimizations we will generate, before evaluating them for readiness: setting
5451 0 : // this higher than the execution limit gives us a chance to execute some work even if the first
5452 0 : // few optimizations we find are not ready.
5453 0 : const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 8;
5454 0 :
5455 0 : let mut work = Vec::new();
5456 0 :
5457 0 : let mut locked = self.inner.write().unwrap();
5458 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5459 0 : for (tenant_shard_id, shard) in tenants.iter() {
5460 0 : if tenant_shard_id.is_shard_zero() {
5461 0 : // Reset accumulators on the first shard in a tenant
5462 0 : schedule_context = ScheduleContext::default();
5463 0 : schedule_context.mode = ScheduleMode::Speculative;
5464 0 : tenant_shards.clear();
5465 0 : }
5466 :
5467 0 : if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
5468 0 : break;
5469 0 : }
5470 0 :
5471 0 : match shard.get_scheduling_policy() {
5472 0 : ShardSchedulingPolicy::Active => {
5473 0 : // Ok to do optimization
5474 0 : }
5475 : ShardSchedulingPolicy::Essential
5476 : | ShardSchedulingPolicy::Pause
5477 : | ShardSchedulingPolicy::Stop => {
5478 : // Policy prevents optimizing this shard.
5479 0 : continue;
5480 : }
5481 : }
5482 :
5483 : // Accumulate the schedule context for all the shards in a tenant: we must have
5484 : // the total view of all shards before we can try to optimize any of them.
5485 0 : schedule_context.avoid(&shard.intent.all_pageservers());
5486 0 : if let Some(attached) = shard.intent.get_attached() {
5487 0 : schedule_context.push_attached(*attached);
5488 0 : }
5489 0 : tenant_shards.push(shard);
5490 0 :
5491 0 : // Once we have seen the last shard in the tenant, proceed to search across all shards
5492 0 : // in the tenant for optimizations
5493 0 : if shard.shard.number.0 == shard.shard.count.count() - 1 {
5494 0 : if tenant_shards.iter().any(|s| s.reconciler.is_some()) {
5495 : // Do not start any optimizations while another change to the tenant is ongoing: this
5496 : // is not necessary for correctness, but simplifies operations and implicitly throttles
5497 : // optimization changes to happen in a "trickle" over time.
5498 0 : continue;
5499 0 : }
5500 0 :
5501 0 : if tenant_shards.iter().any(|s| {
5502 0 : !matches!(s.splitting, SplitState::Idle)
5503 0 : || matches!(s.policy, PlacementPolicy::Detached)
5504 0 : }) {
5505 : // Never attempt to optimize a tenant that is currently being split, or
5506 : // a tenant that is meant to be detached
5507 0 : continue;
5508 0 : }
5509 :
5510 : // TODO: optimization calculations are relatively expensive: create some fast-path for
5511 : // the common idle case (avoiding the search on tenants that we have recently checked)
5512 :
5513 0 : for shard in &tenant_shards {
5514 0 : if let Some(optimization) =
5515 : // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
5516 : // its primary location based on soft constraints, cut it over.
5517 0 : shard.optimize_attachment(nodes, &schedule_context)
5518 : {
5519 0 : work.push((shard.tenant_shard_id, optimization));
5520 0 : break;
5521 0 : } else if let Some(optimization) =
5522 : // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
5523 : // better placed on another node, based on ScheduleContext, then adjust it. This
5524 : // covers cases like after a shard split, where we might have too many shards
5525 : // in the same tenant with secondary locations on the node where they originally split.
5526 0 : shard.optimize_secondary(scheduler, &schedule_context)
5527 : {
5528 0 : work.push((shard.tenant_shard_id, optimization));
5529 0 : break;
5530 0 : }
5531 :
5532 : // TODO: extend this mechanism to prefer attaching on nodes with fewer attached
5533 : // tenants (i.e. extend schedule state to distinguish attached from secondary counts),
5534 : // for the total number of attachments on a node (not just within a tenant.)
5535 : }
5536 0 : }
5537 : }
5538 :
5539 0 : work
5540 0 : }
5541 :
5542 0 : async fn optimize_all_validate(
5543 0 : &self,
5544 0 : candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
5545 0 : ) -> Vec<(TenantShardId, ScheduleOptimization)> {
5546 0 : // Take a clone of the node map to use outside the lock in async validation phase
5547 0 : let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
5548 0 :
5549 0 : let mut want_secondary_status = Vec::new();
5550 0 :
5551 0 : // Validate our plans: this is an async phase where we may do I/O to pageservers to
5552 0 : // check that the state of locations is acceptable to run the optimization, such as
5553 0 : // checking that a secondary location is sufficiently warmed-up to cleanly cut over
5554 0 : // in a live migration.
5555 0 : let mut validated_work = Vec::new();
5556 0 : for (tenant_shard_id, optimization) in candidate_work {
5557 0 : match optimization.action {
5558 : ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
5559 : old_attached_node_id: _,
5560 0 : new_attached_node_id,
5561 0 : }) => {
5562 0 : match validation_nodes.get(&new_attached_node_id) {
5563 0 : None => {
5564 0 : // Node was dropped between planning and validation
5565 0 : }
5566 0 : Some(node) => {
5567 0 : if !node.is_available() {
5568 0 : tracing::info!("Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable");
5569 0 : } else {
5570 0 : // Accumulate optimizations that require fetching secondary status, so that we can execute these
5571 0 : // remote API requests concurrently.
5572 0 : want_secondary_status.push((
5573 0 : tenant_shard_id,
5574 0 : node.clone(),
5575 0 : optimization,
5576 0 : ));
5577 0 : }
5578 : }
5579 : }
5580 : }
5581 : ScheduleOptimizationAction::ReplaceSecondary(_) => {
5582 : // No extra checks needed to replace a secondary: this does not interrupt client access
5583 0 : validated_work.push((tenant_shard_id, optimization))
5584 : }
5585 : };
5586 : }
5587 :
5588 : // Call into pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth migration: we
5589 : // do this so that we avoid spawning a Reconciler that would have to wait minutes/hours for a destination to warm up: that reconciler
5590 : // would hold a precious reconcile semaphore unit the whole time it was waiting for the destination to warm up.
5591 0 : let results = self
5592 0 : .tenant_for_shards_api(
5593 0 : want_secondary_status
5594 0 : .iter()
5595 0 : .map(|i| (i.0, i.1.clone()))
5596 0 : .collect(),
5597 0 : |tenant_shard_id, client| async move {
5598 0 : client.tenant_secondary_status(tenant_shard_id).await
5599 0 : },
5600 0 : 1,
5601 0 : 1,
5602 0 : SHORT_RECONCILE_TIMEOUT,
5603 0 : &self.cancel,
5604 0 : )
5605 0 : .await;
5606 :
5607 0 : for ((tenant_shard_id, node, optimization), secondary_status) in
5608 0 : want_secondary_status.into_iter().zip(results.into_iter())
5609 : {
5610 0 : match secondary_status {
5611 0 : Err(e) => {
5612 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}");
5613 : }
5614 0 : Ok(progress) => {
5615 0 : // We require secondary locations to have less than 10GiB of downloads pending before we will use
5616 0 : // them in an optimization
5617 0 : const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
5618 0 :
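// Unpacking the condition below (`&&` binds tighter than `||`): skip the
// migration when (a) no heatmap has been uploaded yet, (b) a small download
// (total < threshold) has not fully completed, or (c) more than the threshold's
// worth of bytes remain to be downloaded.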
5619 0 : if progress.heatmap_mtime.is_none()
5620 0 : || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
5621 0 : && progress.bytes_downloaded != progress.bytes_total
5622 0 : || progress.bytes_total - progress.bytes_downloaded
5623 0 : > DOWNLOAD_FRESHNESS_THRESHOLD
5624 : {
5625 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}");
5626 : } else {
5627 : // Location looks ready: proceed
5628 0 : tracing::info!(
5629 0 : "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
5630 : );
5631 0 : validated_work.push((tenant_shard_id, optimization))
5632 : }
5633 : }
5634 : }
5635 : }
5636 :
5637 0 : validated_work
5638 0 : }
5639 :
5640 : /// Look for shards which are oversized and in need of splitting
5641 0 : async fn autosplit_tenants(self: &Arc<Self>) {
5642 0 : let Some(split_threshold) = self.config.split_threshold else {
5643 : // Auto-splitting is disabled
5644 0 : return;
5645 : };
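// `split_threshold` is interpreted as a byte threshold on logical size: it is
// passed as `where_gt` in the TopTenantShardsRequest below, which orders
// candidates by MaxLogicalSize.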
5646 :
5647 0 : let nodes = self.inner.read().unwrap().nodes.clone();
5648 0 :
5649 0 : const SPLIT_TO_MAX: ShardCount = ShardCount::new(8);
5650 0 :
5651 0 : let mut top_n = Vec::new();
5652 0 :
5653 0 : // Call into each node to look for big tenants
5654 0 : let top_n_request = TopTenantShardsRequest {
5655 0 : // We currently split based on logical size, for simplicity: logical size is a signal of
5656 0 : // the user's intent to run a large database, whereas physical/resident size can be symptoms
5657 0 : // of compaction issues. Eventually we should switch to using resident size to bound the
5658 0 : // disk space impact of one shard.
5659 0 : order_by: models::TenantSorting::MaxLogicalSize,
5660 0 : limit: 10,
5661 0 : where_shards_lt: Some(SPLIT_TO_MAX),
5662 0 : where_gt: Some(split_threshold),
5663 0 : };
5664 0 : for node in nodes.values() {
5665 0 : let request_ref = &top_n_request;
5666 0 : match node
5667 0 : .with_client_retries(
5668 0 : |client| async move {
5669 0 : let request = request_ref.clone();
5670 0 : client.top_tenant_shards(request.clone()).await
5671 0 : },
5672 0 : &self.config.jwt_token,
5673 0 : 3,
5674 0 : 3,
5675 0 : Duration::from_secs(5),
5676 0 : &self.cancel,
5677 0 : )
5678 0 : .await
5679 : {
5680 0 : Some(Ok(node_top_n)) => {
5681 0 : top_n.extend(node_top_n.shards.into_iter());
5682 0 : }
5683 : Some(Err(mgmt_api::Error::Cancelled)) => {
5684 0 : continue;
5685 : }
5686 0 : Some(Err(e)) => {
5687 0 : tracing::warn!("Failed to fetch top N tenants from {node}: {e}");
5688 0 : continue;
5689 : }
5690 : None => {
5691 : // Node is shutting down
5692 0 : continue;
5693 : }
5694 : };
5695 : }
5696 :
5697 : // Pick the biggest tenant to split first: sort descending so the largest comes first
5698 0 : top_n.sort_by_key(|i| std::cmp::Reverse(i.resident_size));
5699 0 : let Some(split_candidate) = top_n.into_iter().next() else {
5700 0 : tracing::debug!("No split-eligible shards found");
5701 0 : return;
5702 : };
5703 :
5704 : // We spawn a task to run this, so it's exactly like some external API client requesting it. We don't
5705 : // want to block the background reconcile loop on this.
5706 0 : tracing::info!("Auto-splitting tenant for size threshold {split_threshold}: current size {split_candidate:?}");
5707 :
5708 0 : let this = self.clone();
5709 0 : tokio::spawn(
5710 0 : async move {
5711 0 : match this
5712 0 : .tenant_shard_split(
5713 0 : split_candidate.id.tenant_id,
5714 0 : TenantShardSplitRequest {
5715 0 : // Always split to the max number of shards: this avoids stepping through
5716 0 : // intervening shard counts and encountering the overhead of a split+cleanup
5717 0 : // each time as a tenant grows, and is not too expensive because our max shard
5718 0 : // count is relatively low anyway.
5719 0 : // This policy will be adjusted in future once we support higher shard count.
5720 0 : new_shard_count: SPLIT_TO_MAX.literal(),
5721 0 : new_stripe_size: Some(ShardParameters::DEFAULT_STRIPE_SIZE),
5722 0 : },
5723 0 : )
5724 0 : .await
5725 : {
5726 : Ok(_) => {
5727 0 : tracing::info!("Successful auto-split");
5728 : }
5729 0 : Err(e) => {
5730 0 : tracing::error!("Auto-split failed: {e}");
5731 : }
5732 : }
5733 0 : }
5734 0 : .instrument(tracing::info_span!("auto_split", tenant_id=%split_candidate.id.tenant_id)),
5735 : );
5736 0 : }
5737 :
5738 : /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
5739 : /// also wait for any generated Reconcilers to complete. Calling this until it returns zero should
5740 : /// put the system into a quiescent state where future background reconciliations won't do anything.
5741 0 : pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
5742 0 : let reconciles_spawned = self.reconcile_all();
5743 0 : let reconciles_spawned = if reconciles_spawned == 0 {
5744 : // Only optimize when we are otherwise idle
5745 0 : self.optimize_all().await
5746 : } else {
5747 0 : reconciles_spawned
5748 : };
5749 :
5750 0 : let waiters = {
5751 0 : let mut waiters = Vec::new();
5752 0 : let locked = self.inner.read().unwrap();
5753 0 : for (_tenant_shard_id, shard) in locked.tenants.iter() {
5754 0 : if let Some(waiter) = shard.get_waiter() {
5755 0 : waiters.push(waiter);
5756 0 : }
5757 : }
5758 0 : waiters
5759 0 : };
5760 0 :
5761 0 : let waiter_count = waiters.len();
5762 0 : match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
5763 0 : Ok(()) => {}
5764 0 : Err(ReconcileWaitError::Failed(_, reconcile_error))
5765 0 : if matches!(*reconcile_error, ReconcileError::Cancel) =>
5766 0 : {
5767 0 : // Ignore reconciler cancel errors: this reconciler might have shut down
5768 0 : // because some other change superseded it. We will return a nonzero number,
5769 0 : // so the caller knows they might have to call again to quiesce the system.
5770 0 : }
5771 0 : Err(e) => {
5772 0 : return Err(e);
5773 : }
5774 : };
5775 :
5776 0 : tracing::info!(
5777 0 : "{} reconciles in reconcile_all, {} waiters",
5778 : reconciles_spawned,
5779 : waiter_count
5780 : );
5781 :
5782 0 : Ok(std::cmp::max(waiter_count, reconciles_spawned))
5783 0 : }
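// Usage sketch (test-only pattern): because cancelled reconcilers are swallowed
// above and reported via a nonzero return, callers loop until zero to reach a
// quiescent state:
//
//     while service.reconcile_all_now().await? != 0 {}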
5784 :
5785 0 : async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
5786 0 : // Cancel all on-going reconciles and wait for them to exit the gate.
5787 0 : tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
5788 0 : self.reconcilers_cancel.cancel();
5789 0 : self.reconcilers_gate.close().await;
5790 :
5791 : // Signal the background loop in [`Service::process_results`] to exit once
5792 : // it has processed the results from all the reconciles we cancelled earlier.
5793 0 : tracing::info!("{reason}: processing results from previously in-flight reconciles");
5794 0 : self.result_tx.send(ReconcileResultRequest::Stop).ok();
5795 0 : self.result_tx.closed().await;
5796 0 : }
5797 :
5798 0 : pub async fn shutdown(&self) {
5799 0 : self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
5800 0 : .await;
5801 :
5802 : // Background tasks hold gate guards: this notifies them of the cancellation and
5803 : // waits for them all to complete.
5804 0 : tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
5805 0 : self.cancel.cancel();
5806 0 : self.gate.close().await;
5807 0 : }
5808 :
5809 : /// Spot check the download lag for a secondary location of a shard.
5810 : /// Should be used as a heuristic, since it's not always precise: the
5811 : /// secondary might have not downloaded the new heat map yet and, hence,
5812 : /// secondary might not have downloaded the new heat map yet and, hence,
5813 : ///
5814 : /// Returns:
5815 : /// * Ok(None) if the lag could not be determined from the status,
5816 : /// * Ok(Some(_)) if the lag could be determined,
5817 : /// * Err on failures to query the pageserver.
5818 0 : async fn secondary_lag(
5819 0 : &self,
5820 0 : secondary: &NodeId,
5821 0 : tenant_shard_id: TenantShardId,
5822 0 : ) -> Result<Option<u64>, mgmt_api::Error> {
5823 0 : let nodes = self.inner.read().unwrap().nodes.clone();
5824 0 : let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
5825 0 : StatusCode::NOT_FOUND,
5826 0 : format!("Node with id {} not found", secondary),
5827 0 : ))?;
5828 :
5829 0 : match node
5830 0 : .with_client_retries(
5831 0 : |client| async move { client.tenant_secondary_status(tenant_shard_id).await },
5832 0 : &self.config.jwt_token,
5833 0 : 1,
5834 0 : 3,
5835 0 : Duration::from_millis(250),
5836 0 : &self.cancel,
5837 0 : )
5838 0 : .await
5839 : {
5840 0 : Some(Ok(status)) => match status.heatmap_mtime {
5841 0 : Some(_) => Ok(Some(status.bytes_total - status.bytes_downloaded)),
5842 0 : None => Ok(None),
5843 : },
5844 0 : Some(Err(e)) => Err(e),
5845 0 : None => Err(mgmt_api::Error::Cancelled),
5846 : }
5847 0 : }
5848 :
5849 : /// Drain a node by moving the shards attached to it as primaries.
5850 : /// This is a long running operation and it should run as a separate Tokio task.
5851 0 : pub(crate) async fn drain_node(
5852 0 : self: &Arc<Self>,
5853 0 : node_id: NodeId,
5854 0 : cancel: CancellationToken,
5855 0 : ) -> Result<(), OperationError> {
5856 0 : const MAX_SECONDARY_LAG_BYTES_DEFAULT: u64 = 256 * 1024 * 1024;
5857 0 : let max_secondary_lag_bytes = self
5858 0 : .config
5859 0 : .max_secondary_lag_bytes
5860 0 : .unwrap_or(MAX_SECONDARY_LAG_BYTES_DEFAULT);
5861 0 :
5862 0 : // By default, live migrations are generous about the wait time for getting
5863 0 : // the secondary location up to speed. When draining, give up earlier in order
5864 0 : // to not stall the operation when a cold secondary is encountered.
5865 0 : const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(20);
5866 0 : const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
5867 0 : let reconciler_config = ReconcilerConfigBuilder::new()
5868 0 : .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
5869 0 : .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
5870 0 : .build();
5871 0 :
5872 0 : let mut waiters = Vec::new();
5873 0 :
5874 0 : let mut tid_iter = TenantShardIterator::new({
5875 0 : let service = self.clone();
5876 0 : move |last_inspected_shard: Option<TenantShardId>| {
5877 0 : let locked = &service.inner.read().unwrap();
5878 0 : let tenants = &locked.tenants;
5879 0 : let entry = match last_inspected_shard {
5880 0 : Some(skip_past) => {
5881 0 : // Skip to the last seen tenant shard id
5882 0 : let mut cursor = tenants.iter().skip_while(|(tid, _)| **tid != skip_past);
5883 0 :
5884 0 : // Skip past the last seen
5885 0 : cursor.nth(1)
5886 : }
5887 0 : None => tenants.first_key_value(),
5888 : };
5889 :
5890 0 : entry.map(|(tid, _)| tid).copied()
5891 0 : }
5892 0 : });
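// Note on the closure above: it implements the pagination contract of
// `TenantShardIterator`. Given the previously returned shard id, it yields the
// next one in `BTreeMap` order, or `None` when exhausted; e.g. for shards
// {A, B, C}: None -> A, Some(A) -> B, Some(C) -> None. Fetching one id per
// call keeps the `inner` read lock from being held across await points.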
5893 :
5894 0 : while !tid_iter.finished() {
5895 0 : if cancel.is_cancelled() {
5896 0 : match self
5897 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
5898 0 : .await
5899 : {
5900 0 : Ok(()) => return Err(OperationError::Cancelled),
5901 0 : Err(err) => {
5902 0 : return Err(OperationError::FinalizeError(
5903 0 : format!(
5904 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
5905 0 : node_id, err
5906 0 : )
5907 0 : .into(),
5908 0 : ));
5909 : }
5910 : }
5911 0 : }
5912 0 :
5913 0 : drain_utils::validate_node_state(&node_id, self.inner.read().unwrap().nodes.clone())?;
5914 :
5915 0 : while waiters.len() < MAX_RECONCILES_PER_OPERATION {
5916 0 : let tid = match tid_iter.next() {
5917 0 : Some(tid) => tid,
5918 : None => {
5919 0 : break;
5920 : }
5921 : };
5922 :
5923 0 : let tid_drain = TenantShardDrain {
5924 0 : drained_node: node_id,
5925 0 : tenant_shard_id: tid,
5926 0 : };
5927 :
5928 0 : let dest_node_id = {
5929 0 : let locked = self.inner.read().unwrap();
5930 0 :
5931 0 : match tid_drain
5932 0 : .tenant_shard_eligible_for_drain(&locked.tenants, &locked.scheduler)
5933 : {
5934 0 : Some(node_id) => node_id,
5935 : None => {
5936 0 : continue;
5937 : }
5938 : }
5939 : };
5940 :
5941 0 : match self.secondary_lag(&dest_node_id, tid).await {
5942 0 : Ok(Some(lag)) if lag <= max_secondary_lag_bytes => {
5943 0 : // The secondary is reasonably up to date.
5944 0 : // Migrate to it
5945 0 : }
5946 0 : Ok(Some(lag)) => {
5947 0 : tracing::info!(
5948 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
5949 0 : "Secondary on node {dest_node_id} is lagging by {lag}. Skipping reconcile."
5950 : );
5951 0 : continue;
5952 : }
5953 : Ok(None) => {
5954 0 : tracing::info!(
5955 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
5956 0 : "Could not determine lag for secondary on node {dest_node_id}. Skipping reconcile."
5957 : );
5958 0 : continue;
5959 : }
5960 0 : Err(err) => {
5961 0 : tracing::warn!(
5962 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
5963 0 : "Failed to get secondary lag from node {dest_node_id}. Skipping reconcile: {err}"
5964 : );
5965 0 : continue;
5966 : }
5967 : }
5968 :
5969 : {
5970 0 : let mut locked = self.inner.write().unwrap();
5971 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5972 0 : let rescheduled = tid_drain.reschedule_to_secondary(
5973 0 : dest_node_id,
5974 0 : tenants,
5975 0 : scheduler,
5976 0 : nodes,
5977 0 : )?;
5978 :
5979 0 : if let Some(tenant_shard) = rescheduled {
5980 0 : let waiter = self.maybe_configured_reconcile_shard(
5981 0 : tenant_shard,
5982 0 : nodes,
5983 0 : reconciler_config,
5984 0 : );
5985 0 : if let Some(waiter) = waiter {
5986 0 : waiters.push(waiter);
5987 0 : }
5988 0 : }
5989 : }
5990 : }
5991 :
5992 0 : waiters = self
5993 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
5994 0 : .await;
5995 :
5996 0 : failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel);
5997 : }
5998 :
5999 0 : while !waiters.is_empty() {
6000 0 : if cancel.is_cancelled() {
6001 0 : match self
6002 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6003 0 : .await
6004 : {
6005 0 : Ok(()) => return Err(OperationError::Cancelled),
6006 0 : Err(err) => {
6007 0 : return Err(OperationError::FinalizeError(
6008 0 : format!(
6009 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
6010 0 : node_id, err
6011 0 : )
6012 0 : .into(),
6013 0 : ));
6014 : }
6015 : }
6016 0 : }
6017 0 :
6018 0 : tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
6019 :
6020 0 : waiters = self
6021 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
6022 0 : .await;
6023 : }
6024 :
6025 : // At this point we have done the best we could to drain shards from this node.
6026 : // Set the node scheduling policy to `NodeSchedulingPolicy::PauseForRestart`
6027 : // to complete the drain.
6028 0 : if let Err(err) = self
6029 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
6030 0 : .await
6031 : {
6032 : // This is not fatal. Anything that is polling the node scheduling policy to detect
6033 : // the end of the drain operation will hang, but all such places should enforce an
6034 : // overall timeout. The scheduling policy will be updated upon node re-attach and/or
6035 : // by the counterpart fill operation.
6036 0 : return Err(OperationError::FinalizeError(
6037 0 : format!(
6038 0 : "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
6039 0 : )
6040 0 : .into(),
6041 0 : ));
6042 0 : }
6043 0 :
6044 0 : Ok(())
6045 0 : }
6046 :
6047 : /// Create a node fill plan (pick secondaries to promote) that meets the following requirements:
6048 : /// 1. The node should be filled until it reaches the expected cluster average of
6049 : /// attached shards. If there are not enough secondaries on the node, the plan stops early.
6050 : /// 2. Select tenant shards to promote such that the number of attached shards is balanced
6051 : /// throughout the cluster. We achieve this by picking tenant shards from each node,
6052 : /// starting from the ones with the largest number of attached shards, until the node
6053 : /// reaches the expected cluster average.
6054 : /// 3. Avoid promoting more shards of the same tenant than required. The upper bound
6055 : /// for the number of shards from the same tenant promoted to the node being filled is:
6056 : /// the tenant's shard count divided by the number of nodes in the cluster.
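///
/// Worked example for (3), matching `max(shard_count / node_count, 1)` in the
/// body below (numbers are illustrative): a tenant with 8 shards on a 4-node
/// cluster contributes at most 8 / 4 = 2 promotions to the node being filled,
/// while a 1-shard tenant still contributes up to max(1 / 4, 1) = 1.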
6057 0 : fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
6058 0 : let mut locked = self.inner.write().unwrap();
6059 0 : let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
6060 0 :
6061 0 : let mut tids_by_node = locked
6062 0 : .tenants
6063 0 : .iter_mut()
6064 0 : .filter_map(|(tid, tenant_shard)| {
6065 0 : if tenant_shard.intent.get_secondary().contains(&node_id) {
6066 0 : if let Some(primary) = tenant_shard.intent.get_attached() {
6067 0 : return Some((*primary, *tid));
6068 0 : }
6069 0 : }
6070 :
6071 0 : None
6072 0 : })
6073 0 : .into_group_map();
6074 0 :
6075 0 : let expected_attached = locked.scheduler.expected_attached_shard_count();
6076 0 : let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
6077 0 :
6078 0 : let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
6079 0 : let mut plan = Vec::new();
6080 :
6081 0 : for (node_id, attached) in nodes_by_load {
6082 0 : let available = locked
6083 0 : .nodes
6084 0 : .get(&node_id)
6085 0 : .map_or(false, |n| n.is_available());
6086 0 : if !available {
6087 0 : continue;
6088 0 : }
6089 0 :
6090 0 : if plan.len() >= fill_requirement
6091 0 : || tids_by_node.is_empty()
6092 0 : || attached <= expected_attached
6093 : {
6094 0 : break;
6095 0 : }
6096 0 :
6097 0 : let can_take = attached - expected_attached;
6098 0 : let needed = fill_requirement - plan.len();
6099 0 : let mut take = std::cmp::min(can_take, needed);
6100 0 :
6101 0 : let mut remove_node = false;
6102 0 : while take > 0 {
6103 0 : match tids_by_node.get_mut(&node_id) {
6104 0 : Some(tids) => match tids.pop() {
6105 0 : Some(tid) => {
6106 0 : let max_promote_for_tenant = std::cmp::max(
6107 0 : tid.shard_count.count() as usize / locked.nodes.len(),
6108 0 : 1,
6109 0 : );
6110 0 : let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
6111 0 : if *promoted < max_promote_for_tenant {
6112 0 : plan.push(tid);
6113 0 : *promoted += 1;
6114 0 : take -= 1;
6115 0 : }
6116 : }
6117 : None => {
6118 0 : remove_node = true;
6119 0 : break;
6120 : }
6121 : },
6122 : None => {
6123 0 : break;
6124 : }
6125 : }
6126 : }
6127 :
6128 0 : if remove_node {
6129 0 : tids_by_node.remove(&node_id);
6130 0 : }
6131 : }
6132 :
6133 0 : plan
6134 0 : }
6135 :
6136 : /// Fill a node by promoting its secondaries until the cluster is balanced
6137 : /// with regard to attached shard counts. Note that this operation only
6138 : /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
6139 : /// This is a long-running operation and it should run as a separate Tokio task.
6140 0 : pub(crate) async fn fill_node(
6141 0 : &self,
6142 0 : node_id: NodeId,
6143 0 : cancel: CancellationToken,
6144 0 : ) -> Result<(), OperationError> {
6145 0 : // TODO(vlad): Currently this operates on the assumption that all
6146 0 : // secondaries are warm. This is not always true (e.g. we just migrated the
6147 0 : // tenant). Take that into consideration by checking the secondary status.
6148 0 : let mut tids_to_promote = self.fill_node_plan(node_id);
6149 0 : let mut waiters = Vec::new();
6150 :
6151 : // Execute the plan we've composed above. Before applying each move from the plan,
6152 : // we validate to ensure that it has not gone stale in the meantime.
6153 0 : while !tids_to_promote.is_empty() {
6154 0 : if cancel.is_cancelled() {
6155 0 : match self
6156 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6157 0 : .await
6158 : {
6159 0 : Ok(()) => return Err(OperationError::Cancelled),
6160 0 : Err(err) => {
6161 0 : return Err(OperationError::FinalizeError(
6162 0 : format!(
6163 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
6164 0 : node_id, err
6165 0 : )
6166 0 : .into(),
6167 0 : ));
6168 : }
6169 : }
6170 0 : }
6171 0 :
6172 0 : {
6173 0 : let mut locked = self.inner.write().unwrap();
6174 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
6175 :
6176 0 : let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
6177 0 : format!("node {node_id} was removed").into(),
6178 0 : ))?;
6179 :
6180 0 : let current_policy = node.get_scheduling();
6181 0 : if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
6182 : // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
6183 : // about it
6184 0 : return Err(OperationError::NodeStateChanged(
6185 0 : format!("node {node_id} changed state to {current_policy:?}").into(),
6186 0 : ));
6187 0 : }
6188 :
6189 0 : while waiters.len() < MAX_RECONCILES_PER_OPERATION {
6190 0 : if let Some(tid) = tids_to_promote.pop() {
6191 0 : if let Some(tenant_shard) = tenants.get_mut(&tid) {
6192 : // If the node being filled is not a secondary anymore,
6193 : // skip the promotion.
6194 0 : if !tenant_shard.intent.get_secondary().contains(&node_id) {
6195 0 : continue;
6196 0 : }
6197 0 :
6198 0 : let previously_attached_to = *tenant_shard.intent.get_attached();
6199 0 : match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
6200 0 : Err(e) => {
6201 0 : tracing::warn!(
6202 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
6203 0 : "Scheduling error when filling pageserver {} : {e}", node_id
6204 : );
6205 : }
6206 : Ok(()) => {
6207 0 : tracing::info!(
6208 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
6209 0 : "Rescheduled shard while filling node {}: {:?} -> {}",
6210 : node_id,
6211 : previously_attached_to,
6212 : node_id
6213 : );
6214 :
6215 0 : if let Some(waiter) =
6216 0 : self.maybe_reconcile_shard(tenant_shard, nodes)
6217 0 : {
6218 0 : waiters.push(waiter);
6219 0 : }
6220 : }
6221 : }
6222 0 : }
6223 : } else {
6224 0 : break;
6225 : }
6226 : }
6227 : }
6228 :
6229 0 : waiters = self
6230 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
6231 0 : .await;
6232 : }
6233 :
6234 0 : while !waiters.is_empty() {
6235 0 : if cancel.is_cancelled() {
6236 0 : match self
6237 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6238 0 : .await
6239 : {
6240 0 : Ok(()) => return Err(OperationError::Cancelled),
6241 0 : Err(err) => {
6242 0 : return Err(OperationError::FinalizeError(
6243 0 : format!(
6244 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
6245 0 : node_id, err
6246 0 : )
6247 0 : .into(),
6248 0 : ));
6249 : }
6250 : }
6251 0 : }
6252 0 :
6253 0 : tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
6254 :
6255 0 : waiters = self
6256 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
6257 0 : .await;
6258 : }
6259 :
6260 0 : if let Err(err) = self
6261 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6262 0 : .await
6263 : {
6264 : // This isn't a huge issue since the filling process starts upon request. However, it
6265 : // will prevent the next drain from starting. The only case in which this can fail
6266 : // is database unavailability. Such a case will require manual intervention.
6267 0 : return Err(OperationError::FinalizeError(
6268 0 : format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
6269 0 : .into(),
6270 0 : ));
6271 0 : }
6272 0 :
6273 0 : Ok(())
6274 0 : }
6275 :
6276 : /// Updates scrubber metadata health check results.
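///
/// A sketch of the call, assuming the request carries just the two shard-id
/// sets referenced in the body below (anything not managed by this controller
/// is filtered out before persisting):
///
/// ```ignore
/// service.metadata_health_update(MetadataHealthUpdateRequest {
///     healthy_tenant_shards,
///     unhealthy_tenant_shards,
/// }).await?;
/// ```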
6277 0 : pub(crate) async fn metadata_health_update(
6278 0 : &self,
6279 0 : update_req: MetadataHealthUpdateRequest,
6280 0 : ) -> Result<(), ApiError> {
6281 0 : let now = chrono::offset::Utc::now();
6282 0 : let (healthy_records, unhealthy_records) = {
6283 0 : let locked = self.inner.read().unwrap();
6284 0 : let healthy_records = update_req
6285 0 : .healthy_tenant_shards
6286 0 : .into_iter()
6287 0 : // Retain only health records associated with tenant shards managed by storage controller.
6288 0 : .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
6289 0 : .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
6290 0 : .collect();
6291 0 : let unhealthy_records = update_req
6292 0 : .unhealthy_tenant_shards
6293 0 : .into_iter()
6294 0 : .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
6295 0 : .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
6296 0 : .collect();
6297 0 :
6298 0 : (healthy_records, unhealthy_records)
6299 0 : };
6300 0 :
6301 0 : self.persistence
6302 0 : .update_metadata_health_records(healthy_records, unhealthy_records, now)
6303 0 : .await?;
6304 0 : Ok(())
6305 0 : }
6306 :
6307 : /// Lists the tenant shards that have unhealthy metadata status.
6308 0 : pub(crate) async fn metadata_health_list_unhealthy(
6309 0 : &self,
6310 0 : ) -> Result<Vec<TenantShardId>, ApiError> {
6311 0 : let result = self
6312 0 : .persistence
6313 0 : .list_unhealthy_metadata_health_records()
6314 0 : .await?
6315 0 : .iter()
6316 0 : .map(|p| p.get_tenant_shard_id().unwrap())
6317 0 : .collect();
6318 0 :
6319 0 : Ok(result)
6320 0 : }
6321 :
6322 : /// Lists the tenant shards that have not been scrubbed for some duration.
6323 0 : pub(crate) async fn metadata_health_list_outdated(
6324 0 : &self,
6325 0 : not_scrubbed_for: Duration,
6326 0 : ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
6327 0 : let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
6328 0 : let result = self
6329 0 : .persistence
6330 0 : .list_outdated_metadata_health_records(earlier)
6331 0 : .await?
6332 0 : .into_iter()
6333 0 : .map(|record| record.into())
6334 0 : .collect();
6335 0 : Ok(result)
6336 0 : }
6337 :
6338 0 : pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
6339 0 : self.inner.read().unwrap().get_leadership_status()
6340 0 : }
6341 :
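/// Step down as leader at a peer's request: stop reconciliations, then return
/// a [`GlobalObservedState`] snapshot mapping each tenant shard id to its
/// observed state, so the new leader can adopt it.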
6342 0 : pub(crate) async fn step_down(&self) -> GlobalObservedState {
6343 0 : tracing::info!("Received step down request from peer");
6344 0 : failpoint_support::sleep_millis_async!("sleep-on-step-down-handling");
6345 :
6346 0 : self.inner.write().unwrap().step_down();
6347 0 : // TODO: would it make sense to have a time-out for this?
6348 0 : self.stop_reconciliations(StopReconciliationsReason::SteppingDown)
6349 0 : .await;
6350 :
6351 0 : let mut global_observed = GlobalObservedState::default();
6352 0 : let locked = self.inner.read().unwrap();
6353 0 : for (tid, tenant_shard) in locked.tenants.iter() {
6354 0 : global_observed
6355 0 : .0
6356 0 : .insert(*tid, tenant_shard.observed.clone());
6357 0 : }
6358 :
6359 0 : global_observed
6360 0 : }
6361 : }