Line data Source code
1 : use hyper::Uri;
2 : use std::{
3 : borrow::Cow,
4 : cmp::Ordering,
5 : collections::{BTreeMap, HashMap, HashSet},
6 : error::Error,
7 : ops::Deref,
8 : path::PathBuf,
9 : str::FromStr,
10 : sync::Arc,
11 : time::{Duration, Instant},
12 : };
13 :
14 : use crate::{
15 : background_node_operations::{
16 : Drain, Fill, Operation, OperationError, OperationHandler, MAX_RECONCILES_PER_OPERATION,
17 : },
18 : compute_hook::NotifyError,
19 : drain_utils::{self, TenantShardDrain, TenantShardIterator},
20 : id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, TracingExclusiveGuard},
21 : leadership::Leadership,
22 : metrics,
23 : peer_client::GlobalObservedState,
24 : persistence::{
25 : AbortShardSplitStatus, ControllerPersistence, DatabaseResult, MetadataHealthPersistence,
26 : ShardGenerationState, TenantFilter,
27 : },
28 : reconciler::{ReconcileError, ReconcileUnits, ReconcilerConfig, ReconcilerConfigBuilder},
29 : scheduler::{AttachedShardTag, MaySchedule, ScheduleContext, ScheduleError, ScheduleMode},
30 : tenant_shard::{
31 : MigrateAttachment, ReconcileNeeded, ReconcilerStatus, ScheduleOptimization,
32 : ScheduleOptimizationAction,
33 : },
34 : };
35 : use anyhow::Context;
36 : use control_plane::storage_controller::{
37 : AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
38 : };
39 : use diesel::result::DatabaseErrorKind;
40 : use futures::{stream::FuturesUnordered, StreamExt};
41 : use itertools::Itertools;
42 : use pageserver_api::{
43 : controller_api::{
44 : MetadataHealthRecord, MetadataHealthUpdateRequest, NodeAvailability, NodeRegisterRequest,
45 : NodeSchedulingPolicy, NodeShard, NodeShardResponse, PlacementPolicy, ShardSchedulingPolicy,
46 : ShardsPreferredAzsRequest, ShardsPreferredAzsResponse, TenantCreateRequest,
47 : TenantCreateResponse, TenantCreateResponseShard, TenantDescribeResponse,
48 : TenantDescribeResponseShard, TenantLocateResponse, TenantPolicyRequest,
49 : TenantShardMigrateRequest, TenantShardMigrateResponse,
50 : },
51 : models::{
52 : SecondaryProgress, TenantConfigRequest, TimelineArchivalConfigRequest,
53 : TopTenantShardsRequest,
54 : },
55 : };
56 : use reqwest::StatusCode;
57 : use tracing::{instrument, Instrument};
58 :
59 : use crate::pageserver_client::PageserverClient;
60 : use pageserver_api::{
61 : models::{
62 : self, LocationConfig, LocationConfigListResponse, LocationConfigMode,
63 : PageserverUtilization, ShardParameters, TenantConfig, TenantLocationConfigRequest,
64 : TenantLocationConfigResponse, TenantShardLocation, TenantShardSplitRequest,
65 : TenantShardSplitResponse, TenantTimeTravelRequest, TimelineCreateRequest, TimelineInfo,
66 : },
67 : shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId},
68 : upcall_api::{
69 : ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest,
70 : ValidateResponse, ValidateResponseTenant,
71 : },
72 : };
73 : use pageserver_client::{mgmt_api, BlockUnblock};
74 : use tokio::sync::mpsc::error::TrySendError;
75 : use tokio_util::sync::CancellationToken;
76 : use utils::{
77 : completion::Barrier,
78 : failpoint_support,
79 : generation::Generation,
80 : http::error::ApiError,
81 : id::{NodeId, TenantId, TimelineId},
82 : sync::gate::Gate,
83 : };
84 :
85 : use crate::{
86 : compute_hook::ComputeHook,
87 : heartbeater::{Heartbeater, PageserverState},
88 : node::{AvailabilityTransition, Node},
89 : persistence::{split_state::SplitState, DatabaseError, Persistence, TenantShardPersistence},
90 : reconciler::attached_location_conf,
91 : scheduler::Scheduler,
92 : tenant_shard::{
93 : IntentState, ObservedState, ObservedStateLocation, ReconcileResult, ReconcileWaitError,
94 : ReconcilerWaiter, TenantShard,
95 : },
96 : };
97 :
98 : pub mod chaos_injector;
99 :
100 : // For operations that should be quick, like attaching a new tenant
101 : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
102 :
103 : // For operations that might be slow, like migrating a tenant with
104 : // some data in it.
105 : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
106 :
107 : // If we receive a call using Secondary mode initially, it will omit the generation. We will initialize
108 : // tenant shards into this generation, and as long as they remain in this generation, we will accept
109 : // the generation in future requests as authoritative.
110 : const INITIAL_GENERATION: Generation = Generation::new(0);
111 :
112 : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
113 : /// up on unresponsive pageservers and proceed.
114 : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
115 :
116 : /// How long a node may be unresponsive to heartbeats before we declare it offline.
117 : /// This must be long enough to cover node restarts as well as normal operations.
118 : pub const MAX_OFFLINE_INTERVAL_DEFAULT: Duration = Duration::from_secs(30);
119 :
120 : /// How long a node may be unresponsive to heartbeats during start up before we declare it
121 : /// offline.
122 : ///
123 : /// This is much more lenient than [`MAX_OFFLINE_INTERVAL_DEFAULT`] since the pageserver's
124 : /// handling of the re-attach response may take a long time and blocks heartbeats from
125 : /// being handled on the pageserver side.
126 : pub const MAX_WARMING_UP_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
127 :
128 : /// How often to send heartbeats to registered nodes?
129 : pub const HEARTBEAT_INTERVAL_DEFAULT: Duration = Duration::from_secs(5);
130 :
131 0 : #[derive(Clone, strum_macros::Display)]
132 : enum TenantOperations {
133 : Create,
134 : LocationConfig,
135 : ConfigSet,
136 : TimeTravelRemoteStorage,
137 : Delete,
138 : UpdatePolicy,
139 : ShardSplit,
140 : SecondaryDownload,
141 : TimelineCreate,
142 : TimelineDelete,
143 : AttachHook,
144 : TimelineArchivalConfig,
145 : TimelineDetachAncestor,
146 : TimelineGcBlockUnblock,
147 : }
148 :
149 0 : #[derive(Clone, strum_macros::Display)]
150 : enum NodeOperations {
151 : Register,
152 : Configure,
153 : Delete,
154 : }
155 :
156 : /// The leadership status for the storage controller process.
157 : /// Allowed transitions are:
158 : /// 1. Leader -> SteppedDown
159 : /// 2. Candidate -> Leader
160 : #[derive(
161 : Eq,
162 : PartialEq,
163 : Copy,
164 : Clone,
165 0 : strum_macros::Display,
166 0 : strum_macros::EnumIter,
167 : measured::FixedCardinalityLabel,
168 : )]
169 : #[strum(serialize_all = "snake_case")]
170 : pub(crate) enum LeadershipStatus {
171 : /// This is the steady state where the storage controller can produce
172 : /// side effects in the cluster.
173 : Leader,
174 : /// We've been notified to step down by another candidate. No reconciliations
175 : /// take place in this state.
176 : SteppedDown,
177 : /// Initial state for a new storage controller instance. Will attempt to assume leadership.
178 : #[allow(unused)]
179 : Candidate,
180 : }
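
// Illustrative only: a minimal sketch (not part of the service) of how the
// allowed transitions documented above could be checked. The helper name
// `is_valid_leadership_transition` is hypothetical.
#[allow(dead_code)]
fn is_valid_leadership_transition(from: LeadershipStatus, to: LeadershipStatus) -> bool {
    use LeadershipStatus::*;
    // Only Leader -> SteppedDown and Candidate -> Leader are permitted.
    matches!((from, to), (Leader, SteppedDown) | (Candidate, Leader))
}
// Note: with `serialize_all = "snake_case"`, the derived Display renders
// LeadershipStatus::SteppedDown as "stepped_down".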
181 :
182 : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
183 :
184 : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
185 : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
186 : // than they're being pushed onto the queue.
187 : const MAX_DELAYED_RECONCILES: usize = 10000;
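
// Illustrative only: a minimal sketch of the backpressure behaviour this bounded
// channel enables. The helper name is hypothetical; the real enqueue sites live
// in the reconciler scheduling path.
#[allow(dead_code)]
fn example_enqueue_delayed_reconcile(
    tx: &tokio::sync::mpsc::Sender<TenantShardId>,
    tenant_shard_id: TenantShardId,
) {
    match tx.try_send(tenant_shard_id) {
        Ok(()) => {}
        // Queue is full: drop the request rather than buffering without bound;
        // the shard will be picked up by a later background reconcile pass.
        Err(TrySendError::Full(_)) => {}
        // Receiver is gone: the service is shutting down.
        Err(TrySendError::Closed(_)) => {}
    }
}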
188 :
189 : // Top level state available to all HTTP handlers
190 : struct ServiceState {
191 : leadership_status: LeadershipStatus,
192 :
193 : tenants: BTreeMap<TenantShardId, TenantShard>,
194 :
195 : nodes: Arc<HashMap<NodeId, Node>>,
196 :
197 : scheduler: Scheduler,
198 :
199 : /// Ongoing background operation on the cluster if any is running.
200 : /// Note that only one such operation may run at any given time,
201 : /// hence the type choice.
202 : ongoing_operation: Option<OperationHandler>,
203 :
204 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
205 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
206 : }
207 :
208 : /// Transform an error from a pageserver into an error to return to callers of a storage
209 : /// controller API.
210 0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
211 0 : match e {
212 0 : mgmt_api::Error::SendRequest(e) => {
213 0 : // Presume errors sending requests are connectivity/availability issues
214 0 : ApiError::ResourceUnavailable(format!("{node} error sending request: {e}").into())
215 : }
216 0 : mgmt_api::Error::ReceiveErrorBody(str) => {
217 0 : // Presume errors receiving body are connectivity/availability issues
218 0 : ApiError::ResourceUnavailable(
219 0 : format!("{node} error receiving error body: {str}").into(),
220 0 : )
221 : }
222 0 : mgmt_api::Error::ReceiveBody(err) if err.is_decode() => {
223 0 : // Return 500 for decoding errors.
224 0 : ApiError::InternalServerError(anyhow::Error::from(err).context("error decoding body"))
225 : }
226 0 : mgmt_api::Error::ReceiveBody(err) => {
227 0 : // Presume errors receiving body are connectivity/availability issues except for decoding errors
228 0 : let src_str = err.source().map(|e| e.to_string()).unwrap_or_default();
229 0 : ApiError::ResourceUnavailable(
230 0 : format!("{node} error receiving error body: {err} {}", src_str).into(),
231 0 : )
232 : }
233 0 : mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
234 0 : ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
235 : }
236 0 : mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
237 0 : ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
238 : }
239 0 : mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
240 0 : | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
241 : // Auth errors talking to a pageserver are not auth errors for the caller: they are
242 : // internal server errors, showing that something is wrong with the pageserver or
243 : // storage controller's auth configuration.
244 0 : ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
245 : }
246 0 : mgmt_api::Error::ApiError(status, msg) => {
247 0 : // Presume that the general case of pageserver API errors is that we tried to do something
248 0 : // that can't be done right now.
249 0 : ApiError::Conflict(format!("{node} {status}: {msg}"))
250 : }
251 0 : mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
252 : }
253 0 : }
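
// Illustrative only: a hedged usage sketch for `passthrough_api_error`. The
// enclosing function is hypothetical, and it assumes a `PageserverClient` with
// the same `list_location_config` method used in the retry closures below.
#[allow(dead_code)]
async fn example_passthrough(
    node: &Node,
    client: &PageserverClient,
) -> Result<LocationConfigListResponse, ApiError> {
    client
        .list_location_config()
        .await
        .map_err(|e| passthrough_api_error(node, e))
}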
254 :
255 : impl ServiceState {
256 0 : fn new(
257 0 : nodes: HashMap<NodeId, Node>,
258 0 : tenants: BTreeMap<TenantShardId, TenantShard>,
259 0 : scheduler: Scheduler,
260 0 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
261 0 : initial_leadership_status: LeadershipStatus,
262 0 : ) -> Self {
263 0 : metrics::update_leadership_status(initial_leadership_status);
264 0 :
265 0 : Self {
266 0 : leadership_status: initial_leadership_status,
267 0 : tenants,
268 0 : nodes: Arc::new(nodes),
269 0 : scheduler,
270 0 : ongoing_operation: None,
271 0 : delayed_reconcile_rx,
272 0 : }
273 0 : }
274 :
275 0 : fn parts_mut(
276 0 : &mut self,
277 0 : ) -> (
278 0 : &mut Arc<HashMap<NodeId, Node>>,
279 0 : &mut BTreeMap<TenantShardId, TenantShard>,
280 0 : &mut Scheduler,
281 0 : ) {
282 0 : (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
283 0 : }
284 :
285 0 : fn get_leadership_status(&self) -> LeadershipStatus {
286 0 : self.leadership_status
287 0 : }
288 :
289 0 : fn step_down(&mut self) {
290 0 : self.leadership_status = LeadershipStatus::SteppedDown;
291 0 : metrics::update_leadership_status(self.leadership_status);
292 0 : }
293 :
294 0 : fn become_leader(&mut self) {
295 0 : self.leadership_status = LeadershipStatus::Leader;
296 0 : metrics::update_leadership_status(self.leadership_status);
297 0 : }
298 : }
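
// Illustrative only: `parts_mut` exists to split one `&mut ServiceState` into
// three disjoint mutable borrows, so callers can update nodes, tenants and the
// scheduler in the same scope without borrow-checker conflicts. The function
// below is a hypothetical demonstration.
#[allow(dead_code)]
fn example_parts_mut(state: &mut ServiceState) {
    let (nodes, tenants, scheduler) = state.parts_mut();
    // All three mutable borrows are live at the same time here.
    let _ = (nodes.len(), tenants.len());
    let _ = scheduler;
}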
299 :
300 : #[derive(Clone)]
301 : pub struct Config {
302 : // All pageservers managed by one instance of this service must have
303 : // the same public key. This JWT token will be used to authenticate
304 : // this service to the pageservers it manages.
305 : pub jwt_token: Option<String>,
306 :
307 : // This JWT token will be used to authenticate this service to the control plane.
308 : pub control_plane_jwt_token: Option<String>,
309 :
310 : // This JWT token will be used to authenticate with other storage controller instances
311 : pub peer_jwt_token: Option<String>,
312 :
313 : /// Where the compute hook should send notifications of pageserver attachment locations
314 : /// (this URL points to the control plane in prod). If this is None, the compute hook will
315 : /// assume it is running in a test environment and try to update neon_local.
316 : pub compute_hook_url: Option<String>,
317 :
318 : /// Grace period within which a pageserver does not respond to heartbeats, but is still
319 : /// considered active. Once the grace period elapses, the next heartbeat failure will
320 : /// mark the pageserver offline.
321 : pub max_offline_interval: Duration,
322 :
323 : /// Extended grace period within which pageserver may not respond to heartbeats.
324 : /// This extended grace period kicks in after the node has been drained for restart
325 : /// and/or upon handling the re-attach request from a node.
326 : pub max_warming_up_interval: Duration,
327 :
328 : /// How many Reconcilers may be spawned concurrently
329 : pub reconciler_concurrency: usize,
330 :
331 : /// How large must a shard grow in bytes before we split it?
332 : /// None disables auto-splitting.
333 : pub split_threshold: Option<u64>,
334 :
335 : // TODO: make this cfg(feature = "testing")
336 : pub neon_local_repo_dir: Option<PathBuf>,
337 :
338 : // Maximum acceptable download lag for the secondary location
339 : // while draining a node. If the secondary location is lagging
340 : // by more than the configured amount, then the secondary is not
341 : // upgraded to primary.
342 : pub max_secondary_lag_bytes: Option<u64>,
343 :
344 : pub heartbeat_interval: Duration,
345 :
346 : pub address_for_peers: Option<Uri>,
347 :
348 : pub start_as_candidate: bool,
349 :
350 : pub http_service_port: i32,
351 : }
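
// Illustrative only: a sketch of a test-oriented `Config`, wiring the default
// constants defined above into the corresponding fields. The values are
// examples, not recommendations.
#[allow(dead_code)]
fn example_test_config() -> Config {
    Config {
        jwt_token: None,
        control_plane_jwt_token: None,
        peer_jwt_token: None,
        compute_hook_url: None, // None => assume a neon_local test environment
        max_offline_interval: MAX_OFFLINE_INTERVAL_DEFAULT,
        max_warming_up_interval: MAX_WARMING_UP_INTERVAL_DEFAULT,
        reconciler_concurrency: RECONCILER_CONCURRENCY_DEFAULT,
        split_threshold: None, // disable auto-splitting
        neon_local_repo_dir: None,
        max_secondary_lag_bytes: None,
        heartbeat_interval: HEARTBEAT_INTERVAL_DEFAULT,
        address_for_peers: None,
        start_as_candidate: false,
        http_service_port: 1234,
    }
}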
352 :
353 : impl From<DatabaseError> for ApiError {
354 0 : fn from(err: DatabaseError) -> ApiError {
355 0 : match err {
356 0 : DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
357 : // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
358 : DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
359 0 : ApiError::ShuttingDown
360 : }
361 0 : DatabaseError::Logical(reason) | DatabaseError::Migration(reason) => {
362 0 : ApiError::InternalServerError(anyhow::anyhow!(reason))
363 : }
364 : }
365 0 : }
366 : }
367 :
368 : enum InitialShardScheduleOutcome {
369 : Scheduled(TenantCreateResponseShard),
370 : NotScheduled,
371 : ShardScheduleError(ScheduleError),
372 : }
373 :
374 : pub struct Service {
375 : inner: Arc<std::sync::RwLock<ServiceState>>,
376 : config: Config,
377 : persistence: Arc<Persistence>,
378 : compute_hook: Arc<ComputeHook>,
379 : result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResultRequest>,
380 :
381 : heartbeater: Heartbeater,
382 :
383 : // Channel for background cleanup from failed operations that require cleanup, such as shard split
384 : abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
385 :
386 : // Locking on a tenant granularity (covers all shards in the tenant):
387 : // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
388 : // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD); a usage sketch follows this struct.
389 : tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
390 :
391 : // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
392 : // that transition it to/from Active.
393 : node_op_locks: IdLockMap<NodeId, NodeOperations>,
394 :
395 : // Limit how many Reconcilers we will spawn concurrently
396 : reconciler_concurrency: Arc<tokio::sync::Semaphore>,
397 :
398 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
399 : /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
400 : ///
401 : /// Note that this state logically lives inside ServiceState, but carrying the Sender here makes the code simpler
402 : /// by avoiding the need for a &mut ref to something inside ServiceState. This could be optimized to
403 : /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
404 : delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
405 :
406 : // Process shutdown will fire this token
407 : cancel: CancellationToken,
408 :
409 : // Child token of [`Service::cancel`] used by reconcilers
410 : reconcilers_cancel: CancellationToken,
411 :
412 : // Background tasks will hold this gate
413 : gate: Gate,
414 :
415 : // Reconcilers background tasks will hold this gate
416 : reconcilers_gate: Gate,
417 :
418 : /// This waits for initial reconciliation with pageservers to complete. Until this barrier
419 : /// passes, it isn't safe to do any actions that mutate tenants.
420 : pub(crate) startup_complete: Barrier,
421 : }
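
// Illustrative only: a hedged sketch of the tenant-level locking convention
// described on `tenant_op_locks` above. The function is hypothetical; the
// exclusive pattern mirrors real call sites such as `attach_hook`, and the
// shared variant is assumed to take the same arguments.
#[allow(dead_code)]
async fn example_tenant_locking(service: &Service, tenant_id: TenantId) {
    // Exclusive: rare operations that mutate the tenant's persistent state.
    let exclusive = trace_exclusive_lock(
        &service.tenant_op_locks,
        tenant_id,
        TenantOperations::Delete,
    )
    .await;
    drop(exclusive);

    // Shared: operations that need the shard set to stay stable, e.g. timeline CRUD.
    let _shared = trace_shared_lock(
        &service.tenant_op_locks,
        tenant_id,
        TenantOperations::TimelineCreate,
    )
    .await;
}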
422 :
423 : impl From<ReconcileWaitError> for ApiError {
424 0 : fn from(value: ReconcileWaitError) -> Self {
425 0 : match value {
426 0 : ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
427 0 : e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
428 0 : e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
429 : }
430 0 : }
431 : }
432 :
433 : impl From<OperationError> for ApiError {
434 0 : fn from(value: OperationError) -> Self {
435 0 : match value {
436 0 : OperationError::NodeStateChanged(err) | OperationError::FinalizeError(err) => {
437 0 : ApiError::InternalServerError(anyhow::anyhow!(err))
438 : }
439 0 : OperationError::Cancelled => ApiError::Conflict("Operation was cancelled".into()),
440 : }
441 0 : }
442 : }
443 :
444 : #[allow(clippy::large_enum_variant)]
445 : enum TenantCreateOrUpdate {
446 : Create(TenantCreateRequest),
447 : Update(Vec<ShardUpdate>),
448 : }
449 :
450 : struct ShardSplitParams {
451 : old_shard_count: ShardCount,
452 : new_shard_count: ShardCount,
453 : new_stripe_size: Option<ShardStripeSize>,
454 : targets: Vec<ShardSplitTarget>,
455 : policy: PlacementPolicy,
456 : config: TenantConfig,
457 : shard_ident: ShardIdentity,
458 : }
459 :
460 : // When preparing for a shard split, we may either choose to proceed with the split,
461 : // or find that the work is already done and return NoOp.
462 : enum ShardSplitAction {
463 : Split(Box<ShardSplitParams>),
464 : NoOp(TenantShardSplitResponse),
465 : }
466 :
467 : // A parent shard which will be split
468 : struct ShardSplitTarget {
469 : parent_id: TenantShardId,
470 : node: Node,
471 : child_ids: Vec<TenantShardId>,
472 : }
473 :
474 : /// When a tenant shard split operation fails, we may not be able to clean up immediately, because nodes
475 : /// might not be available. We therefore use a queue of abort operations processed in the background.
476 : struct TenantShardSplitAbort {
477 : tenant_id: TenantId,
478 : /// The target values from the request that failed
479 : new_shard_count: ShardCount,
480 : new_stripe_size: Option<ShardStripeSize>,
481 : /// Until this abort op is complete, no other operations may be done on the tenant
482 : _tenant_lock: TracingExclusiveGuard<TenantOperations>,
483 : }
484 :
485 0 : #[derive(thiserror::Error, Debug)]
486 : enum TenantShardSplitAbortError {
487 : #[error(transparent)]
488 : Database(#[from] DatabaseError),
489 : #[error(transparent)]
490 : Remote(#[from] mgmt_api::Error),
491 : #[error("Unavailable")]
492 : Unavailable,
493 : }
494 :
495 : struct ShardUpdate {
496 : tenant_shard_id: TenantShardId,
497 : placement_policy: PlacementPolicy,
498 : tenant_config: TenantConfig,
499 :
500 : /// If this is None, generation is not updated.
501 : generation: Option<Generation>,
502 : }
503 :
504 : enum StopReconciliationsReason {
505 : ShuttingDown,
506 : SteppingDown,
507 : }
508 :
509 : impl std::fmt::Display for StopReconciliationsReason {
510 0 : fn fmt(&self, writer: &mut std::fmt::Formatter) -> std::fmt::Result {
511 0 : let s = match self {
512 0 : Self::ShuttingDown => "Shutting down",
513 0 : Self::SteppingDown => "Stepping down",
514 : };
515 0 : write!(writer, "{}", s)
516 0 : }
517 : }
518 :
519 : pub(crate) enum ReconcileResultRequest {
520 : ReconcileResult(ReconcileResult),
521 : Stop,
522 : }
523 :
524 : impl Service {
525 0 : pub fn get_config(&self) -> &Config {
526 0 : &self.config
527 0 : }
528 :
529 : /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
530 : /// view of the world, and determine which pageservers are responsive.
531 0 : #[instrument(skip_all)]
532 : async fn startup_reconcile(
533 : self: &Arc<Service>,
534 : current_leader: Option<ControllerPersistence>,
535 : leader_step_down_state: Option<GlobalObservedState>,
536 : bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
537 : Result<(), (TenantShardId, NotifyError)>,
538 : >,
539 : ) {
540 : // Startup reconciliation does I/O to other services: whether they
541 : // are responsive or not, we should aim to finish within our deadline, because:
542 : // - If we don't, a k8s readiness hook watching /ready will kill us.
543 : // - While we're waiting for startup reconciliation, we are not fully
544 : // available for end user operations like creating/deleting tenants and timelines.
545 : //
546 : // We set multiple deadlines to break up the time available between the phases of work: this is
547 : // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
548 : let start_at = Instant::now();
549 : let node_scan_deadline = start_at
550 : .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
551 : .expect("Reconcile timeout is a modest constant");
552 :
553 : let observed = if let Some(state) = leader_step_down_state {
554 : tracing::info!(
555 : "Using observed state received from leader at {}",
556 : current_leader.as_ref().unwrap().address
557 : );
558 :
559 : state
560 : } else {
561 : self.build_global_observed_state(node_scan_deadline).await
562 : };
563 :
564 : // Accumulate a list of any tenant locations that ought to be detached
565 : let mut cleanup = Vec::new();
566 :
567 : // Send initial heartbeat requests to all nodes loaded from the database
568 : let all_nodes = {
569 : let locked = self.inner.read().unwrap();
570 : locked.nodes.clone()
571 : };
572 : let mut nodes_online = self.initial_heartbeat_round(all_nodes.keys()).await;
573 :
574 : // List of tenants for which we will attempt to notify compute of their location at startup
575 : let mut compute_notifications = Vec::new();
576 :
577 : // Populate intent and observed states for all tenants, based on reported state on pageservers
578 : tracing::info!("Populating tenant shards' states from initial pageserver scan...");
579 : let shard_count = {
580 : let mut locked = self.inner.write().unwrap();
581 : let (nodes, tenants, scheduler) = locked.parts_mut();
582 :
583 : // Mark nodes online if they responded to us: nodes are offline by default after a restart.
584 : let mut new_nodes = (**nodes).clone();
585 : for (node_id, node) in new_nodes.iter_mut() {
586 : if let Some(utilization) = nodes_online.remove(node_id) {
587 : node.set_availability(NodeAvailability::Active(utilization));
588 : scheduler.node_upsert(node);
589 : }
590 : }
591 : *nodes = Arc::new(new_nodes);
592 :
593 : for (tenant_shard_id, observed_state) in observed.0 {
594 : let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
595 : for node_id in observed_state.locations.keys() {
596 : cleanup.push((tenant_shard_id, *node_id));
597 : }
598 :
599 : continue;
600 : };
601 :
602 : tenant_shard.observed = observed_state;
603 : }
604 :
605 : // Populate each tenant's intent state
606 : let mut schedule_context = ScheduleContext::default();
607 : for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
608 : if tenant_shard_id.shard_number == ShardNumber(0) {
609 : // Reset scheduling context each time we advance to the next Tenant
610 : schedule_context = ScheduleContext::default();
611 : }
612 :
613 : tenant_shard.intent_from_observed(scheduler);
614 : if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
615 : // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
616 : // not enough pageservers are available. The tenant may well still be available
617 : // to clients.
618 : tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
619 : } else {
620 : // If we're both intending and observed to be attached at a particular node, we will
621 : // emit a compute notification for this. In the case where our observed state does not
622 : // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
623 : if let Some(attached_at) = tenant_shard.stably_attached() {
624 : compute_notifications.push((
625 : *tenant_shard_id,
626 : attached_at,
627 : tenant_shard.shard.stripe_size,
628 : ));
629 : }
630 : }
631 : }
632 :
633 : tenants.len()
634 : };
635 :
636 : // Before making any observable changes to the cluster, persist self
637 : // as leader in database and memory.
638 : let leadership = Leadership::new(
639 : self.persistence.clone(),
640 : self.config.clone(),
641 : self.cancel.child_token(),
642 : );
643 :
644 : if let Err(e) = leadership.become_leader(current_leader).await {
645 : tracing::error!("Failed to persist self as leader: {e}. Aborting start-up ...");
646 : std::process::exit(1);
647 : }
648 :
649 : self.inner.write().unwrap().become_leader();
650 :
651 : // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
652 : // generation_pageserver in the database.
653 :
654 : // Emit compute hook notifications for all tenants which are already stably attached. Other tenants
655 : // will emit compute hook notifications when they reconcile.
656 : //
657 : // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later
658 : // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
659 : // calls will be correctly ordered wrt these.
660 : //
661 : // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them
662 : // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore
663 : // unit and start doing I/O.
664 : tracing::info!(
665 : "Sending {} compute notifications",
666 : compute_notifications.len()
667 : );
668 : self.compute_hook.notify_background(
669 : compute_notifications,
670 : bg_compute_notify_result_tx.clone(),
671 : &self.cancel,
672 : );
673 :
674 : // Finally, now that the service is up and running, launch reconcile operations for any tenants
675 : // which require it: under normal circumstances this should only include tenants that were in some
676 : // transient state before we restarted, or any tenants whose compute hooks failed above.
677 : tracing::info!("Checking for shards in need of reconciliation...");
678 : let reconcile_tasks = self.reconcile_all();
679 : // We will not wait for these reconciliation tasks to run here: we're now done with startup and
680 : // normal operations may proceed.
681 :
682 : // Clean up any tenants that were found on pageservers but are not known to us. Do this in the
683 : // background because it does not need to complete in order to proceed with other work.
684 : if !cleanup.is_empty() {
685 : tracing::info!("Cleaning up {} locations in the background", cleanup.len());
686 : tokio::task::spawn({
687 : let cleanup_self = self.clone();
688 0 : async move { cleanup_self.cleanup_locations(cleanup).await }
689 : });
690 : }
691 :
692 : tracing::info!("Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)");
693 : }
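
// Illustrative only: the deadline-splitting arithmetic used above, shown in
// isolation. Half of the startup budget is reserved for the node scan; the
// remainder covers the rest of startup reconciliation.
#[allow(dead_code)]
fn example_startup_deadlines(start_at: Instant) -> (Instant, Instant) {
    let node_scan_deadline = start_at
        .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
        .expect("Reconcile timeout is a modest constant");
    let overall_deadline = start_at
        .checked_add(STARTUP_RECONCILE_TIMEOUT)
        .expect("Reconcile timeout is a modest constant");
    (node_scan_deadline, overall_deadline)
}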
694 :
695 0 : async fn initial_heartbeat_round<'a>(
696 0 : &self,
697 0 : node_ids: impl Iterator<Item = &'a NodeId>,
698 0 : ) -> HashMap<NodeId, PageserverUtilization> {
699 0 : assert!(!self.startup_complete.is_ready());
700 :
701 0 : let all_nodes = {
702 0 : let locked = self.inner.read().unwrap();
703 0 : locked.nodes.clone()
704 0 : };
705 0 :
706 0 : let mut nodes_to_heartbeat = HashMap::new();
707 0 : for node_id in node_ids {
708 0 : match all_nodes.get(node_id) {
709 0 : Some(node) => {
710 0 : nodes_to_heartbeat.insert(*node_id, node.clone());
711 0 : }
712 : None => {
713 0 : tracing::warn!("Node {node_id} was removed during start-up");
714 : }
715 : }
716 : }
717 :
718 0 : tracing::info!("Sending initial heartbeats...");
719 0 : let res = self
720 0 : .heartbeater
721 0 : .heartbeat(Arc::new(nodes_to_heartbeat))
722 0 : .await;
723 :
724 0 : let mut online_nodes = HashMap::new();
725 0 : if let Ok(deltas) = res {
726 0 : for (node_id, status) in deltas.0 {
727 0 : match status {
728 0 : PageserverState::Available { utilization, .. } => {
729 0 : online_nodes.insert(node_id, utilization);
730 0 : }
731 0 : PageserverState::Offline => {}
732 : PageserverState::WarmingUp { .. } => {
733 0 : unreachable!("Nodes are never marked warming-up during startup reconcile")
734 : }
735 : }
736 : }
737 0 : }
738 :
739 0 : online_nodes
740 0 : }
741 :
742 : /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
743 : ///
744 : /// The result includes only nodes which responded within the deadline
745 0 : async fn scan_node_locations(
746 0 : &self,
747 0 : deadline: Instant,
748 0 : ) -> HashMap<NodeId, LocationConfigListResponse> {
749 0 : let nodes = {
750 0 : let locked = self.inner.read().unwrap();
751 0 : locked.nodes.clone()
752 0 : };
753 0 :
754 0 : let mut node_results = HashMap::new();
755 0 :
756 0 : let mut node_list_futs = FuturesUnordered::new();
757 0 :
758 0 : tracing::info!("Scanning shards on {} nodes...", nodes.len());
759 0 : for node in nodes.values() {
760 0 : node_list_futs.push({
761 0 : async move {
762 0 : tracing::info!("Scanning shards on node {node}...");
763 0 : let timeout = Duration::from_secs(1);
764 0 : let response = node
765 0 : .with_client_retries(
766 0 : |client| async move { client.list_location_config().await },
767 0 : &self.config.jwt_token,
768 0 : 1,
769 0 : 5,
770 0 : timeout,
771 0 : &self.cancel,
772 0 : )
773 0 : .await;
774 0 : (node.get_id(), response)
775 0 : }
776 0 : });
777 0 : }
778 :
779 : loop {
780 0 : let (node_id, result) = tokio::select! {
781 0 : next = node_list_futs.next() => {
782 0 : match next {
783 0 : Some(result) => result,
784 : None =>{
785 : // We got results for all our nodes
786 0 : break;
787 : }
788 :
789 : }
790 : },
791 0 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
792 : // Give up waiting for anyone who hasn't responded: we will yield the results that we have
793 0 : tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
794 0 : break;
795 : }
796 : };
797 :
798 0 : let Some(list_response) = result else {
799 0 : tracing::info!("Shutdown during startup_reconcile");
800 0 : break;
801 : };
802 :
803 0 : match list_response {
804 0 : Err(e) => {
805 0 : tracing::warn!("Could not scan node {} ({e})", node_id);
806 : }
807 0 : Ok(listing) => {
808 0 : node_results.insert(node_id, listing);
809 0 : }
810 : }
811 : }
812 :
813 0 : node_results
814 0 : }
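
// Illustrative only: the scatter-gather-with-deadline pattern used above,
// reduced to a self-contained sketch over trivial futures.
#[allow(dead_code)]
async fn example_gather_with_deadline(deadline: Instant) -> Vec<u32> {
    let mut futs = FuturesUnordered::new();
    for i in 0..3u32 {
        futs.push(async move { i });
    }
    let mut results = Vec::new();
    loop {
        tokio::select! {
            next = futs.next() => {
                match next {
                    Some(r) => results.push(r),
                    // All futures have completed.
                    None => break,
                }
            }
            _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
                // Deadline reached: yield whatever results we already have.
                break;
            }
        }
    }
    results
}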
815 :
816 0 : async fn build_global_observed_state(&self, deadline: Instant) -> GlobalObservedState {
817 0 : let node_listings = self.scan_node_locations(deadline).await;
818 0 : let mut observed = GlobalObservedState::default();
819 :
820 0 : for (node_id, location_confs) in node_listings {
821 0 : tracing::info!(
822 0 : "Received {} shard statuses from pageserver {}",
823 0 : location_confs.tenant_shards.len(),
824 : node_id
825 : );
826 :
827 0 : for (tid, location_conf) in location_confs.tenant_shards {
828 0 : let entry = observed.0.entry(tid).or_default();
829 0 : entry.locations.insert(
830 0 : node_id,
831 0 : ObservedStateLocation {
832 0 : conf: location_conf,
833 0 : },
834 0 : );
835 0 : }
836 : }
837 :
838 0 : observed
839 0 : }
840 :
841 : /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
842 : ///
843 : /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
844 : /// tenants, then it is probably something incompletely deleted before: we will not fight with any
845 : /// other task trying to attach it.
846 0 : #[instrument(skip_all)]
847 : async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
848 : let nodes = self.inner.read().unwrap().nodes.clone();
849 :
850 : for (tenant_shard_id, node_id) in cleanup {
851 : // A node reported a tenant_shard_id which is unknown to us: detach it.
852 : let Some(node) = nodes.get(&node_id) else {
853 : // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
854 : // a location to clean up on a node that has since been removed.
855 : tracing::info!(
856 : "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
857 : );
858 : continue;
859 : };
860 :
861 : if self.cancel.is_cancelled() {
862 : break;
863 : }
864 :
865 : let client = PageserverClient::new(
866 : node.get_id(),
867 : node.base_url(),
868 : self.config.jwt_token.as_deref(),
869 : );
870 : match client
871 : .location_config(
872 : tenant_shard_id,
873 : LocationConfig {
874 : mode: LocationConfigMode::Detached,
875 : generation: None,
876 : secondary_conf: None,
877 : shard_number: tenant_shard_id.shard_number.0,
878 : shard_count: tenant_shard_id.shard_count.literal(),
879 : shard_stripe_size: 0,
880 : tenant_conf: models::TenantConfig::default(),
881 : },
882 : None,
883 : false,
884 : )
885 : .await
886 : {
887 : Ok(()) => {
888 : tracing::info!(
889 : "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
890 : );
891 : }
892 : Err(e) => {
893 : // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
894 : // break anything.
895 : tracing::error!(
896 : "Failed to detach unknkown shard {tenant_shard_id} on pageserver {node_id}: {e}"
897 : );
898 : }
899 : }
900 : }
901 : }
902 :
903 : /// Long running background task that periodically wakes up and looks for shards that need
904 : /// reconciliation. Reconciliation is fallible, so any reconciliation tasks that fail during
905 : /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
906 : /// for those retries.
907 0 : #[instrument(skip_all)]
908 : async fn background_reconcile(self: &Arc<Self>) {
909 : self.startup_complete.clone().wait().await;
910 :
911 : const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
912 :
913 : let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
914 : while !self.reconcilers_cancel.is_cancelled() {
915 : tokio::select! {
916 : _ = interval.tick() => {
917 : let reconciles_spawned = self.reconcile_all();
918 : if reconciles_spawned == 0 {
919 : // Run optimizer only when we didn't find any other work to do
920 : let optimizations = self.optimize_all().await;
921 : if optimizations == 0 {
922 : // Run new splits only when no optimizations are pending
923 : self.autosplit_tenants().await;
924 : }
925 : }
926 : }
927 : _ = self.reconcilers_cancel.cancelled() => return
928 : }
929 : }
930 : }
931 0 : #[instrument(skip_all)]
932 : async fn spawn_heartbeat_driver(&self) {
933 : self.startup_complete.clone().wait().await;
934 :
935 : let mut interval = tokio::time::interval(self.config.heartbeat_interval);
936 : while !self.cancel.is_cancelled() {
937 : tokio::select! {
938 : _ = interval.tick() => { }
939 : _ = self.cancel.cancelled() => return
940 : };
941 :
942 : let nodes = {
943 : let locked = self.inner.read().unwrap();
944 : locked.nodes.clone()
945 : };
946 :
947 : let res = self.heartbeater.heartbeat(nodes).await;
948 : if let Ok(deltas) = res {
949 : for (node_id, state) in deltas.0 {
950 : let new_availability = match state {
951 : PageserverState::Available { utilization, .. } => {
952 : NodeAvailability::Active(utilization)
953 : }
954 : PageserverState::WarmingUp { started_at } => {
955 : NodeAvailability::WarmingUp(started_at)
956 : }
957 : PageserverState::Offline => {
958 : // The node might have been placed in the WarmingUp state
959 : // while the heartbeat round was on-going. Hence, filter out
960 : // offline transitions for WarmingUp nodes that are still within
961 : // their grace period.
962 : if let Ok(NodeAvailability::WarmingUp(started_at)) = self
963 : .get_node(node_id)
964 : .await
965 : .as_ref()
966 0 : .map(|n| n.get_availability())
967 : {
968 : let now = Instant::now();
969 : if now - *started_at >= self.config.max_warming_up_interval {
970 : NodeAvailability::Offline
971 : } else {
972 : NodeAvailability::WarmingUp(*started_at)
973 : }
974 : } else {
975 : NodeAvailability::Offline
976 : }
977 : }
978 : };
979 :
980 : // This is the code path for genuine availability transitions (i.e. a node
981 : // goes unavailable and/or comes back online).
982 : let res = self
983 : .node_configure(node_id, Some(new_availability), None)
984 : .await;
985 :
986 : match res {
987 : Ok(()) => {}
988 : Err(ApiError::NotFound(_)) => {
989 : // This should be rare, but legitimate since the heartbeats are done
990 : // on a snapshot of the nodes.
991 : tracing::info!("Node {} was not found after heartbeat round", node_id);
992 : }
993 : Err(err) => {
994 : // Transition to active involves reconciling: if a node responds to a heartbeat then
995 : // becomes unavailable again, we may get an error here.
996 : tracing::error!(
997 : "Failed to update node {} after heartbeat round: {}",
998 : node_id,
999 : err
1000 : );
1001 : }
1002 : }
1003 : }
1004 : }
1005 : }
1006 : }
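
// Illustrative only: the warming-up grace-period check above, extracted into a
// hypothetical pure helper. A node observed offline stays WarmingUp until its
// grace period has elapsed.
#[allow(dead_code)]
fn example_effective_availability(
    current: Option<NodeAvailability>,
    max_warming_up_interval: Duration,
) -> NodeAvailability {
    match current {
        Some(NodeAvailability::WarmingUp(started_at))
            if Instant::now() - started_at < max_warming_up_interval =>
        {
            // Still within the grace period: do not declare the node Offline yet.
            NodeAvailability::WarmingUp(started_at)
        }
        _ => NodeAvailability::Offline,
    }
}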
1007 :
1008 : /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
1009 : /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
1010 : /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
1011 : /// will indicate that reconciliation is not needed.
1012 0 : #[instrument(skip_all, fields(
1013 : tenant_id=%result.tenant_shard_id.tenant_id, shard_id=%result.tenant_shard_id.shard_slug(),
1014 : sequence=%result.sequence
1015 0 : ))]
1016 : fn process_result(&self, mut result: ReconcileResult) {
1017 : let mut locked = self.inner.write().unwrap();
1018 : let (nodes, tenants, _scheduler) = locked.parts_mut();
1019 : let Some(tenant) = tenants.get_mut(&result.tenant_shard_id) else {
1020 : // A reconciliation result might race with removing a tenant: drop results for
1021 : // tenants that aren't in our map.
1022 : return;
1023 : };
1024 :
1025 : // Usually generation should only be updated via this path, so the max() isn't
1026 0 : // needed, but it is used to handle out-of-band updates via e.g. a test hook.
1027 : tenant.generation = std::cmp::max(tenant.generation, result.generation);
1028 :
1029 : // If the reconciler signals that it failed to notify compute, set this state on
1030 : // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
1031 : tenant.pending_compute_notification = result.pending_compute_notification;
1032 :
1033 : // Let the TenantShard know it is idle.
1034 : tenant.reconcile_complete(result.sequence);
1035 :
1036 : // In case a node was deleted while this reconcile is in flight, filter it out of the update we will
1037 : // make to the tenant
1038 : result
1039 : .observed
1040 : .locations
1041 0 : .retain(|node_id, _loc| nodes.contains_key(node_id));
1042 :
1043 : match result.result {
1044 : Ok(()) => {
1045 : for (node_id, loc) in &result.observed.locations {
1046 : if let Some(conf) = &loc.conf {
1047 : tracing::info!("Updating observed location {}: {:?}", node_id, conf);
1048 : } else {
1049 : tracing::info!("Setting observed location {} to None", node_id,)
1050 : }
1051 : }
1052 :
1053 : tenant.observed = result.observed;
1054 : tenant.waiter.advance(result.sequence);
1055 : }
1056 : Err(e) => {
1057 : match e {
1058 : ReconcileError::Cancel => {
1059 : tracing::info!("Reconciler was cancelled");
1060 : }
1061 : ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
1062 : // This might be due to the reconciler getting cancelled, or it might
1063 : // be due to the `Node` being marked offline.
1064 : tracing::info!("Reconciler cancelled during pageserver API call");
1065 : }
1066 : _ => {
1067 : tracing::warn!("Reconcile error: {}", e);
1068 : }
1069 : }
1070 :
1071 : // Ordering: populate last_error before advancing error_seq,
1072 : // so that waiters will see the correct error after waiting.
1073 : tenant.set_last_error(result.sequence, e);
1074 :
1075 : for (node_id, o) in result.observed.locations {
1076 : tenant.observed.locations.insert(node_id, o);
1077 : }
1078 : }
1079 : }
1080 :
1081 : // Maybe some other work can proceed now that this job finished.
1082 : if self.reconciler_concurrency.available_permits() > 0 {
1083 : while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
1084 : let (nodes, tenants, _scheduler) = locked.parts_mut();
1085 : if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
1086 : shard.delayed_reconcile = false;
1087 : self.maybe_reconcile_shard(shard, nodes);
1088 : }
1089 :
1090 : if self.reconciler_concurrency.available_permits() == 0 {
1091 : break;
1092 : }
1093 : }
1094 : }
1095 : }
1096 :
1097 0 : async fn process_results(
1098 0 : &self,
1099 0 : mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResultRequest>,
1100 0 : mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
1101 0 : Result<(), (TenantShardId, NotifyError)>,
1102 0 : >,
1103 0 : ) {
1104 : loop {
1105 : // Wait for the next result, or for cancellation
1106 0 : tokio::select! {
1107 0 : r = result_rx.recv() => {
1108 0 : match r {
1109 0 : Some(ReconcileResultRequest::ReconcileResult(result)) => {self.process_result(result);},
1110 0 : None | Some(ReconcileResultRequest::Stop) => {break;}
1111 : }
1112 : }
1113 0 : _ = async{
1114 0 : match bg_compute_hook_result_rx.recv().await {
1115 0 : Some(result) => {
1116 0 : if let Err((tenant_shard_id, notify_error)) = result {
1117 0 : tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
1118 0 : let mut locked = self.inner.write().unwrap();
1119 0 : if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
1120 0 : shard.pending_compute_notification = true;
1121 0 : }
1122 :
1123 0 : }
1124 : },
1125 : None => {
1126 : // This channel is dead, but we don't want to terminate the outer loop{}: just wait for shutdown
1127 0 : self.cancel.cancelled().await;
1128 : }
1129 : }
1130 0 : } => {},
1131 0 : _ = self.cancel.cancelled() => {
1132 0 : break;
1133 : }
1134 : };
1135 : }
1136 0 : }
1137 :
1138 0 : async fn process_aborts(
1139 0 : &self,
1140 0 : mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
1141 0 : ) {
1142 : loop {
1143 : // Wait for the next result, or for cancellation
1144 0 : let op = tokio::select! {
1145 0 : r = abort_rx.recv() => {
1146 0 : match r {
1147 0 : Some(op) => {op},
1148 0 : None => {break;}
1149 : }
1150 : }
1151 0 : _ = self.cancel.cancelled() => {
1152 0 : break;
1153 : }
1154 : };
1155 :
1156 : // Retry until shutdown: we must keep this request object alive until it is properly
1157 : // processed, as it holds a lock guard that prevents other operations from acting
1158 : // on the tenant while it is in a partially-split state.
1159 0 : while !self.cancel.is_cancelled() {
1160 0 : match self.abort_tenant_shard_split(&op).await {
1161 0 : Ok(_) => break,
1162 0 : Err(e) => {
1163 0 : tracing::warn!(
1164 0 : "Failed to abort shard split on {}, will retry: {e}",
1165 : op.tenant_id
1166 : );
1167 :
1168 : // If a node is unavailable, we hope that it has been properly marked Offline
1169 : // when we retry, so that the abort op will succeed. If the abort op is failing
1170 : // for some other reason, we will keep retrying forever, or until a human notices
1171 : // and does something about it (either fixing a pageserver or restarting the controller).
1172 0 : tokio::time::timeout(Duration::from_secs(5), self.cancel.cancelled())
1173 0 : .await
1174 0 : .ok();
1175 : }
1176 : }
1177 : }
1178 : }
1179 0 : }
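
// Illustrative only: the cancellable-backoff idiom used in the retry loop above.
// `timeout(duration, cancel.cancelled())` sleeps for `duration` unless the token
// fires first, so shutdown interrupts the backoff promptly.
#[allow(dead_code)]
async fn example_cancellable_backoff(cancel: &CancellationToken) {
    tokio::time::timeout(Duration::from_secs(5), cancel.cancelled())
        .await
        .ok(); // Err(Elapsed) just means the full backoff duration passed
}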
1180 :
1181 0 : pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
1182 0 : let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
1183 0 : let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
1184 0 :
1185 0 : let leadership_cancel = CancellationToken::new();
1186 0 : let leadership = Leadership::new(persistence.clone(), config.clone(), leadership_cancel);
1187 0 : let (leader, leader_step_down_state) = leadership.step_down_current_leader().await?;
1188 :
1189 : // Apply the migrations **after** the current leader has stepped down
1190 : // (or we've given up waiting for it), but **before** reading from the
1191 : // database. The only exception is reading the current leader before
1192 : // migrating.
1193 0 : persistence.migration_run().await?;
1194 :
1195 0 : tracing::info!("Loading nodes from database...");
1196 0 : let nodes = persistence
1197 0 : .list_nodes()
1198 0 : .await?
1199 0 : .into_iter()
1200 0 : .map(Node::from_persistent)
1201 0 : .collect::<Vec<_>>();
1202 0 : let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
1203 0 : tracing::info!("Loaded {} nodes from database.", nodes.len());
1204 :
1205 0 : tracing::info!("Loading shards from database...");
1206 0 : let mut tenant_shard_persistence = persistence.list_tenant_shards().await?;
1207 0 : tracing::info!(
1208 0 : "Loaded {} shards from database.",
1209 0 : tenant_shard_persistence.len()
1210 : );
1211 :
1212 : // If any shard splits were in progress, reset the database state to abort them
1213 0 : let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
1214 0 : HashMap::new();
1215 0 : for tsp in &mut tenant_shard_persistence {
1216 0 : let shard = tsp.get_shard_identity()?;
1217 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
1218 0 : let entry = tenant_shard_count_min_max
1219 0 : .entry(tenant_shard_id.tenant_id)
1220 0 : .or_insert_with(|| (shard.count, shard.count));
1221 0 : entry.0 = std::cmp::min(entry.0, shard.count);
1222 0 : entry.1 = std::cmp::max(entry.1, shard.count);
1223 0 : }
1224 :
1225 0 : for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
1226 0 : if count_min != count_max {
1227 : // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
1228 : // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
1229 : // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
1230 0 : tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
1231 0 : let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
1232 :
1233 : // We may never see the Complete status here: if the split was complete, we wouldn't have
1234 : // identified this tenant as having mismatching min/max counts.
1235 0 : assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
1236 :
1237 : // Clear the splitting status in-memory, to reflect that we just aborted in the database
1238 0 : tenant_shard_persistence.iter_mut().for_each(|tsp| {
1239 0 : // Set idle split state on those shards that we will retain.
1240 0 : let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
1241 0 : if tsp_tenant_id == tenant_id
1242 0 : && tsp.get_shard_identity().unwrap().count == count_min
1243 0 : {
1244 0 : tsp.splitting = SplitState::Idle;
1245 0 : } else if tsp_tenant_id == tenant_id {
1246 : // Leave the splitting state on the child shards: this will be used next to
1247 : // drop them.
1248 0 : tracing::info!(
1249 0 : "Shard {tsp_tenant_id} will be dropped after shard split abort",
1250 : );
1251 0 : }
1252 0 : });
1253 0 :
1254 0 : // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
1255 0 : tenant_shard_persistence.retain(|tsp| {
1256 0 : TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
1257 0 : || tsp.splitting == SplitState::Idle
1258 0 : });
1259 0 : }
1260 : }
1261 :
1262 0 : let mut tenants = BTreeMap::new();
1263 0 :
1264 0 : let mut scheduler = Scheduler::new(nodes.values());
1265 0 :
1266 0 : #[cfg(feature = "testing")]
1267 0 : {
1268 0 : // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
1269 0 : // tests only store the shards, not the nodes. The nodes will be loaded shortly
1270 0 : // after when pageservers start up and register.
1271 0 : let mut node_ids = HashSet::new();
1272 0 : for tsp in &tenant_shard_persistence {
1273 0 : if let Some(node_id) = tsp.generation_pageserver {
1274 0 : node_ids.insert(node_id);
1275 0 : }
1276 : }
1277 0 : for node_id in node_ids {
1278 0 : tracing::info!("Creating node {} in scheduler for tests", node_id);
1279 0 : let node = Node::new(
1280 0 : NodeId(node_id as u64),
1281 0 : "".to_string(),
1282 0 : 123,
1283 0 : "".to_string(),
1284 0 : 123,
1285 0 : "test_az".to_string(),
1286 0 : );
1287 0 :
1288 0 : scheduler.node_upsert(&node);
1289 : }
1290 : }
1291 0 : for tsp in tenant_shard_persistence {
1292 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
1293 :
1294 : // We will populate intent properly later in [`Self::startup_reconcile`], initially populate
1295 : // it with what we can infer: the node for which a generation was most recently issued.
1296 0 : let mut intent = IntentState::new();
1297 0 : if let Some(generation_pageserver) = tsp.generation_pageserver.map(|n| NodeId(n as u64))
1298 : {
1299 0 : if nodes.contains_key(&generation_pageserver) {
1300 0 : intent.set_attached(&mut scheduler, Some(generation_pageserver));
1301 0 : } else {
1302 : // If a node was removed before being completely drained, it is legal for it to leave behind a `generation_pageserver` referring
1303 : // to a non-existent node, because node deletion doesn't block on completing the reconciliations that will issue new generations
1304 : // on different pageservers.
1305 0 : tracing::warn!("Tenant shard {tenant_shard_id} references non-existent node {generation_pageserver} in database, will be rescheduled");
1306 : }
1307 0 : }
1308 0 : let new_tenant = TenantShard::from_persistent(tsp, intent)?;
1309 :
1310 0 : tenants.insert(tenant_shard_id, new_tenant);
1311 : }
1312 :
1313 0 : let (startup_completion, startup_complete) = utils::completion::channel();
1314 0 :
1315 0 : // This channel is continuously consumed by process_results, so doesn't need to be very large.
1316 0 : let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
1317 0 : tokio::sync::mpsc::channel(512);
1318 0 :
1319 0 : let (delayed_reconcile_tx, delayed_reconcile_rx) =
1320 0 : tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
1321 0 :
1322 0 : let cancel = CancellationToken::new();
1323 0 : let reconcilers_cancel = cancel.child_token();
1324 0 :
1325 0 : let heartbeater = Heartbeater::new(
1326 0 : config.jwt_token.clone(),
1327 0 : config.max_offline_interval,
1328 0 : config.max_warming_up_interval,
1329 0 : cancel.clone(),
1330 0 : );
1331 :
1332 0 : let initial_leadership_status = if config.start_as_candidate {
1333 0 : LeadershipStatus::Candidate
1334 : } else {
1335 0 : LeadershipStatus::Leader
1336 : };
1337 :
1338 0 : let this = Arc::new(Self {
1339 0 : inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
1340 0 : nodes,
1341 0 : tenants,
1342 0 : scheduler,
1343 0 : delayed_reconcile_rx,
1344 0 : initial_leadership_status,
1345 0 : ))),
1346 0 : config: config.clone(),
1347 0 : persistence,
1348 0 : compute_hook: Arc::new(ComputeHook::new(config.clone())),
1349 0 : result_tx,
1350 0 : heartbeater,
1351 0 : reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
1352 0 : config.reconciler_concurrency,
1353 0 : )),
1354 0 : delayed_reconcile_tx,
1355 0 : abort_tx,
1356 0 : startup_complete: startup_complete.clone(),
1357 0 : cancel,
1358 0 : reconcilers_cancel,
1359 0 : gate: Gate::default(),
1360 0 : reconcilers_gate: Gate::default(),
1361 0 : tenant_op_locks: Default::default(),
1362 0 : node_op_locks: Default::default(),
1363 0 : });
1364 0 :
1365 0 : let result_task_this = this.clone();
1366 0 : tokio::task::spawn(async move {
1367 : // Block shutdown until we're done (we must respect self.cancel)
1368 0 : if let Ok(_gate) = result_task_this.gate.enter() {
1369 0 : result_task_this
1370 0 : .process_results(result_rx, bg_compute_notify_result_rx)
1371 0 : .await
1372 0 : }
1373 0 : });
1374 0 :
1375 0 : tokio::task::spawn({
1376 0 : let this = this.clone();
1377 0 : async move {
1378 : // Block shutdown until we're done (we must respect self.cancel)
1379 0 : if let Ok(_gate) = this.gate.enter() {
1380 0 : this.process_aborts(abort_rx).await
1381 0 : }
1382 0 : }
1383 0 : });
1384 0 :
1385 0 : tokio::task::spawn({
1386 0 : let this = this.clone();
1387 0 : async move {
1388 0 : if let Ok(_gate) = this.gate.enter() {
1389 : loop {
1390 0 : tokio::select! {
1391 0 : _ = this.cancel.cancelled() => {
1392 0 : break;
1393 : },
1394 0 : _ = tokio::time::sleep(Duration::from_secs(60)) => {}
1395 0 : };
1396 0 : this.tenant_op_locks.housekeeping();
1397 : }
1398 0 : }
1399 0 : }
1400 0 : });
1401 0 :
1402 0 : tokio::task::spawn({
1403 0 : let this = this.clone();
1404 0 : // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
1405 0 : // is done.
1406 0 : let startup_completion = startup_completion.clone();
1407 0 : async move {
1408 : // Block shutdown until we're done (we must respect self.cancel)
1409 0 : let Ok(_gate) = this.gate.enter() else {
1410 0 : return;
1411 : };
1412 :
1413 0 : this.startup_reconcile(leader, leader_step_down_state, bg_compute_notify_result_tx)
1414 0 : .await;
1415 :
1416 0 : drop(startup_completion);
1417 0 : }
1418 0 : });
1419 0 :
1420 0 : tokio::task::spawn({
1421 0 : let this = this.clone();
1422 0 : let startup_complete = startup_complete.clone();
1423 0 : async move {
1424 0 : startup_complete.wait().await;
1425 0 : this.background_reconcile().await;
1426 0 : }
1427 0 : });
1428 0 :
1429 0 : tokio::task::spawn({
1430 0 : let this = this.clone();
1431 0 : let startup_complete = startup_complete.clone();
1432 0 : async move {
1433 0 : startup_complete.wait().await;
1434 0 : this.spawn_heartbeat_driver().await;
1435 0 : }
1436 0 : });
1437 0 :
1438 0 : Ok(this)
1439 0 : }
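
// Illustrative only: a hedged sketch of bringing up the service, assuming a
// `Persistence` handle has already been constructed.
#[allow(dead_code)]
async fn example_spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<()> {
    let service = Service::spawn(config, persistence).await?;
    // Wait for startup reconciliation before doing anything that mutates tenants.
    service.startup_complete.clone().wait().await;
    Ok(())
}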
1440 :
1441 0 : pub(crate) async fn attach_hook(
1442 0 : &self,
1443 0 : attach_req: AttachHookRequest,
1444 0 : ) -> anyhow::Result<AttachHookResponse> {
1445 0 : let _tenant_lock = trace_exclusive_lock(
1446 0 : &self.tenant_op_locks,
1447 0 : attach_req.tenant_shard_id.tenant_id,
1448 0 : TenantOperations::AttachHook,
1449 0 : )
1450 0 : .await;
1451 :
1452 : // This is a test hook. To enable using it on tenants that were created directly with
1453 : // the pageserver API (not via this service), we will auto-create any missing tenant
1454 : // shards with default state.
1455 0 : let insert = {
1456 0 : let locked = self.inner.write().unwrap();
1457 0 : !locked.tenants.contains_key(&attach_req.tenant_shard_id)
1458 0 : };
1459 0 :
1460 0 : if insert {
1461 0 : let tsp = TenantShardPersistence {
1462 0 : tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
1463 0 : shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
1464 0 : shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
1465 0 : shard_stripe_size: 0,
1466 0 : generation: attach_req.generation_override.or(Some(0)),
1467 0 : generation_pageserver: None,
1468 0 : placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
1469 0 : config: serde_json::to_string(&TenantConfig::default()).unwrap(),
1470 0 : splitting: SplitState::default(),
1471 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1472 0 : .unwrap(),
1473 0 : preferred_az_id: None,
1474 0 : };
1475 0 :
1476 0 : match self.persistence.insert_tenant_shards(vec![tsp]).await {
1477 0 : Err(e) => match e {
1478 : DatabaseError::Query(diesel::result::Error::DatabaseError(
1479 : DatabaseErrorKind::UniqueViolation,
1480 : _,
1481 : )) => {
1482 0 : tracing::info!(
1483 0 : "Raced with another request to insert tenant {}",
1484 : attach_req.tenant_shard_id
1485 : )
1486 : }
1487 0 : _ => return Err(e.into()),
1488 : },
1489 : Ok(()) => {
1490 0 : tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
1491 :
1492 0 : let mut locked = self.inner.write().unwrap();
1493 0 : locked.tenants.insert(
1494 0 : attach_req.tenant_shard_id,
1495 0 : TenantShard::new(
1496 0 : attach_req.tenant_shard_id,
1497 0 : ShardIdentity::unsharded(),
1498 0 : PlacementPolicy::Attached(0),
1499 0 : ),
1500 0 : );
1501 0 : tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
1502 : }
1503 : }
1504 0 : }
1505 :
1506 0 : let new_generation = if let Some(req_node_id) = attach_req.node_id {
1507 0 : let maybe_tenant_conf = {
1508 0 : let locked = self.inner.write().unwrap();
1509 0 : locked
1510 0 : .tenants
1511 0 : .get(&attach_req.tenant_shard_id)
1512 0 : .map(|t| t.config.clone())
1513 0 : };
1514 0 :
1515 0 : match maybe_tenant_conf {
1516 0 : Some(conf) => {
1517 0 : let new_generation = self
1518 0 : .persistence
1519 0 : .increment_generation(attach_req.tenant_shard_id, req_node_id)
1520 0 : .await?;
1521 :
1522 : // Persist the placement policy update. This is required
1523 : // when we are re-attaching a detached tenant.
1524 0 : self.persistence
1525 0 : .update_tenant_shard(
1526 0 : TenantFilter::Shard(attach_req.tenant_shard_id),
1527 0 : Some(PlacementPolicy::Attached(0)),
1528 0 : Some(conf),
1529 0 : None,
1530 0 : None,
1531 0 : )
1532 0 : .await?;
1533 0 : Some(new_generation)
1534 : }
1535 : None => {
1536 0 : anyhow::bail!("Attach hook handling raced with tenant removal")
1537 : }
1538 : }
1539 : } else {
1540 0 : self.persistence.detach(attach_req.tenant_shard_id).await?;
1541 0 : None
1542 : };
1543 :
1544 0 : let mut locked = self.inner.write().unwrap();
1545 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
1546 0 :
1547 0 : let tenant_shard = tenants
1548 0 : .get_mut(&attach_req.tenant_shard_id)
1549 0 : .expect("Checked for existence above");
1550 :
1551 0 : if let Some(new_generation) = new_generation {
1552 0 : tenant_shard.generation = Some(new_generation);
1553 0 : tenant_shard.policy = PlacementPolicy::Attached(0);
1554 0 : } else {
1555 : // This is a detach notification. We must update placement policy to avoid re-attaching
1556 : // during background scheduling/reconciliation, or during storage controller restart.
1557 0 : assert!(attach_req.node_id.is_none());
1558 0 : tenant_shard.policy = PlacementPolicy::Detached;
1559 : }
1560 :
1561 0 : if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
1562 0 : tracing::info!(
1563 : tenant_id = %attach_req.tenant_shard_id,
1564 : ps_id = %attaching_pageserver,
1565 : generation = ?tenant_shard.generation,
1566 0 : "issuing",
1567 : );
1568 0 : } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
1569 0 : tracing::info!(
1570 : tenant_id = %attach_req.tenant_shard_id,
1571 : %ps_id,
1572 : generation = ?tenant_shard.generation,
1573 0 : "dropping",
1574 : );
1575 : } else {
1576 0 : tracing::info!(
1577 : tenant_id = %attach_req.tenant_shard_id,
1578 0 : "no-op: tenant already has no pageserver");
1579 : }
1580 0 : tenant_shard
1581 0 : .intent
1582 0 : .set_attached(scheduler, attach_req.node_id);
1583 0 :
1584 0 : tracing::info!(
1585 0 : "attach_hook: tenant {} set generation {:?}, pageserver {}",
1586 0 : attach_req.tenant_shard_id,
1587 0 : tenant_shard.generation,
1588 0 : // TODO: this is an odd number of 0xf's
1589 0 : attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff))
1590 : );
1591 :
1592 : // Trick the reconciler into not doing anything for this tenant: this helps
1593 : // tests that manually configure a tenant on the pageserver, and then call this
1594 : // attach hook: they don't want background reconciliation to modify what they
1595 : // did to the pageserver.
1596 : #[cfg(feature = "testing")]
1597 : {
1598 0 : if let Some(node_id) = attach_req.node_id {
1599 0 : tenant_shard.observed.locations = HashMap::from([(
1600 0 : node_id,
1601 0 : ObservedStateLocation {
1602 0 : conf: Some(attached_location_conf(
1603 0 : tenant_shard.generation.unwrap(),
1604 0 : &tenant_shard.shard,
1605 0 : &tenant_shard.config,
1606 0 : &PlacementPolicy::Attached(0),
1607 0 : )),
1608 0 : },
1609 0 : )]);
1610 0 : } else {
1611 0 : tenant_shard.observed.locations.clear();
1612 0 : }
1613 : }
1614 :
1615 0 : Ok(AttachHookResponse {
1616 0 : gen: attach_req
1617 0 : .node_id
1618 0 : .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
1619 0 : })
1620 0 : }
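// Illustrative sketch (hypothetical test code, not part of this service) of how
// the attach hook above is driven: `node_id: Some(..)` attaches and issues a
// fresh generation, `node_id: None` records a detach and returns no generation.
//
//     let resp = service.attach_hook(AttachHookRequest {
//         tenant_shard_id,
//         node_id: Some(NodeId(1)),
//         generation_override: None,
//     }).await?;
//     assert!(resp.gen.is_some()); // attaching always yields a generation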
1621 :
1622 0 : pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
1623 0 : let locked = self.inner.read().unwrap();
1624 0 :
1625 0 : let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
1626 0 :
1627 0 : InspectResponse {
1628 0 : attachment: tenant_shard.and_then(|s| {
1629 0 : s.intent
1630 0 : .get_attached()
1631 0 : .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
1632 0 : }),
1633 0 : }
1634 0 : }
1635 :
1636 : // When the availability state of a node transitions to active, we must do a full reconciliation
1637 : // of LocationConfigs on that node. This is because while a node was offline:
1638 : // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
1639 : // - aborting a tenant shard split might have left rogue child shards behind on this node.
1640 : //
1641 : // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
1642 : // Reconcilers might communicate with the node, and these must not overlap with the work we do in
1643 : // this function.
1644 : //
1645 : // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
1646 : // is written for a single node rather than as a batch job for all nodes.
1647 0 : #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
1648 : async fn node_activate_reconcile(
1649 : &self,
1650 : mut node: Node,
1651 : _lock: &TracingExclusiveGuard<NodeOperations>,
1652 : ) -> Result<(), ApiError> {
1653 : // This Node is a mutable local copy: we will set it active so that we can use its
1654 : // API client to reconcile with the node. The Node in [`Self::nodes`] will get updated
1655 : // later.
1656 : node.set_availability(NodeAvailability::Active(PageserverUtilization::full()));
1657 :
1658 : let configs = match node
1659 : .with_client_retries(
1660 0 : |client| async move { client.list_location_config().await },
1661 : &self.config.jwt_token,
1662 : 1,
1663 : 5,
1664 : SHORT_RECONCILE_TIMEOUT,
1665 : &self.cancel,
1666 : )
1667 : .await
1668 : {
1669 : None => {
1670 : // We're shutting down (the Node's cancellation token can't have fired, because
1671 : // we're the only scope that has a reference to it, and we didn't fire it).
1672 : return Err(ApiError::ShuttingDown);
1673 : }
1674 : Some(Err(e)) => {
1675 : // This node didn't succeed listing its locations: it may not proceed to active state
1676 : // as it is apparently unavailable.
1677 : return Err(ApiError::PreconditionFailed(
1678 : format!("Failed to query node location configs, cannot activate ({e})").into(),
1679 : ));
1680 : }
1681 : Some(Ok(configs)) => configs,
1682 : };
1683 : tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
1684 :
1685 : let mut cleanup = Vec::new();
1686 : {
1687 : let mut locked = self.inner.write().unwrap();
1688 :
1689 : for (tenant_shard_id, observed_loc) in configs.tenant_shards {
1690 : let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
1691 : cleanup.push(tenant_shard_id);
1692 : continue;
1693 : };
1694 : tenant_shard
1695 : .observed
1696 : .locations
1697 : .insert(node.get_id(), ObservedStateLocation { conf: observed_loc });
1698 : }
1699 : }
1700 :
1701 : for tenant_shard_id in cleanup {
1702 : tracing::info!("Detaching {tenant_shard_id}");
1703 : match node
1704 : .with_client_retries(
1705 0 : |client| async move {
1706 0 : let config = LocationConfig {
1707 0 : mode: LocationConfigMode::Detached,
1708 0 : generation: None,
1709 0 : secondary_conf: None,
1710 0 : shard_number: tenant_shard_id.shard_number.0,
1711 0 : shard_count: tenant_shard_id.shard_count.literal(),
1712 0 : shard_stripe_size: 0,
1713 0 : tenant_conf: models::TenantConfig::default(),
1714 0 : };
1715 0 : client
1716 0 : .location_config(tenant_shard_id, config, None, false)
1717 0 : .await
1718 0 : },
1719 : &self.config.jwt_token,
1720 : 1,
1721 : 5,
1722 : SHORT_RECONCILE_TIMEOUT,
1723 : &self.cancel,
1724 : )
1725 : .await
1726 : {
1727 : None => {
1728 : // We're shutting down (the Node's cancellation token can't have fired, because
1729 : // we're the only scope that has a reference to it, and we didn't fire it).
1730 : return Err(ApiError::ShuttingDown);
1731 : }
1732 : Some(Err(e)) => {
1733 : // Do not let the node proceed to Active state if it is not responsive to requests
1734 : // to detach. This could happen if e.g. a shutdown bug in the pageserver is preventing
1735 : // detach completing: we should not let this node back into the set of nodes considered
1736 : // okay for scheduling.
1737 : return Err(ApiError::Conflict(format!(
1738 : "Node {node} failed to detach {tenant_shard_id}: {e}"
1739 : )));
1740 : }
1741 : Some(Ok(_)) => {}
1742 : };
1743 : }
1744 :
1745 : Ok(())
1746 : }
1747 :
1748 0 : pub(crate) async fn re_attach(
1749 0 : &self,
1750 0 : reattach_req: ReAttachRequest,
1751 0 : ) -> Result<ReAttachResponse, ApiError> {
1752 0 : if let Some(register_req) = reattach_req.register {
1753 0 : self.node_register(register_req).await?;
1754 0 : }
1755 :
1756 : // Ordering: we must persist generation number updates before making them visible in the in-memory state
1757 0 : let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
1758 :
1759 0 : tracing::info!(
1760 : node_id=%reattach_req.node_id,
1761 0 : "Incremented {} tenant shards' generations",
1762 0 : incremented_generations.len()
1763 : );
1764 :
1765 : // Apply the updated generation to our in-memory state, and
1766 : // discover secondary locations.
1767 0 : let mut locked = self.inner.write().unwrap();
1768 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1769 0 :
1770 0 : let mut response = ReAttachResponse {
1771 0 : tenants: Vec::new(),
1772 0 : };
1773 :
1774 : // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
1775 : // to call location_conf API with an old generation. Wait for cancellation to complete
1776 : // before responding to this request. Requires well implemented CancellationToken logic
1777 : // all the way to where we call location_conf. Even then, there can still be a location_conf
1778 : // request in flight over the network: TODO handle that by making location_conf API refuse
1779 : // to go backward in generations.
1780 :
1781 : // Scan through all shards, applying updates for ones where we updated generation
1782 : // and identifying shards that intend to have a secondary location on this node.
1783 0 : for (tenant_shard_id, shard) in tenants {
1784 0 : if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
1785 0 : let new_gen = *new_gen;
1786 0 : response.tenants.push(ReAttachResponseTenant {
1787 0 : id: *tenant_shard_id,
1788 0 : gen: Some(new_gen.into().unwrap()),
1789 0 : // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
1790 0 : // execution. If a pageserver is restarted during that process, then the reconcile pass will
1791 0 : // fail, and start from scratch, so it doesn't make sense for us to try and preserve
1792 0 : // the stale/multi states at this point.
1793 0 : mode: LocationConfigMode::AttachedSingle,
1794 0 : });
1795 0 :
1796 0 : shard.generation = std::cmp::max(shard.generation, Some(new_gen));
1797 0 : if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
1798 : // Why can we update `observed` even though we're not sure our response will be received
1799 : // by the pageserver? Because the pageserver will not proceed with startup until
1800 : // it has processed the response: if it loses it, we'll see another request and increment
1801 : // the generation again, avoiding any uncertainty about the dirtiness of the tenant's state.
1802 0 : if let Some(conf) = observed.conf.as_mut() {
1803 0 : conf.generation = new_gen.into();
1804 0 : }
1805 0 : } else {
1806 0 : // This node has no observed state for the shard: perhaps it was offline
1807 0 : // when the pageserver restarted. Insert a None, so that the Reconciler
1808 0 : // will be prompted to learn the location's state before it makes changes.
1809 0 : shard
1810 0 : .observed
1811 0 : .locations
1812 0 : .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
1813 0 : }
1814 0 : } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
1815 0 : // Ordering: pageserver will not accept /location_config requests until it has
1816 0 : // finished processing the response from re-attach. So we can update our in-memory state
1817 0 : // now, and be confident that we are not stamping on the result of some later location config.
1818 0 : // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
1819 0 : // so we might update observed state here, and then get over-written by some racing
1820 0 : // ReconcileResult. The impact is low, however, since we have set the pageserver's state to something
1821 0 : // that matches intent, so the worst case of a race is a spurious reconcile.
1822 0 :
1823 0 : response.tenants.push(ReAttachResponseTenant {
1824 0 : id: *tenant_shard_id,
1825 0 : gen: None,
1826 0 : mode: LocationConfigMode::Secondary,
1827 0 : });
1828 0 :
1829 0 : // We must not update observed, because we have no guarantee that our
1830 0 : // response will be received by the pageserver. This could leave it
1831 0 : // falsely dirty, but the resulting reconcile should be idempotent.
1832 0 : }
1833 : }
1834 :
1835 : // We consider a node Active once we have composed a re-attach response, but we
1836 : // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
1837 : // implicitly synchronizes the LocationConfigs on the node.
1838 : //
1839 : // Setting a node active unblocks any Reconcilers that might write to the location config API,
1840 : // but those requests will not be accepted by the node until it has finished processing
1841 : // the re-attach response.
1842 : //
1843 : // Additionally, reset the node's scheduling policy to match the conditional update done
1844 : // in [`Persistence::re_attach`].
1845 0 : if let Some(node) = nodes.get(&reattach_req.node_id) {
1846 0 : let reset_scheduling = matches!(
1847 0 : node.get_scheduling(),
1848 : NodeSchedulingPolicy::PauseForRestart
1849 : | NodeSchedulingPolicy::Draining
1850 : | NodeSchedulingPolicy::Filling
1851 : );
1852 :
1853 0 : let mut new_nodes = (**nodes).clone();
1854 0 : if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
1855 0 : if reset_scheduling {
1856 0 : node.set_scheduling(NodeSchedulingPolicy::Active);
1857 0 : }
1858 :
1859 0 : tracing::info!("Marking {} warming-up on reattach", reattach_req.node_id);
1860 0 : node.set_availability(NodeAvailability::WarmingUp(std::time::Instant::now()));
1861 0 :
1862 0 : scheduler.node_upsert(node);
1863 0 : let new_nodes = Arc::new(new_nodes);
1864 0 : *nodes = new_nodes;
1865 : } else {
1866 0 : tracing::error!(
1867 0 : "Reattaching node {} was removed while processing the request",
1868 : reattach_req.node_id
1869 : );
1870 : }
1871 0 : }
1872 :
1873 0 : Ok(response)
1874 0 : }
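// Shape of the response assembled above (a sketch; field values illustrative):
// shards whose generation we incremented re-attach as AttachedSingle with the
// new generation, while shards that only intend a secondary location on this
// node receive no generation and re-attach as Secondary.
//
//     ReAttachResponseTenant { id, gen: Some(8), mode: LocationConfigMode::AttachedSingle }
//     ReAttachResponseTenant { id, gen: None,    mode: LocationConfigMode::Secondary }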
1875 :
1876 0 : pub(crate) async fn validate(
1877 0 : &self,
1878 0 : validate_req: ValidateRequest,
1879 0 : ) -> Result<ValidateResponse, DatabaseError> {
1880 : // Fast in-memory check: we may reject validation on anything that doesn't match our
1881 : // in-memory generation for a shard
1882 0 : let in_memory_result = {
1883 0 : let mut in_memory_result = Vec::new();
1884 0 : let locked = self.inner.read().unwrap();
1885 0 : for req_tenant in validate_req.tenants {
1886 0 : if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
1887 0 : let valid = tenant_shard.generation == Some(Generation::new(req_tenant.gen));
1888 0 : tracing::info!(
1889 0 : "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
1890 : req_tenant.id,
1891 : req_tenant.gen,
1892 : tenant_shard.generation
1893 : );
1894 :
1895 0 : in_memory_result.push((req_tenant.id, Generation::new(req_tenant.gen), valid));
1896 : } else {
1897 : // This is legal: for example during a shard split the pageserver may still
1898 : // have deletions in its queue from the old pre-split shard, or after deletion
1899 : // of a tenant that was busy with compaction/gc while being deleted.
1900 0 : tracing::info!(
1901 0 : "Refusing deletion validation for missing shard {}",
1902 : req_tenant.id
1903 : );
1904 : }
1905 : }
1906 :
1907 0 : in_memory_result
1908 : };
1909 :
1910 : // Database calls to confirm validity for anything that passed the in-memory check. We must do this
1911 : // in case of controller split-brain, where some other controller process might have incremented the generation.
1912 0 : let db_generations = self
1913 0 : .persistence
1914 0 : .shard_generations(in_memory_result.iter().filter_map(|i| {
1915 0 : if i.2 {
1916 0 : Some(&i.0)
1917 : } else {
1918 0 : None
1919 : }
1920 0 : }))
1921 0 : .await?;
1922 0 : let db_generations = db_generations.into_iter().collect::<HashMap<_, _>>();
1923 0 :
1924 0 : let mut response = ValidateResponse {
1925 0 : tenants: Vec::new(),
1926 0 : };
1927 0 : for (tenant_shard_id, validate_generation, valid) in in_memory_result.into_iter() {
1928 0 : let valid = if valid {
1929 0 : let db_generation = db_generations.get(&tenant_shard_id);
1930 0 : db_generation == Some(&Some(validate_generation))
1931 : } else {
1932 : // If in-memory state says it's invalid, trust that. It's always safe to fail a validation, at worst
1933 : // this prevents a pageserver from cleaning up an object in S3.
1934 0 : false
1935 : };
1936 :
1937 0 : response.tenants.push(ValidateResponseTenant {
1938 0 : id: tenant_shard_id,
1939 0 : valid,
1940 0 : })
1941 : }
1942 :
1943 0 : Ok(response)
1944 0 : }
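// Minimal sketch of the validation rule applied above (values illustrative):
// a deletion is valid only if the requested generation matches both the
// in-memory generation and the generation persisted in the database.
//
//     let in_memory = Some(Generation::new(8));
//     let requested = Generation::new(7);
//     // Stale: refuse validation. The worst case of refusing is a leaked
//     // S3 object; the worst case of wrongly validating is data loss.
//     assert_ne!(in_memory, Some(requested));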
1945 :
1946 0 : pub(crate) async fn tenant_create(
1947 0 : &self,
1948 0 : create_req: TenantCreateRequest,
1949 0 : ) -> Result<TenantCreateResponse, ApiError> {
1950 0 : let tenant_id = create_req.new_tenant_id.tenant_id;
1951 :
1952 : // Exclude any concurrent attempts to create/access the same tenant ID
1953 0 : let _tenant_lock = trace_exclusive_lock(
1954 0 : &self.tenant_op_locks,
1955 0 : create_req.new_tenant_id.tenant_id,
1956 0 : TenantOperations::Create,
1957 0 : )
1958 0 : .await;
1959 0 : let (response, waiters) = self.do_tenant_create(create_req).await?;
1960 :
1961 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
1962 : // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
1963 : // accept compute notifications while it is in the process of creating. Reconciliation will
1964 : // be retried in the background.
1965 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
1966 0 : }
1967 0 : Ok(response)
1968 0 : }
1969 :
1970 0 : pub(crate) async fn do_tenant_create(
1971 0 : &self,
1972 0 : create_req: TenantCreateRequest,
1973 0 : ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
1974 0 : let placement_policy = create_req
1975 0 : .placement_policy
1976 0 : .clone()
1977 0 : // As a default, zero secondaries is convenient for tests that don't choose a policy.
1978 0 : .unwrap_or(PlacementPolicy::Attached(0));
1979 :
1980 : // This service expects to handle sharding itself: it is an error to try and directly create
1981 : // a particular shard here.
1982 0 : let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
1983 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
1984 0 : "Attempted to create a specific shard, this API is for creating the whole tenant"
1985 0 : )));
1986 : } else {
1987 0 : create_req.new_tenant_id.tenant_id
1988 0 : };
1989 0 :
1990 0 : tracing::info!(
1991 0 : "Creating tenant {}, shard_count={:?}",
1992 : create_req.new_tenant_id,
1993 : create_req.shard_parameters.count,
1994 : );
1995 :
1996 0 : let create_ids = (0..create_req.shard_parameters.count.count())
1997 0 : .map(|i| TenantShardId {
1998 0 : tenant_id,
1999 0 : shard_number: ShardNumber(i),
2000 0 : shard_count: create_req.shard_parameters.count,
2001 0 : })
2002 0 : .collect::<Vec<_>>();
2003 :
2004 : // If the caller specifies a None generation, it means "start from default". This is different
2005 : // to [`Self::tenant_location_config`], where a None generation is used to represent
2006 : // an incompletely-onboarded tenant.
2007 0 : let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
2008 0 : tracing::info!(
2009 0 : "tenant_create: secondary mode, generation is_some={}",
2010 0 : create_req.generation.is_some()
2011 : );
2012 0 : create_req.generation.map(Generation::new)
2013 : } else {
2014 0 : tracing::info!(
2015 0 : "tenant_create: not secondary mode, generation is_some={}",
2016 0 : create_req.generation.is_some()
2017 : );
2018 0 : Some(
2019 0 : create_req
2020 0 : .generation
2021 0 : .map(Generation::new)
2022 0 : .unwrap_or(INITIAL_GENERATION),
2023 0 : )
2024 : };
2025 :
2026 : // Ordering: we persist tenant shards before creating them on the pageserver. This enables a caller
2027 : // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
2028 : // during the creation, rather than risking leaving orphan objects in S3.
2029 0 : let persist_tenant_shards = create_ids
2030 0 : .iter()
2031 0 : .map(|tenant_shard_id| TenantShardPersistence {
2032 0 : tenant_id: tenant_shard_id.tenant_id.to_string(),
2033 0 : shard_number: tenant_shard_id.shard_number.0 as i32,
2034 0 : shard_count: tenant_shard_id.shard_count.literal() as i32,
2035 0 : shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
2036 0 : generation: initial_generation.map(|g| g.into().unwrap() as i32),
2037 0 : // The pageserver is not known until scheduling happens: we will set this column when
2038 0 : // incrementing the generation the first time we attach to a pageserver.
2039 0 : generation_pageserver: None,
2040 0 : placement_policy: serde_json::to_string(&placement_policy).unwrap(),
2041 0 : config: serde_json::to_string(&create_req.config).unwrap(),
2042 0 : splitting: SplitState::default(),
2043 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
2044 0 : .unwrap(),
2045 0 : preferred_az_id: None,
2046 0 : })
2047 0 : .collect();
2048 0 :
2049 0 : match self
2050 0 : .persistence
2051 0 : .insert_tenant_shards(persist_tenant_shards)
2052 0 : .await
2053 : {
2054 0 : Ok(_) => {}
2055 : Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
2056 : DatabaseErrorKind::UniqueViolation,
2057 : _,
2058 : ))) => {
2059 : // Unique key violation: this is probably a retry. Because the shard count is part of the unique key,
2060 : // if we see a unique key violation it means that the creation request's shard count matches the previous
2061 : // creation's shard count.
2062 0 : tracing::info!("Tenant shards already present in database, proceeding with idempotent creation...");
2063 : }
2064 : // Any other database error is unexpected and a bug.
2065 0 : Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
2066 : };
2067 :
2068 0 : let mut schedule_context = ScheduleContext::default();
2069 0 : let mut schedule_error = None;
2070 0 : let mut response_shards = Vec::new();
2071 0 : for tenant_shard_id in create_ids {
2072 0 : tracing::info!("Creating shard {tenant_shard_id}...");
2073 :
2074 0 : let outcome = self
2075 0 : .do_initial_shard_scheduling(
2076 0 : tenant_shard_id,
2077 0 : initial_generation,
2078 0 : &create_req.shard_parameters,
2079 0 : create_req.config.clone(),
2080 0 : placement_policy.clone(),
2081 0 : &mut schedule_context,
2082 0 : )
2083 0 : .await;
2084 :
2085 0 : match outcome {
2086 0 : InitialShardScheduleOutcome::Scheduled(resp) => response_shards.push(resp),
2087 0 : InitialShardScheduleOutcome::NotScheduled => {}
2088 0 : InitialShardScheduleOutcome::ShardScheduleError(err) => {
2089 0 : schedule_error = Some(err);
2090 0 : }
2091 : }
2092 : }
2093 :
2094 0 : let preferred_azs = {
2095 0 : let locked = self.inner.read().unwrap();
2096 0 : response_shards
2097 0 : .iter()
2098 0 : .filter_map(|resp| {
2099 0 : let az_id = locked
2100 0 : .nodes
2101 0 : .get(&resp.node_id)
2102 0 : .map(|n| n.get_availability_zone_id().to_string())?;
2103 :
2104 0 : Some((resp.shard_id, az_id))
2105 0 : })
2106 0 : .collect::<Vec<_>>()
2107 : };
2108 :
2109 : // Note that we persist the preferred AZ for the new shards separately.
2110 : // In theory, we could "peek" the scheduler to determine where the shard will
2111 : // land, but the subsequent "real" call into the scheduler might select a different
2112 : // node. Hence, we do this awkward update to keep things consistent.
2113 0 : let updated = self
2114 0 : .persistence
2115 0 : .set_tenant_shard_preferred_azs(preferred_azs)
2116 0 : .await
2117 0 : .map_err(|err| {
2118 0 : ApiError::InternalServerError(anyhow::anyhow!(
2119 0 : "Failed to persist preferred az ids: {err}"
2120 0 : ))
2121 0 : })?;
2122 :
2123 : {
2124 0 : let mut locked = self.inner.write().unwrap();
2125 0 : for (tid, az_id) in updated {
2126 0 : if let Some(shard) = locked.tenants.get_mut(&tid) {
2127 0 : shard.set_preferred_az(az_id);
2128 0 : }
2129 : }
2130 : }
2131 :
2132 : // If we failed to schedule shards, then they are still created in the controller,
2133 : // but we return an error to the requester to avoid a silent failure when someone
2134 : // tries to e.g. create a tenant whose placement policy requires more nodes than
2135 : // are present in the system. We do this here rather than in the above loop, to
2136 : // avoid situations where we only create a subset of shards in the tenant.
2137 0 : if let Some(e) = schedule_error {
2138 0 : return Err(ApiError::Conflict(format!(
2139 0 : "Failed to schedule shard(s): {e}"
2140 0 : )));
2141 0 : }
2142 0 :
2143 0 : let waiters = {
2144 0 : let mut locked = self.inner.write().unwrap();
2145 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2146 0 : tenants
2147 0 : .range_mut(TenantShardId::tenant_range(tenant_id))
2148 0 : .filter_map(|(_shard_id, shard)| self.maybe_reconcile_shard(shard, nodes))
2149 0 : .collect::<Vec<_>>()
2150 0 : };
2151 0 :
2152 0 : Ok((
2153 0 : TenantCreateResponse {
2154 0 : shards: response_shards,
2155 0 : },
2156 0 : waiters,
2157 0 : ))
2158 0 : }
2159 :
2160 : /// Helper for tenant creation that does the scheduling for an individual shard. Covers both the
2161 : /// case of a new tenant and a pre-existing one.
2162 0 : async fn do_initial_shard_scheduling(
2163 0 : &self,
2164 0 : tenant_shard_id: TenantShardId,
2165 0 : initial_generation: Option<Generation>,
2166 0 : shard_params: &ShardParameters,
2167 0 : config: TenantConfig,
2168 0 : placement_policy: PlacementPolicy,
2169 0 : schedule_context: &mut ScheduleContext,
2170 0 : ) -> InitialShardScheduleOutcome {
2171 0 : let mut locked = self.inner.write().unwrap();
2172 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
2173 :
2174 : use std::collections::btree_map::Entry;
2175 0 : match tenants.entry(tenant_shard_id) {
2176 0 : Entry::Occupied(mut entry) => {
2177 0 : tracing::info!("Tenant shard {tenant_shard_id} already exists while creating");
2178 :
2179 : // TODO: schedule() should take an anti-affinity expression that pushes
2180 : // attached and secondary locations (independently) away from those
2181 : // pageservers also holding a shard for this tenant.
2182 :
2183 0 : if let Err(err) = entry.get_mut().schedule(scheduler, schedule_context) {
2184 0 : return InitialShardScheduleOutcome::ShardScheduleError(err);
2185 0 : }
2186 :
2187 0 : if let Some(node_id) = entry.get().intent.get_attached() {
2188 0 : let generation = entry
2189 0 : .get()
2190 0 : .generation
2191 0 : .expect("Generation is set when in attached mode");
2192 0 : InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
2193 0 : shard_id: tenant_shard_id,
2194 0 : node_id: *node_id,
2195 0 : generation: generation.into().unwrap(),
2196 0 : })
2197 : } else {
2198 0 : InitialShardScheduleOutcome::NotScheduled
2199 : }
2200 : }
2201 0 : Entry::Vacant(entry) => {
2202 0 : let state = entry.insert(TenantShard::new(
2203 0 : tenant_shard_id,
2204 0 : ShardIdentity::from_params(tenant_shard_id.shard_number, shard_params),
2205 0 : placement_policy,
2206 0 : ));
2207 0 :
2208 0 : state.generation = initial_generation;
2209 0 : state.config = config;
2210 0 : if let Err(e) = state.schedule(scheduler, schedule_context) {
2211 0 : return InitialShardScheduleOutcome::ShardScheduleError(e);
2212 0 : }
2213 :
2214 : // Only include shards in result if we are attaching: the purpose
2215 : // of the response is to tell the caller where the shards are attached.
2216 0 : if let Some(node_id) = state.intent.get_attached() {
2217 0 : let generation = state
2218 0 : .generation
2219 0 : .expect("Generation is set when in attached mode");
2220 0 : InitialShardScheduleOutcome::Scheduled(TenantCreateResponseShard {
2221 0 : shard_id: tenant_shard_id,
2222 0 : node_id: *node_id,
2223 0 : generation: generation.into().unwrap(),
2224 0 : })
2225 : } else {
2226 0 : InitialShardScheduleOutcome::NotScheduled
2227 : }
2228 : }
2229 : }
2230 0 : }
2231 :
2232 : /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
2233 : /// wait for reconciliation to complete before responding.
2234 0 : async fn await_waiters(
2235 0 : &self,
2236 0 : waiters: Vec<ReconcilerWaiter>,
2237 0 : timeout: Duration,
2238 0 : ) -> Result<(), ReconcileWaitError> {
2239 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
2240 0 : for waiter in waiters {
2241 0 : let timeout = deadline.duration_since(Instant::now());
2242 0 : waiter.wait_timeout(timeout).await?;
2243 : }
2244 :
2245 0 : Ok(())
2246 0 : }
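// The shared-deadline pattern above bounds the *total* wait across all waiters
// by `timeout`, rather than granting each waiter the full timeout. Sketch:
//
//     let deadline = Instant::now() + timeout;
//     for waiter in waiters {
//         // Each successive waiter gets only the time remaining to the deadline.
//         waiter.wait_timeout(deadline.duration_since(Instant::now())).await?;
//     }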
2247 :
2248 : /// Same as [`Service::await_waiters`], but returns the waiters which are still
2249 : /// in progress
2250 0 : async fn await_waiters_remainder(
2251 0 : &self,
2252 0 : waiters: Vec<ReconcilerWaiter>,
2253 0 : timeout: Duration,
2254 0 : ) -> Vec<ReconcilerWaiter> {
2255 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
2256 0 : for waiter in waiters.iter() {
2257 0 : let timeout = deadline.duration_since(Instant::now());
2258 0 : let _ = waiter.wait_timeout(timeout).await;
2259 : }
2260 :
2261 0 : waiters
2262 0 : .into_iter()
2263 0 : .filter(|waiter| matches!(waiter.get_status(), ReconcilerStatus::InProgress))
2264 0 : .collect::<Vec<_>>()
2265 0 : }
2266 :
2267 : /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
2268 : /// and transform it into either a tenant creation or a series of shard updates.
2269 : ///
2270 : /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
2271 : /// still be returned.
2272 0 : fn tenant_location_config_prepare(
2273 0 : &self,
2274 0 : tenant_id: TenantId,
2275 0 : req: TenantLocationConfigRequest,
2276 0 : ) -> TenantCreateOrUpdate {
2277 0 : let mut updates = Vec::new();
2278 0 : let mut locked = self.inner.write().unwrap();
2279 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2280 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
2281 :
2282 : // Use location config mode as an indicator of policy.
2283 0 : let placement_policy = match req.config.mode {
2284 0 : LocationConfigMode::Detached => PlacementPolicy::Detached,
2285 0 : LocationConfigMode::Secondary => PlacementPolicy::Secondary,
2286 : LocationConfigMode::AttachedMulti
2287 : | LocationConfigMode::AttachedSingle
2288 : | LocationConfigMode::AttachedStale => {
2289 0 : if nodes.len() > 1 {
2290 0 : PlacementPolicy::Attached(1)
2291 : } else {
2292 : // Convenience for dev/test: if we just have one pageserver, import
2293 : // tenants into non-HA mode so that scheduling will succeed.
2294 0 : PlacementPolicy::Attached(0)
2295 : }
2296 : }
2297 : };
2298 :
2299 0 : let mut create = true;
2300 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2301 : // Saw an existing shard: this is not a creation
2302 0 : create = false;
2303 :
2304 : // Shards may have initially been created by a Secondary request, where we
2305 : // would have left generation as None.
2306 : //
2307 : // We only update generation the first time we see an attached-mode request,
2308 : // and if there is no existing generation set. The caller is responsible for
2309 : // ensuring that no non-storage-controller pageserver ever uses a higher
2310 : // generation than they passed in here.
2311 : use LocationConfigMode::*;
2312 0 : let set_generation = match req.config.mode {
2313 0 : AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
2314 0 : req.config.generation.map(Generation::new)
2315 : }
2316 0 : _ => None,
2317 : };
2318 :
2319 0 : updates.push(ShardUpdate {
2320 0 : tenant_shard_id: *shard_id,
2321 0 : placement_policy: placement_policy.clone(),
2322 0 : tenant_config: req.config.tenant_conf.clone(),
2323 0 : generation: set_generation,
2324 0 : });
2325 : }
2326 :
2327 0 : if create {
2328 : use LocationConfigMode::*;
2329 0 : let generation = match req.config.mode {
2330 0 : AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
2331 : // If a caller provided a generation in a non-attached request, ignore it
2332 : // and leave our generation as None: this enables a subsequent update to set
2333 : // the generation when setting an attached mode for the first time.
2334 0 : _ => None,
2335 : };
2336 :
2337 0 : TenantCreateOrUpdate::Create(
2338 0 : // Synthesize a creation request
2339 0 : TenantCreateRequest {
2340 0 : new_tenant_id: tenant_shard_id,
2341 0 : generation,
2342 0 : shard_parameters: ShardParameters {
2343 0 : count: tenant_shard_id.shard_count,
2344 0 : // We only import un-sharded or single-sharded tenants, so stripe
2345 0 : // size can be made up arbitrarily here.
2346 0 : stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE,
2347 0 : },
2348 0 : placement_policy: Some(placement_policy),
2349 0 : config: req.config.tenant_conf,
2350 0 : },
2351 0 : )
2352 : } else {
2353 0 : assert!(!updates.is_empty());
2354 0 : TenantCreateOrUpdate::Update(updates)
2355 : }
2356 0 : }
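// Summary of the mode -> placement policy mapping implemented above; the
// Attached(N) choice depends on how many pageservers are registered:
//
//     LocationConfigMode::Detached   => PlacementPolicy::Detached
//     LocationConfigMode::Secondary  => PlacementPolicy::Secondary
//     LocationConfigMode::Attached*  => PlacementPolicy::Attached(1)  // multiple nodes
//                                       PlacementPolicy::Attached(0)  // single node (dev/test)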
2357 :
2358 : /// This API is used by the cloud control plane to migrate unsharded tenants that it created
2359 : /// directly with pageservers into this service.
2360 : ///
2361 : /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
2362 : /// has attempted to call this API. Failure to comply with this rule may lead to S3 corruption.
2363 : /// Think of the first attempt to call this API as a transfer of absolute authority over the
2364 : /// tenant's source of generation numbers.
2365 : ///
2366 : /// The mode in this request provides coarse-grained control of tenants:
2367 : /// - Call with mode Attached* to upsert the tenant.
2368 : /// - Call with mode Secondary to either onboard a tenant without attaching it, or
2369 : /// to set an existing tenant to PolicyMode::Secondary
2370 : /// - Call with mode Detached to switch to PolicyMode::Detached
2371 0 : pub(crate) async fn tenant_location_config(
2372 0 : &self,
2373 0 : tenant_shard_id: TenantShardId,
2374 0 : req: TenantLocationConfigRequest,
2375 0 : ) -> Result<TenantLocationConfigResponse, ApiError> {
2376 : // We require an exclusive lock, because we are updating both persistent and in-memory state
2377 0 : let _tenant_lock = trace_exclusive_lock(
2378 0 : &self.tenant_op_locks,
2379 0 : tenant_shard_id.tenant_id,
2380 0 : TenantOperations::LocationConfig,
2381 0 : )
2382 0 : .await;
2383 :
2384 0 : if !tenant_shard_id.is_unsharded() {
2385 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
2386 0 : "This API is for importing single-sharded or unsharded tenants"
2387 0 : )));
2388 0 : }
2389 0 :
2390 0 : // First check if this is a creation or an update
2391 0 : let create_or_update = self.tenant_location_config_prepare(tenant_shard_id.tenant_id, req);
2392 0 :
2393 0 : let mut result = TenantLocationConfigResponse {
2394 0 : shards: Vec::new(),
2395 0 : stripe_size: None,
2396 0 : };
2397 0 : let waiters = match create_or_update {
2398 0 : TenantCreateOrUpdate::Create(create_req) => {
2399 0 : let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
2400 0 : result.shards = create_resp
2401 0 : .shards
2402 0 : .into_iter()
2403 0 : .map(|s| TenantShardLocation {
2404 0 : node_id: s.node_id,
2405 0 : shard_id: s.shard_id,
2406 0 : })
2407 0 : .collect();
2408 0 : waiters
2409 : }
2410 0 : TenantCreateOrUpdate::Update(updates) => {
2411 0 : // Persist updates
2412 0 : // Ordering: write to the database before applying changes in-memory, so that
2413 0 : // we will not appear time-travel backwards on a restart.
2414 0 : let mut schedule_context = ScheduleContext::default();
2415 : for ShardUpdate {
2416 0 : tenant_shard_id,
2417 0 : placement_policy,
2418 0 : tenant_config,
2419 0 : generation,
2420 0 : } in &updates
2421 : {
2422 0 : self.persistence
2423 0 : .update_tenant_shard(
2424 0 : TenantFilter::Shard(*tenant_shard_id),
2425 0 : Some(placement_policy.clone()),
2426 0 : Some(tenant_config.clone()),
2427 0 : *generation,
2428 0 : None,
2429 0 : )
2430 0 : .await?;
2431 : }
2432 :
2433 : // Apply updates in-memory
2434 0 : let mut waiters = Vec::new();
2435 0 : {
2436 0 : let mut locked = self.inner.write().unwrap();
2437 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2438 :
2439 : for ShardUpdate {
2440 0 : tenant_shard_id,
2441 0 : placement_policy,
2442 0 : tenant_config,
2443 0 : generation: update_generation,
2444 0 : } in updates
2445 : {
2446 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
2447 0 : tracing::warn!("Shard {tenant_shard_id} removed while updating");
2448 0 : continue;
2449 : };
2450 :
2451 : // Update stripe size
2452 0 : if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
2453 0 : result.stripe_size = Some(shard.shard.stripe_size);
2454 0 : }
2455 :
2456 0 : shard.policy = placement_policy;
2457 0 : shard.config = tenant_config;
2458 0 : if let Some(generation) = update_generation {
2459 0 : shard.generation = Some(generation);
2460 0 : }
2461 :
2462 0 : shard.schedule(scheduler, &mut schedule_context)?;
2463 :
2464 0 : let maybe_waiter = self.maybe_reconcile_shard(shard, nodes);
2465 0 : if let Some(waiter) = maybe_waiter {
2466 0 : waiters.push(waiter);
2467 0 : }
2468 :
2469 0 : if let Some(node_id) = shard.intent.get_attached() {
2470 0 : result.shards.push(TenantShardLocation {
2471 0 : shard_id: tenant_shard_id,
2472 0 : node_id: *node_id,
2473 0 : })
2474 0 : }
2475 : }
2476 : }
2477 0 : waiters
2478 : }
2479 : };
2480 :
2481 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2482 : // Do not treat a reconcile error as fatal: we have already applied any requested
2483 : // Intent changes, and the reconcile can fail for external reasons like unavailable
2484 : // compute notification API. In these cases, it is important that we do not
2485 : // cause the cloud control plane to retry forever on this API.
2486 0 : tracing::warn!(
2487 0 : "Failed to reconcile after /location_config: {e}, returning success anyway"
2488 : );
2489 0 : }
2490 :
2491 : // Logging the full result is useful because it lets us cross-check what the cloud control
2492 : // plane's tenant_shards table should contain.
2493 0 : tracing::info!("Complete, returning {result:?}");
2494 :
2495 0 : Ok(result)
2496 0 : }
2497 :
2498 0 : pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
2499 : // We require an exclusive lock, because we are updating persistent and in-memory state
2500 0 : let _tenant_lock = trace_exclusive_lock(
2501 0 : &self.tenant_op_locks,
2502 0 : req.tenant_id,
2503 0 : TenantOperations::ConfigSet,
2504 0 : )
2505 0 : .await;
2506 :
2507 0 : let tenant_id = req.tenant_id;
2508 0 : let config = req.config;
2509 0 :
2510 0 : self.persistence
2511 0 : .update_tenant_shard(
2512 0 : TenantFilter::Tenant(req.tenant_id),
2513 0 : None,
2514 0 : Some(config.clone()),
2515 0 : None,
2516 0 : None,
2517 0 : )
2518 0 : .await?;
2519 :
2520 0 : let waiters = {
2521 0 : let mut waiters = Vec::new();
2522 0 : let mut locked = self.inner.write().unwrap();
2523 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2524 0 : for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2525 0 : shard.config = config.clone();
2526 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2527 0 : waiters.push(waiter);
2528 0 : }
2529 : }
2530 0 : waiters
2531 : };
2532 :
2533 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2534 : // Treat this as success because we have stored the configuration. If e.g.
2535 : // a node was unavailable at this time, it should not stop us accepting a
2536 : // configuration change.
2537 0 : tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
2538 0 : }
2539 :
2540 0 : Ok(())
2541 0 : }
2542 :
2543 0 : pub(crate) fn tenant_config_get(
2544 0 : &self,
2545 0 : tenant_id: TenantId,
2546 0 : ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
2547 0 : let config = {
2548 0 : let locked = self.inner.read().unwrap();
2549 0 :
2550 0 : match locked
2551 0 : .tenants
2552 0 : .range(TenantShardId::tenant_range(tenant_id))
2553 0 : .next()
2554 : {
2555 0 : Some((_tenant_shard_id, shard)) => shard.config.clone(),
2556 : None => {
2557 0 : return Err(ApiError::NotFound(
2558 0 : anyhow::anyhow!("Tenant not found").into(),
2559 0 : ))
2560 : }
2561 : }
2562 : };
2563 :
2564 : // Unlike the pageserver, we do not have a set of global defaults: the config is
2565 : // entirely per-tenant. Therefore the distinction between `tenant_specific_overrides`
2566 : // and `effective_config` in the response is meaningless, but we retain that syntax
2567 : // in order to remain compatible with the pageserver API.
2568 :
2569 0 : let response = HashMap::from([
2570 : (
2571 : "tenant_specific_overrides",
2572 0 : serde_json::to_value(&config)
2573 0 : .context("serializing tenant specific overrides")
2574 0 : .map_err(ApiError::InternalServerError)?,
2575 : ),
2576 : (
2577 0 : "effective_config",
2578 0 : serde_json::to_value(&config)
2579 0 : .context("serializing effective config")
2580 0 : .map_err(ApiError::InternalServerError)?,
2581 : ),
2582 : ]);
2583 :
2584 0 : Ok(response)
2585 0 : }
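// Example response shape (a sketch): both keys deliberately carry the same
// config, as explained above, purely for pageserver API compatibility.
//
//     {
//         "tenant_specific_overrides": { /* the tenant's config */ },
//         "effective_config": { /* identical to the above */ }
//     }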
2586 :
2587 0 : pub(crate) async fn tenant_time_travel_remote_storage(
2588 0 : &self,
2589 0 : time_travel_req: &TenantTimeTravelRequest,
2590 0 : tenant_id: TenantId,
2591 0 : timestamp: Cow<'_, str>,
2592 0 : done_if_after: Cow<'_, str>,
2593 0 : ) -> Result<(), ApiError> {
2594 0 : let _tenant_lock = trace_exclusive_lock(
2595 0 : &self.tenant_op_locks,
2596 0 : tenant_id,
2597 0 : TenantOperations::TimeTravelRemoteStorage,
2598 0 : )
2599 0 : .await;
2600 :
2601 0 : let node = {
2602 0 : let mut locked = self.inner.write().unwrap();
2603 : // Just a sanity check to prevent misuse: the API expects that the tenant is fully
2604 : // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
2605 : // but only at the start of the process, so it's really just to prevent operator
2606 : // mistakes.
2607 0 : for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
2608 0 : if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
2609 : {
2610 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2611 0 : "We want tenant to be attached in shard with tenant_shard_id={shard_id}"
2612 0 : )));
2613 0 : }
2614 0 : let maybe_attached = shard
2615 0 : .observed
2616 0 : .locations
2617 0 : .iter()
2618 0 : .filter_map(|(node_id, observed_location)| {
2619 0 : observed_location
2620 0 : .conf
2621 0 : .as_ref()
2622 0 : .map(|loc| (node_id, observed_location, loc.mode))
2623 0 : })
2624 0 : .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
2625 0 : if let Some((node_id, _observed_location, mode)) = maybe_attached {
2626 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}")));
2627 0 : }
2628 : }
2629 0 : let scheduler = &mut locked.scheduler;
2630 : // Right now we only perform the operation on a single node without parallelization
2631 : // TODO fan out the operation to multiple nodes for better performance
2632 0 : let node_id =
2633 0 : scheduler.schedule_shard::<AttachedShardTag>(&[], &ScheduleContext::default())?;
2634 0 : let node = locked
2635 0 : .nodes
2636 0 : .get(&node_id)
2637 0 : .expect("Pageservers may not be deleted while lock is active");
2638 0 : node.clone()
2639 0 : };
2640 0 :
2641 0 : // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
2642 0 : let mut counts = time_travel_req
2643 0 : .shard_counts
2644 0 : .iter()
2645 0 : .copied()
2646 0 : .collect::<HashSet<_>>()
2647 0 : .into_iter()
2648 0 : .collect::<Vec<_>>();
2649 0 : counts.sort_unstable();
2650 :
2651 0 : for count in counts {
2652 0 : let shard_ids = (0..count.count())
2653 0 : .map(|i| TenantShardId {
2654 0 : tenant_id,
2655 0 : shard_number: ShardNumber(i),
2656 0 : shard_count: count,
2657 0 : })
2658 0 : .collect::<Vec<_>>();
2659 0 : for tenant_shard_id in shard_ids {
2660 0 : let client = PageserverClient::new(
2661 0 : node.get_id(),
2662 0 : node.base_url(),
2663 0 : self.config.jwt_token.as_deref(),
2664 0 : );
2665 0 :
2666 0 : tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
2667 :
2668 0 : client
2669 0 : .tenant_time_travel_remote_storage(
2670 0 : tenant_shard_id,
2671 0 : ×tamp,
2672 0 : &done_if_after,
2673 0 : )
2674 0 : .await
2675 0 : .map_err(|e| {
2676 0 : ApiError::InternalServerError(anyhow::anyhow!(
2677 0 : "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
2678 0 : node
2679 0 : ))
2680 0 : })?;
2681 : }
2682 : }
2683 0 : Ok(())
2684 0 : }
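// Worked example of the shard-count handling above: for
// `shard_counts = [4, 1, 4]` the deduplicated, sorted counts are [1, 4], so
// time travel runs for shard 0 of count 1, then shards 0..=3 of count 4,
// covering every remote path the tenant has historically written under.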
2685 :
2686 0 : pub(crate) async fn tenant_secondary_download(
2687 0 : &self,
2688 0 : tenant_id: TenantId,
2689 0 : wait: Option<Duration>,
2690 0 : ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
2691 0 : let _tenant_lock = trace_shared_lock(
2692 0 : &self.tenant_op_locks,
2693 0 : tenant_id,
2694 0 : TenantOperations::SecondaryDownload,
2695 0 : )
2696 0 : .await;
2697 :
2698 : // Acquire the lock and yield the collection of shard-node tuples to which we will send requests
2699 0 : let targets = {
2700 0 : let locked = self.inner.read().unwrap();
2701 0 : let mut targets = Vec::new();
2702 :
2703 0 : for (tenant_shard_id, shard) in
2704 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2705 : {
2706 0 : for node_id in shard.intent.get_secondary() {
2707 0 : let node = locked
2708 0 : .nodes
2709 0 : .get(node_id)
2710 0 : .expect("Pageservers may not be deleted while referenced");
2711 0 :
2712 0 : targets.push((*tenant_shard_id, node.clone()));
2713 0 : }
2714 : }
2715 0 : targets
2716 0 : };
2717 0 :
2718 0 : // Issue concurrent requests to all shards' locations
2719 0 : let mut futs = FuturesUnordered::new();
2720 0 : for (tenant_shard_id, node) in targets {
2721 0 : let client = PageserverClient::new(
2722 0 : node.get_id(),
2723 0 : node.base_url(),
2724 0 : self.config.jwt_token.as_deref(),
2725 0 : );
2726 0 : futs.push(async move {
2727 0 : let result = client
2728 0 : .tenant_secondary_download(tenant_shard_id, wait)
2729 0 : .await;
2730 0 : (result, node, tenant_shard_id)
2731 0 : })
2732 : }
2733 :
2734 : // Handle any errors returned by pageservers. This includes cases like this request racing with
2735 : // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
2736 : // well as more general cases like 503s, 500s, or timeouts.
2737 0 : let mut aggregate_progress = SecondaryProgress::default();
2738 0 : let mut aggregate_status: Option<StatusCode> = None;
2739 0 : let mut error: Option<mgmt_api::Error> = None;
2740 0 : while let Some((result, node, tenant_shard_id)) = futs.next().await {
2741 0 : match result {
2742 0 : Err(e) => {
2743 0 : // Secondary downloads are always advisory: if something fails, we nevertheless report success, so that whoever
2744 0 : // is calling us will proceed with whatever migration they're doing, albeit with a slightly less warm cache
2745 0 : // than they had hoped for.
2746 0 : tracing::warn!("Secondary download error from pageserver {node}: {e}",);
2747 0 : error = Some(e)
2748 : }
2749 0 : Ok((status_code, progress)) => {
2750 0 : tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
2751 0 : aggregate_progress.layers_downloaded += progress.layers_downloaded;
2752 0 : aggregate_progress.layers_total += progress.layers_total;
2753 0 : aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
2754 0 : aggregate_progress.bytes_total += progress.bytes_total;
2755 0 : aggregate_progress.heatmap_mtime =
2756 0 : std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
2757 0 : aggregate_status = match aggregate_status {
2758 0 : None => Some(status_code),
2759 0 : Some(StatusCode::OK) => Some(status_code),
2760 0 : Some(cur) => {
2761 0 : // Other status codes (e.g. 202) -- do not overwrite.
2762 0 : Some(cur)
2763 : }
2764 : };
2765 : }
2766 : }
2767 : }
2768 :
2769 : // If any of the shards return 202, indicate our result as 202.
2770 0 : match aggregate_status {
2771 : None => {
2772 0 : match error {
2773 0 : Some(e) => {
2774 0 : // No successes, and an error: surface it
2775 0 : Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
2776 : }
2777 : None => {
2778 : // No shards found
2779 0 : Err(ApiError::NotFound(
2780 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
2781 0 : ))
2782 : }
2783 : }
2784 : }
2785 0 : Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
2786 : }
2787 0 : }
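// Worked example of the status aggregation above: the first non-200 status
// "sticks", so if any shard is still downloading (202), the aggregate is 202:
//
//     shard statuses: 200, 202, 200  =>  aggregate 202
//     shard statuses: 200, 200       =>  aggregate 200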
2788 :
2789 0 : pub(crate) async fn tenant_delete(&self, tenant_id: TenantId) -> Result<StatusCode, ApiError> {
2790 0 : let _tenant_lock =
2791 0 : trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
2792 :
2793 : // Detach all shards
2794 0 : let (detach_waiters, shard_ids, node) = {
2795 0 : let mut shard_ids = Vec::new();
2796 0 : let mut detach_waiters = Vec::new();
2797 0 : let mut locked = self.inner.write().unwrap();
2798 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2799 0 : for (tenant_shard_id, shard) in
2800 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2801 : {
2802 0 : shard_ids.push(*tenant_shard_id);
2803 0 :
2804 0 : // Update the tenant's intent to remove all attachments
2805 0 : shard.policy = PlacementPolicy::Detached;
2806 0 : shard
2807 0 : .schedule(scheduler, &mut ScheduleContext::default())
2808 0 : .expect("De-scheduling is infallible");
2809 0 : debug_assert!(shard.intent.get_attached().is_none());
2810 0 : debug_assert!(shard.intent.get_secondary().is_empty());
2811 :
2812 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2813 0 : detach_waiters.push(waiter);
2814 0 : }
2815 : }
2816 :
2817 : // Pick an arbitrary node to use for remote deletions (does not have to be where the tenant
2818 : // was attached, just has to be able to see the S3 content)
2819 0 : let node_id =
2820 0 : scheduler.schedule_shard::<AttachedShardTag>(&[], &ScheduleContext::default())?;
2821 0 : let node = nodes
2822 0 : .get(&node_id)
2823 0 : .expect("Pageservers may not be deleted while lock is active");
2824 0 : (detach_waiters, shard_ids, node.clone())
2825 0 : };
2826 0 :
2827 0 : // This reconcile wait can fail in a few ways:
2828 0 : //  A: there is a very long queue for the reconciler semaphore
2829 0 : //  B: some pageserver is failing to handle a detach promptly
2830 0 : //  C: some pageserver goes offline right at the moment we send it a request.
2831 0 : //
2832 0 : // A and C are transient: the semaphore will eventually become available, and once a node is marked offline
2833 0 : // the next attempt to reconcile will silently skip detaches for an offline node and succeed. If B happens,
2834 0 : // it's a bug, and needs resolving at the pageserver level (we shouldn't just leave attachments behind while
2835 0 : // deleting the underlying data).
2836 0 : self.await_waiters(detach_waiters, RECONCILE_TIMEOUT)
2837 0 : .await?;
2838 :
2839 0 : let locations = shard_ids
2840 0 : .into_iter()
2841 0 : .map(|s| (s, node.clone()))
2842 0 : .collect::<Vec<_>>();
2843 0 : let results = self.tenant_for_shards_api(
2844 0 : locations,
2845 0 : |tenant_shard_id, client| async move { client.tenant_delete(tenant_shard_id).await },
2846 0 : 1,
2847 0 : 3,
2848 0 : RECONCILE_TIMEOUT,
2849 0 : &self.cancel,
2850 0 : )
2851 0 : .await;
2852 0 : for result in results {
2853 0 : match result {
2854 : Ok(StatusCode::ACCEPTED) => {
2855 : // This should never happen: we waited for detaches to finish above
2856 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2857 0 : "Unexpectedly still attached on {}",
2858 0 : node
2859 0 : )));
2860 : }
2861 0 : Ok(_) => {}
2862 : Err(mgmt_api::Error::Cancelled) => {
2863 0 : return Err(ApiError::ShuttingDown);
2864 : }
2865 0 : Err(e) => {
2866 0 : // This is unexpected: remote deletion should be infallible, unless the object store
2867 0 : // at large is unavailable.
2868 0 : tracing::error!("Error deleting via node {}: {e}", node);
2869 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(e)));
2870 : }
2871 : }
2872 : }
2873 :
2874 : // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
2875 : // our in-memory state and database state.
2876 :
2877 : // Ordering: we delete persistent state first: if we then
2878 : // crash, we will drop the in-memory state.
2879 :
2880 : // Drop persistent state.
2881 0 : self.persistence.delete_tenant(tenant_id).await?;
2882 :
2883 : // Drop in-memory state
2884 : {
2885 0 : let mut locked = self.inner.write().unwrap();
2886 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
2887 :
2888 : // Dereference Scheduler from shards before dropping them
2889 0 : for (_tenant_shard_id, shard) in
2890 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2891 0 : {
2892 0 : shard.intent.clear(scheduler);
2893 0 : }
2894 :
2895 0 : tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
2896 0 : tracing::info!(
2897 0 : "Deleted tenant {tenant_id}, now have {} tenants",
2898 0 : locked.tenants.len()
2899 : );
2900 : };
2901 :
2902 : // Success is represented as 404, to imitate the existing pageserver deletion API
2903 0 : Ok(StatusCode::NOT_FOUND)
2904 0 : }
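// Sketch of the caller's view (hypothetical usage): deletion reports 404 on
// success to imitate the pageserver API, so callers treat NOT_FOUND as done:
//
//     let status = service.tenant_delete(tenant_id).await?;
//     if status == StatusCode::NOT_FOUND {
//         // deletion complete (idempotent: repeated calls also return 404)
//     }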
2905 :
2906 : /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
2907 : /// for a tenant. The TenantConfig is passed through to pageservers, whereas this function modifies
2908 : /// the tenant's policies (configuration) within the storage controller
2909 0 : pub(crate) async fn tenant_update_policy(
2910 0 : &self,
2911 0 : tenant_id: TenantId,
2912 0 : req: TenantPolicyRequest,
2913 0 : ) -> Result<(), ApiError> {
2914 : // We require an exclusive lock, because we are updating persistent and in-memory state
2915 0 : let _tenant_lock = trace_exclusive_lock(
2916 0 : &self.tenant_op_locks,
2917 0 : tenant_id,
2918 0 : TenantOperations::UpdatePolicy,
2919 0 : )
2920 0 : .await;
2921 :
2922 0 : failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
2923 :
2924 : let TenantPolicyRequest {
2925 0 : placement,
2926 0 : scheduling,
2927 0 : } = req;
2928 0 :
2929 0 : self.persistence
2930 0 : .update_tenant_shard(
2931 0 : TenantFilter::Tenant(tenant_id),
2932 0 : placement.clone(),
2933 0 : None,
2934 0 : None,
2935 0 : scheduling,
2936 0 : )
2937 0 : .await?;
2938 :
2939 0 : let mut schedule_context = ScheduleContext::default();
2940 0 : let mut locked = self.inner.write().unwrap();
2941 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2942 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2943 0 : if let Some(placement) = &placement {
2944 0 : shard.policy = placement.clone();
2945 0 :
2946 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2947 0 : "Updated placement policy to {placement:?}");
2948 0 : }
2949 :
2950 0 : if let Some(scheduling) = &scheduling {
2951 0 : shard.set_scheduling_policy(*scheduling);
2952 0 :
2953 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2954 0 : "Updated scheduling policy to {scheduling:?}");
2955 0 : }
2956 :
2957 : // In case scheduling is being switched back on, try it now.
2958 0 : shard.schedule(scheduler, &mut schedule_context).ok();
2959 0 : self.maybe_reconcile_shard(shard, nodes);
2960 : }
2961 :
2962 0 : Ok(())
2963 0 : }
2964 :
2965 0 : pub(crate) async fn tenant_timeline_create(
2966 0 : &self,
2967 0 : tenant_id: TenantId,
2968 0 : mut create_req: TimelineCreateRequest,
2969 0 : ) -> Result<TimelineInfo, ApiError> {
2970 0 : tracing::info!(
2971 0 : "Creating timeline {}/{}",
2972 : tenant_id,
2973 : create_req.new_timeline_id,
2974 : );
2975 :
2976 0 : let _tenant_lock = trace_shared_lock(
2977 0 : &self.tenant_op_locks,
2978 0 : tenant_id,
2979 0 : TenantOperations::TimelineCreate,
2980 0 : )
2981 0 : .await;
2982 0 : failpoint_support::sleep_millis_async!("tenant-create-timeline-shared-lock");
2983 :
2984 0 : self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
2985 0 : if targets.is_empty() {
2986 0 : return Err(ApiError::NotFound(
2987 0 : anyhow::anyhow!("Tenant not found").into(),
2988 0 : ));
2989 0 : };
2990 0 : let shard_zero = targets.remove(0);
2991 :
2992 0 : async fn create_one(
2993 0 : tenant_shard_id: TenantShardId,
2994 0 : node: Node,
2995 0 : jwt: Option<String>,
2996 0 : create_req: TimelineCreateRequest,
2997 0 : ) -> Result<TimelineInfo, ApiError> {
2998 0 : tracing::info!(
2999 0 : "Creating timeline on shard {}/{}, attached to node {node}",
3000 : tenant_shard_id,
3001 : create_req.new_timeline_id,
3002 : );
3003 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
3004 0 :
3005 0 : client
3006 0 : .timeline_create(tenant_shard_id, &create_req)
3007 0 : .await
3008 0 : .map_err(|e| passthrough_api_error(&node, e))
3009 0 : }
3010 :
3011 : // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
3012 : // use whatever LSN that shard picked when creating on subsequent shards. We arbitrarily use shard zero as the shard
3013 : // that will get the first creation request, and propagate the LSN to all the >0 shards.
3014 0 : let timeline_info = create_one(
3015 0 : shard_zero.0,
3016 0 : shard_zero.1,
3017 0 : self.config.jwt_token.clone(),
3018 0 : create_req.clone(),
3019 0 : )
3020 0 : .await?;
3021 :
3022 : // Propagate the LSN that shard zero picked, if caller didn't provide one
3023 0 : if create_req.ancestor_timeline_id.is_some() && create_req.ancestor_start_lsn.is_none()
3024 0 : {
3025 0 : create_req.ancestor_start_lsn = timeline_info.ancestor_lsn;
3026 0 : }
3027 :
3028 : // Create timeline on remaining shards with number >0
3029 0 : if !targets.is_empty() {
3030 : // If we had multiple shards, issue requests for the remainder now.
3031 0 : let jwt = &self.config.jwt_token;
3032 0 : self.tenant_for_shards(
3033 0 : targets.iter().map(|t| (t.0, t.1.clone())).collect(),
3034 0 : |tenant_shard_id: TenantShardId, node: Node| {
3035 0 : let create_req = create_req.clone();
3036 0 : Box::pin(create_one(tenant_shard_id, node, jwt.clone(), create_req))
3037 0 : },
3038 0 : )
3039 0 : .await?;
3040 0 : }
3041 :
3042 0 : Ok(timeline_info)
3043 0 : })
3044 0 : .await?
3045 0 : }
3046 :
3047 0 : pub(crate) async fn tenant_timeline_archival_config(
3048 0 : &self,
3049 0 : tenant_id: TenantId,
3050 0 : timeline_id: TimelineId,
3051 0 : req: TimelineArchivalConfigRequest,
3052 0 : ) -> Result<(), ApiError> {
3053 0 : tracing::info!(
3054 0 : "Setting archival config of timeline {tenant_id}/{timeline_id} to '{:?}'",
3055 : req.state
3056 : );
3057 :
3058 0 : let _tenant_lock = trace_shared_lock(
3059 0 : &self.tenant_op_locks,
3060 0 : tenant_id,
3061 0 : TenantOperations::TimelineArchivalConfig,
3062 0 : )
3063 0 : .await;
3064 :
3065 0 : self.tenant_remote_mutation(tenant_id, move |targets| async move {
3066 0 : if targets.is_empty() {
3067 0 : return Err(ApiError::NotFound(
3068 0 : anyhow::anyhow!("Tenant not found").into(),
3069 0 : ));
3070 0 : }
3071 0 : async fn config_one(
3072 0 : tenant_shard_id: TenantShardId,
3073 0 : timeline_id: TimelineId,
3074 0 : node: Node,
3075 0 : jwt: Option<String>,
3076 0 : req: TimelineArchivalConfigRequest,
3077 0 : ) -> Result<(), ApiError> {
3078 0 : tracing::info!(
3079 0 : "Setting archival config of timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
3080 : );
3081 :
3082 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
3083 0 :
3084 0 : client
3085 0 : .timeline_archival_config(tenant_shard_id, timeline_id, &req)
3086 0 : .await
3087 0 : .map_err(|e| match e {
3088 0 : mgmt_api::Error::ApiError(StatusCode::PRECONDITION_FAILED, msg) => {
3089 0 : ApiError::PreconditionFailed(msg.into_boxed_str())
3090 : }
3091 0 : _ => passthrough_api_error(&node, e),
3092 0 : })
3093 0 : }
3094 :
3095 : // no shard needs to go first/last; the operation should be idempotent
3096 : // TODO: it would be great to ensure that all shards return the same error
3097 0 : let results = self
3098 0 : .tenant_for_shards(targets, |tenant_shard_id, node| {
3099 0 : futures::FutureExt::boxed(config_one(
3100 0 : tenant_shard_id,
3101 0 : timeline_id,
3102 0 : node,
3103 0 : self.config.jwt_token.clone(),
3104 0 : req.clone(),
3105 0 : ))
3106 0 : })
3107 0 : .await?;
3108 0 : assert!(!results.is_empty(), "must have at least one result");
3109 :
3110 0 : Ok(())
3111 0 : }).await?
3112 0 : }
3113 :
3114 0 : pub(crate) async fn tenant_timeline_detach_ancestor(
3115 0 : &self,
3116 0 : tenant_id: TenantId,
3117 0 : timeline_id: TimelineId,
3118 0 : ) -> Result<models::detach_ancestor::AncestorDetached, ApiError> {
3119 0 : tracing::info!("Detaching timeline {tenant_id}/{timeline_id}",);
3120 :
3121 0 : let _tenant_lock = trace_shared_lock(
3122 0 : &self.tenant_op_locks,
3123 0 : tenant_id,
3124 0 : TenantOperations::TimelineDetachAncestor,
3125 0 : )
3126 0 : .await;
3127 :
3128 0 : self.tenant_remote_mutation(tenant_id, move |targets| async move {
3129 0 : if targets.is_empty() {
3130 0 : return Err(ApiError::NotFound(
3131 0 : anyhow::anyhow!("Tenant not found").into(),
3132 0 : ));
3133 0 : }
3134 :
3135 0 : async fn detach_one(
3136 0 : tenant_shard_id: TenantShardId,
3137 0 : timeline_id: TimelineId,
3138 0 : node: Node,
3139 0 : jwt: Option<String>,
3140 0 : ) -> Result<(ShardNumber, models::detach_ancestor::AncestorDetached), ApiError> {
3141 0 : tracing::info!(
3142 0 : "Detaching timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
3143 : );
3144 :
3145 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
3146 0 :
3147 0 : client
3148 0 : .timeline_detach_ancestor(tenant_shard_id, timeline_id)
3149 0 : .await
3150 0 : .map_err(|e| {
3151 : use mgmt_api::Error;
3152 :
3153 0 : match e {
3154 : // no ancestor (ever)
3155 0 : Error::ApiError(StatusCode::CONFLICT, msg) => ApiError::Conflict(format!(
3156 0 : "{node}: {}",
3157 0 : msg.strip_prefix("Conflict: ").unwrap_or(&msg)
3158 0 : )),
3159 : // too many ancestors
3160 0 : Error::ApiError(StatusCode::BAD_REQUEST, msg) => {
3161 0 : ApiError::BadRequest(anyhow::anyhow!("{node}: {msg}"))
3162 : }
3163 0 : Error::ApiError(StatusCode::INTERNAL_SERVER_ERROR, msg) => {
3164 0 : // Avoid turning these into conflicts, to remain compatible with
3165 0 : // pageservers: 500 errors are, sadly, retryable for timeline ancestor
3166 0 : // detach.
3167 0 : ApiError::InternalServerError(anyhow::anyhow!("{node}: {msg}"))
3168 : }
3169 : // rest can be mapped as usual
3170 0 : other => passthrough_api_error(&node, other),
3171 : }
3172 0 : })
3173 0 : .map(|res| (tenant_shard_id.shard_number, res))
3174 0 : }
3175 :
3176 : // no shard needs to go first/last; the operation should be idempotent
3177 0 : let mut results = self
3178 0 : .tenant_for_shards(targets, |tenant_shard_id, node| {
3179 0 : futures::FutureExt::boxed(detach_one(
3180 0 : tenant_shard_id,
3181 0 : timeline_id,
3182 0 : node,
3183 0 : self.config.jwt_token.clone(),
3184 0 : ))
3185 0 : })
3186 0 : .await?;
3187 :
3188 0 : let any = results.pop().expect("we must have at least one response");
3189 0 :
3190 0 : let mismatching = results
3191 0 : .iter()
3192 0 : .filter(|(_, res)| res != &any.1)
3193 0 : .collect::<Vec<_>>();
3194 0 : if !mismatching.is_empty() {
3195 : // This can be hit by races, which should not happen because of the operation lock on the control plane.
3196 0 : let matching = results.len() - mismatching.len();
3197 0 : tracing::error!(
3198 : matching,
3199 : compared_against=?any,
3200 : ?mismatching,
3201 0 : "shards returned different results"
3202 : );
3203 :
3204 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("pageservers returned mixed results for ancestor detach; manual intervention is required.")));
3205 0 : }
3206 0 :
3207 0 : Ok(any.1)
3208 0 : }).await?
3209 0 : }
3210 :
3211 0 : pub(crate) async fn tenant_timeline_block_unblock_gc(
3212 0 : &self,
3213 0 : tenant_id: TenantId,
3214 0 : timeline_id: TimelineId,
3215 0 : dir: BlockUnblock,
3216 0 : ) -> Result<(), ApiError> {
3217 0 : let _tenant_lock = trace_shared_lock(
3218 0 : &self.tenant_op_locks,
3219 0 : tenant_id,
3220 0 : TenantOperations::TimelineGcBlockUnblock,
3221 0 : )
3222 0 : .await;
3223 :
3224 0 : self.tenant_remote_mutation(tenant_id, move |targets| async move {
3225 0 : if targets.is_empty() {
3226 0 : return Err(ApiError::NotFound(
3227 0 : anyhow::anyhow!("Tenant not found").into(),
3228 0 : ));
3229 0 : }
3230 :
3231 0 : async fn do_one(
3232 0 : tenant_shard_id: TenantShardId,
3233 0 : timeline_id: TimelineId,
3234 0 : node: Node,
3235 0 : jwt: Option<String>,
3236 0 : dir: BlockUnblock,
3237 0 : ) -> Result<(), ApiError> {
3238 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
3239 0 :
3240 0 : client
3241 0 : .timeline_block_unblock_gc(tenant_shard_id, timeline_id, dir)
3242 0 : .await
3243 0 : .map_err(|e| passthrough_api_error(&node, e))
3244 0 : }
3245 :
3246 : // no shard needs to go first/last; the operation should be idempotent
3247 0 : self.tenant_for_shards(targets, |tenant_shard_id, node| {
3248 0 : futures::FutureExt::boxed(do_one(
3249 0 : tenant_shard_id,
3250 0 : timeline_id,
3251 0 : node,
3252 0 : self.config.jwt_token.clone(),
3253 0 : dir,
3254 0 : ))
3255 0 : })
3256 0 : .await
3257 0 : })
3258 0 : .await??;
3259 0 : Ok(())
3260 0 : }
3261 :
3262 : /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
3263 : ///
3264 : /// On success, the returned vector contains exactly the same number of elements as the input `locations`.
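: ///
: /// A minimal usage sketch (illustrative only; `frob_one` is a hypothetical per-shard future):
: /// ```ignore
: /// let results = self
: ///     .tenant_for_shards(locations, |tenant_shard_id, node| {
: ///         Box::pin(frob_one(tenant_shard_id, node))
: ///     })
: ///     .await?; // the first Err short-circuits and fails the whole call
: /// ```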
3265 0 : async fn tenant_for_shards<F, R>(
3266 0 : &self,
3267 0 : locations: Vec<(TenantShardId, Node)>,
3268 0 : mut req_fn: F,
3269 0 : ) -> Result<Vec<R>, ApiError>
3270 0 : where
3271 0 : F: FnMut(
3272 0 : TenantShardId,
3273 0 : Node,
3274 0 : )
3275 0 : -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
3276 0 : {
3277 0 : let mut futs = FuturesUnordered::new();
3278 0 : let mut results = Vec::with_capacity(locations.len());
3279 :
3280 0 : for (tenant_shard_id, node) in locations {
3281 0 : futs.push(req_fn(tenant_shard_id, node));
3282 0 : }
3283 :
3284 0 : while let Some(r) = futs.next().await {
3285 0 : results.push(r?);
3286 : }
3287 :
3288 0 : Ok(results)
3289 0 : }
3290 :
3291 : /// Concurrently invoke a pageserver API call on many shards at once
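: ///
: /// Unlike [`Self::tenant_for_shards`], this variant retries each call via the node's retry
: /// helper and collects one `mgmt_api::Result` per location rather than short-circuiting on
: /// the first error. A usage sketch (illustrative only; mirrors how tenant deletion above
: /// drives this helper):
: /// ```ignore
: /// let results = self
: ///     .tenant_for_shards_api(
: ///         locations,
: ///         |tenant_shard_id, client| async move { client.tenant_delete(tenant_shard_id).await },
: ///         1,                 // warn_threshold
: ///         3,                 // max_retries
: ///         RECONCILE_TIMEOUT,
: ///         &self.cancel,
: ///     )
: ///     .await;
: /// ```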
3292 0 : pub(crate) async fn tenant_for_shards_api<T, O, F>(
3293 0 : &self,
3294 0 : locations: Vec<(TenantShardId, Node)>,
3295 0 : op: O,
3296 0 : warn_threshold: u32,
3297 0 : max_retries: u32,
3298 0 : timeout: Duration,
3299 0 : cancel: &CancellationToken,
3300 0 : ) -> Vec<mgmt_api::Result<T>>
3301 0 : where
3302 0 : O: Fn(TenantShardId, PageserverClient) -> F + Copy,
3303 0 : F: std::future::Future<Output = mgmt_api::Result<T>>,
3304 0 : {
3305 0 : let mut futs = FuturesUnordered::new();
3306 0 : let mut results = Vec::with_capacity(locations.len());
3307 :
3308 0 : for (tenant_shard_id, node) in locations {
3309 0 : futs.push(async move {
3310 0 : node.with_client_retries(
3311 0 : |client| op(tenant_shard_id, client),
3312 0 : &self.config.jwt_token,
3313 0 : warn_threshold,
3314 0 : max_retries,
3315 0 : timeout,
3316 0 : cancel,
3317 0 : )
3318 0 : .await
3319 0 : });
3320 0 : }
3321 :
3322 0 : while let Some(r) = futs.next().await {
3323 0 : let r = r.unwrap_or(Err(mgmt_api::Error::Cancelled));
3324 0 : results.push(r);
3325 0 : }
3326 :
3327 0 : results
3328 0 : }
3329 :
3330 : /// Helper for safely working with the shards in a tenant remotely on pageservers, for example
3331 : /// when creating and deleting timelines:
3332 : /// - Checks that shards are attached somewhere: errors with 409 if a shard is not meant to be attached, or 503 if attachment is still pending
3333 : /// - Looks up the shards and the nodes where they were most recently attached
3334 : /// - Checks that after the inner function returns, the shards' generations haven't moved on (erroring if they have):
3335 : /// this ensures that the remote operation acted on the most recent generation, and is therefore durable.
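: ///
: /// A sketch of the generation-check protocol this implements (illustrative, not normative;
: /// `attached_targets` and `generations_changed` are hypothetical helpers):
: /// ```ignore
: /// let before = persistence.tenant_generations(tenant_id).await?; // (shard, generation, node)
: /// let result = op(attached_targets(&before)).await;              // the remote mutation
: /// let after = persistence.tenant_generations(tenant_id).await?;
: /// if generations_changed(&before, &after) {
: ///     // We may have acted on a stale generation: a 503 tells the caller to retry.
: ///     return Err(ApiError::ResourceUnavailable("Tenant attachment changed, please retry".into()));
: /// }
: /// Ok(result)
: /// ```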
3336 0 : async fn tenant_remote_mutation<R, O, F>(
3337 0 : &self,
3338 0 : tenant_id: TenantId,
3339 0 : op: O,
3340 0 : ) -> Result<R, ApiError>
3341 0 : where
3342 0 : O: FnOnce(Vec<(TenantShardId, Node)>) -> F,
3343 0 : F: std::future::Future<Output = R>,
3344 0 : {
3345 0 : let target_gens = {
3346 0 : let mut targets = Vec::new();
3347 :
3348 : // Load the currently attached pageservers for the latest generation of each shard. This can
3349 : // run concurrently with reconciliations, and it is not guaranteed that the node we find here
3350 : // will still be the latest when we're done: we will check generations again at the end of
3351 : // this function to handle that.
3352 0 : let generations = self.persistence.tenant_generations(tenant_id).await?;
3353 :
3354 0 : if generations
3355 0 : .iter()
3356 0 : .any(|i| i.generation.is_none() || i.generation_pageserver.is_none())
3357 : {
3358 : // One or more shards have not been attached to a pageserver. Check whether this is because they're configured
3359 : // to be detached (409: caller should give up), or because it's meant to be attached but isn't yet (503: caller should retry)
3360 0 : let locked = self.inner.read().unwrap();
3361 0 : for (shard_id, shard) in
3362 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3363 : {
3364 0 : match shard.policy {
3365 0 : PlacementPolicy::Attached(_) => {
3366 0 : // This shard is meant to be attached: the caller is not wrong to try and
3367 0 : // use this function, but we can't service the request right now.
3368 0 : }
3369 : PlacementPolicy::Secondary | PlacementPolicy::Detached => {
3370 0 : return Err(ApiError::Conflict(format!(
3371 0 : "Shard {shard_id} tenant has policy {:?}",
3372 0 : shard.policy
3373 0 : )));
3374 : }
3375 : }
3376 : }
3377 :
3378 0 : return Err(ApiError::ResourceUnavailable(
3379 0 : "One or more shards in tenant is not yet attached".into(),
3380 0 : ));
3381 0 : }
3382 0 :
3383 0 : let locked = self.inner.read().unwrap();
3384 : for ShardGenerationState {
3385 0 : tenant_shard_id,
3386 0 : generation,
3387 0 : generation_pageserver,
3388 0 : } in generations
3389 : {
3390 0 : let node_id = generation_pageserver.expect("We checked for None above");
3391 0 : let node = locked
3392 0 : .nodes
3393 0 : .get(&node_id)
3394 0 : .ok_or(ApiError::Conflict(format!(
3395 0 : "Raced with removal of node {node_id}"
3396 0 : )))?;
3397 0 : targets.push((tenant_shard_id, node.clone(), generation));
3398 : }
3399 :
3400 0 : targets
3401 0 : };
3402 0 :
3403 0 : let targets = target_gens.iter().map(|t| (t.0, t.1.clone())).collect();
3404 0 : let result = op(targets).await;
3405 :
3406 : // Post-check: are all the generations of all the shards the same as they were initially? This proves that
3407 : // our remote operation executed on the latest generation and is therefore persistent.
3408 : {
3409 0 : let latest_generations = self.persistence.tenant_generations(tenant_id).await?;
3410 0 : if latest_generations
3411 0 : .into_iter()
3412 0 : .map(
3413 0 : |ShardGenerationState {
3414 : tenant_shard_id,
3415 : generation,
3416 : generation_pageserver: _,
3417 0 : }| (tenant_shard_id, generation),
3418 0 : )
3419 0 : .collect::<Vec<_>>()
3420 0 : != target_gens
3421 0 : .into_iter()
3422 0 : .map(|i| (i.0, i.2))
3423 0 : .collect::<Vec<_>>()
3424 : {
3425 : // We raced with something that incremented the generation, and therefore cannot be
3426 : // confident that our actions are persistent (they might have hit an old generation).
3427 : //
3428 : // This is safe but requires a retry: ask the client to do that by giving them a 503 response.
3429 0 : return Err(ApiError::ResourceUnavailable(
3430 0 : "Tenant attachment changed, please retry".into(),
3431 0 : ));
3432 0 : }
3433 0 : }
3434 0 :
3435 0 : Ok(result)
3436 0 : }
3437 :
3438 0 : pub(crate) async fn tenant_timeline_delete(
3439 0 : &self,
3440 0 : tenant_id: TenantId,
3441 0 : timeline_id: TimelineId,
3442 0 : ) -> Result<StatusCode, ApiError> {
3443 0 : tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
3444 0 : let _tenant_lock = trace_shared_lock(
3445 0 : &self.tenant_op_locks,
3446 0 : tenant_id,
3447 0 : TenantOperations::TimelineDelete,
3448 0 : )
3449 0 : .await;
3450 :
3451 0 : self.tenant_remote_mutation(tenant_id, move |mut targets| async move {
3452 0 : if targets.is_empty() {
3453 0 : return Err(ApiError::NotFound(
3454 0 : anyhow::anyhow!("Tenant not found").into(),
3455 0 : ));
3456 0 : }
3457 0 : let shard_zero = targets.remove(0);
3458 :
3459 0 : async fn delete_one(
3460 0 : tenant_shard_id: TenantShardId,
3461 0 : timeline_id: TimelineId,
3462 0 : node: Node,
3463 0 : jwt: Option<String>,
3464 0 : ) -> Result<StatusCode, ApiError> {
3465 0 : tracing::info!(
3466 0 : "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
3467 : );
3468 :
3469 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
3470 0 : client
3471 0 : .timeline_delete(tenant_shard_id, timeline_id)
3472 0 : .await
3473 0 : .map_err(|e| {
3474 0 : ApiError::InternalServerError(anyhow::anyhow!(
3475 0 : "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
3476 0 : ))
3477 0 : })
3478 0 : }
3479 :
3480 0 : let statuses = self
3481 0 : .tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
3482 0 : Box::pin(delete_one(
3483 0 : tenant_shard_id,
3484 0 : timeline_id,
3485 0 : node,
3486 0 : self.config.jwt_token.clone(),
3487 0 : ))
3488 0 : })
3489 0 : .await?;
3490 :
3491 : // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero
3492 0 : if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
3493 0 : return Ok(StatusCode::ACCEPTED);
3494 0 : }
3495 :
3496 : // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
3497 : // to shard zero, it gives the more intuitive behavior that a GET only starts returning 404 once the deletion is fully complete.
3498 0 : let shard_zero_status = delete_one(
3499 0 : shard_zero.0,
3500 0 : timeline_id,
3501 0 : shard_zero.1,
3502 0 : self.config.jwt_token.clone(),
3503 0 : )
3504 0 : .await?;
3505 0 : Ok(shard_zero_status)
3506 0 : }).await?
3507 0 : }
3508 :
3509 : /// When you need to send an HTTP request to the pageserver that holds shard zero of a tenant, this
3510 : /// function looks up and returns that node. If the tenant isn't found, returns `Err(ApiError::NotFound)`.
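: ///
: /// A usage sketch (illustrative only):
: /// ```ignore
: /// let (node, shard_zero_id) = self.tenant_shard0_node(tenant_id)?;
: /// let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
: /// // ... proxy the timeline request to `client` ...
: /// ```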
3511 0 : pub(crate) fn tenant_shard0_node(
3512 0 : &self,
3513 0 : tenant_id: TenantId,
3514 0 : ) -> Result<(Node, TenantShardId), ApiError> {
3515 0 : let locked = self.inner.read().unwrap();
3516 0 : let Some((tenant_shard_id, shard)) = locked
3517 0 : .tenants
3518 0 : .range(TenantShardId::tenant_range(tenant_id))
3519 0 : .next()
3520 : else {
3521 0 : return Err(ApiError::NotFound(
3522 0 : anyhow::anyhow!("Tenant {tenant_id} not found").into(),
3523 0 : ));
3524 : };
3525 :
3526 : // TODO: should use the ID last published to compute_hook, rather than the intent: the intent might
3527 : // point to somewhere we haven't attached yet.
3528 0 : let Some(node_id) = shard.intent.get_attached() else {
3529 0 : tracing::warn!(
3530 0 : tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
3531 0 : "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
3532 : shard.policy
3533 : );
3534 0 : return Err(ApiError::Conflict(
3535 0 : "Cannot call timeline API on non-attached tenant".to_string(),
3536 0 : ));
3537 : };
3538 :
3539 0 : let Some(node) = locked.nodes.get(node_id) else {
3540 : // This should never happen
3541 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3542 0 : "Shard refers to nonexistent node"
3543 0 : )));
3544 : };
3545 :
3546 0 : Ok((node.clone(), *tenant_shard_id))
3547 0 : }
3548 :
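: /// Look up all shards of a tenant: returns each shard's attached location plus the
: /// tenant's `ShardParameters` (shard count and stripe size). Returns 404 if the tenant
: /// has no shards, and 400 if any shard is not attached.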
3549 0 : pub(crate) fn tenant_locate(
3550 0 : &self,
3551 0 : tenant_id: TenantId,
3552 0 : ) -> Result<TenantLocateResponse, ApiError> {
3553 0 : let locked = self.inner.read().unwrap();
3554 0 : tracing::info!("Locating shards for tenant {tenant_id}");
3555 :
3556 0 : let mut result = Vec::new();
3557 0 : let mut shard_params: Option<ShardParameters> = None;
3558 :
3559 0 : for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3560 : {
3561 0 : let node_id =
3562 0 : shard
3563 0 : .intent
3564 0 : .get_attached()
3565 0 : .ok_or(ApiError::BadRequest(anyhow::anyhow!(
3566 0 : "Cannot locate a tenant that is not attached"
3567 0 : )))?;
3568 :
3569 0 : let node = locked
3570 0 : .nodes
3571 0 : .get(&node_id)
3572 0 : .expect("Pageservers may not be deleted while referenced");
3573 0 :
3574 0 : result.push(node.shard_location(*tenant_shard_id));
3575 0 :
3576 0 : match &shard_params {
3577 0 : None => {
3578 0 : shard_params = Some(ShardParameters {
3579 0 : stripe_size: shard.shard.stripe_size,
3580 0 : count: shard.shard.count,
3581 0 : });
3582 0 : }
3583 0 : Some(params) => {
3584 0 : if params.stripe_size != shard.shard.stripe_size {
3585 : // This should never happen. We enforce at runtime because it's simpler than
3586 : // adding an extra per-tenant data structure to store the values that must be identical across shards.
3587 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3588 0 : "Inconsistent shard stripe size parameters!"
3589 0 : )));
3590 0 : }
3591 : }
3592 : }
3593 : }
3594 :
3595 0 : if result.is_empty() {
3596 0 : return Err(ApiError::NotFound(
3597 0 : anyhow::anyhow!("No shards for this tenant ID found").into(),
3598 0 : ));
3599 0 : }
3600 0 : let shard_params = shard_params.expect("result is non-empty, therefore this is set");
3601 0 : tracing::info!(
3602 0 : "Located tenant {} with params {:?} on shards {}",
3603 0 : tenant_id,
3604 0 : shard_params,
3605 0 : result
3606 0 : .iter()
3607 0 : .map(|s| format!("{:?}", s))
3608 0 : .collect::<Vec<_>>()
3609 0 : .join(",")
3610 : );
3611 :
3612 0 : Ok(TenantLocateResponse {
3613 0 : shards: result,
3614 0 : shard_params,
3615 0 : })
3616 0 : }
3617 :
3618 : /// Returns None if the input iterator of shards does not include a shard with number=0
3619 0 : fn tenant_describe_impl<'a>(
3620 0 : &self,
3621 0 : shards: impl Iterator<Item = &'a TenantShard>,
3622 0 : ) -> Option<TenantDescribeResponse> {
3623 0 : let mut shard_zero = None;
3624 0 : let mut describe_shards = Vec::new();
3625 :
3626 0 : for shard in shards {
3627 0 : if shard.tenant_shard_id.is_shard_zero() {
3628 0 : shard_zero = Some(shard);
3629 0 : }
3630 :
3631 0 : describe_shards.push(TenantDescribeResponseShard {
3632 0 : tenant_shard_id: shard.tenant_shard_id,
3633 0 : node_attached: *shard.intent.get_attached(),
3634 0 : node_secondary: shard.intent.get_secondary().to_vec(),
3635 0 : last_error: shard
3636 0 : .last_error
3637 0 : .lock()
3638 0 : .unwrap()
3639 0 : .as_ref()
3640 0 : .map(|e| format!("{e}"))
3641 0 : .unwrap_or("".to_string())
3642 0 : .clone(),
3643 0 : is_reconciling: shard.reconciler.is_some(),
3644 0 : is_pending_compute_notification: shard.pending_compute_notification,
3645 0 : is_splitting: matches!(shard.splitting, SplitState::Splitting),
3646 0 : scheduling_policy: *shard.get_scheduling_policy(),
3647 0 : preferred_az_id: shard.preferred_az().map(ToString::to_string),
3648 : })
3649 : }
3650 :
3651 0 : let shard_zero = shard_zero?;
3652 :
3653 0 : Some(TenantDescribeResponse {
3654 0 : tenant_id: shard_zero.tenant_shard_id.tenant_id,
3655 0 : shards: describe_shards,
3656 0 : stripe_size: shard_zero.shard.stripe_size,
3657 0 : policy: shard_zero.policy.clone(),
3658 0 : config: shard_zero.config.clone(),
3659 0 : })
3660 0 : }
3661 :
3662 0 : pub(crate) fn tenant_describe(
3663 0 : &self,
3664 0 : tenant_id: TenantId,
3665 0 : ) -> Result<TenantDescribeResponse, ApiError> {
3666 0 : let locked = self.inner.read().unwrap();
3667 0 :
3668 0 : self.tenant_describe_impl(
3669 0 : locked
3670 0 : .tenants
3671 0 : .range(TenantShardId::tenant_range(tenant_id))
3672 0 : .map(|(_k, v)| v),
3673 0 : )
3674 0 : .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
3675 0 : }
3676 :
3677 0 : pub(crate) fn tenant_list(&self) -> Vec<TenantDescribeResponse> {
3678 0 : let locked = self.inner.read().unwrap();
3679 0 :
3680 0 : let mut result = Vec::new();
3681 0 : for (_tenant_id, tenant_shards) in
3682 0 : &locked.tenants.iter().group_by(|(id, _shard)| id.tenant_id)
3683 0 : {
3684 0 : result.push(
3685 0 : self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
3686 0 : .expect("Groups are always non-empty"),
3687 0 : );
3688 0 : }
3689 :
3690 0 : result
3691 0 : }
3692 :
3693 0 : #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
3694 : async fn abort_tenant_shard_split(
3695 : &self,
3696 : op: &TenantShardSplitAbort,
3697 : ) -> Result<(), TenantShardSplitAbortError> {
3698 : // Cleaning up a split:
3699 : // - Parent shards are not destroyed during a split, just detached.
3700 : // - Failed pageserver split API calls can leave the remote node with just the parent attached,
3701 : // just the children attached, or both.
3702 : //
3703 : // Therefore our work to do is to:
3704 : // 1. Clean up storage controller's internal state to just refer to parents, no children
3705 : // 2. Call out to pageservers to ensure that children are detached
3706 : // 3. Call out to pageservers to ensure that parents are attached.
3707 : //
3708 : // Crash safety:
3709 : // - If the storage controller stops running during this cleanup *after* clearing the splitting state
3710 : // from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
3711 : // and detach them.
3712 : // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
3713 : // from our database, then we will re-enter this cleanup routine on startup.
3714 :
3715 : let TenantShardSplitAbort {
3716 : tenant_id,
3717 : new_shard_count,
3718 : new_stripe_size,
3719 : ..
3720 : } = op;
3721 :
3722 : // First abort persistent state, if any exists.
3723 : match self
3724 : .persistence
3725 : .abort_shard_split(*tenant_id, *new_shard_count)
3726 : .await?
3727 : {
3728 : AbortShardSplitStatus::Aborted => {
3729 : // Proceed to roll back any child shards created on pageservers
3730 : }
3731 : AbortShardSplitStatus::Complete => {
3732 : // The split completed (we might hit that path if e.g. our database transaction
3733 : // to write the completion landed in the database, but we dropped connection
3734 : // before seeing the result).
3735 : //
3736 : // We must update in-memory state to reflect the successful split.
3737 : self.tenant_shard_split_commit_inmem(
3738 : *tenant_id,
3739 : *new_shard_count,
3740 : *new_stripe_size,
3741 : );
3742 : return Ok(());
3743 : }
3744 : }
3745 :
3746 : // Clean up in-memory state, and accumulate the list of child locations that need detaching
3747 : let detach_locations: Vec<(Node, TenantShardId)> = {
3748 : let mut detach_locations = Vec::new();
3749 : let mut locked = self.inner.write().unwrap();
3750 : let (nodes, tenants, scheduler) = locked.parts_mut();
3751 :
3752 : for (tenant_shard_id, shard) in
3753 : tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
3754 : {
3755 : if shard.shard.count == op.new_shard_count {
3756 : // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
3757 : // is infallible, so if we got an error we shouldn't have got that far.
3758 : tracing::warn!(
3759 : "During split abort, child shard {tenant_shard_id} found in-memory"
3760 : );
3761 : continue;
3762 : }
3763 :
3764 : // Add the children of this shard to this list of things to detach
3765 : if let Some(node_id) = shard.intent.get_attached() {
3766 : for child_id in tenant_shard_id.split(*new_shard_count) {
3767 : detach_locations.push((
3768 : nodes
3769 : .get(node_id)
3770 : .expect("Intent references nonexistent node")
3771 : .clone(),
3772 : child_id,
3773 : ));
3774 : }
3775 : } else {
3776 : tracing::warn!(
3777 : "During split abort, shard {tenant_shard_id} has no attached location"
3778 : );
3779 : }
3780 :
3781 : tracing::info!("Restoring parent shard {tenant_shard_id}");
3782 : shard.splitting = SplitState::Idle;
3783 : if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
3784 : // If this shard can't be scheduled now (perhaps due to offline nodes or
3785 : // capacity issues), that must not prevent us rolling back a split. In this
3786 : // case it should be eventually scheduled in the background.
3787 : tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
3788 : }
3789 :
3790 : self.maybe_reconcile_shard(shard, nodes);
3791 : }
3792 :
3793 : // We don't expect any new_shard_count shards to exist here, but drop them just in case
3794 0 : tenants.retain(|_id, s| s.shard.count != *new_shard_count);
3795 :
3796 : detach_locations
3797 : };
3798 :
3799 : for (node, child_id) in detach_locations {
3800 : if !node.is_available() {
3801 : // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
3802 : // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
3803 : // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
3804 : // them from the node.
3805 : tracing::warn!("Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated.");
3806 : continue;
3807 : }
3808 :
3809 : // Detach the remote child. If the pageserver split API call is still in progress, this call will get
3810 : // a 503 and retry, up to our limit.
3811 : tracing::info!("Detaching {child_id} on {node}...");
3812 : match node
3813 : .with_client_retries(
3814 0 : |client| async move {
3815 0 : let config = LocationConfig {
3816 0 : mode: LocationConfigMode::Detached,
3817 0 : generation: None,
3818 0 : secondary_conf: None,
3819 0 : shard_number: child_id.shard_number.0,
3820 0 : shard_count: child_id.shard_count.literal(),
3821 0 : // Stripe size and tenant config don't matter when detaching
3822 0 : shard_stripe_size: 0,
3823 0 : tenant_conf: TenantConfig::default(),
3824 0 : };
3825 0 :
3826 0 : client.location_config(child_id, config, None, false).await
3827 0 : },
3828 : &self.config.jwt_token,
3829 : 1,
3830 : 10,
3831 : Duration::from_secs(5),
3832 : &self.cancel,
3833 : )
3834 : .await
3835 : {
3836 : Some(Ok(_)) => {}
3837 : Some(Err(e)) => {
3838 : // We failed to communicate with the remote node. This is problematic: we may be
3839 : // leaving it with a rogue child shard.
3840 : tracing::warn!(
3841 : "Failed to detach child {child_id} from node {node} during abort"
3842 : );
3843 : return Err(e.into());
3844 : }
3845 : None => {
3846 : // Cancellation: we were shut down or the node went offline. Shutdown is fine: we'll
3847 : // clean up on restart. The node going offline requires a retry.
3848 : return Err(TenantShardSplitAbortError::Unavailable);
3849 : }
3850 : };
3851 : }
3852 :
3853 : tracing::info!("Successfully aborted split");
3854 : Ok(())
3855 : }
3856 :
3857 : /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
3858 : /// of the tenant map to reflect the child shards that exist after the split.
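: ///
: /// Returns the split response, the `(child_id, node, stripe_size)` triples needed for
: /// compute notifications, and reconciler waiters for the new child shards.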
3859 0 : fn tenant_shard_split_commit_inmem(
3860 0 : &self,
3861 0 : tenant_id: TenantId,
3862 0 : new_shard_count: ShardCount,
3863 0 : new_stripe_size: Option<ShardStripeSize>,
3864 0 : ) -> (
3865 0 : TenantShardSplitResponse,
3866 0 : Vec<(TenantShardId, NodeId, ShardStripeSize)>,
3867 0 : Vec<ReconcilerWaiter>,
3868 0 : ) {
3869 0 : let mut response = TenantShardSplitResponse {
3870 0 : new_shards: Vec::new(),
3871 0 : };
3872 0 : let mut child_locations = Vec::new();
3873 0 : let mut waiters = Vec::new();
3874 0 :
3875 0 : {
3876 0 : let mut locked = self.inner.write().unwrap();
3877 0 :
3878 0 : let parent_ids = locked
3879 0 : .tenants
3880 0 : .range(TenantShardId::tenant_range(tenant_id))
3881 0 : .map(|(shard_id, _)| *shard_id)
3882 0 : .collect::<Vec<_>>();
3883 0 :
3884 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3885 0 : for parent_id in parent_ids {
3886 0 : let child_ids = parent_id.split(new_shard_count);
3887 :
3888 0 : let (pageserver, generation, policy, parent_ident, config) = {
3889 0 : let mut old_state = tenants
3890 0 : .remove(&parent_id)
3891 0 : .expect("It was present, we just split it");
3892 0 :
3893 0 : // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
3894 0 : // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
3895 0 : // nothing else can clear this.
3896 0 : assert!(matches!(old_state.splitting, SplitState::Splitting));
3897 :
3898 0 : let old_attached = old_state.intent.get_attached().unwrap();
3899 0 : old_state.intent.clear(scheduler);
3900 0 : let generation = old_state.generation.expect("Shard must have been attached");
3901 0 : (
3902 0 : old_attached,
3903 0 : generation,
3904 0 : old_state.policy,
3905 0 : old_state.shard,
3906 0 : old_state.config,
3907 0 : )
3908 0 : };
3909 0 :
3910 0 : let mut schedule_context = ScheduleContext::default();
3911 0 : for child in child_ids {
3912 0 : let mut child_shard = parent_ident;
3913 0 : child_shard.number = child.shard_number;
3914 0 : child_shard.count = child.shard_count;
3915 0 : if let Some(stripe_size) = new_stripe_size {
3916 0 : child_shard.stripe_size = stripe_size;
3917 0 : }
3918 :
3919 0 : let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
3920 0 : child_observed.insert(
3921 0 : pageserver,
3922 0 : ObservedStateLocation {
3923 0 : conf: Some(attached_location_conf(
3924 0 : generation,
3925 0 : &child_shard,
3926 0 : &config,
3927 0 : &policy,
3928 0 : )),
3929 0 : },
3930 0 : );
3931 0 :
3932 0 : let mut child_state = TenantShard::new(child, child_shard, policy.clone());
3933 0 : child_state.intent = IntentState::single(scheduler, Some(pageserver));
3934 0 : child_state.observed = ObservedState {
3935 0 : locations: child_observed,
3936 0 : };
3937 0 : child_state.generation = Some(generation);
3938 0 : child_state.config = config.clone();
3939 0 :
3940 0 : // The child's TenantShard::splitting is intentionally left at the default value of Idle,
3941 0 : // as at this point in the split process we have succeeded and this part is infallible:
3942 0 : // we will never need to do any special recovery from this state.
3943 0 :
3944 0 : child_locations.push((child, pageserver, child_shard.stripe_size));
3945 :
3946 0 : if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
3947 : // This is not fatal, because we've implicitly already got an attached
3948 : // location for the child shard. Failure here just means we couldn't
3949 : // find a secondary (e.g. because cluster is overloaded).
3950 0 : tracing::warn!("Failed to schedule child shard {child}: {e}");
3951 0 : }
3952 : // In the background, attach secondary locations for the new shards
3953 0 : if let Some(waiter) = self.maybe_reconcile_shard(&mut child_state, nodes) {
3954 0 : waiters.push(waiter);
3955 0 : }
3956 :
3957 0 : tenants.insert(child, child_state);
3958 0 : response.new_shards.push(child);
3959 : }
3960 : }
3961 0 : (response, child_locations, waiters)
3962 0 : }
3963 0 : }
3964 :
3965 0 : async fn tenant_shard_split_start_secondaries(
3966 0 : &self,
3967 0 : tenant_id: TenantId,
3968 0 : waiters: Vec<ReconcilerWaiter>,
3969 0 : ) {
3970 : // Wait for initial reconcile of child shards, this creates the secondary locations
3971 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
3972 : // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
3973 : // their secondaries couldn't be attached.
3974 0 : tracing::warn!("Failed to reconcile after split: {e}");
3975 0 : return;
3976 0 : }
3977 :
3978 : // Take the state lock to discover the attached & secondary intents for all shards
3979 0 : let (attached, secondary) = {
3980 0 : let locked = self.inner.read().unwrap();
3981 0 : let mut attached = Vec::new();
3982 0 : let mut secondary = Vec::new();
3983 :
3984 0 : for (tenant_shard_id, shard) in
3985 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3986 : {
3987 0 : let Some(node_id) = shard.intent.get_attached() else {
3988 : // Unexpected. Race with a PlacementPolicy change?
3989 0 : tracing::warn!(
3990 0 : "No attached node on {tenant_shard_id} immediately after shard split!"
3991 : );
3992 0 : continue;
3993 : };
3994 :
3995 0 : let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
3996 : // No secondary location. Nothing for us to do.
3997 0 : continue;
3998 : };
3999 :
4000 0 : let attached_node = locked
4001 0 : .nodes
4002 0 : .get(node_id)
4003 0 : .expect("Pageservers may not be deleted while referenced");
4004 0 :
4005 0 : let secondary_node = locked
4006 0 : .nodes
4007 0 : .get(secondary_node_id)
4008 0 : .expect("Pageservers may not be deleted while referenced");
4009 0 :
4010 0 : attached.push((*tenant_shard_id, attached_node.clone()));
4011 0 : secondary.push((*tenant_shard_id, secondary_node.clone()));
4012 : }
4013 0 : (attached, secondary)
4014 0 : };
4015 0 :
4016 0 : if secondary.is_empty() {
4017 : // No secondary locations; nothing for us to do
4018 0 : return;
4019 0 : }
4020 :
4021 0 : for result in self
4022 0 : .tenant_for_shards_api(
4023 0 : attached,
4024 0 : |tenant_shard_id, client| async move {
4025 0 : client.tenant_heatmap_upload(tenant_shard_id).await
4026 0 : },
4027 0 : 1,
4028 0 : 1,
4029 0 : SHORT_RECONCILE_TIMEOUT,
4030 0 : &self.cancel,
4031 0 : )
4032 0 : .await
4033 : {
4034 0 : if let Err(e) = result {
4035 0 : tracing::warn!("Error calling heatmap upload after shard split: {e}");
4036 0 : return;
4037 0 : }
4038 : }
4039 :
4040 0 : for result in self
4041 0 : .tenant_for_shards_api(
4042 0 : secondary,
4043 0 : |tenant_shard_id, client| async move {
4044 0 : client
4045 0 : .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
4046 0 : .await
4047 0 : },
4048 0 : 1,
4049 0 : 1,
4050 0 : SHORT_RECONCILE_TIMEOUT,
4051 0 : &self.cancel,
4052 0 : )
4053 0 : .await
4054 : {
4055 0 : if let Err(e) = result {
4056 0 : tracing::warn!("Error calling secondary download after shard split: {e}");
4057 0 : return;
4058 0 : }
4059 : }
4060 0 : }
4061 :
4062 0 : pub(crate) async fn tenant_shard_split(
4063 0 : &self,
4064 0 : tenant_id: TenantId,
4065 0 : split_req: TenantShardSplitRequest,
4066 0 : ) -> Result<TenantShardSplitResponse, ApiError> {
4067 : // TODO: return 503 if we get stuck waiting for this lock
4068 : // (issue https://github.com/neondatabase/neon/issues/7108)
4069 0 : let _tenant_lock = trace_exclusive_lock(
4070 0 : &self.tenant_op_locks,
4071 0 : tenant_id,
4072 0 : TenantOperations::ShardSplit,
4073 0 : )
4074 0 : .await;
4075 :
4076 0 : let new_shard_count = ShardCount::new(split_req.new_shard_count);
4077 0 : let new_stripe_size = split_req.new_stripe_size;
4078 :
4079 : // Validate the request and construct parameters. This phase is fallible, but does not require
4080 : // rollback on errors, as it does no I/O and mutates no state.
4081 0 : let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
4082 0 : ShardSplitAction::NoOp(resp) => return Ok(resp),
4083 0 : ShardSplitAction::Split(params) => params,
4084 : };
4085 :
4086 : // Execute this split: this phase mutates state and does remote I/O on pageservers. If it fails,
4087 : // we must roll back.
4088 0 : let r = self
4089 0 : .do_tenant_shard_split(tenant_id, shard_split_params)
4090 0 : .await;
4091 :
4092 0 : let (response, waiters) = match r {
4093 0 : Ok(r) => r,
4094 0 : Err(e) => {
4095 0 : // Split might be part-done, we must do work to abort it.
4096 0 : tracing::warn!("Enqueuing background abort of split on {tenant_id}");
4097 0 : self.abort_tx
4098 0 : .send(TenantShardSplitAbort {
4099 0 : tenant_id,
4100 0 : new_shard_count,
4101 0 : new_stripe_size,
4102 0 : _tenant_lock,
4103 0 : })
4104 0 : // Ignore send errors: they just mean we're shutting down; aborts are ephemeral, so it's fine to drop them.
4105 0 : .ok();
4106 0 : return Err(e);
4107 : }
4108 : };
4109 :
4110 : // The split is now complete. As an optimization, we will trigger all the child shards to upload
4111 : // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
4112 : // for the background heatmap/download interval before secondaries get warm enough to migrate shards
4113 : // in [`Self::optimize_all`]
4114 0 : self.tenant_shard_split_start_secondaries(tenant_id, waiters)
4115 0 : .await;
4116 0 : Ok(response)
4117 0 : }
4118 :
4119 0 : fn prepare_tenant_shard_split(
4120 0 : &self,
4121 0 : tenant_id: TenantId,
4122 0 : split_req: TenantShardSplitRequest,
4123 0 : ) -> Result<ShardSplitAction, ApiError> {
4124 0 : fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
4125 0 : anyhow::anyhow!("failpoint")
4126 0 : )));
4127 :
4128 0 : let mut policy = None;
4129 0 : let mut config = None;
4130 0 : let mut shard_ident = None;
4131 : // Validate input, and calculate which shards we will create
4132 0 : let (old_shard_count, targets) =
4133 : {
4134 0 : let locked = self.inner.read().unwrap();
4135 0 :
4136 0 : let pageservers = locked.nodes.clone();
4137 0 :
4138 0 : let mut targets = Vec::new();
4139 0 :
4140 0 : // In case this is a retry, count how many already-split shards we found
4141 0 : let mut children_found = Vec::new();
4142 0 : let mut old_shard_count = None;
4143 :
4144 0 : for (tenant_shard_id, shard) in
4145 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
4146 : {
4147 0 : match shard.shard.count.count().cmp(&split_req.new_shard_count) {
4148 : Ordering::Equal => {
4149 : // Already split this
4150 0 : children_found.push(*tenant_shard_id);
4151 0 : continue;
4152 : }
4153 : Ordering::Greater => {
4154 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
4155 0 : "Requested count {} but already have shards at count {}",
4156 0 : split_req.new_shard_count,
4157 0 : shard.shard.count.count()
4158 0 : )));
4159 : }
4160 0 : Ordering::Less => {
4161 0 : // Fall through: this shard has lower count than requested,
4162 0 : // is a candidate for splitting.
4163 0 : }
4164 0 : }
4165 0 :
4166 0 : match old_shard_count {
4167 0 : None => old_shard_count = Some(shard.shard.count),
4168 0 : Some(old_shard_count) => {
4169 0 : if old_shard_count != shard.shard.count {
4170 : // We may hit this case if a caller asked for two splits to
4171 : // different sizes, before the first one is complete.
4172 : // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
4173 : // of shard_count=1 and shard_count=2 shards in the map.
4174 0 : return Err(ApiError::Conflict(
4175 0 : "Cannot split, currently mid-split".to_string(),
4176 0 : ));
4177 0 : }
4178 : }
4179 : }
4180 0 : if policy.is_none() {
4181 0 : policy = Some(shard.policy.clone());
4182 0 : }
4183 0 : if shard_ident.is_none() {
4184 0 : shard_ident = Some(shard.shard);
4185 0 : }
4186 0 : if config.is_none() {
4187 0 : config = Some(shard.config.clone());
4188 0 : }
4189 :
4190 0 : if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
4191 0 : tracing::info!(
4192 0 : "Tenant shard {} already has shard count {}",
4193 : tenant_shard_id,
4194 : split_req.new_shard_count
4195 : );
4196 0 : continue;
4197 0 : }
4198 :
4199 0 : let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
4200 0 : anyhow::anyhow!("Cannot split a tenant that is not attached"),
4201 0 : ))?;
4202 :
4203 0 : let node = pageservers
4204 0 : .get(&node_id)
4205 0 : .expect("Pageservers may not be deleted while referenced");
4206 0 :
4207 0 : targets.push(ShardSplitTarget {
4208 0 : parent_id: *tenant_shard_id,
4209 0 : node: node.clone(),
4210 0 : child_ids: tenant_shard_id
4211 0 : .split(ShardCount::new(split_req.new_shard_count)),
4212 0 : });
4213 : }
4214 :
4215 0 : if targets.is_empty() {
4216 0 : if children_found.len() == split_req.new_shard_count as usize {
4217 0 : return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
4218 0 : new_shards: children_found,
4219 0 : }));
4220 : } else {
4221 : // No shards found to split, and no existing children found: the
4222 : // tenant doesn't exist at all.
4223 0 : return Err(ApiError::NotFound(
4224 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
4225 0 : ));
4226 : }
4227 0 : }
4228 0 :
4229 0 : (old_shard_count, targets)
4230 0 : };
4231 0 :
4232 0 : // unwrap safety: we would have returned above if we didn't find at least one shard to split
4233 0 : let old_shard_count = old_shard_count.unwrap();
4234 0 : let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
4235 : // This ShardIdentity will be used as the template for all children, so this implicitly
4236 : // applies the new stripe size to the children.
4237 0 : let mut shard_ident = shard_ident.unwrap();
4238 0 : if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
4239 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards", shard_ident.stripe_size)));
4240 0 : }
4241 0 :
4242 0 : shard_ident.stripe_size = new_stripe_size;
4243 0 : tracing::info!("applied stripe size {}", shard_ident.stripe_size.0);
4244 0 : shard_ident
4245 : } else {
4246 0 : shard_ident.unwrap()
4247 : };
4248 0 : let policy = policy.unwrap();
4249 0 : let config = config.unwrap();
4250 0 :
4251 0 : Ok(ShardSplitAction::Split(Box::new(ShardSplitParams {
4252 0 : old_shard_count,
4253 0 : new_shard_count: ShardCount::new(split_req.new_shard_count),
4254 0 : new_stripe_size: split_req.new_stripe_size,
4255 0 : targets,
4256 0 : policy,
4257 0 : config,
4258 0 : shard_ident,
4259 0 : })))
4260 0 : }
4261 :
4262 0 : async fn do_tenant_shard_split(
4263 0 : &self,
4264 0 : tenant_id: TenantId,
4265 0 : params: Box<ShardSplitParams>,
4266 0 : ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
4267 0 : // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
4268 0 : // request could occur here, deleting or mutating the tenant. begin_shard_split checks that the
4269 0 : // parent shards exist as expected, but it would be neater to do the above pre-checks within the
4270 0 : // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
4271 0 : // (https://github.com/neondatabase/neon/issues/6676)
4272 0 :
4273 0 : let ShardSplitParams {
4274 0 : old_shard_count,
4275 0 : new_shard_count,
4276 0 : new_stripe_size,
4277 0 : mut targets,
4278 0 : policy,
4279 0 : config,
4280 0 : shard_ident,
4281 0 : } = *params;
4282 :
4283 : // Drop any secondary locations: pageservers do not support splitting these, and in any case the
4284 : // end-state for a split tenant will usually be to have secondary locations on different nodes.
4285 : // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
4286 : // at the time of split.
4287 0 : let waiters = {
4288 0 : let mut locked = self.inner.write().unwrap();
4289 0 : let mut waiters = Vec::new();
4290 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4291 0 : for target in &mut targets {
4292 0 : let Some(shard) = tenants.get_mut(&target.parent_id) else {
4293 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
4294 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4295 0 : "Shard {} not found",
4296 0 : target.parent_id
4297 0 : )));
4298 : };
4299 :
4300 0 : if shard.intent.get_attached() != &Some(target.node.get_id()) {
4301 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
4302 0 : return Err(ApiError::Conflict(format!(
4303 0 : "Shard {} unexpectedly rescheduled during split",
4304 0 : target.parent_id
4305 0 : )));
4306 0 : }
4307 0 :
4308 0 : // Irrespective of PlacementPolicy, clear secondary locations from intent
4309 0 : shard.intent.clear_secondary(scheduler);
4310 :
4311 : // Run the Reconciler to execute the detach of secondary locations.
4312 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
4313 0 : waiters.push(waiter);
4314 0 : }
4315 : }
4316 0 : waiters
4317 0 : };
4318 0 : self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
4319 :
4320 : // Before creating any new child shards in memory or on the pageservers, persist them: this
4321 : // enables us to ensure that we will always be able to clean up if something goes wrong. This also
4322 : // acts as the protection against two concurrent attempts to split: one of them will get a database
4323 : // error trying to insert the child shards.
4324 0 : let mut child_tsps = Vec::new();
4325 0 : for target in &targets {
4326 0 : let mut this_child_tsps = Vec::new();
4327 0 : for child in &target.child_ids {
4328 0 : let mut child_shard = shard_ident;
4329 0 : child_shard.number = child.shard_number;
4330 0 : child_shard.count = child.shard_count;
4331 0 :
4332 0 : tracing::info!(
4333 0 : "Create child shard persistence with stripe size {}",
4334 : shard_ident.stripe_size.0
4335 : );
4336 :
4337 0 : this_child_tsps.push(TenantShardPersistence {
4338 0 : tenant_id: child.tenant_id.to_string(),
4339 0 : shard_number: child.shard_number.0 as i32,
4340 0 : shard_count: child.shard_count.literal() as i32,
4341 0 : shard_stripe_size: shard_ident.stripe_size.0 as i32,
4342 0 : // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
4343 0 : // populate the correct generation as part of its transaction, to protect us
4344 0 : // against racing with changes in the state of the parent.
4345 0 : generation: None,
4346 0 : generation_pageserver: Some(target.node.get_id().0 as i64),
4347 0 : placement_policy: serde_json::to_string(&policy).unwrap(),
4348 0 : config: serde_json::to_string(&config).unwrap(),
4349 0 : splitting: SplitState::Splitting,
4350 0 :
4351 0 : // Scheduling policies and preferred AZ do not carry through to children
4352 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
4353 0 : .unwrap(),
4354 0 : preferred_az_id: None,
4355 0 : });
4356 : }
4357 :
4358 0 : child_tsps.push((target.parent_id, this_child_tsps));
4359 : }
4360 :
4361 0 : if let Err(e) = self
4362 0 : .persistence
4363 0 : .begin_shard_split(old_shard_count, tenant_id, child_tsps)
4364 0 : .await
4365 : {
4366 0 : match e {
4367 : DatabaseError::Query(diesel::result::Error::DatabaseError(
4368 : DatabaseErrorKind::UniqueViolation,
4369 : _,
4370 : )) => {
4371 : // Inserting a child shard violated a unique constraint: we raced with another call to
4372 : // this function
4373 0 : tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
4374 0 : return Err(ApiError::Conflict("Tenant is already splitting".into()));
4375 : }
4376 0 : _ => return Err(ApiError::InternalServerError(e.into())),
4377 : }
4378 0 : }
4379 0 : fail::fail_point!("shard-split-post-begin", |_| Err(
4380 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
4381 0 : ));
4382 :
4383 : // Now that I have persisted the splitting state, apply it in-memory. This is infallible, so
4384 : // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
4385 : // is not set in memory, then it was not persisted.
4386 : {
4387 0 : let mut locked = self.inner.write().unwrap();
4388 0 : for target in &targets {
4389 0 : if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
4390 0 : parent_shard.splitting = SplitState::Splitting;
4391 0 : // Put the observed state to None, to reflect that it is indeterminate once we start the
4392 0 : // split operation.
4393 0 : parent_shard
4394 0 : .observed
4395 0 : .locations
4396 0 : .insert(target.node.get_id(), ObservedStateLocation { conf: None });
4397 0 : }
4398 : }
4399 : }
4400 :
4401 : // TODO: issue split calls concurrently (this only matters once we're splitting
4402 : // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
4403 :
4404 0 : for target in &targets {
4405 : let ShardSplitTarget {
4406 0 : parent_id,
4407 0 : node,
4408 0 : child_ids,
4409 0 : } = target;
4410 0 : let client = PageserverClient::new(
4411 0 : node.get_id(),
4412 0 : node.base_url(),
4413 0 : self.config.jwt_token.as_deref(),
4414 0 : );
4415 0 : let response = client
4416 0 : .tenant_shard_split(
4417 0 : *parent_id,
4418 0 : TenantShardSplitRequest {
4419 0 : new_shard_count: new_shard_count.literal(),
4420 0 : new_stripe_size,
4421 0 : },
4422 0 : )
4423 0 : .await
4424 0 : .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
4425 :
4426 0 : fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
4427 0 : "failpoint".to_string()
4428 0 : )));
4429 :
4430 0 : failpoint_support::sleep_millis_async!("shard-split-post-remote-sleep", &self.cancel);
4431 :
4432 0 : tracing::info!(
4433 0 : "Split {} into {}",
4434 0 : parent_id,
4435 0 : response
4436 0 : .new_shards
4437 0 : .iter()
4438 0 : .map(|s| format!("{:?}", s))
4439 0 : .collect::<Vec<_>>()
4440 0 : .join(",")
4441 : );
4442 :
4443 0 : if &response.new_shards != child_ids {
4444 : // This should never happen: the pageserver should agree with us on how shard splits work.
4445 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4446 0 : "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
4447 0 : parent_id,
4448 0 : response.new_shards,
4449 0 : child_ids
4450 0 : )));
4451 0 : }
4452 : }
4453 :
4454 : // TODO: if the pageserver restarted concurrently with our split API call,
4455 : // the actual generation of the child shard might differ from the generation
4456 : // we expect it to have. In order for our in-database generation to end up
4457 : // correct, we should carry the child generation back in the response and apply it here
4458 : // in complete_shard_split (and apply the correct generation in memory)
4459 : // (or, we can carry generation in the request and reject the request if
4460 : // it doesn't match, but that requires more retry logic on this side)
4461 :
4462 0 : self.persistence
4463 0 : .complete_shard_split(tenant_id, old_shard_count)
4464 0 : .await?;
4465 :
4466 0 : fail::fail_point!("shard-split-post-complete", |_| Err(
4467 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
4468 0 : ));
4469 :
4470 : // Replace all the shards we just split with their children: this phase is infallible.
4471 0 : let (response, child_locations, waiters) =
4472 0 : self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
4473 0 :
4474 0 : // Now that we have scheduled the child shards, attempt to set their preferred AZ
4475 0 : // to that of the pageserver they've been attached to.
4476 0 : let preferred_azs = {
4477 0 : let locked = self.inner.read().unwrap();
4478 0 : child_locations
4479 0 : .iter()
4480 0 : .filter_map(|(tid, node_id, _stripe_size)| {
4481 0 : let az_id = locked
4482 0 : .nodes
4483 0 : .get(node_id)
4484 0 : .map(|n| n.get_availability_zone_id().to_string())?;
4485 :
4486 0 : Some((*tid, az_id))
4487 0 : })
4488 0 : .collect::<Vec<_>>()
4489 : };
4490 :
4491 0 : let updated = self
4492 0 : .persistence
4493 0 : .set_tenant_shard_preferred_azs(preferred_azs)
4494 0 : .await
4495 0 : .map_err(|err| {
4496 0 : ApiError::InternalServerError(anyhow::anyhow!(
4497 0 : "Failed to persist preferred az ids: {err}"
4498 0 : ))
4499 0 : });
4500 0 :
4501 0 : match updated {
4502 0 : Ok(updated) => {
4503 0 : let mut locked = self.inner.write().unwrap();
4504 0 : for (tid, az_id) in updated {
4505 0 : if let Some(shard) = locked.tenants.get_mut(&tid) {
4506 0 : shard.set_preferred_az(az_id);
4507 0 : }
4508 : }
4509 : }
4510 0 : Err(err) => {
4511 0 : tracing::warn!("Failed to persist preferred AZs after split: {err}");
4512 : }
4513 : }
4514 :
4515 : // Send compute notifications for all the new shards
4516 0 : let mut failed_notifications = Vec::new();
4517 0 : for (child_id, child_ps, stripe_size) in child_locations {
4518 0 : if let Err(e) = self
4519 0 : .compute_hook
4520 0 : .notify(child_id, child_ps, stripe_size, &self.cancel)
4521 0 : .await
4522 : {
4523 0 : tracing::warn!("Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
4524 : child_id, child_ps);
4525 0 : failed_notifications.push(child_id);
4526 0 : }
4527 : }
4528 :
4529 : // If we failed any compute notifications, make a note to retry later.
4530 0 : if !failed_notifications.is_empty() {
4531 0 : let mut locked = self.inner.write().unwrap();
4532 0 : for failed in failed_notifications {
4533 0 : if let Some(shard) = locked.tenants.get_mut(&failed) {
4534 0 : shard.pending_compute_notification = true;
4535 0 : }
4536 : }
4537 0 : }
4538 :
4539 0 : Ok((response, waiters))
4540 0 : }
4541 :
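 : /// Move a tenant shard's attachment to the requested node. For attached placement
 : /// policies, any previous attachment is demoted to a secondary (when the policy keeps
 : /// secondaries), and we then wait for the resulting reconciliation to complete.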
4542 0 : pub(crate) async fn tenant_shard_migrate(
4543 0 : &self,
4544 0 : tenant_shard_id: TenantShardId,
4545 0 : migrate_req: TenantShardMigrateRequest,
4546 0 : ) -> Result<TenantShardMigrateResponse, ApiError> {
4547 0 : let waiter = {
4548 0 : let mut locked = self.inner.write().unwrap();
4549 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4550 :
4551 0 : let Some(node) = nodes.get(&migrate_req.node_id) else {
4552 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
4553 0 : "Node {} not found",
4554 0 : migrate_req.node_id
4555 0 : )));
4556 : };
4557 :
4558 0 : if !node.is_available() {
4559 : // Warn but proceed: the caller may intend to manually adjust the placement of
4560 : // a shard even if the node is down, e.g. when intervening during an incident.
4561 0 : tracing::warn!("Migrating to unavailable node {node}");
4562 0 : }
4563 :
4564 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
4565 0 : return Err(ApiError::NotFound(
4566 0 : anyhow::anyhow!("Tenant shard not found").into(),
4567 0 : ));
4568 : };
4569 :
4570 0 : if shard.intent.get_attached() == &Some(migrate_req.node_id) {
4571 : // No-op case: we will still proceed to wait for reconciliation in case it is
4572 : // incomplete from an earlier update to the intent.
4573 0 : tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
4574 : } else {
4575 0 : let old_attached = *shard.intent.get_attached();
4576 0 :
4577 0 : match shard.policy {
4578 0 : PlacementPolicy::Attached(n) => {
4579 0 : // If our new attached node was a secondary, it no longer should be.
4580 0 : shard.intent.remove_secondary(scheduler, migrate_req.node_id);
4581 :
4582 : // If we were already attached to something, demote that to a secondary
4583 0 : if let Some(old_attached) = old_attached {
4584 0 : if n > 0 {
4585 : // Remove other secondaries to make room for the location we'll demote
4586 0 : while shard.intent.get_secondary().len() >= n {
4587 0 : shard.intent.pop_secondary(scheduler);
4588 0 : }
4589 :
4590 0 : shard.intent.push_secondary(scheduler, old_attached);
4591 0 : }
4592 0 : }
4593 :
4594 0 : shard.intent.set_attached(scheduler, Some(migrate_req.node_id));
4595 : }
4596 0 : PlacementPolicy::Secondary => {
4597 0 : shard.intent.clear(scheduler);
4598 0 : shard.intent.push_secondary(scheduler, migrate_req.node_id);
4599 0 : }
4600 : PlacementPolicy::Detached => {
4601 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
4602 0 : "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
4603 0 : )))
4604 : }
4605 : }
4606 :
4607 0 : tracing::info!("Migrating: new intent {:?}", shard.intent);
4608 0 : shard.sequence = shard.sequence.next();
4609 : }
4610 :
4611 0 : self.maybe_reconcile_shard(shard, nodes)
4612 : };
4613 :
4614 0 : if let Some(waiter) = waiter {
4615 0 : waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
4616 : } else {
4617 0 : tracing::info!("Migration is a no-op");
4618 : }
4619 :
4620 0 : Ok(TenantShardMigrateResponse {})
4621 0 : }
4622 :
4623 : /// This is for debug/support only: we simply drop all state for a tenant, without
4624 : /// detaching or deleting it on pageservers.
4625 0 : pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
4626 0 : self.persistence.delete_tenant(tenant_id).await?;
4627 :
4628 0 : let mut locked = self.inner.write().unwrap();
4629 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
4630 0 : let mut shards = Vec::new();
4631 0 : for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
4632 0 : shards.push(*tenant_shard_id);
4633 0 : }
4634 :
4635 0 : for shard_id in shards {
4636 0 : if let Some(mut shard) = tenants.remove(&shard_id) {
4637 0 : shard.intent.clear(scheduler);
4638 0 : }
4639 : }
4640 :
4641 0 : Ok(())
4642 0 : }
4643 :
4644 : /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
4645 : /// tenant with a very high generation number so that it will see the existing data.
4646 0 : pub(crate) async fn tenant_import(
4647 0 : &self,
4648 0 : tenant_id: TenantId,
4649 0 : ) -> Result<TenantCreateResponse, ApiError> {
4650 0 : // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
4651 0 : let maybe_node = {
4652 0 : self.inner
4653 0 : .read()
4654 0 : .unwrap()
4655 0 : .nodes
4656 0 : .values()
4657 0 : .find(|n| n.is_available())
4658 0 : .cloned()
4659 : };
4660 0 : let Some(node) = maybe_node else {
4661 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
4662 : };
4663 :
4664 0 : let client = PageserverClient::new(
4665 0 : node.get_id(),
4666 0 : node.base_url(),
4667 0 : self.config.jwt_token.as_deref(),
4668 0 : );
4669 :
4670 0 : let scan_result = client
4671 0 : .tenant_scan_remote_storage(tenant_id)
4672 0 : .await
4673 0 : .map_err(|e| passthrough_api_error(&node, e))?;
4674 :
4675 : // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
4676 0 : let Some(shard_count) = scan_result
4677 0 : .shards
4678 0 : .iter()
4679 0 : .map(|s| s.tenant_shard_id.shard_count)
4680 0 : .max()
4681 : else {
4682 0 : return Err(ApiError::NotFound(
4683 0 : anyhow::anyhow!("No shards found").into(),
4684 0 : ));
4685 : };
4686 :
4687 : // Ideally we would set each newly imported shard's generation independently, but for correctness it is
4688 : // sufficient to give them all the highest generation seen in the scan.
4689 0 : let generation = scan_result
4690 0 : .shards
4691 0 : .iter()
4692 0 : .map(|s| s.generation)
4693 0 : .max()
4694 0 : .expect("We already validated >0 shards");
4695 0 :
4696 0 : // FIXME: we have no way to recover the shard stripe size from contents of remote storage: this will
4697 0 : // only work if they were using the default stripe size.
4698 0 : let stripe_size = ShardParameters::DEFAULT_STRIPE_SIZE;
4699 :
4700 0 : let (response, waiters) = self
4701 0 : .do_tenant_create(TenantCreateRequest {
4702 0 : new_tenant_id: TenantShardId::unsharded(tenant_id),
4703 0 : generation,
4704 0 :
4705 0 : shard_parameters: ShardParameters {
4706 0 : count: shard_count,
4707 0 : stripe_size,
4708 0 : },
4709 0 : placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
4710 0 :
4711 0 : // There is no way to know what the tenant's config was: revert to defaults
4712 0 : //
4713 0 : // TODO: remove `switch_aux_file_policy` once we finish auxv2 migration
4714 0 : //
4715 0 : // we write to both v1+v2 storage, so that the test case can use either storage format for testing
4716 0 : config: TenantConfig {
4717 0 : switch_aux_file_policy: Some(models::AuxFilePolicy::CrossValidation),
4718 0 : ..TenantConfig::default()
4719 0 : },
4720 0 : })
4721 0 : .await?;
4722 :
4723 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
4724 : // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
4725 : // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
4726 : // reconcile, as reconciliation includes notifying compute.
4727 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
4728 0 : }
4729 :
4730 0 : Ok(response)
4731 0 : }
4732 :
4733 : /// For debug/support: a full JSON dump of TenantShards. Returns a response so that
4734 : /// we don't have to make TenantShard clonable in the return path.
4735 0 : pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
4736 0 : let serialized = {
4737 0 : let locked = self.inner.read().unwrap();
4738 0 : let result = locked.tenants.values().collect::<Vec<_>>();
4739 0 : serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
4740 : };
4741 :
4742 0 : hyper::Response::builder()
4743 0 : .status(hyper::StatusCode::OK)
4744 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
4745 0 : .body(hyper::Body::from(serialized))
4746 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
4747 0 : }
4748 :
4749 : /// Check the consistency of in-memory state vs. persistent state, and check that the
4750 : /// scheduler's statistics are up to date.
4751 : ///
4752 : /// These consistency checks expect an **idle** system. If changes are going on while
4753 : /// we run, then we can falsely indicate a consistency issue. This is sufficient for end-of-test
4754 : /// checks, but not suitable for running continuously in the background in the field.
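 : ///
 : /// A minimal end-of-test sketch (illustrative, not a doctest; assumes a harness
 : /// holding a `service: Arc<Service>` handle):
 : ///
 : /// ```ignore
 : /// service.consistency_check().await.expect("end-of-test consistency");
 : /// ```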
4755 0 : pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
4756 0 : let (mut expect_nodes, mut expect_shards) = {
4757 0 : let locked = self.inner.read().unwrap();
4758 0 :
4759 0 : locked
4760 0 : .scheduler
4761 0 : .consistency_check(locked.nodes.values(), locked.tenants.values())
4762 0 : .context("Scheduler checks")
4763 0 : .map_err(ApiError::InternalServerError)?;
4764 :
4765 0 : let expect_nodes = locked
4766 0 : .nodes
4767 0 : .values()
4768 0 : .map(|n| n.to_persistent())
4769 0 : .collect::<Vec<_>>();
4770 0 :
4771 0 : let expect_shards = locked
4772 0 : .tenants
4773 0 : .values()
4774 0 : .map(|t| t.to_persistent())
4775 0 : .collect::<Vec<_>>();
4776 :
4777 : // This method can only validate the state of an idle system: if a reconcile is in
4778 : // progress, fail out early to avoid giving false errors on state that won't match
4779 : // between database and memory under a ReconcileResult is processed.
4780 : // between database and memory until a ReconcileResult is processed.
4781 0 : if t.reconciler.is_some() {
4782 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4783 0 : "Shard {} reconciliation in progress",
4784 0 : t.tenant_shard_id
4785 0 : )));
4786 0 : }
4787 : }
4788 :
4789 0 : (expect_nodes, expect_shards)
4790 : };
4791 :
4792 0 : let mut nodes = self.persistence.list_nodes().await?;
4793 0 : expect_nodes.sort_by_key(|n| n.node_id);
4794 0 : nodes.sort_by_key(|n| n.node_id);
4795 0 :
4796 0 : if nodes != expect_nodes {
4797 0 : tracing::error!("Consistency check failed on nodes.");
4798 0 : tracing::error!(
4799 0 : "Nodes in memory: {}",
4800 0 : serde_json::to_string(&expect_nodes)
4801 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4802 : );
4803 0 : tracing::error!(
4804 0 : "Nodes in database: {}",
4805 0 : serde_json::to_string(&nodes)
4806 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4807 : );
4808 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4809 0 : "Node consistency failure"
4810 0 : )));
4811 0 : }
4812 :
4813 0 : let mut shards = self.persistence.list_tenant_shards().await?;
4814 0 : shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4815 0 : expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4816 0 :
4817 0 : if shards != expect_shards {
4818 0 : tracing::error!("Consistency check failed on shards.");
4819 0 : tracing::error!(
4820 0 : "Shards in memory: {}",
4821 0 : serde_json::to_string(&expect_shards)
4822 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4823 : );
4824 0 : tracing::error!(
4825 0 : "Shards in database: {}",
4826 0 : serde_json::to_string(&shards)
4827 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4828 : );
4829 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4830 0 : "Shard consistency failure"
4831 0 : )));
4832 0 : }
4833 0 :
4834 0 : Ok(())
4835 0 : }
4836 :
4837 : /// For debug/support: a JSON dump of the [`Scheduler`]. Returns a response so that
4838 : /// we don't have to make the scheduler clonable in the return path.
4839 0 : pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
4840 0 : let serialized = {
4841 0 : let locked = self.inner.read().unwrap();
4842 0 : serde_json::to_string(&locked.scheduler)
4843 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4844 : };
4845 :
4846 0 : hyper::Response::builder()
4847 0 : .status(hyper::StatusCode::OK)
4848 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
4849 0 : .body(hyper::Body::from(serialized))
4850 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
4851 0 : }
4852 :
4853 : /// This is for debug/support only: we simply drop all state for a node, without
4854 : /// detaching or deleting anything on pageservers. We do not try to re-schedule any
4855 : /// tenants that were on this node.
4856 0 : pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
4857 0 : self.persistence.delete_node(node_id).await?;
4858 :
4859 0 : let mut locked = self.inner.write().unwrap();
4860 :
4861 0 : for shard in locked.tenants.values_mut() {
4862 0 : shard.deref_node(node_id);
4863 0 : shard.observed.locations.remove(&node_id);
4864 0 : }
4865 :
4866 0 : let mut nodes = (*locked.nodes).clone();
4867 0 : nodes.remove(&node_id);
4868 0 : locked.nodes = Arc::new(nodes);
4869 0 :
4870 0 : locked.scheduler.node_remove(node_id);
4871 0 :
4872 0 : Ok(())
4873 0 : }
4874 :
4875 : /// If a node has any work on it, it will be rescheduled: this is "clean" in the sense
4876 : /// that we don't leave any bad state behind in the storage controller, but unclean
4877 : /// in the sense that we are not carefully draining the node.
4878 0 : pub(crate) async fn node_delete(&self, node_id: NodeId) -> Result<(), ApiError> {
4879 0 : let _node_lock =
4880 0 : trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Delete).await;
4881 :
4882 : // 1. Atomically update in-memory state:
4883 : // - set the scheduling state to Pause to make subsequent scheduling ops skip it
4884 : // - update shards' intents to exclude the node, and reschedule any shards whose intents we modified.
4885 : // - drop the node from the main nodes map, so that when running reconciles complete they do not
4886 : // re-insert references to this node into the ObservedState of shards
4887 : // - drop the node from the scheduler
4888 : {
4889 0 : let mut locked = self.inner.write().unwrap();
4890 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4891 0 :
4892 0 : {
4893 0 : let mut nodes_mut = (*nodes).deref().clone();
4894 0 : match nodes_mut.get_mut(&node_id) {
4895 0 : Some(node) => {
4896 0 : // We do not bother setting this in the database, because we're about to delete the row anyway, and
4897 0 : // if we crash it would not be desirable to leave the node paused after a restart.
4898 0 : node.set_scheduling(NodeSchedulingPolicy::Pause);
4899 0 : }
4900 : None => {
4901 0 : tracing::info!(
4902 0 : "Node not found: presuming this is a retry and returning success"
4903 : );
4904 0 : return Ok(());
4905 : }
4906 : }
4907 :
4908 0 : *nodes = Arc::new(nodes_mut);
4909 : }
4910 :
4911 0 : for (tenant_shard_id, shard) in tenants {
4912 0 : if shard.deref_node(node_id) {
4913 : // FIXME: we need to build a ScheduleContext that reflects this shard's peers, otherwise
4914 : // it won't properly do anti-affinity.
4915 0 : let mut schedule_context = ScheduleContext::default();
4916 :
4917 0 : if let Err(e) = shard.schedule(scheduler, &mut schedule_context) {
4918 : // TODO: implement force flag to remove a node even if we can't reschedule
4919 : // a tenant
4920 0 : tracing::error!("Refusing to delete node, shard {tenant_shard_id} can't be rescheduled: {e}");
4921 0 : return Err(e.into());
4922 : } else {
4923 0 : tracing::info!(
4924 0 : "Rescheduled shard {tenant_shard_id} away from node during deletion"
4925 : )
4926 : }
4927 :
4928 0 : self.maybe_reconcile_shard(shard, nodes);
4929 0 : }
4930 :
4931 : // Here we remove an existing observed location for the node we're removing, and it will
4932 : // not be re-added by a reconciler's completion because we filter out removed nodes in
4933 : // process_result.
4934 : //
4935 : // Note that we update the shard's observed state _after_ calling maybe_reconcile_shard: that
4936 : // means any reconciles we spawned will know about the node we're deleting, enabling them
4937 : // to do live migrations if it's still online.
4938 0 : shard.observed.locations.remove(&node_id);
4939 : }
4940 :
4941 0 : scheduler.node_remove(node_id);
4942 0 :
4943 0 : {
4944 0 : let mut nodes_mut = (**nodes).clone();
4945 0 : nodes_mut.remove(&node_id);
4946 0 : *nodes = Arc::new(nodes_mut);
4947 0 : }
4948 0 : }
4949 0 :
4950 0 : // Note: some `generation_pageserver` columns on tenant shards in the database may still refer to
4951 0 : // the removed node, as this column means "The pageserver to which this generation was issued", and
4952 0 : // their generations won't get updated until the reconcilers moving them away from this node complete.
4953 0 : // That is safe because in Service::spawn we only use generation_pageserver if it refers to a node
4954 0 : // that exists.
4955 0 :
4956 0 : // 2. Actually delete the node from the database and from in-memory state
4957 0 : tracing::info!("Deleting node from database");
4958 0 : self.persistence.delete_node(node_id).await?;
4959 :
4960 0 : Ok(())
4961 0 : }
4962 :
4963 0 : pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
4964 0 : let nodes = {
4965 0 : self.inner
4966 0 : .read()
4967 0 : .unwrap()
4968 0 : .nodes
4969 0 : .values()
4970 0 : .cloned()
4971 0 : .collect::<Vec<_>>()
4972 0 : };
4973 0 :
4974 0 : Ok(nodes)
4975 0 : }
4976 :
4977 0 : pub(crate) async fn get_node(&self, node_id: NodeId) -> Result<Node, ApiError> {
4978 0 : self.inner
4979 0 : .read()
4980 0 : .unwrap()
4981 0 : .nodes
4982 0 : .get(&node_id)
4983 0 : .cloned()
4984 0 : .ok_or(ApiError::NotFound(
4985 0 : format!("Node {node_id} not registered").into(),
4986 0 : ))
4987 0 : }
4988 :
4989 0 : pub(crate) async fn get_node_shards(
4990 0 : &self,
4991 0 : node_id: NodeId,
4992 0 : ) -> Result<NodeShardResponse, ApiError> {
4993 0 : let locked = self.inner.read().unwrap();
4994 0 : let mut shards = Vec::new();
4995 0 : for (tid, tenant) in locked.tenants.iter() {
4996 0 : let is_intended_secondary = match (
4997 0 : tenant.intent.get_attached() == &Some(node_id),
4998 0 : tenant.intent.get_secondary().contains(&node_id),
4999 0 : ) {
5000 : (true, true) => {
5001 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
5002 0 : "{} attached as primary+secondary on the same node",
5003 0 : tid
5004 0 : )))
5005 : }
5006 0 : (true, false) => Some(false),
5007 0 : (false, true) => Some(true),
5008 0 : (false, false) => None,
5009 : };
5010 0 : let is_observed_secondary = if let Some(ObservedStateLocation { conf: Some(conf) }) =
5011 0 : tenant.observed.locations.get(&node_id)
5012 : {
5013 0 : Some(conf.secondary_conf.is_some())
5014 : } else {
5015 0 : None
5016 : };
5017 0 : if is_intended_secondary.is_some() || is_observed_secondary.is_some() {
5018 0 : shards.push(NodeShard {
5019 0 : tenant_shard_id: *tid,
5020 0 : is_intended_secondary,
5021 0 : is_observed_secondary,
5022 0 : });
5023 0 : }
5024 : }
5025 0 : Ok(NodeShardResponse { node_id, shards })
5026 0 : }
5027 :
5028 0 : pub(crate) async fn get_leader(&self) -> DatabaseResult<Option<ControllerPersistence>> {
5029 0 : self.persistence.get_leader().await
5030 0 : }
5031 :
5032 0 : pub(crate) async fn node_register(
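 : /// Register a pageserver node, or handle its re-registration. A re-registration with
 : /// matching details is a no-op; one with a different address is rejected. For a new
 : /// node we require a resolvable HTTP hostname, then persist it to the database before
 : /// adding it to in-memory state and the scheduler.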
5033 0 : &self,
5034 0 : register_req: NodeRegisterRequest,
5035 0 : ) -> Result<(), ApiError> {
5036 0 : let _node_lock = trace_exclusive_lock(
5037 0 : &self.node_op_locks,
5038 0 : register_req.node_id,
5039 0 : NodeOperations::Register,
5040 0 : )
5041 0 : .await;
5042 :
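 : // How an incoming register request compares with any existing record for this node id.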
5043 : enum RegistrationStatus {
5044 : Matched,
5045 : Mismatched,
5046 : New,
5047 : }
5048 :
5049 0 : let registration_status = {
5050 0 : let locked = self.inner.read().unwrap();
5051 0 : if let Some(node) = locked.nodes.get(®ister_req.node_id) {
5052 0 : if node.registration_match(®ister_req) {
5053 0 : RegistrationStatus::Matched
5054 : } else {
5055 0 : RegistrationStatus::Mismatched
5056 : }
5057 : } else {
5058 0 : RegistrationStatus::New
5059 : }
5060 : };
5061 :
5062 0 : match registration_status {
5063 : RegistrationStatus::Matched => {
5064 0 : tracing::info!(
5065 0 : "Node {} re-registered with matching address",
5066 : register_req.node_id
5067 : );
5068 :
5069 0 : return Ok(());
5070 : }
5071 : RegistrationStatus::Mismatched => {
5072 : // TODO: decide if we want to allow modifying node addresses without removing and re-adding
5073 : // the node. Safest/simplest thing is to refuse it, and usually we deploy with
5074 : // a fixed address through the lifetime of a node.
5075 0 : tracing::warn!(
5076 0 : "Node {} tried to register with different address",
5077 : register_req.node_id
5078 : );
5079 0 : return Err(ApiError::Conflict(
5080 0 : "Node is already registered with different address".to_string(),
5081 0 : ));
5082 : }
5083 0 : RegistrationStatus::New => {
5084 0 : // fallthrough
5085 0 : }
5086 0 : }
5087 0 :
5088 0 : // We do not require that a node is actually online when registered (it will start life
5089 0 : // with its availability set to Offline), but we _do_ require that its DNS record exists. We're
5090 0 : // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
5091 0 : // that register themselves with a broken DNS config. We check only the HTTP hostname, because
5092 0 : // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
5093 0 : if tokio::net::lookup_host(format!(
5094 0 : "{}:{}",
5095 0 : register_req.listen_http_addr, register_req.listen_http_port
5096 0 : ))
5097 0 : .await
5098 0 : .is_err()
5099 : {
5100 : // If we have a transient DNS issue, it's up to the caller to retry their registration. Because
5101 : // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
5102 : // we return a soft 503 error, to encourage callers to retry past transient issues.
5103 0 : return Err(ApiError::ResourceUnavailable(
5104 0 : format!(
5105 0 : "Node {} tried to register with unknown DNS name '{}'",
5106 0 : register_req.node_id, register_req.listen_http_addr
5107 0 : )
5108 0 : .into(),
5109 0 : ));
5110 0 : }
5111 0 :
5112 0 : // Ordering: we must persist the new node _before_ adding it to in-memory state.
5113 0 : // This ensures that before we use it for anything or expose it via any external
5114 0 : // API, it is guaranteed to be available after a restart.
5115 0 : let new_node = Node::new(
5116 0 : register_req.node_id,
5117 0 : register_req.listen_http_addr,
5118 0 : register_req.listen_http_port,
5119 0 : register_req.listen_pg_addr,
5120 0 : register_req.listen_pg_port,
5121 0 : register_req.availability_zone_id,
5122 0 : );
5123 0 :
5124 0 : // TODO: idempotency if the node already exists in the database
5125 0 : self.persistence.insert_node(&new_node).await?;
5126 :
5127 0 : let mut locked = self.inner.write().unwrap();
5128 0 : let mut new_nodes = (*locked.nodes).clone();
5129 0 :
5130 0 : locked.scheduler.node_upsert(&new_node);
5131 0 : new_nodes.insert(register_req.node_id, new_node);
5132 0 :
5133 0 : locked.nodes = Arc::new(new_nodes);
5134 0 :
5135 0 : tracing::info!(
5136 0 : "Registered pageserver {}, now have {} pageservers",
5137 0 : register_req.node_id,
5138 0 : locked.nodes.len()
5139 : );
5140 0 : Ok(())
5141 0 : }
5142 :
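 : /// Apply availability and/or scheduling-policy changes to a node. Scheduling changes
 : /// are persisted to the database before being applied in memory; an availability
 : /// transition (to active or offline) additionally triggers rescheduling and
 : /// reconciliation of affected tenant shards.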
5143 0 : pub(crate) async fn node_configure(
5144 0 : &self,
5145 0 : node_id: NodeId,
5146 0 : availability: Option<NodeAvailability>,
5147 0 : scheduling: Option<NodeSchedulingPolicy>,
5148 0 : ) -> Result<(), ApiError> {
5149 0 : let _node_lock =
5150 0 : trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
5151 :
5152 0 : if let Some(scheduling) = scheduling {
5153 : // Scheduling is a persistent part of Node: we must write updates to the database before
5154 : // applying them in memory
5155 0 : self.persistence.update_node(node_id, scheduling).await?;
5156 0 : }
5157 :
5158 : // If we're activating a node, then before setting it active we must reconcile any shard locations
5159 : // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
5160 : // by calling [`Self::node_activate_reconcile`]
5161 : //
5162 : // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
5163 : // nothing else can mutate its availability while we run.
5164 0 : let availability_transition = if let Some(input_availability) = availability.as_ref() {
5165 0 : let (activate_node, availability_transition) = {
5166 0 : let locked = self.inner.read().unwrap();
5167 0 : let Some(node) = locked.nodes.get(&node_id) else {
5168 0 : return Err(ApiError::NotFound(
5169 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5170 0 : ));
5171 : };
5172 :
5173 0 : (
5174 0 : node.clone(),
5175 0 : node.get_availability_transition(input_availability),
5176 0 : )
5177 : };
5178 :
5179 0 : if matches!(availability_transition, AvailabilityTransition::ToActive) {
5180 0 : self.node_activate_reconcile(activate_node, &_node_lock)
5181 0 : .await?;
5182 0 : }
5183 0 : availability_transition
5184 : } else {
5185 0 : AvailabilityTransition::Unchanged
5186 : };
5187 :
5188 : // Apply changes from the request to our in-memory state for the Node
5189 0 : let mut locked = self.inner.write().unwrap();
5190 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5191 0 :
5192 0 : let mut new_nodes = (**nodes).clone();
5193 :
5194 0 : let Some(node) = new_nodes.get_mut(&node_id) else {
5195 0 : return Err(ApiError::NotFound(
5196 0 : anyhow::anyhow!("Node not registered").into(),
5197 0 : ));
5198 : };
5199 :
5200 0 : if let Some(availability) = availability.as_ref() {
5201 0 : node.set_availability(availability.clone());
5202 0 : }
5203 :
5204 0 : if let Some(scheduling) = scheduling {
5205 0 : node.set_scheduling(scheduling);
5206 0 : }
5207 :
5208 : // Update the scheduler, in case the eligibility of the node for new shards has changed
5209 0 : scheduler.node_upsert(node);
5210 0 :
5211 0 : let new_nodes = Arc::new(new_nodes);
5212 0 :
5213 0 : // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
5214 0 : match availability_transition {
5215 : AvailabilityTransition::ToOffline => {
5216 0 : tracing::info!("Node {} transition to offline", node_id);
5217 0 : let mut tenants_affected: usize = 0;
5218 :
5219 0 : for (tenant_shard_id, tenant_shard) in tenants {
5220 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
5221 0 : // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
5222 0 : // not assume our knowledge of the node's configuration is accurate until it comes back online
5223 0 : observed_loc.conf = None;
5224 0 : }
5225 :
5226 0 : if new_nodes.len() == 1 {
5227 : // Special case for single-node cluster: there is no point trying to reschedule
5228 : // any tenant shards: skip it, to avoid spewing warnings about
5229 : // failures to schedule them.
5230 0 : continue;
5231 0 : }
5232 0 :
5233 0 : if !new_nodes
5234 0 : .values()
5235 0 : .any(|n| matches!(n.may_schedule(), MaySchedule::Yes(_)))
5236 : {
5237 : // Special case for when all nodes are unavailable and/or unschedulable: there is no point
5238 : // trying to reschedule since there's nowhere else to go. Without this
5239 : // branch we incorrectly detach tenants in response to node unavailability.
5240 0 : continue;
5241 0 : }
5242 0 :
5243 0 : if tenant_shard.intent.demote_attached(scheduler, node_id) {
5244 0 : tenant_shard.sequence = tenant_shard.sequence.next();
5245 0 :
5246 0 : // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters
5247 0 : // for tenants without secondary locations: if they have a secondary location, then this
5248 0 : // schedule() call is just promoting an existing secondary)
5249 0 : let mut schedule_context = ScheduleContext::default();
5250 0 :
5251 0 : match tenant_shard.schedule(scheduler, &mut schedule_context) {
5252 0 : Err(e) => {
5253 0 : // It is possible that some tenants will become unschedulable when too many pageservers
5254 0 : // go offline: in this case there isn't much we can do other than make the issue observable.
5255 0 : // TODO: give TenantShard a scheduling error attribute to be queried later.
5256 0 : tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
5257 : }
5258 : Ok(()) => {
5259 0 : if self
5260 0 : .maybe_reconcile_shard(tenant_shard, &new_nodes)
5261 0 : .is_some()
5262 0 : {
5263 0 : tenants_affected += 1;
5264 0 : };
5265 : }
5266 : }
5267 0 : }
5268 : }
5269 0 : tracing::info!(
5270 0 : "Launched {} reconciler tasks for tenants affected by node {} going offline",
5271 : tenants_affected,
5272 : node_id
5273 : )
5274 : }
5275 : AvailabilityTransition::ToActive => {
5276 0 : tracing::info!("Node {} transition to active", node_id);
5277 : // When a node comes back online, we must reconcile any tenant that has a None observed
5278 : // location on the node.
5279 0 : for tenant_shard in locked.tenants.values_mut() {
5280 : // If a reconciliation is already in progress, rely on the previous scheduling
5281 : // decision and skip triggering a new reconciliation.
5282 0 : if tenant_shard.reconciler.is_some() {
5283 0 : continue;
5284 0 : }
5285 :
5286 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
5287 0 : if observed_loc.conf.is_none() {
5288 0 : self.maybe_reconcile_shard(tenant_shard, &new_nodes);
5289 0 : }
5290 0 : }
5291 : }
5292 :
5293 : // TODO: in the background, we should balance work back onto this pageserver
5294 : }
5295 : // No action required for the intermediate unavailable state.
5296 : // When we transition into active or offline from the unavailable state,
5297 : // the correct handling above will kick in.
5298 : AvailabilityTransition::ToWarmingUpFromActive => {
5299 0 : tracing::info!("Node {} transition to unavailable from active", node_id);
5300 : }
5301 : AvailabilityTransition::ToWarmingUpFromOffline => {
5302 0 : tracing::info!("Node {} transition to unavailable from offline", node_id);
5303 : }
5304 : AvailabilityTransition::Unchanged => {
5305 0 : tracing::debug!("Node {} no availability change during config", node_id);
5306 : }
5307 : }
5308 :
5309 0 : locked.nodes = new_nodes;
5310 0 :
5311 0 : Ok(())
5312 0 : }
5313 :
5314 : /// Wrapper around [`Self::node_configure`] which only allows changes while there is no ongoing
5315 : /// operation for HTTP api.
5316 0 : pub(crate) async fn external_node_configure(
5317 0 : &self,
5318 0 : node_id: NodeId,
5319 0 : availability: Option<NodeAvailability>,
5320 0 : scheduling: Option<NodeSchedulingPolicy>,
5321 0 : ) -> Result<(), ApiError> {
5322 0 : {
5323 0 : let locked = self.inner.read().unwrap();
5324 0 : if let Some(op) = locked.ongoing_operation.as_ref().map(|op| op.operation) {
5325 0 : return Err(ApiError::PreconditionFailed(
5326 0 : format!("Ongoing background operation forbids configuring: {op}").into(),
5327 0 : ));
5328 0 : }
5329 0 : }
5330 0 :
5331 0 : self.node_configure(node_id, availability, scheduling).await
5332 0 : }
5333 :
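 : /// Switch a node to [`NodeSchedulingPolicy::Draining`] and spawn a background task
 : /// that migrates its attached shards away. Requires that no other background
 : /// operation is ongoing and that at least one other node is schedulable to drain to.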
5334 0 : pub(crate) async fn start_node_drain(
5335 0 : self: &Arc<Self>,
5336 0 : node_id: NodeId,
5337 0 : ) -> Result<(), ApiError> {
5338 0 : let (ongoing_op, node_available, node_policy, schedulable_nodes_count) = {
5339 0 : let locked = self.inner.read().unwrap();
5340 0 : let nodes = &locked.nodes;
5341 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
5342 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5343 0 : ))?;
5344 0 : let schedulable_nodes_count = nodes
5345 0 : .iter()
5346 0 : .filter(|(_, n)| matches!(n.may_schedule(), MaySchedule::Yes(_)))
5347 0 : .count();
5348 0 :
5349 0 : (
5350 0 : locked
5351 0 : .ongoing_operation
5352 0 : .as_ref()
5353 0 : .map(|ongoing| ongoing.operation),
5354 0 : node.is_available(),
5355 0 : node.get_scheduling(),
5356 0 : schedulable_nodes_count,
5357 0 : )
5358 0 : };
5359 :
5360 0 : if let Some(ongoing) = ongoing_op {
5361 0 : return Err(ApiError::PreconditionFailed(
5362 0 : format!("Background operation already ongoing for node: {}", ongoing).into(),
5363 0 : ));
5364 0 : }
5365 0 :
5366 0 : if !node_available {
5367 0 : return Err(ApiError::ResourceUnavailable(
5368 0 : format!("Node {node_id} is currently unavailable").into(),
5369 0 : ));
5370 0 : }
5371 0 :
5372 0 : if schedulable_nodes_count == 0 {
5373 0 : return Err(ApiError::PreconditionFailed(
5374 0 : "No other schedulable nodes to drain to".into(),
5375 0 : ));
5376 0 : }
5377 0 :
5378 0 : match node_policy {
5379 : NodeSchedulingPolicy::Active | NodeSchedulingPolicy::Pause => {
5380 0 : self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Draining))
5381 0 : .await?;
5382 :
5383 0 : let cancel = self.cancel.child_token();
5384 0 : let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
5385 :
5386 0 : self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
5387 0 : operation: Operation::Drain(Drain { node_id }),
5388 0 : cancel: cancel.clone(),
5389 0 : });
5390 :
5391 0 : let span = tracing::info_span!(parent: None, "drain_node", %node_id);
5392 :
5393 0 : tokio::task::spawn({
5394 0 : let service = self.clone();
5395 0 : let cancel = cancel.clone();
5396 0 : async move {
5397 0 : let _gate_guard = gate_guard;
5398 0 :
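 : // However this task exits (success, error, or cancellation), clear the
 : // ongoing-operation slot so that a new drain/fill may be started.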
5399 0 : scopeguard::defer! {
5400 0 : let prev = service.inner.write().unwrap().ongoing_operation.take();
5401 0 :
5402 0 : if let Some(Operation::Drain(removed_drain)) = prev.map(|h| h.operation) {
5403 0 : assert_eq!(removed_drain.node_id, node_id, "We always take the same operation");
5404 0 : } else {
5405 0 : panic!("We always remove the same operation")
5406 0 : }
5407 0 : }
5408 0 :
5409 0 : tracing::info!("Drain background operation starting");
5410 0 : let res = service.drain_node(node_id, cancel).await;
5411 0 : match res {
5412 : Ok(()) => {
5413 0 : tracing::info!("Drain background operation completed successfully");
5414 : }
5415 : Err(OperationError::Cancelled) => {
5416 0 : tracing::info!("Drain background operation was cancelled");
5417 : }
5418 0 : Err(err) => {
5419 0 : tracing::error!("Drain background operation encountered: {err}")
5420 : }
5421 : }
5422 0 : }
5423 0 : }.instrument(span));
5424 0 : }
5425 : NodeSchedulingPolicy::Draining => {
5426 0 : return Err(ApiError::Conflict(format!(
5427 0 : "Node {node_id} has drain in progress"
5428 0 : )));
5429 : }
5430 0 : policy => {
5431 0 : return Err(ApiError::PreconditionFailed(
5432 0 : format!("Node {node_id} cannot be drained due to {policy:?} policy").into(),
5433 0 : ));
5434 : }
5435 : }
5436 :
5437 0 : Ok(())
5438 0 : }
5439 :
5440 0 : pub(crate) async fn cancel_node_drain(&self, node_id: NodeId) -> Result<(), ApiError> {
5441 0 : let node_available = {
5442 0 : let locked = self.inner.read().unwrap();
5443 0 : let nodes = &locked.nodes;
5444 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
5445 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5446 0 : ))?;
5447 :
5448 0 : node.is_available()
5449 0 : };
5450 0 :
5451 0 : if !node_available {
5452 0 : return Err(ApiError::ResourceUnavailable(
5453 0 : format!("Node {node_id} is currently unavailable").into(),
5454 0 : ));
5455 0 : }
5456 :
5457 0 : if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
5458 0 : if let Operation::Drain(drain) = op_handler.operation {
5459 0 : if drain.node_id == node_id {
5460 0 : tracing::info!("Cancelling background drain operation for node {node_id}");
5461 0 : op_handler.cancel.cancel();
5462 0 : return Ok(());
5463 0 : }
5464 0 : }
5465 0 : }
5466 :
5467 0 : Err(ApiError::PreconditionFailed(
5468 0 : format!("Node {node_id} has no drain in progress").into(),
5469 0 : ))
5470 0 : }
5471 :
5472 0 : pub(crate) async fn start_node_fill(self: &Arc<Self>, node_id: NodeId) -> Result<(), ApiError> {
5473 0 : let (ongoing_op, node_available, node_policy, total_nodes_count) = {
5474 0 : let locked = self.inner.read().unwrap();
5475 0 : let nodes = &locked.nodes;
5476 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
5477 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5478 0 : ))?;
5479 :
5480 0 : (
5481 0 : locked
5482 0 : .ongoing_operation
5483 0 : .as_ref()
5484 0 : .map(|ongoing| ongoing.operation),
5485 0 : node.is_available(),
5486 0 : node.get_scheduling(),
5487 0 : nodes.len(),
5488 0 : )
5489 0 : };
5490 :
5491 0 : if let Some(ongoing) = ongoing_op {
5492 0 : return Err(ApiError::PreconditionFailed(
5493 0 : format!("Background operation already ongoing for node: {}", ongoing).into(),
5494 0 : ));
5495 0 : }
5496 0 :
5497 0 : if !node_available {
5498 0 : return Err(ApiError::ResourceUnavailable(
5499 0 : format!("Node {node_id} is currently unavailable").into(),
5500 0 : ));
5501 0 : }
5502 0 :
5503 0 : if total_nodes_count <= 1 {
5504 0 : return Err(ApiError::PreconditionFailed(
5505 0 : "No other nodes to fill from".into(),
5506 0 : ));
5507 0 : }
5508 0 :
5509 0 : match node_policy {
5510 : NodeSchedulingPolicy::Active => {
5511 0 : self.node_configure(node_id, None, Some(NodeSchedulingPolicy::Filling))
5512 0 : .await?;
5513 :
5514 0 : let cancel = self.cancel.child_token();
5515 0 : let gate_guard = self.gate.enter().map_err(|_| ApiError::ShuttingDown)?;
5516 :
5517 0 : self.inner.write().unwrap().ongoing_operation = Some(OperationHandler {
5518 0 : operation: Operation::Fill(Fill { node_id }),
5519 0 : cancel: cancel.clone(),
5520 0 : });
5521 :
5522 0 : let span = tracing::info_span!(parent: None, "fill_node", %node_id);
5523 :
5524 0 : tokio::task::spawn({
5525 0 : let service = self.clone();
5526 0 : let cancel = cancel.clone();
5527 0 : async move {
5528 0 : let _gate_guard = gate_guard;
5529 0 :
5530 0 : scopeguard::defer! {
5531 0 : let prev = service.inner.write().unwrap().ongoing_operation.take();
5532 0 :
5533 0 : if let Some(Operation::Fill(removed_fill)) = prev.map(|h| h.operation) {
5534 0 : assert_eq!(removed_fill.node_id, node_id, "We always take the same operation");
5535 0 : } else {
5536 0 : panic!("We always remove the same operation")
5537 0 : }
5538 0 : }
5539 0 :
5540 0 : tracing::info!("Fill background operation starting");
5541 0 : let res = service.fill_node(node_id, cancel).await;
5542 0 : match res {
5543 : Ok(()) => {
5544 0 : tracing::info!("Fill background operation completed successfully");
5545 : }
5546 : Err(OperationError::Cancelled) => {
5547 0 : tracing::info!("Fill background operation was cancelled");
5548 : }
5549 0 : Err(err) => {
5550 0 : tracing::error!("Fill background operation encountered: {err}")
5551 : }
5552 : }
5553 0 : }
5554 0 : }.instrument(span));
5555 0 : }
5556 : NodeSchedulingPolicy::Filling => {
5557 0 : return Err(ApiError::Conflict(format!(
5558 0 : "Node {node_id} has fill in progress"
5559 0 : )));
5560 : }
5561 0 : policy => {
5562 0 : return Err(ApiError::PreconditionFailed(
5563 0 : format!("Node {node_id} cannot be filled due to {policy:?} policy").into(),
5564 0 : ));
5565 : }
5566 : }
5567 :
5568 0 : Ok(())
5569 0 : }
5570 :
5571 0 : pub(crate) async fn cancel_node_fill(&self, node_id: NodeId) -> Result<(), ApiError> {
5572 0 : let node_available = {
5573 0 : let locked = self.inner.read().unwrap();
5574 0 : let nodes = &locked.nodes;
5575 0 : let node = nodes.get(&node_id).ok_or(ApiError::NotFound(
5576 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
5577 0 : ))?;
5578 :
5579 0 : node.is_available()
5580 0 : };
5581 0 :
5582 0 : if !node_available {
5583 0 : return Err(ApiError::ResourceUnavailable(
5584 0 : format!("Node {node_id} is currently unavailable").into(),
5585 0 : ));
5586 0 : }
5587 :
5588 0 : if let Some(op_handler) = self.inner.read().unwrap().ongoing_operation.as_ref() {
5589 0 : if let Operation::Fill(fill) = op_handler.operation {
5590 0 : if fill.node_id == node_id {
5591 0 : tracing::info!("Cancelling background fill operation for node {node_id}");
5592 0 : op_handler.cancel.cancel();
5593 0 : return Ok(());
5594 0 : }
5595 0 : }
5596 0 : }
5597 :
5598 0 : Err(ApiError::PreconditionFailed(
5599 0 : format!("Node {node_id} has no fill in progress").into(),
5600 0 : ))
5601 0 : }
5602 :
5603 : /// Like [`Self::maybe_configured_reconcile_shard`], but uses the default reconciler
5604 : /// configuration.
5605 0 : fn maybe_reconcile_shard(
5606 0 : &self,
5607 0 : shard: &mut TenantShard,
5608 0 : nodes: &Arc<HashMap<NodeId, Node>>,
5609 0 : ) -> Option<ReconcilerWaiter> {
5610 0 : self.maybe_configured_reconcile_shard(shard, nodes, ReconcilerConfig::default())
5611 0 : }
5612 :
5613 : /// Wrap [`TenantShard`] reconciliation methods with acquisition of the reconcilers' [`Gate`] and of [`ReconcileUnits`].
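 : ///
 : /// Returns `None` if the shard needs no reconciliation or we are shutting down. If the
 : /// reconciler concurrency limit is reached, no reconciler is spawned: the shard is
 : /// enqueued for later and a waiter for the future reconciliation is returned.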
5614 0 : fn maybe_configured_reconcile_shard(
5615 0 : &self,
5616 0 : shard: &mut TenantShard,
5617 0 : nodes: &Arc<HashMap<NodeId, Node>>,
5618 0 : reconciler_config: ReconcilerConfig,
5619 0 : ) -> Option<ReconcilerWaiter> {
5620 0 : let reconcile_needed = shard.get_reconcile_needed(nodes);
5621 0 :
5622 0 : match reconcile_needed {
5623 0 : ReconcileNeeded::No => return None,
5624 0 : ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
5625 0 : ReconcileNeeded::Yes => {
5626 0 : // Fall through to try and acquire units for spawning reconciler
5627 0 : }
5628 : };
5629 :
5630 0 : let units = match self.reconciler_concurrency.clone().try_acquire_owned() {
5631 0 : Ok(u) => ReconcileUnits::new(u),
5632 : Err(_) => {
5633 0 : tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
5634 0 : "Concurrency limited: enqueued for reconcile later");
5635 0 : if !shard.delayed_reconcile {
5636 0 : match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
5637 0 : Err(TrySendError::Closed(_)) => {
5638 0 : // Weird mid-shutdown case?
5639 0 : }
5640 : Err(TrySendError::Full(_)) => {
5641 : // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
5642 0 : tracing::warn!(
5643 0 : "Many shards are waiting to reconcile: delayed_reconcile queue is full"
5644 : );
5645 : }
5646 0 : Ok(()) => {
5647 0 : shard.delayed_reconcile = true;
5648 0 : }
5649 : }
5650 0 : }
5651 :
5652 : // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
5653 : // number to advance. When this function is eventually called again and succeeds in getting units,
5654 : // it will spawn a reconciler that makes this waiter complete.
5655 0 : return Some(shard.future_reconcile_waiter());
5656 : }
5657 : };
5658 :
5659 0 : let Ok(gate_guard) = self.reconcilers_gate.enter() else {
5660 : // Gate closed: we're shutting down, drop out.
5661 0 : return None;
5662 : };
5663 :
5664 0 : shard.spawn_reconciler(
5665 0 : &self.result_tx,
5666 0 : nodes,
5667 0 : &self.compute_hook,
5668 0 : reconciler_config,
5669 0 : &self.config,
5670 0 : &self.persistence,
5671 0 : units,
5672 0 : gate_guard,
5673 0 : &self.reconcilers_cancel,
5674 0 : )
5675 0 : }
5676 :
5677 : /// Check all tenants for pending reconciliation work, and reconcile those in need.
5678 : /// Additionally, reschedule tenants that require it.
5679 : ///
5680 : /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
5681 : /// spawned but some _would_ have been spawned if `reconciler_concurrency` units were
5682 : /// available. A return value of 0 indicates that everything is fully reconciled already.
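 : ///
 : /// An illustrative caller sketch (not a doctest): polling until the system reports
 : /// itself fully reconciled.
 : ///
 : /// ```ignore
 : /// while service.reconcile_all() != 0 {
 : ///     // back off briefly, then re-check
 : /// }
 : /// ```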
5683 0 : fn reconcile_all(&self) -> usize {
5684 0 : let mut locked = self.inner.write().unwrap();
5685 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
5686 0 : let pageservers = nodes.clone();
5687 0 :
5688 0 : let mut schedule_context = ScheduleContext::default();
5689 0 :
5690 0 : let mut reconciles_spawned = 0;
5691 0 : for (tenant_shard_id, shard) in tenants.iter_mut() {
5692 0 : if tenant_shard_id.is_shard_zero() {
5693 0 : schedule_context = ScheduleContext::default();
5694 0 : }
5695 :
5696 : // If this shard is already enqueued for delayed reconciliation and no concurrency units are free, skip it.
5697 0 : if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
5698 : // If there is something delayed, then return a nonzero count so that
5699 : // callers like reconcile_all_now do not incorrectly get the impression
5700 : // that the system is in a quiescent state.
5701 0 : reconciles_spawned = std::cmp::max(1, reconciles_spawned);
5702 0 : continue;
5703 0 : }
5704 0 :
5705 0 : // Eventual consistency: if an earlier reconcile job failed, and the shard is still
5706 0 : // dirty, spawn another one.
5707 0 : if self.maybe_reconcile_shard(shard, &pageservers).is_some() {
5708 0 : reconciles_spawned += 1;
5709 0 : }
5710 :
5711 0 : schedule_context.avoid(&shard.intent.all_pageservers());
5712 : }
5713 :
5714 0 : reconciles_spawned
5715 0 : }
5716 :
5717 : /// `optimize` in this context means identifying shards which have valid scheduled locations, but
5718 : /// could be scheduled somewhere better:
5719 : /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
5720 : /// * e.g. after a node fails then recovers, to move some work back to it
5721 : /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
5722 : /// * e.g. after a shard split, the initial attached locations will all be on the node where
5723 : /// we did the split, but are probably better placed elsewhere.
5724 : /// - Creating new secondary locations if it improves the spreading of a sharded tenant
5725 : /// * e.g. after a shard split, some locations will be on the same node (where the split
5726 : /// happened), and will probably be better placed elsewhere.
5727 : ///
5728 : /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
5729 : /// the time of scheduling, this function looks for cases where a better-scoring location is available
5730 : /// according to those same soft constraints.
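 : ///
 : /// The pass has three phases: a synchronous plan ([`Self::optimize_all_plan`]), an
 : /// asynchronous validation against pageservers ([`Self::optimize_all_validate`]),
 : /// and a synchronous apply under the lock.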
5731 0 : async fn optimize_all(&self) -> usize {
5732 : // Limit on how many shards' optimizations each call to this function will execute. Combined
5733 : // with the frequency of background calls, this acts as an implicit rate limit that runs a small
5734 : // trickle of optimizations in the background, rather than executing a large number in parallel
5735 : // when a change occurs.
5736 : const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 2;
5737 :
5738 : // Synchronous prepare: scan shards for possible scheduling optimizations
5739 0 : let candidate_work = self.optimize_all_plan();
5740 0 : let candidate_work_len = candidate_work.len();
5741 :
5742 : // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply validation
5743 0 : let validated_work = self.optimize_all_validate(candidate_work).await;
5744 :
5745 0 : let was_work_filtered = validated_work.len() != candidate_work_len;
5746 0 :
5747 0 : // Synchronous apply: update the shards' intent states according to validated optimizations
5748 0 : let mut reconciles_spawned = 0;
5749 0 : let mut optimizations_applied = 0;
5750 0 : let mut locked = self.inner.write().unwrap();
5751 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5752 0 : for (tenant_shard_id, optimization) in validated_work {
5753 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
5754 : // Shard was dropped between planning and execution: skip it.
5755 0 : continue;
5756 : };
5757 0 : if shard.apply_optimization(scheduler, optimization) {
5758 0 : optimizations_applied += 1;
5759 0 : if self.maybe_reconcile_shard(shard, nodes).is_some() {
5760 0 : reconciles_spawned += 1;
5761 0 : }
5762 0 : }
5763 :
5764 0 : if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
5765 0 : break;
5766 0 : }
5767 : }
5768 :
5769 0 : if was_work_filtered {
5770 0 : // If we filtered any work out during validation, ensure we return a nonzero value to indicate
5771 0 : // to callers that the system is not in a truly quiet state, it's going to do some work as soon
5772 0 : // as these validations start passing.
5773 0 : reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
5774 0 : }
5775 :
5776 0 : reconciles_spawned
5777 0 : }
5778 :
5779 0 : fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
5780 0 : let mut schedule_context = ScheduleContext::default();
5781 0 :
5782 0 : let mut tenant_shards: Vec<&TenantShard> = Vec::new();
5783 :
5784 : // How many candidate optimizations we will generate before evaluating them for readiness: setting
5785 : // this higher than the execution limit gives us a chance to execute some work even if the first
5786 : // few optimizations we find are not ready.
5787 : const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 8;
5788 :
5789 0 : let mut work = Vec::new();
5790 0 :
5791 0 : let mut locked = self.inner.write().unwrap();
5792 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
5793 0 : for (tenant_shard_id, shard) in tenants.iter() {
5794 0 : if tenant_shard_id.is_shard_zero() {
5795 0 : // Reset accumulators on the first shard in a tenant
5796 0 : schedule_context = ScheduleContext::default();
5797 0 : schedule_context.mode = ScheduleMode::Speculative;
5798 0 : tenant_shards.clear();
5799 0 : }
5800 :
5801 0 : if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
5802 0 : break;
5803 0 : }
5804 0 :
5805 0 : match shard.get_scheduling_policy() {
5806 0 : ShardSchedulingPolicy::Active => {
5807 0 : // Ok to do optimization
5808 0 : }
5809 : ShardSchedulingPolicy::Essential
5810 : | ShardSchedulingPolicy::Pause
5811 : | ShardSchedulingPolicy::Stop => {
5812 : // Policy prevents optimizing this shard.
5813 0 : continue;
5814 : }
5815 : }
5816 :
5817 : // Accumulate the schedule context for all the shards in a tenant: we must have
5818 : // the total view of all shards before we can try to optimize any of them.
5819 0 : schedule_context.avoid(&shard.intent.all_pageservers());
5820 0 : if let Some(attached) = shard.intent.get_attached() {
5821 0 : schedule_context.push_attached(*attached);
5822 0 : }
5823 0 : tenant_shards.push(shard);
5824 0 :
5825 0 : // Once we have seen the last shard in the tenant, proceed to search across all shards
5826 0 : // in the tenant for optimizations
5827 0 : if shard.shard.number.0 == shard.shard.count.count() - 1 {
5828 0 : if tenant_shards.iter().any(|s| s.reconciler.is_some()) {
5829 : // Do not start any optimizations while another change to the tenant is ongoing: this
5830 : // is not necessary for correctness, but simplifies operations and implicitly throttles
5831 : // optimization changes to happen in a "trickle" over time.
5832 0 : continue;
5833 0 : }
5834 0 :
5835 0 : if tenant_shards.iter().any(|s| {
5836 0 : !matches!(s.splitting, SplitState::Idle)
5837 0 : || matches!(s.policy, PlacementPolicy::Detached)
5838 0 : }) {
5839 : // Never attempt to optimize a tenant that is currently being split, or
5840 : // a tenant that is meant to be detached
5841 0 : continue;
5842 0 : }
5843 :
5844 : // TODO: optimization calculations are relatively expensive: create some fast-path for
5845 : // the common idle case (avoiding the search on tenants that we have recently checked)
5846 :
5847 0 : for shard in &tenant_shards {
5848 0 : if let Some(optimization) =
5849 : // If idle, maybe optimize attachments: if a shard has a secondary location that is preferable to
5850 : // its primary location based on soft constraints, cut it over.
5851 0 : shard.optimize_attachment(nodes, &schedule_context)
5852 : {
5853 0 : work.push((shard.tenant_shard_id, optimization));
5854 0 : break;
5855 0 : } else if let Some(optimization) =
5856 : // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
5857 : // better placed on another node, based on ScheduleContext, then adjust it. This
5858 : // covers cases like after a shard split, where we might have too many shards
5859 : // in the same tenant with secondary locations on the node where they originally split.
5860 0 : shard.optimize_secondary(scheduler, &schedule_context)
5861 : {
5862 0 : work.push((shard.tenant_shard_id, optimization));
5863 0 : break;
5864 0 : }
5865 :
5866 : // TODO: extend this mechanism to prefer attaching on nodes with fewer attached
5867 : // tenants (i.e. extend schedule state to distinguish attached from secondary counts),
5868 : // for the total number of attachments on a node (not just within a tenant.)
5869 : }
5870 0 : }
5871 : }
5872 :
5873 0 : work
5874 0 : }
5875 :
5876 0 : async fn optimize_all_validate(
5877 0 : &self,
5878 0 : candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
5879 0 : ) -> Vec<(TenantShardId, ScheduleOptimization)> {
5880 0 : // Take a clone of the node map to use outside the lock in async validation phase
5881 0 : let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
5882 0 :
5883 0 : let mut want_secondary_status = Vec::new();
5884 0 :
5885 0 : // Validate our plans: this is an async phase where we may do I/O to pageservers to
5886 0 : // check that the state of locations is acceptable to run the optimization, such as
5887 0 : // checking that a secondary location is sufficiently warmed-up to cleanly cut over
5888 0 : // in a live migration.
5889 0 : let mut validated_work = Vec::new();
5890 0 : for (tenant_shard_id, optimization) in candidate_work {
5891 0 : match optimization.action {
5892 : ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
5893 : old_attached_node_id: _,
5894 0 : new_attached_node_id,
5895 0 : }) => {
5896 0 : match validation_nodes.get(&new_attached_node_id) {
5897 0 : None => {
5898 0 : // Node was dropped between planning and validation
5899 0 : }
5900 0 : Some(node) => {
5901 0 : if !node.is_available() {
5902 0 : tracing::info!("Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable");
5903 0 : } else {
5904 0 : // Accumulate optimizations that require fetching secondary status, so that we can execute these
5905 0 : // remote API requests concurrently.
5906 0 : want_secondary_status.push((
5907 0 : tenant_shard_id,
5908 0 : node.clone(),
5909 0 : optimization,
5910 0 : ));
5911 0 : }
5912 : }
5913 : }
5914 : }
5915 : ScheduleOptimizationAction::ReplaceSecondary(_) => {
5916 : // No extra checks needed to replace a secondary: this does not interrupt client access
5917 0 : validated_work.push((tenant_shard_id, optimization))
5918 : }
5919 : };
5920 : }
5921 :
5922 : // Call into pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth migration: we
5923 : // do this so that we avoid spawning a Reconciler that would have to wait minutes/hours for a destination to warm up: that reconciler
5924 : // would hold a precious reconcile semaphore unit the whole time it was waiting for the destination to warm up.
5925 0 : let results = self
5926 0 : .tenant_for_shards_api(
5927 0 : want_secondary_status
5928 0 : .iter()
5929 0 : .map(|i| (i.0, i.1.clone()))
5930 0 : .collect(),
5931 0 : |tenant_shard_id, client| async move {
5932 0 : client.tenant_secondary_status(tenant_shard_id).await
5933 0 : },
5934 0 : 1,
5935 0 : 1,
5936 0 : SHORT_RECONCILE_TIMEOUT,
5937 0 : &self.cancel,
5938 0 : )
5939 0 : .await;
5940 :
5941 0 : for ((tenant_shard_id, node, optimization), secondary_status) in
5942 0 : want_secondary_status.into_iter().zip(results.into_iter())
5943 : {
5944 0 : match secondary_status {
5945 0 : Err(e) => {
5946 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}");
5947 : }
5948 0 : Ok(progress) => {
5949 : // We require secondary locations to have less than 10GiB of downloads pending before we will use
5950 : // them in an optimization
5951 : const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
5952 :
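// Worked example of the freshness check below (illustrative numbers, not from
// the source): a secondary with bytes_total = 100 GiB and bytes_downloaded =
// 95 GiB has 5 GiB pending, under the 10 GiB threshold, so it qualifies. A
// small tenant with bytes_total = 8 GiB falls entirely below the threshold,
// so it must be fully downloaded (bytes_downloaded == bytes_total) to qualify.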
5953 0 : if progress.heatmap_mtime.is_none()
5954 0 : || (progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
5955 0 : && progress.bytes_downloaded != progress.bytes_total)
5956 0 : || progress.bytes_total - progress.bytes_downloaded
5957 0 : > DOWNLOAD_FRESHNESS_THRESHOLD
5958 : {
5959 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}");
5960 : } else {
5961 : // Location looks ready: proceed
5962 0 : tracing::info!(
5963 0 : "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
5964 : );
5965 0 : validated_work.push((tenant_shard_id, optimization))
5966 : }
5967 : }
5968 : }
5969 : }
5970 :
5971 0 : validated_work
5972 0 : }
5973 :
5974 : /// Look for shards which are oversized and in need of splitting
5975 0 : async fn autosplit_tenants(self: &Arc<Self>) {
5976 0 : let Some(split_threshold) = self.config.split_threshold else {
5977 : // Auto-splitting is disabled
5978 0 : return;
5979 : };
5980 :
5981 0 : let nodes = self.inner.read().unwrap().nodes.clone();
5982 :
5983 : const SPLIT_TO_MAX: ShardCount = ShardCount::new(8);
5984 :
5985 0 : let mut top_n = Vec::new();
5986 0 :
5987 0 : // Call into each node to look for big tenants
5988 0 : let top_n_request = TopTenantShardsRequest {
5989 0 : // We currently split based on logical size, for simplicity: logical size is a signal of
5990 0 : // the user's intent to run a large database, whereas physical/resident size can be symptoms
5991 0 : // of compaction issues. Eventually we should switch to using resident size to bound the
5992 0 : // disk space impact of one shard.
5993 0 : order_by: models::TenantSorting::MaxLogicalSize,
5994 0 : limit: 10,
5995 0 : where_shards_lt: Some(SPLIT_TO_MAX),
5996 0 : where_gt: Some(split_threshold),
5997 0 : };
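// Illustrative semantics of the request above (the threshold value is an
// example, not from the source): with split_threshold = 64 GiB, each node
// reports up to 10 of its tenants ordered by max logical size, keeping only
// those larger than 64 GiB whose shard count is still below SPLIT_TO_MAX.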
5998 0 : for node in nodes.values() {
5999 0 : let request_ref = &top_n_request;
6000 0 : match node
6001 0 : .with_client_retries(
6002 0 : |client| async move {
6003 0 : let request = request_ref.clone();
6004 0 : client.top_tenant_shards(request.clone()).await
6005 0 : },
6006 0 : &self.config.jwt_token,
6007 0 : 3,
6008 0 : 3,
6009 0 : Duration::from_secs(5),
6010 0 : &self.cancel,
6011 0 : )
6012 0 : .await
6013 : {
6014 0 : Some(Ok(node_top_n)) => {
6015 0 : top_n.extend(node_top_n.shards.into_iter());
6016 0 : }
6017 : Some(Err(mgmt_api::Error::Cancelled)) => {
6018 0 : continue;
6019 : }
6020 0 : Some(Err(e)) => {
6021 0 : tracing::warn!("Failed to fetch top N tenants from {node}: {e}");
6022 0 : continue;
6023 : }
6024 : None => {
6025 : // Node is shutting down
6026 0 : continue;
6027 : }
6028 : };
6029 : }
6030 :
6031 : // Pick the biggest tenant to split first
6032 0 : top_n.sort_by_key(|i| std::cmp::Reverse(i.resident_size));
6033 0 : let Some(split_candidate) = top_n.into_iter().next() else {
6034 0 : tracing::debug!("No split-elegible shards found");
6035 0 : tracing::debug!("No split-eligible shards found");
6036 : };
6037 :
6038 : // We spawn a task to run the split, just as if some external API client had requested it: we
6039 : // don't want to block the background reconcile loop on this.
6040 0 : tracing::info!("Auto-splitting tenant for size threshold {split_threshold}: current size {split_candidate:?}");
6041 :
6042 0 : let this = self.clone();
6043 0 : tokio::spawn(
6044 0 : async move {
6045 0 : match this
6046 0 : .tenant_shard_split(
6047 0 : split_candidate.id.tenant_id,
6048 0 : TenantShardSplitRequest {
6049 0 : // Always split to the max number of shards: this avoids stepping through
6050 0 : // intervening shard counts and encountering the overhead of a split+cleanup
6051 0 : // each time as a tenant grows, and is not too expensive because our max shard
6052 0 : // count is relatively low anyway.
6053 0 : // This policy will be adjusted in the future once we support higher shard counts.
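// Illustration of the policy above: a tenant crossing the size threshold
// splits 1 -> 8 in a single operation instead of stepping 1 -> 2 -> 4 -> 8,
// which would cost three separate split+cleanup cycles as it grows.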
6054 0 : new_shard_count: SPLIT_TO_MAX.literal(),
6055 0 : new_stripe_size: Some(ShardParameters::DEFAULT_STRIPE_SIZE),
6056 0 : },
6057 0 : )
6058 0 : .await
6059 : {
6060 : Ok(_) => {
6061 0 : tracing::info!("Successful auto-split");
6062 : }
6063 0 : Err(e) => {
6064 0 : tracing::error!("Auto-split failed: {e}");
6065 : }
6066 : }
6067 0 : }
6068 0 : .instrument(tracing::info_span!("auto_split", tenant_id=%split_candidate.id.tenant_id)),
6069 : );
6070 0 : }
6071 :
6072 : /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
6073 : /// also wait for any generated Reconcilers to complete. Calling this until it returns zero should
6074 : /// put the system into a quiescent state where future background reconciliations won't do anything.
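/// A test-side usage sketch (illustrative; assumes a `service` handle to this
/// struct and a caller that can propagate `ReconcileWaitError`):
///
/// ```ignore
/// // Drive the system to quiescence: keep going until nothing is left to do.
/// while service.reconcile_all_now().await? > 0 {}
/// ```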
6075 0 : pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
6076 0 : let reconciles_spawned = self.reconcile_all();
6077 0 : let reconciles_spawned = if reconciles_spawned == 0 {
6078 : // Only optimize when we are otherwise idle
6079 0 : self.optimize_all().await
6080 : } else {
6081 0 : reconciles_spawned
6082 : };
6083 :
6084 0 : let waiters = {
6085 0 : let mut waiters = Vec::new();
6086 0 : let locked = self.inner.read().unwrap();
6087 0 : for (_tenant_shard_id, shard) in locked.tenants.iter() {
6088 0 : if let Some(waiter) = shard.get_waiter() {
6089 0 : waiters.push(waiter);
6090 0 : }
6091 : }
6092 0 : waiters
6093 0 : };
6094 0 :
6095 0 : let waiter_count = waiters.len();
6096 0 : match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
6097 0 : Ok(()) => {}
6098 0 : Err(ReconcileWaitError::Failed(_, reconcile_error))
6099 0 : if matches!(*reconcile_error, ReconcileError::Cancel) =>
6100 0 : {
6101 0 : // Ignore reconciler cancel errors: this reconciler might have shut down
6102 0 : // because some other change superceded it. We will return a nonzero number,
6103 0 : // because some other change superseded it. We will return a nonzero number,
6104 0 : }
6105 0 : Err(e) => {
6106 0 : return Err(e);
6107 : }
6108 : };
6109 :
6110 0 : tracing::info!(
6111 0 : "{} reconciles in reconcile_all, {} waiters",
6112 : reconciles_spawned,
6113 : waiter_count
6114 : );
6115 :
6116 0 : Ok(std::cmp::max(waiter_count, reconciles_spawned))
6117 0 : }
6118 :
6119 0 : async fn stop_reconciliations(&self, reason: StopReconciliationsReason) {
6120 0 : // Cancel all on-going reconciles and wait for them to exit the gate.
6121 0 : tracing::info!("{reason}: cancelling and waiting for in-flight reconciles");
6122 0 : self.reconcilers_cancel.cancel();
6123 0 : self.reconcilers_gate.close().await;
6124 :
6125 : // Signal the background loop in [`Service::process_results`] to exit once
6126 : // it has processed the results from all the reconciles we cancelled earlier.
6127 0 : tracing::info!("{reason}: processing results from previously in-flight reconciles");
6128 0 : self.result_tx.send(ReconcileResultRequest::Stop).ok();
6129 0 : self.result_tx.closed().await;
6130 0 : }
6131 :
6132 0 : pub async fn shutdown(&self) {
6133 0 : self.stop_reconciliations(StopReconciliationsReason::ShuttingDown)
6134 0 : .await;
6135 :
6136 : // Background tasks hold gate guards: this notifies them of the cancellation and
6137 : // waits for them all to complete.
6138 0 : tracing::info!("Shutting down: cancelling and waiting for background tasks to exit");
6139 0 : self.cancel.cancel();
6140 0 : self.gate.close().await;
6141 0 : }
6142 :
6143 : /// Spot check the download lag for a secondary location of a shard.
6144 : /// Should be used as a heuristic, since it's not always precise: the
6145 : /// secondary might not have downloaded the new heatmap yet and, hence,
6146 : /// is not aware of the lag.
6147 : ///
6148 : /// Returns:
6149 : /// * Ok(None) if the lag could not be determined from the status,
6150 : /// * Ok(Some(_)) if the lag could be determind
6151 : /// * Ok(Some(_)) if the lag could be determined,
6152 0 : async fn secondary_lag(
6153 0 : &self,
6154 0 : secondary: &NodeId,
6155 0 : tenant_shard_id: TenantShardId,
6156 0 : ) -> Result<Option<u64>, mgmt_api::Error> {
6157 0 : let nodes = self.inner.read().unwrap().nodes.clone();
6158 0 : let node = nodes.get(secondary).ok_or(mgmt_api::Error::ApiError(
6159 0 : StatusCode::NOT_FOUND,
6160 0 : format!("Node with id {} not found", secondary),
6161 0 : ))?;
6162 :
6163 0 : match node
6164 0 : .with_client_retries(
6165 0 : |client| async move { client.tenant_secondary_status(tenant_shard_id).await },
6166 0 : &self.config.jwt_token,
6167 0 : 1,
6168 0 : 3,
6169 0 : Duration::from_millis(250),
6170 0 : &self.cancel,
6171 0 : )
6172 0 : .await
6173 : {
6174 0 : Some(Ok(status)) => match status.heatmap_mtime {
6175 0 : Some(_) => Ok(Some(status.bytes_total - status.bytes_downloaded)),
6176 0 : None => Ok(None),
6177 : },
6178 0 : Some(Err(e)) => Err(e),
6179 0 : None => Err(mgmt_api::Error::Cancelled),
6180 : }
6181 0 : }
6182 :
6183 : /// Drain a node by moving the shards attached to it as primaries.
6184 : /// This is a long-running operation and it should run as a separate Tokio task.
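///
/// A spawning sketch (illustrative; the production call site drives this via
/// the background node operations machinery rather than a bare spawn):
///
/// ```ignore
/// let service = Arc::clone(&service);
/// let cancel = CancellationToken::new();
/// tokio::spawn(async move {
///     if let Err(e) = service.drain_node(node_id, cancel).await {
///         tracing::error!("Drain of node {node_id} failed: {e}");
///     }
/// });
/// ```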
6185 0 : pub(crate) async fn drain_node(
6186 0 : self: &Arc<Self>,
6187 0 : node_id: NodeId,
6188 0 : cancel: CancellationToken,
6189 0 : ) -> Result<(), OperationError> {
6190 : const MAX_SECONDARY_LAG_BYTES_DEFAULT: u64 = 256 * 1024 * 1024;
6191 0 : let max_secondary_lag_bytes = self
6192 0 : .config
6193 0 : .max_secondary_lag_bytes
6194 0 : .unwrap_or(MAX_SECONDARY_LAG_BYTES_DEFAULT);
6195 :
6196 : // By default, live migrations are generous about the wait time for getting
6197 : // the secondary location up to speed. When draining, give up earlier in order
6198 : // to not stall the operation when a cold secondary is encountered.
6199 : const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(20);
6200 : const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
6201 0 : let reconciler_config = ReconcilerConfigBuilder::new()
6202 0 : .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
6203 0 : .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
6204 0 : .build();
6205 0 :
6206 0 : let mut waiters = Vec::new();
6207 0 :
6208 0 : let mut tid_iter = TenantShardIterator::new({
6209 0 : let service = self.clone();
6210 0 : move |last_inspected_shard: Option<TenantShardId>| {
6211 0 : let locked = &service.inner.read().unwrap();
6212 0 : let tenants = &locked.tenants;
6213 0 : let entry = match last_inspected_shard {
6214 0 : Some(skip_past) => {
6215 0 : // Skip to the last seen tenant shard id
6216 0 : let mut cursor = tenants.iter().skip_while(|(tid, _)| **tid != skip_past);
6217 0 :
6218 0 : // Skip past the last seen
6219 0 : cursor.nth(1)
6220 : }
6221 0 : None => tenants.first_key_value(),
6222 : };
6223 :
6224 0 : entry.map(|(tid, _)| tid).copied()
6225 0 : }
6226 0 : });
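// Resumption semantics of the iterator above (illustrative): with tenant shard
// ids {A, B, C} in the map, a call with None yields A; a call with Some(A)
// skips forward to A and returns the entry after it, B; a call with Some(C)
// yields None. The map is re-read on every call, so if the last-seen shard was
// deleted in the meantime the skip_while consumes the whole map and the
// iteration simply ends early.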
6227 :
6228 0 : while !tid_iter.finished() {
6229 0 : if cancel.is_cancelled() {
6230 0 : match self
6231 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6232 0 : .await
6233 : {
6234 0 : Ok(()) => return Err(OperationError::Cancelled),
6235 0 : Err(err) => {
6236 0 : return Err(OperationError::FinalizeError(
6237 0 : format!(
6238 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
6239 0 : node_id, err
6240 0 : )
6241 0 : .into(),
6242 0 : ));
6243 : }
6244 : }
6245 0 : }
6246 0 :
6247 0 : drain_utils::validate_node_state(&node_id, self.inner.read().unwrap().nodes.clone())?;
6248 :
6249 0 : while waiters.len() < MAX_RECONCILES_PER_OPERATION {
6250 0 : let tid = match tid_iter.next() {
6251 0 : Some(tid) => tid,
6252 : None => {
6253 0 : break;
6254 : }
6255 : };
6256 :
6257 0 : let tid_drain = TenantShardDrain {
6258 0 : drained_node: node_id,
6259 0 : tenant_shard_id: tid,
6260 0 : };
6261 :
6262 0 : let dest_node_id = {
6263 0 : let locked = self.inner.read().unwrap();
6264 0 :
6265 0 : match tid_drain
6266 0 : .tenant_shard_eligible_for_drain(&locked.tenants, &locked.scheduler)
6267 : {
6268 0 : Some(node_id) => node_id,
6269 : None => {
6270 0 : continue;
6271 : }
6272 : }
6273 : };
6274 :
6275 0 : match self.secondary_lag(&dest_node_id, tid).await {
6276 0 : Ok(Some(lag)) if lag <= max_secondary_lag_bytes => {
6277 0 : // The secondary is reasonably up to date.
6278 0 : // Migrate to it
6279 0 : }
6280 0 : Ok(Some(lag)) => {
6281 0 : tracing::info!(
6282 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
6283 0 : "Secondary on node {dest_node_id} is lagging by {lag} bytes. Skipping reconcile."
6284 : );
6285 0 : continue;
6286 : }
6287 : Ok(None) => {
6288 0 : tracing::info!(
6289 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
6290 0 : "Could not determine lag for secondary on node {dest_node_id}. Skipping reconcile."
6291 : );
6292 0 : continue;
6293 : }
6294 0 : Err(err) => {
6295 0 : tracing::warn!(
6296 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
6297 0 : "Failed to get secondary lag from node {dest_node_id}. Skipping reconcile: {err}"
6298 : );
6299 0 : continue;
6300 : }
6301 : }
6302 :
6303 : {
6304 0 : let mut locked = self.inner.write().unwrap();
6305 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
6306 0 : let rescheduled = tid_drain.reschedule_to_secondary(
6307 0 : dest_node_id,
6308 0 : tenants,
6309 0 : scheduler,
6310 0 : nodes,
6311 0 : )?;
6312 :
6313 0 : if let Some(tenant_shard) = rescheduled {
6314 0 : let waiter = self.maybe_configured_reconcile_shard(
6315 0 : tenant_shard,
6316 0 : nodes,
6317 0 : reconciler_config,
6318 0 : );
6319 0 : if let Some(some) = waiter {
6320 0 : waiters.push(some);
6321 0 : }
6322 0 : }
6323 : }
6324 : }
6325 :
6326 0 : waiters = self
6327 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
6328 0 : .await;
6329 :
6330 0 : failpoint_support::sleep_millis_async!("sleepy-drain-loop", &cancel);
6331 : }
6332 :
6333 0 : while !waiters.is_empty() {
6334 0 : if cancel.is_cancelled() {
6335 0 : match self
6336 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6337 0 : .await
6338 : {
6339 0 : Ok(()) => return Err(OperationError::Cancelled),
6340 0 : Err(err) => {
6341 0 : return Err(OperationError::FinalizeError(
6342 0 : format!(
6343 0 : "Failed to finalise drain cancel of {} by setting scheduling policy to Active: {}",
6344 0 : node_id, err
6345 0 : )
6346 0 : .into(),
6347 0 : ));
6348 : }
6349 : }
6350 0 : }
6351 0 :
6352 0 : tracing::info!("Awaiting {} pending drain reconciliations", waiters.len());
6353 :
6354 0 : waiters = self
6355 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
6356 0 : .await;
6357 : }
6358 :
6359 : // At this point we have done the best we could to drain shards from this node.
6360 : // Set the node scheduling policy to [`NodeSchedulingPolicy::PauseForRestart`]
6361 : // to complete the drain.
6362 0 : if let Err(err) = self
6363 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::PauseForRestart))
6364 0 : .await
6365 : {
6366 : // This is not fatal. Anything that is polling the node scheduling policy to detect
6367 : // the end of the drain operations will hang, but all such places should enforce an
6368 : // overall timeout. The scheduling policy will be updated upon node re-attach and/or
6369 : // by the counterpart fill operation.
6370 0 : return Err(OperationError::FinalizeError(
6371 0 : format!(
6372 0 : "Failed to finalise drain of {node_id} by setting scheduling policy to PauseForRestart: {err}"
6373 0 : )
6374 0 : .into(),
6375 0 : ));
6376 0 : }
6377 0 :
6378 0 : Ok(())
6379 0 : }
6380 :
6381 : /// Create a node fill plan (pick secondaries to promote) that meets the following requirements:
6382 : /// 1. The node should be filled until it reaches the expected cluster average of
6383 : /// attached shards. If there are not enough secondaries on the node, the plan stops early.
6384 : /// 2. Select tenant shards to promote such that the number of attached shards is balanced
6385 : /// throughout the cluster. We achieve this by picking tenant shards from each node,
6386 : /// starting from the ones with the largest number of attached shards, until the node
6387 : /// reaches the expected cluster average.
6388 : /// 3. Avoid promoting more shards of the same tenant than required. The upper bound
6389 : /// for the number of shards from the same tenant promoted to the node being filled is:
6390 : /// the tenant's shard count divided by the number of nodes in the cluster, with a minimum of 1 (see the worked example below).
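///
/// Worked example (illustrative numbers): in a 3-node cluster with 30 attached
/// shards, the expected average is 10 per node. If the node being filled holds
/// 4 attached shards, its fill requirement is 6. We then walk the other nodes
/// from most-loaded downwards, planning promotions from each donor until either
/// 6 promotions are planned or the donor would drop below 10 attached shards.
/// An 8-shard tenant on this cluster contributes at most max(8 / 3, 1) = 2
/// promotions to the plan.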
6391 0 : fn fill_node_plan(&self, node_id: NodeId) -> Vec<TenantShardId> {
6392 0 : let mut locked = self.inner.write().unwrap();
6393 0 : let fill_requirement = locked.scheduler.compute_fill_requirement(node_id);
6394 0 :
6395 0 : let mut tids_by_node = locked
6396 0 : .tenants
6397 0 : .iter_mut()
6398 0 : .filter_map(|(tid, tenant_shard)| {
6399 0 : if tenant_shard.intent.get_secondary().contains(&node_id) {
6400 0 : if let Some(primary) = tenant_shard.intent.get_attached() {
6401 0 : return Some((*primary, *tid));
6402 0 : }
6403 0 : }
6404 :
6405 0 : None
6406 0 : })
6407 0 : .into_group_map();
6408 0 :
6409 0 : let expected_attached = locked.scheduler.expected_attached_shard_count();
6410 0 : let nodes_by_load = locked.scheduler.nodes_by_attached_shard_count();
6411 0 :
6412 0 : let mut promoted_per_tenant: HashMap<TenantId, usize> = HashMap::new();
6413 0 : let mut plan = Vec::new();
6414 :
6415 0 : for (node_id, attached) in nodes_by_load {
6416 0 : let available = locked
6417 0 : .nodes
6418 0 : .get(&node_id)
6419 0 : .map_or(false, |n| n.is_available());
6420 0 : if !available {
6421 0 : continue;
6422 0 : }
6423 0 :
6424 0 : if plan.len() >= fill_requirement
6425 0 : || tids_by_node.is_empty()
6426 0 : || attached <= expected_attached
6427 : {
6428 0 : break;
6429 0 : }
6430 0 :
6431 0 : let can_take = attached - expected_attached;
6432 0 : let needed = fill_requirement - plan.len();
6433 0 : let mut take = std::cmp::min(can_take, needed);
6434 0 :
6435 0 : let mut remove_node = false;
6436 0 : while take > 0 {
6437 0 : match tids_by_node.get_mut(&node_id) {
6438 0 : Some(tids) => match tids.pop() {
6439 0 : Some(tid) => {
6440 0 : let max_promote_for_tenant = std::cmp::max(
6441 0 : tid.shard_count.count() as usize / locked.nodes.len(),
6442 0 : 1,
6443 0 : );
6444 0 : let promoted = promoted_per_tenant.entry(tid.tenant_id).or_default();
6445 0 : if *promoted < max_promote_for_tenant {
6446 0 : plan.push(tid);
6447 0 : *promoted += 1;
6448 0 : take -= 1;
6449 0 : }
6450 : }
6451 : None => {
6452 0 : remove_node = true;
6453 0 : break;
6454 : }
6455 : },
6456 : None => {
6457 0 : break;
6458 : }
6459 : }
6460 : }
6461 :
6462 0 : if remove_node {
6463 0 : tids_by_node.remove(&node_id);
6464 0 : }
6465 : }
6466 :
6467 0 : plan
6468 0 : }
6469 :
6470 : /// Fill a node by promoting its secondaries until the cluster is balanced
6471 : /// with regards to attached shard counts. Note that this operation only
6472 : /// makes sense as a counterpart to the drain implemented in [`Service::drain_node`].
6473 : /// This is a long-running operation and it should run as a separate Tokio task.
6474 0 : pub(crate) async fn fill_node(
6475 0 : &self,
6476 0 : node_id: NodeId,
6477 0 : cancel: CancellationToken,
6478 0 : ) -> Result<(), OperationError> {
6479 : const SECONDARY_WARMUP_TIMEOUT: Duration = Duration::from_secs(20);
6480 : const SECONDARY_DOWNLOAD_REQUEST_TIMEOUT: Duration = Duration::from_secs(5);
6481 0 : let reconciler_config = ReconcilerConfigBuilder::new()
6482 0 : .secondary_warmup_timeout(SECONDARY_WARMUP_TIMEOUT)
6483 0 : .secondary_download_request_timeout(SECONDARY_DOWNLOAD_REQUEST_TIMEOUT)
6484 0 : .build();
6485 0 :
6486 0 : let mut tids_to_promote = self.fill_node_plan(node_id);
6487 0 : let mut waiters = Vec::new();
6488 :
6489 : // Execute the plan we've composed above. Before applying each move from the plan,
6490 : // we validate to ensure that it has not gone stale in the meantime.
6491 0 : while !tids_to_promote.is_empty() {
6492 0 : if cancel.is_cancelled() {
6493 0 : match self
6494 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6495 0 : .await
6496 : {
6497 0 : Ok(()) => return Err(OperationError::Cancelled),
6498 0 : Err(err) => {
6499 0 : return Err(OperationError::FinalizeError(
6500 0 : format!(
6501 0 : "Failed to finalise fill cancel of {} by setting scheduling policy to Active: {}",
6502 0 : node_id, err
6503 0 : )
6504 0 : .into(),
6505 0 : ));
6506 : }
6507 : }
6508 0 : }
6509 0 :
6510 0 : {
6511 0 : let mut locked = self.inner.write().unwrap();
6512 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
6513 :
6514 0 : let node = nodes.get(&node_id).ok_or(OperationError::NodeStateChanged(
6515 0 : format!("node {node_id} was removed").into(),
6516 0 : ))?;
6517 :
6518 0 : let current_policy = node.get_scheduling();
6519 0 : if !matches!(current_policy, NodeSchedulingPolicy::Filling) {
6520 : // TODO(vlad): maybe cancel pending reconciles before erroring out. need to think
6521 : // about it
6522 0 : return Err(OperationError::NodeStateChanged(
6523 0 : format!("node {node_id} changed state to {current_policy:?}").into(),
6524 0 : ));
6525 0 : }
6526 :
6527 0 : while waiters.len() < MAX_RECONCILES_PER_OPERATION {
6528 0 : if let Some(tid) = tids_to_promote.pop() {
6529 0 : if let Some(tenant_shard) = tenants.get_mut(&tid) {
6530 : // If the node being filled is not a secondary anymore,
6531 : // skip the promotion.
6532 0 : if !tenant_shard.intent.get_secondary().contains(&node_id) {
6533 0 : continue;
6534 0 : }
6535 0 :
6536 0 : let previously_attached_to = *tenant_shard.intent.get_attached();
6537 0 : match tenant_shard.reschedule_to_secondary(Some(node_id), scheduler) {
6538 0 : Err(e) => {
6539 0 : tracing::warn!(
6540 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
6541 0 : "Scheduling error when filling pageserver {}: {e}", node_id
6542 : );
6543 : }
6544 : Ok(()) => {
6545 0 : tracing::info!(
6546 0 : tenant_id=%tid.tenant_id, shard_id=%tid.shard_slug(),
6547 0 : "Rescheduled shard while filling node {}: {:?} -> {}",
6548 : node_id,
6549 : previously_attached_to,
6550 : node_id
6551 : );
6552 :
6553 0 : if let Some(waiter) = self.maybe_configured_reconcile_shard(
6554 0 : tenant_shard,
6555 0 : nodes,
6556 0 : reconciler_config,
6557 0 : ) {
6558 0 : waiters.push(waiter);
6559 0 : }
6560 : }
6561 : }
6562 0 : }
6563 : } else {
6564 0 : break;
6565 : }
6566 : }
6567 : }
6568 :
6569 0 : waiters = self
6570 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
6571 0 : .await;
6572 : }
6573 :
6574 0 : while !waiters.is_empty() {
6575 0 : if cancel.is_cancelled() {
6576 0 : match self
6577 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6578 0 : .await
6579 : {
6580 0 : Ok(()) => return Err(OperationError::Cancelled),
6581 0 : Err(err) => {
6582 0 : return Err(OperationError::FinalizeError(
6583 0 : format!(
6584 0 : "Failed to finalise fill cancel of {} by setting scheduling policy to Active: {}",
6585 0 : node_id, err
6586 0 : )
6587 0 : .into(),
6588 0 : ));
6589 : }
6590 : }
6591 0 : }
6592 0 :
6593 0 : tracing::info!("Awaiting {} pending fill reconciliations", waiters.len());
6594 :
6595 0 : waiters = self
6596 0 : .await_waiters_remainder(waiters, SHORT_RECONCILE_TIMEOUT)
6597 0 : .await;
6598 : }
6599 :
6600 0 : if let Err(err) = self
6601 0 : .node_configure(node_id, None, Some(NodeSchedulingPolicy::Active))
6602 0 : .await
6603 : {
6604 : // This isn't a huge issue since the filling process starts upon request. However, it
6605 : // will prevent the next drain from starting. The only case in which this can fail
6606 : // is database unavailability. Such a case will require manual intervention.
6607 0 : return Err(OperationError::FinalizeError(
6608 0 : format!("Failed to finalise fill of {node_id} by setting scheduling policy to Active: {err}")
6609 0 : .into(),
6610 0 : ));
6611 0 : }
6612 0 :
6613 0 : Ok(())
6614 0 : }
6615 :
6616 : /// Updates scrubber metadata health check results.
6617 0 : pub(crate) async fn metadata_health_update(
6618 0 : &self,
6619 0 : update_req: MetadataHealthUpdateRequest,
6620 0 : ) -> Result<(), ApiError> {
6621 0 : let now = chrono::offset::Utc::now();
6622 0 : let (healthy_records, unhealthy_records) = {
6623 0 : let locked = self.inner.read().unwrap();
6624 0 : let healthy_records = update_req
6625 0 : .healthy_tenant_shards
6626 0 : .into_iter()
6627 0 : // Retain only health records associated with tenant shards managed by storage controller.
6628 0 : .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
6629 0 : .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, true, now))
6630 0 : .collect();
6631 0 : let unhealthy_records = update_req
6632 0 : .unhealthy_tenant_shards
6633 0 : .into_iter()
6634 0 : .filter(|tenant_shard_id| locked.tenants.contains_key(tenant_shard_id))
6635 0 : .map(|tenant_shard_id| MetadataHealthPersistence::new(tenant_shard_id, false, now))
6636 0 : .collect();
6637 0 :
6638 0 : (healthy_records, unhealthy_records)
6639 0 : };
6640 0 :
6641 0 : self.persistence
6642 0 : .update_metadata_health_records(healthy_records, unhealthy_records, now)
6643 0 : .await?;
6644 0 : Ok(())
6645 0 : }
6646 :
6647 : /// Lists the tenant shards that have unhealthy metadata status.
6648 0 : pub(crate) async fn metadata_health_list_unhealthy(
6649 0 : &self,
6650 0 : ) -> Result<Vec<TenantShardId>, ApiError> {
6651 0 : let result = self
6652 0 : .persistence
6653 0 : .list_unhealthy_metadata_health_records()
6654 0 : .await?
6655 0 : .iter()
6656 0 : .map(|p| p.get_tenant_shard_id().unwrap())
6657 0 : .collect();
6658 0 :
6659 0 : Ok(result)
6660 0 : }
6661 :
6662 : /// Lists the tenant shards that have not been scrubbed for some duration.
6663 0 : pub(crate) async fn metadata_health_list_outdated(
6664 0 : &self,
6665 0 : not_scrubbed_for: Duration,
6666 0 : ) -> Result<Vec<MetadataHealthRecord>, ApiError> {
6667 0 : let earlier = chrono::offset::Utc::now() - not_scrubbed_for;
6668 0 : let result = self
6669 0 : .persistence
6670 0 : .list_outdated_metadata_health_records(earlier)
6671 0 : .await?
6672 0 : .into_iter()
6673 0 : .map(|record| record.into())
6674 0 : .collect();
6675 0 : Ok(result)
6676 0 : }
6677 :
6678 0 : pub(crate) fn get_leadership_status(&self) -> LeadershipStatus {
6679 0 : self.inner.read().unwrap().get_leadership_status()
6680 0 : }
6681 :
6682 0 : pub(crate) async fn step_down(&self) -> GlobalObservedState {
6683 0 : tracing::info!("Received step down request from peer");
6684 0 : failpoint_support::sleep_millis_async!("sleep-on-step-down-handling");
6685 :
6686 0 : self.inner.write().unwrap().step_down();
6687 0 : // TODO: would it make sense to have a time-out for this?
6688 0 : self.stop_reconciliations(StopReconciliationsReason::SteppingDown)
6689 0 : .await;
6690 :
6691 0 : let mut global_observed = GlobalObservedState::default();
6692 0 : let locked = self.inner.read().unwrap();
6693 0 : for (tid, tenant_shard) in locked.tenants.iter() {
6694 0 : global_observed
6695 0 : .0
6696 0 : .insert(*tid, tenant_shard.observed.clone());
6697 0 : }
6698 :
6699 0 : global_observed
6700 0 : }
6701 :
6702 0 : pub(crate) async fn get_safekeeper(
6703 0 : &self,
6704 0 : id: i64,
6705 0 : ) -> Result<crate::persistence::SafekeeperPersistence, DatabaseError> {
6706 0 : self.persistence.safekeeper_get(id).await
6707 0 : }
6708 :
6709 0 : pub(crate) async fn upsert_safekeeper(
6710 0 : &self,
6711 0 : record: crate::persistence::SafekeeperPersistence,
6712 0 : ) -> Result<(), DatabaseError> {
6713 0 : self.persistence.safekeeper_upsert(record).await
6714 0 : }
6715 :
6716 0 : pub(crate) async fn update_shards_preferred_azs(
6717 0 : &self,
6718 0 : req: ShardsPreferredAzsRequest,
6719 0 : ) -> Result<ShardsPreferredAzsResponse, ApiError> {
6720 0 : let preferred_azs = req.preferred_az_ids.into_iter().collect::<Vec<_>>();
6721 0 : let updated = self
6722 0 : .persistence
6723 0 : .set_tenant_shard_preferred_azs(preferred_azs)
6724 0 : .await
6725 0 : .map_err(|err| {
6726 0 : ApiError::InternalServerError(anyhow::anyhow!(
6727 0 : "Failed to persist preferred AZs: {err}"
6728 0 : ))
6729 0 : })?;
6730 :
6731 0 : let mut updated_in_mem_and_db = Vec::default();
6732 0 :
6733 0 : let mut locked = self.inner.write().unwrap();
6734 0 : for (tid, az_id) in updated {
6735 0 : let shard = locked.tenants.get_mut(&tid);
6736 0 : if let Some(shard) = shard {
6737 0 : shard.set_preferred_az(az_id);
6738 0 : updated_in_mem_and_db.push(tid);
6739 0 : }
6740 : }
6741 :
6742 0 : Ok(ShardsPreferredAzsResponse {
6743 0 : updated: updated_in_mem_and_db,
6744 0 : })
6745 0 : }
6746 : }
|