Line data Source code
1 : use std::{
2 : borrow::Cow,
3 : cmp::Ordering,
4 : collections::{BTreeMap, HashMap, HashSet},
5 : str::FromStr,
6 : sync::Arc,
7 : time::{Duration, Instant},
8 : };
9 :
10 : use crate::{
11 : compute_hook::NotifyError,
12 : id_lock_map::{trace_exclusive_lock, trace_shared_lock, IdLockMap, WrappedWriteGuard},
13 : persistence::{AbortShardSplitStatus, TenantFilter},
14 : reconciler::{ReconcileError, ReconcileUnits},
15 : scheduler::{ScheduleContext, ScheduleMode},
16 : tenant_shard::{
17 : MigrateAttachment, ReconcileNeeded, ScheduleOptimization, ScheduleOptimizationAction,
18 : },
19 : };
20 : use anyhow::Context;
21 : use control_plane::storage_controller::{
22 : AttachHookRequest, AttachHookResponse, InspectRequest, InspectResponse,
23 : };
24 : use diesel::result::DatabaseErrorKind;
25 : use futures::{stream::FuturesUnordered, StreamExt};
26 : use itertools::Itertools;
27 : use pageserver_api::{
28 : controller_api::{
29 : NodeAvailability, NodeRegisterRequest, NodeSchedulingPolicy, PlacementPolicy,
30 : ShardSchedulingPolicy, TenantCreateResponse, TenantCreateResponseShard,
31 : TenantDescribeResponse, TenantDescribeResponseShard, TenantLocateResponse,
32 : TenantPolicyRequest, TenantShardMigrateRequest, TenantShardMigrateResponse,
33 : UtilizationScore,
34 : },
35 : models::{SecondaryProgress, TenantConfigRequest},
36 : };
37 : use reqwest::StatusCode;
38 : use tracing::instrument;
39 :
40 : use crate::pageserver_client::PageserverClient;
41 : use pageserver_api::{
42 : models::{
43 : self, LocationConfig, LocationConfigListResponse, LocationConfigMode,
44 : PageserverUtilization, ShardParameters, TenantConfig, TenantCreateRequest,
45 : TenantLocationConfigRequest, TenantLocationConfigResponse, TenantShardLocation,
46 : TenantShardSplitRequest, TenantShardSplitResponse, TenantTimeTravelRequest,
47 : TimelineCreateRequest, TimelineInfo,
48 : },
49 : shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId},
50 : upcall_api::{
51 : ReAttachRequest, ReAttachResponse, ReAttachResponseTenant, ValidateRequest,
52 : ValidateResponse, ValidateResponseTenant,
53 : },
54 : };
55 : use pageserver_client::mgmt_api;
56 : use tokio::sync::mpsc::error::TrySendError;
57 : use tokio_util::sync::CancellationToken;
58 : use utils::{
59 : completion::Barrier,
60 : failpoint_support,
61 : generation::Generation,
62 : http::error::ApiError,
63 : id::{NodeId, TenantId, TimelineId},
64 : sync::gate::Gate,
65 : };
66 :
67 : use crate::{
68 : compute_hook::ComputeHook,
69 : heartbeater::{Heartbeater, PageserverState},
70 : node::{AvailabilityTransition, Node},
71 : persistence::{split_state::SplitState, DatabaseError, Persistence, TenantShardPersistence},
72 : reconciler::attached_location_conf,
73 : scheduler::Scheduler,
74 : tenant_shard::{
75 : IntentState, ObservedState, ObservedStateLocation, ReconcileResult, ReconcileWaitError,
76 : ReconcilerWaiter, TenantShard,
77 : },
78 : };
79 :
80 : // For operations that should be quick, like attaching a new tenant
81 : const SHORT_RECONCILE_TIMEOUT: Duration = Duration::from_secs(5);
82 :
83 : // For operations that might be slow, like migrating a tenant with
84 : // some data in it.
85 : pub const RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
86 :
87 : // If we receive a call using Secondary mode initially, it will omit generation. We will initialize
88 : // tenant shards into this generation, and as long as it remains in this generation, we will accept
89 : // input generation from future requests as authoritative.
90 : const INITIAL_GENERATION: Generation = Generation::new(0);
91 :
92 : /// How long [`Service::startup_reconcile`] is allowed to take before it should give
93 : /// up on unresponsive pageservers and proceed.
94 : pub(crate) const STARTUP_RECONCILE_TIMEOUT: Duration = Duration::from_secs(30);
95 :
96 : /// How long a node may be unresponsive to heartbeats before we declare it offline.
97 : /// This must be long enough to cover node restarts as well as normal operations: in future
98 : /// it should be separated into distinct timeouts for startup vs. normal operation
99 : /// (`<https://github.com/neondatabase/neon/issues/7552>`)
100 : pub const MAX_UNAVAILABLE_INTERVAL_DEFAULT: Duration = Duration::from_secs(300);
101 :
102 0 : #[derive(Clone, strum_macros::Display)]
103 : enum TenantOperations {
104 : Create,
105 : LocationConfig,
106 : ConfigSet,
107 : TimeTravelRemoteStorage,
108 : Delete,
109 : UpdatePolicy,
110 : ShardSplit,
111 : SecondaryDownload,
112 : TimelineCreate,
113 : TimelineDelete,
114 : }
115 :
116 0 : #[derive(Clone, strum_macros::Display)]
117 : enum NodeOperations {
118 : Register,
119 : Configure,
120 : }
121 :
122 : pub const RECONCILER_CONCURRENCY_DEFAULT: usize = 128;
123 :
124 : // Depth of the channel used to enqueue shards for reconciliation when they can't do it immediately.
125 : // This channel is finite-size to avoid using excessive memory if we get into a state where reconciles are finishing more slowly
126 : // than they're being pushed onto the queue.
127 : const MAX_DELAYED_RECONCILES: usize = 10000;
128 :
129 : // Top level state available to all HTTP handlers
130 : struct ServiceState {
131 : tenants: BTreeMap<TenantShardId, TenantShard>,
132 :
133 : nodes: Arc<HashMap<NodeId, Node>>,
134 :
135 : scheduler: Scheduler,
136 :
137 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
138 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
139 : }
140 :
141 : /// Transform an error from a pageserver into an error to return to callers of a storage
142 : /// controller API.
143 0 : fn passthrough_api_error(node: &Node, e: mgmt_api::Error) -> ApiError {
144 0 : match e {
145 0 : mgmt_api::Error::ReceiveErrorBody(str) => {
146 0 : // Presume errors receiving body are connectivity/availability issues
147 0 : ApiError::ResourceUnavailable(
148 0 : format!("{node} error receiving error body: {str}").into(),
149 0 : )
150 : }
151 0 : mgmt_api::Error::ReceiveBody(str) => {
152 0 : // Presume errors receiving body are connectivity/availability issues
153 0 : ApiError::ResourceUnavailable(format!("{node} error receiving body: {str}").into())
154 : }
155 0 : mgmt_api::Error::ApiError(StatusCode::NOT_FOUND, msg) => {
156 0 : ApiError::NotFound(anyhow::anyhow!(format!("{node}: {msg}")).into())
157 : }
158 0 : mgmt_api::Error::ApiError(StatusCode::SERVICE_UNAVAILABLE, msg) => {
159 0 : ApiError::ResourceUnavailable(format!("{node}: {msg}").into())
160 : }
161 0 : mgmt_api::Error::ApiError(status @ StatusCode::UNAUTHORIZED, msg)
162 0 : | mgmt_api::Error::ApiError(status @ StatusCode::FORBIDDEN, msg) => {
163 : // Auth errors talking to a pageserver are not auth errors for the caller: they are
164 : // internal server errors, showing that something is wrong with the pageserver or
165 : // storage controller's auth configuration.
166 0 : ApiError::InternalServerError(anyhow::anyhow!("{node} {status}: {msg}"))
167 : }
168 0 : mgmt_api::Error::ApiError(status, msg) => {
169 0 : // Presume general case of pageserver API errors is that we tried to do something
170 0 : // that can't be done right now.
171 0 : ApiError::Conflict(format!("{node} {status}: {status} {msg}"))
172 : }
173 0 : mgmt_api::Error::Cancelled => ApiError::ShuttingDown,
174 : }
175 0 : }
176 :
177 : impl ServiceState {
178 0 : fn new(
179 0 : nodes: HashMap<NodeId, Node>,
180 0 : tenants: BTreeMap<TenantShardId, TenantShard>,
181 0 : scheduler: Scheduler,
182 0 : delayed_reconcile_rx: tokio::sync::mpsc::Receiver<TenantShardId>,
183 0 : ) -> Self {
184 0 : Self {
185 0 : tenants,
186 0 : nodes: Arc::new(nodes),
187 0 : scheduler,
188 0 : delayed_reconcile_rx,
189 0 : }
190 0 : }
191 :
192 0 : fn parts_mut(
193 0 : &mut self,
194 0 : ) -> (
195 0 : &mut Arc<HashMap<NodeId, Node>>,
196 0 : &mut BTreeMap<TenantShardId, TenantShard>,
197 0 : &mut Scheduler,
198 0 : ) {
199 0 : (&mut self.nodes, &mut self.tenants, &mut self.scheduler)
200 0 : }
201 : }
202 :
203 : #[derive(Clone)]
204 : pub struct Config {
205 : // All pageservers managed by one instance of this service must have
206 : // the same public key. This JWT token will be used to authenticate
207 : // this service to the pageservers it manages.
208 : pub jwt_token: Option<String>,
209 :
210 : // This JWT token will be used to authenticate this service to the control plane.
211 : pub control_plane_jwt_token: Option<String>,
212 :
213 : /// Where the compute hook should send notifications of pageserver attachment locations
214 : /// (this URL points to the control plane in prod). If this is None, the compute hook will
215 : /// assume it is running in a test environment and try to update neon_local.
216 : pub compute_hook_url: Option<String>,
217 :
218 : /// Grace period within which a pageserver does not respond to heartbeats, but is still
219 : /// considered active. Once the grace period elapses, the next heartbeat failure will
220 : /// mark the pagseserver offline.
221 : pub max_unavailable_interval: Duration,
222 :
223 : /// How many Reconcilers may be spawned concurrently
224 : pub reconciler_concurrency: usize,
225 : }
226 :
227 : impl From<DatabaseError> for ApiError {
228 0 : fn from(err: DatabaseError) -> ApiError {
229 0 : match err {
230 0 : DatabaseError::Query(e) => ApiError::InternalServerError(e.into()),
231 : // FIXME: ApiError doesn't have an Unavailable variant, but ShuttingDown maps to 503.
232 : DatabaseError::Connection(_) | DatabaseError::ConnectionPool(_) => {
233 0 : ApiError::ShuttingDown
234 : }
235 0 : DatabaseError::Logical(reason) => {
236 0 : ApiError::InternalServerError(anyhow::anyhow!(reason))
237 : }
238 : }
239 0 : }
240 : }
241 :
242 : pub struct Service {
243 : inner: Arc<std::sync::RwLock<ServiceState>>,
244 : config: Config,
245 : persistence: Arc<Persistence>,
246 : compute_hook: Arc<ComputeHook>,
247 : result_tx: tokio::sync::mpsc::UnboundedSender<ReconcileResult>,
248 :
249 : heartbeater: Heartbeater,
250 :
251 : // Channel for background cleanup from failed operations that require cleanup, such as shard split
252 : abort_tx: tokio::sync::mpsc::UnboundedSender<TenantShardSplitAbort>,
253 :
254 : // Locking on a tenant granularity (covers all shards in the tenant):
255 : // - Take exclusively for rare operations that mutate the tenant's persistent state (e.g. create/delete/split)
256 : // - Take in shared mode for operations that need the set of shards to stay the same to complete reliably (e.g. timeline CRUD)
257 : tenant_op_locks: IdLockMap<TenantId, TenantOperations>,
258 :
259 : // Locking for node-mutating operations: take exclusively for operations that modify the node's persistent state, or
260 : // that transition it to/from Active.
261 : node_op_locks: IdLockMap<NodeId, NodeOperations>,
262 :
263 : // Limit how many Reconcilers we will spawn concurrently
264 : reconciler_concurrency: Arc<tokio::sync::Semaphore>,
265 :
266 : /// Queue of tenants who are waiting for concurrency limits to permit them to reconcile
267 : /// Send into this queue to promptly attempt to reconcile this shard next time units are available.
268 : ///
269 : /// Note that this state logically lives inside ServiceInner, but carrying Sender here makes the code simpler
270 : /// by avoiding needing a &mut ref to something inside the ServiceInner. This could be optimized to
271 : /// use a VecDeque instead of a channel to reduce synchronization overhead, at the cost of some code complexity.
272 : delayed_reconcile_tx: tokio::sync::mpsc::Sender<TenantShardId>,
273 :
274 : // Process shutdown will fire this token
275 : cancel: CancellationToken,
276 :
277 : // Background tasks will hold this gate
278 : gate: Gate,
279 :
280 : /// This waits for initial reconciliation with pageservers to complete. Until this barrier
281 : /// passes, it isn't safe to do any actions that mutate tenants.
282 : pub(crate) startup_complete: Barrier,
283 : }
284 :
285 : impl From<ReconcileWaitError> for ApiError {
286 0 : fn from(value: ReconcileWaitError) -> Self {
287 0 : match value {
288 0 : ReconcileWaitError::Shutdown => ApiError::ShuttingDown,
289 0 : e @ ReconcileWaitError::Timeout(_) => ApiError::Timeout(format!("{e}").into()),
290 0 : e @ ReconcileWaitError::Failed(..) => ApiError::InternalServerError(anyhow::anyhow!(e)),
291 : }
292 0 : }
293 : }
294 :
295 : #[allow(clippy::large_enum_variant)]
296 : enum TenantCreateOrUpdate {
297 : Create(TenantCreateRequest),
298 : Update(Vec<ShardUpdate>),
299 : }
300 :
301 : struct ShardSplitParams {
302 : old_shard_count: ShardCount,
303 : new_shard_count: ShardCount,
304 : new_stripe_size: Option<ShardStripeSize>,
305 : targets: Vec<ShardSplitTarget>,
306 : policy: PlacementPolicy,
307 : config: TenantConfig,
308 : shard_ident: ShardIdentity,
309 : }
310 :
311 : // When preparing for a shard split, we may either choose to proceed with the split,
312 : // or find that the work is already done and return NoOp.
313 : enum ShardSplitAction {
314 : Split(ShardSplitParams),
315 : NoOp(TenantShardSplitResponse),
316 : }
317 :
318 : // A parent shard which will be split
319 : struct ShardSplitTarget {
320 : parent_id: TenantShardId,
321 : node: Node,
322 : child_ids: Vec<TenantShardId>,
323 : }
324 :
325 : /// When we tenant shard split operation fails, we may not be able to clean up immediately, because nodes
326 : /// might not be available. We therefore use a queue of abort operations processed in the background.
327 : struct TenantShardSplitAbort {
328 : tenant_id: TenantId,
329 : /// The target values from the request that failed
330 : new_shard_count: ShardCount,
331 : new_stripe_size: Option<ShardStripeSize>,
332 : /// Until this abort op is complete, no other operations may be done on the tenant
333 : _tenant_lock: WrappedWriteGuard<TenantOperations>,
334 : }
335 :
336 0 : #[derive(thiserror::Error, Debug)]
337 : enum TenantShardSplitAbortError {
338 : #[error(transparent)]
339 : Database(#[from] DatabaseError),
340 : #[error(transparent)]
341 : Remote(#[from] mgmt_api::Error),
342 : #[error("Unavailable")]
343 : Unavailable,
344 : }
345 :
346 : struct ShardUpdate {
347 : tenant_shard_id: TenantShardId,
348 : placement_policy: PlacementPolicy,
349 : tenant_config: TenantConfig,
350 :
351 : /// If this is None, generation is not updated.
352 : generation: Option<Generation>,
353 : }
354 :
355 : impl Service {
356 0 : pub fn get_config(&self) -> &Config {
357 0 : &self.config
358 0 : }
359 :
360 : /// Called once on startup, this function attempts to contact all pageservers to build an up-to-date
361 : /// view of the world, and determine which pageservers are responsive.
362 0 : #[instrument(skip_all)]
363 : async fn startup_reconcile(
364 : self: &Arc<Service>,
365 : bg_compute_notify_result_tx: tokio::sync::mpsc::Sender<
366 : Result<(), (TenantShardId, NotifyError)>,
367 : >,
368 : ) {
369 : // For all tenant shards, a vector of observed states on nodes (where None means
370 : // indeterminate, same as in [`ObservedStateLocation`])
371 : let mut observed: HashMap<TenantShardId, Vec<(NodeId, Option<LocationConfig>)>> =
372 : HashMap::new();
373 :
374 : // Startup reconciliation does I/O to other services: whether they
375 : // are responsive or not, we should aim to finish within our deadline, because:
376 : // - If we don't, a k8s readiness hook watching /ready will kill us.
377 : // - While we're waiting for startup reconciliation, we are not fully
378 : // available for end user operations like creating/deleting tenants and timelines.
379 : //
380 : // We set multiple deadlines to break up the time available between the phases of work: this is
381 : // arbitrary, but avoids a situation where the first phase could burn our entire timeout period.
382 : let start_at = Instant::now();
383 : let node_scan_deadline = start_at
384 : .checked_add(STARTUP_RECONCILE_TIMEOUT / 2)
385 : .expect("Reconcile timeout is a modest constant");
386 :
387 : // Accumulate a list of any tenant locations that ought to be detached
388 : let mut cleanup = Vec::new();
389 :
390 : let node_listings = self.scan_node_locations(node_scan_deadline).await;
391 : // Send initial heartbeat requests to nodes that replied to the location listing above.
392 : let nodes_online = self.initial_heartbeat_round(node_listings.keys()).await;
393 :
394 : for (node_id, list_response) in node_listings {
395 : let tenant_shards = list_response.tenant_shards;
396 : tracing::info!(
397 : "Received {} shard statuses from pageserver {}, setting it to Active",
398 : tenant_shards.len(),
399 : node_id
400 : );
401 :
402 : for (tenant_shard_id, conf_opt) in tenant_shards {
403 : let shard_observations = observed.entry(tenant_shard_id).or_default();
404 : shard_observations.push((node_id, conf_opt));
405 : }
406 : }
407 :
408 : // List of tenants for which we will attempt to notify compute of their location at startup
409 : let mut compute_notifications = Vec::new();
410 :
411 : // Populate intent and observed states for all tenants, based on reported state on pageservers
412 : tracing::info!("Populating tenant shards' states from initial pageserver scan...");
413 : let shard_count = {
414 : let mut locked = self.inner.write().unwrap();
415 : let (nodes, tenants, scheduler) = locked.parts_mut();
416 :
417 : // Mark nodes online if they responded to us: nodes are offline by default after a restart.
418 : let mut new_nodes = (**nodes).clone();
419 : for (node_id, node) in new_nodes.iter_mut() {
420 : if let Some(utilization) = nodes_online.get(node_id) {
421 : node.set_availability(NodeAvailability::Active(UtilizationScore(
422 : utilization.utilization_score,
423 : )));
424 : scheduler.node_upsert(node);
425 : }
426 : }
427 : *nodes = Arc::new(new_nodes);
428 :
429 : for (tenant_shard_id, shard_observations) in observed {
430 : for (node_id, observed_loc) in shard_observations {
431 : let Some(tenant_shard) = tenants.get_mut(&tenant_shard_id) else {
432 : cleanup.push((tenant_shard_id, node_id));
433 : continue;
434 : };
435 : tenant_shard
436 : .observed
437 : .locations
438 : .insert(node_id, ObservedStateLocation { conf: observed_loc });
439 : }
440 : }
441 :
442 : // Populate each tenant's intent state
443 : let mut schedule_context = ScheduleContext::default();
444 : for (tenant_shard_id, tenant_shard) in tenants.iter_mut() {
445 : if tenant_shard_id.shard_number == ShardNumber(0) {
446 : // Reset scheduling context each time we advance to the next Tenant
447 : schedule_context = ScheduleContext::default();
448 : }
449 :
450 : tenant_shard.intent_from_observed(scheduler);
451 : if let Err(e) = tenant_shard.schedule(scheduler, &mut schedule_context) {
452 : // Non-fatal error: we are unable to properly schedule the tenant, perhaps because
453 : // not enough pageservers are available. The tenant may well still be available
454 : // to clients.
455 : tracing::error!("Failed to schedule tenant {tenant_shard_id} at startup: {e}");
456 : } else {
457 : // If we're both intending and observed to be attached at a particular node, we will
458 : // emit a compute notification for this. In the case where our observed state does not
459 : // yet match our intent, we will eventually reconcile, and that will emit a compute notification.
460 : if let Some(attached_at) = tenant_shard.stably_attached() {
461 : compute_notifications.push((
462 : *tenant_shard_id,
463 : attached_at,
464 : tenant_shard.shard.stripe_size,
465 : ));
466 : }
467 : }
468 : }
469 :
470 : tenants.len()
471 : };
472 :
473 : // TODO: if any tenant's intent now differs from its loaded generation_pageserver, we should clear that
474 : // generation_pageserver in the database.
475 :
476 : // Emit compute hook notifications for all tenants which are already stably attached. Other tenants
477 : // will emit compute hook notifications when they reconcile.
478 : //
479 : // Ordering: our calls to notify_background synchronously establish a relative order for these notifications vs. any later
480 : // calls into the ComputeHook for the same tenant: we can leave these to run to completion in the background and any later
481 : // calls will be correctly ordered wrt these.
482 : //
483 : // Concurrency: we call notify_background for all tenants, which will create O(N) tokio tasks, but almost all of them
484 : // will just wait on the ComputeHook::API_CONCURRENCY semaphore immediately, so very cheap until they get that semaphore
485 : // unit and start doing I/O.
486 : tracing::info!(
487 : "Sending {} compute notifications",
488 : compute_notifications.len()
489 : );
490 : self.compute_hook.notify_background(
491 : compute_notifications,
492 : bg_compute_notify_result_tx.clone(),
493 : &self.cancel,
494 : );
495 :
496 : // Finally, now that the service is up and running, launch reconcile operations for any tenants
497 : // which require it: under normal circumstances this should only include tenants that were in some
498 : // transient state before we restarted, or any tenants whose compute hooks failed above.
499 : tracing::info!("Checking for shards in need of reconciliation...");
500 : let reconcile_tasks = self.reconcile_all();
501 : // We will not wait for these reconciliation tasks to run here: we're now done with startup and
502 : // normal operations may proceed.
503 :
504 : // Clean up any tenants that were found on pageservers but are not known to us. Do this in the
505 : // background because it does not need to complete in order to proceed with other work.
506 : if !cleanup.is_empty() {
507 : tracing::info!("Cleaning up {} locations in the background", cleanup.len());
508 : tokio::task::spawn({
509 : let cleanup_self = self.clone();
510 0 : async move { cleanup_self.cleanup_locations(cleanup).await }
511 : });
512 : }
513 :
514 : tracing::info!("Startup complete, spawned {reconcile_tasks} reconciliation tasks ({shard_count} shards total)");
515 : }
516 :
517 0 : async fn initial_heartbeat_round<'a>(
518 0 : &self,
519 0 : node_ids: impl Iterator<Item = &'a NodeId>,
520 0 : ) -> HashMap<NodeId, PageserverUtilization> {
521 0 : assert!(!self.startup_complete.is_ready());
522 :
523 0 : let all_nodes = {
524 0 : let locked = self.inner.read().unwrap();
525 0 : locked.nodes.clone()
526 0 : };
527 0 :
528 0 : let mut nodes_to_heartbeat = HashMap::new();
529 0 : for node_id in node_ids {
530 0 : match all_nodes.get(node_id) {
531 0 : Some(node) => {
532 0 : nodes_to_heartbeat.insert(*node_id, node.clone());
533 0 : }
534 : None => {
535 0 : tracing::warn!("Node {node_id} was removed during start-up");
536 : }
537 : }
538 : }
539 :
540 0 : tracing::info!("Sending initial heartbeats...");
541 0 : let res = self
542 0 : .heartbeater
543 0 : .heartbeat(Arc::new(nodes_to_heartbeat))
544 0 : .await;
545 :
546 0 : let mut online_nodes = HashMap::new();
547 0 : if let Ok(deltas) = res {
548 0 : for (node_id, status) in deltas.0 {
549 0 : match status {
550 0 : PageserverState::Available { utilization, .. } => {
551 0 : online_nodes.insert(node_id, utilization);
552 0 : }
553 0 : PageserverState::Offline => {}
554 : }
555 : }
556 0 : }
557 :
558 0 : online_nodes
559 0 : }
560 :
561 : /// Used during [`Self::startup_reconcile`]: issue GETs to all nodes concurrently, with a deadline.
562 : ///
563 : /// The result includes only nodes which responded within the deadline
564 0 : async fn scan_node_locations(
565 0 : &self,
566 0 : deadline: Instant,
567 0 : ) -> HashMap<NodeId, LocationConfigListResponse> {
568 0 : let nodes = {
569 0 : let locked = self.inner.read().unwrap();
570 0 : locked.nodes.clone()
571 0 : };
572 0 :
573 0 : let mut node_results = HashMap::new();
574 0 :
575 0 : let mut node_list_futs = FuturesUnordered::new();
576 0 :
577 0 : tracing::info!("Scanning shards on {} nodes...", nodes.len());
578 0 : for node in nodes.values() {
579 0 : node_list_futs.push({
580 0 : async move {
581 0 : tracing::info!("Scanning shards on node {node}...");
582 0 : let timeout = Duration::from_secs(1);
583 0 : let response = node
584 0 : .with_client_retries(
585 0 : |client| async move { client.list_location_config().await },
586 0 : &self.config.jwt_token,
587 0 : 1,
588 0 : 5,
589 0 : timeout,
590 0 : &self.cancel,
591 0 : )
592 0 : .await;
593 0 : (node.get_id(), response)
594 0 : }
595 0 : });
596 0 : }
597 :
598 : loop {
599 0 : let (node_id, result) = tokio::select! {
600 : next = node_list_futs.next() => {
601 : match next {
602 : Some(result) => result,
603 : None =>{
604 : // We got results for all our nodes
605 : break;
606 : }
607 :
608 : }
609 : },
610 : _ = tokio::time::sleep(deadline.duration_since(Instant::now())) => {
611 : // Give up waiting for anyone who hasn't responded: we will yield the results that we have
612 : tracing::info!("Reached deadline while waiting for nodes to respond to location listing requests");
613 : break;
614 : }
615 : };
616 :
617 0 : let Some(list_response) = result else {
618 0 : tracing::info!("Shutdown during startup_reconcile");
619 0 : break;
620 : };
621 :
622 0 : match list_response {
623 0 : Err(e) => {
624 0 : tracing::warn!("Could not scan node {} ({e})", node_id);
625 : }
626 0 : Ok(listing) => {
627 0 : node_results.insert(node_id, listing);
628 0 : }
629 : }
630 : }
631 :
632 0 : node_results
633 0 : }
634 :
635 : /// Used during [`Self::startup_reconcile`]: detach a list of unknown-to-us tenants from pageservers.
636 : ///
637 : /// This is safe to run in the background, because if we don't have this TenantShardId in our map of
638 : /// tenants, then it is probably something incompletely deleted before: we will not fight with any
639 : /// other task trying to attach it.
640 0 : #[instrument(skip_all)]
641 : async fn cleanup_locations(&self, cleanup: Vec<(TenantShardId, NodeId)>) {
642 : let nodes = self.inner.read().unwrap().nodes.clone();
643 :
644 : for (tenant_shard_id, node_id) in cleanup {
645 : // A node reported a tenant_shard_id which is unknown to us: detach it.
646 : let Some(node) = nodes.get(&node_id) else {
647 : // This is legitimate; we run in the background and [`Self::startup_reconcile`] might have identified
648 : // a location to clean up on a node that has since been removed.
649 : tracing::info!(
650 : "Not cleaning up location {node_id}/{tenant_shard_id}: node not found"
651 : );
652 : continue;
653 : };
654 :
655 : if self.cancel.is_cancelled() {
656 : break;
657 : }
658 :
659 : let client = PageserverClient::new(
660 : node.get_id(),
661 : node.base_url(),
662 : self.config.jwt_token.as_deref(),
663 : );
664 : match client
665 : .location_config(
666 : tenant_shard_id,
667 : LocationConfig {
668 : mode: LocationConfigMode::Detached,
669 : generation: None,
670 : secondary_conf: None,
671 : shard_number: tenant_shard_id.shard_number.0,
672 : shard_count: tenant_shard_id.shard_count.literal(),
673 : shard_stripe_size: 0,
674 : tenant_conf: models::TenantConfig::default(),
675 : },
676 : None,
677 : false,
678 : )
679 : .await
680 : {
681 : Ok(()) => {
682 : tracing::info!(
683 : "Detached unknown shard {tenant_shard_id} on pageserver {node_id}"
684 : );
685 : }
686 : Err(e) => {
687 : // Non-fatal error: leaving a tenant shard behind that we are not managing shouldn't
688 : // break anything.
689 : tracing::error!(
690 : "Failed to detach unknkown shard {tenant_shard_id} on pageserver {node_id}: {e}"
691 : );
692 : }
693 : }
694 : }
695 : }
696 :
697 : /// Long running background task that periodically wakes up and looks for shards that need
698 : /// reconciliation. Reconciliation is fallible, so any reconciliation tasks that fail during
699 : /// e.g. a tenant create/attach/migrate must eventually be retried: this task is responsible
700 : /// for those retries.
701 0 : #[instrument(skip_all)]
702 : async fn background_reconcile(&self) {
703 : self.startup_complete.clone().wait().await;
704 :
705 : const BACKGROUND_RECONCILE_PERIOD: Duration = Duration::from_secs(20);
706 :
707 : let mut interval = tokio::time::interval(BACKGROUND_RECONCILE_PERIOD);
708 : while !self.cancel.is_cancelled() {
709 : tokio::select! {
710 : _ = interval.tick() => {
711 : let reconciles_spawned = self.reconcile_all();
712 : if reconciles_spawned == 0 {
713 : // Run optimizer only when we didn't find any other work to do
714 : self.optimize_all().await;
715 : }
716 : }
717 : _ = self.cancel.cancelled() => return
718 : }
719 : }
720 : }
721 0 : #[instrument(skip_all)]
722 : async fn spawn_heartbeat_driver(&self) {
723 : self.startup_complete.clone().wait().await;
724 :
725 : const HEARTBEAT_INTERVAL: Duration = Duration::from_secs(5);
726 :
727 : let mut interval = tokio::time::interval(HEARTBEAT_INTERVAL);
728 : while !self.cancel.is_cancelled() {
729 : tokio::select! {
730 : _ = interval.tick() => { }
731 : _ = self.cancel.cancelled() => return
732 : };
733 :
734 : let nodes = {
735 : let locked = self.inner.read().unwrap();
736 : locked.nodes.clone()
737 : };
738 :
739 : let res = self.heartbeater.heartbeat(nodes).await;
740 : if let Ok(deltas) = res {
741 : for (node_id, state) in deltas.0 {
742 : let new_availability = match state {
743 : PageserverState::Available { utilization, .. } => NodeAvailability::Active(
744 : UtilizationScore(utilization.utilization_score),
745 : ),
746 : PageserverState::Offline => NodeAvailability::Offline,
747 : };
748 : let res = self
749 : .node_configure(node_id, Some(new_availability), None)
750 : .await;
751 :
752 : match res {
753 : Ok(()) => {}
754 : Err(ApiError::NotFound(_)) => {
755 : // This should be rare, but legitimate since the heartbeats are done
756 : // on a snapshot of the nodes.
757 : tracing::info!("Node {} was not found after heartbeat round", node_id);
758 : }
759 : Err(err) => {
760 : tracing::error!(
761 : "Failed to update node {} after heartbeat round: {}",
762 : node_id,
763 : err
764 : );
765 : }
766 : }
767 : }
768 : }
769 : }
770 : }
771 :
772 : /// Apply the contents of a [`ReconcileResult`] to our in-memory state: if the reconciliation
773 : /// was successful and intent hasn't changed since the Reconciler was spawned, this will update
774 : /// the observed state of the tenant such that subsequent calls to [`TenantShard::get_reconcile_needed`]
775 : /// will indicate that reconciliation is not needed.
776 0 : #[instrument(skip_all, fields(
777 : tenant_id=%result.tenant_shard_id.tenant_id, shard_id=%result.tenant_shard_id.shard_slug(),
778 : sequence=%result.sequence
779 0 : ))]
780 : fn process_result(&self, result: ReconcileResult) {
781 : let mut locked = self.inner.write().unwrap();
782 : let Some(tenant) = locked.tenants.get_mut(&result.tenant_shard_id) else {
783 : // A reconciliation result might race with removing a tenant: drop results for
784 : // tenants that aren't in our map.
785 : return;
786 : };
787 :
788 : // Usually generation should only be updated via this path, so the max() isn't
789 : // needed, but it is used to handle out-of-band updates via. e.g. test hook.
790 : tenant.generation = std::cmp::max(tenant.generation, result.generation);
791 :
792 : // If the reconciler signals that it failed to notify compute, set this state on
793 : // the shard so that a future [`TenantShard::maybe_reconcile`] will try again.
794 : tenant.pending_compute_notification = result.pending_compute_notification;
795 :
796 : // Let the TenantShard know it is idle.
797 : tenant.reconcile_complete(result.sequence);
798 :
799 : match result.result {
800 : Ok(()) => {
801 : for (node_id, loc) in &result.observed.locations {
802 : if let Some(conf) = &loc.conf {
803 : tracing::info!("Updating observed location {}: {:?}", node_id, conf);
804 : } else {
805 : tracing::info!("Setting observed location {} to None", node_id,)
806 : }
807 : }
808 : tenant.observed = result.observed;
809 : tenant.waiter.advance(result.sequence);
810 : }
811 : Err(e) => {
812 : match e {
813 : ReconcileError::Cancel => {
814 : tracing::info!("Reconciler was cancelled");
815 : }
816 : ReconcileError::Remote(mgmt_api::Error::Cancelled) => {
817 : // This might be due to the reconciler getting cancelled, or it might
818 : // be due to the `Node` being marked offline.
819 : tracing::info!("Reconciler cancelled during pageserver API call");
820 : }
821 : _ => {
822 : tracing::warn!("Reconcile error: {}", e);
823 : }
824 : }
825 :
826 : // Ordering: populate last_error before advancing error_seq,
827 : // so that waiters will see the correct error after waiting.
828 : tenant.set_last_error(result.sequence, e);
829 :
830 : for (node_id, o) in result.observed.locations {
831 : tenant.observed.locations.insert(node_id, o);
832 : }
833 : }
834 : }
835 :
836 : // Maybe some other work can proceed now that this job finished.
837 : if self.reconciler_concurrency.available_permits() > 0 {
838 : while let Ok(tenant_shard_id) = locked.delayed_reconcile_rx.try_recv() {
839 : let (nodes, tenants, _scheduler) = locked.parts_mut();
840 : if let Some(shard) = tenants.get_mut(&tenant_shard_id) {
841 : shard.delayed_reconcile = false;
842 : self.maybe_reconcile_shard(shard, nodes);
843 : }
844 :
845 : if self.reconciler_concurrency.available_permits() == 0 {
846 : break;
847 : }
848 : }
849 : }
850 : }
851 :
852 0 : async fn process_results(
853 0 : &self,
854 0 : mut result_rx: tokio::sync::mpsc::UnboundedReceiver<ReconcileResult>,
855 0 : mut bg_compute_hook_result_rx: tokio::sync::mpsc::Receiver<
856 0 : Result<(), (TenantShardId, NotifyError)>,
857 0 : >,
858 0 : ) {
859 0 : loop {
860 0 : // Wait for the next result, or for cancellation
861 0 : tokio::select! {
862 : r = result_rx.recv() => {
863 : match r {
864 : Some(result) => {self.process_result(result);},
865 : None => {break;}
866 : }
867 : }
868 0 : _ = async{
869 0 : match bg_compute_hook_result_rx.recv().await {
870 0 : Some(result) => {
871 0 : if let Err((tenant_shard_id, notify_error)) = result {
872 0 : tracing::warn!("Marking shard {tenant_shard_id} for notification retry, due to error {notify_error}");
873 0 : let mut locked = self.inner.write().unwrap();
874 0 : if let Some(shard) = locked.tenants.get_mut(&tenant_shard_id) {
875 0 : shard.pending_compute_notification = true;
876 0 : }
877 :
878 0 : }
879 : },
880 : None => {
881 : // This channel is dead, but we don't want to terminate the outer loop{}: just wait for shutdown
882 0 : self.cancel.cancelled().await;
883 : }
884 : }
885 0 : } => {},
886 : _ = self.cancel.cancelled() => {
887 : break;
888 : }
889 0 : };
890 0 : }
891 :
892 : // We should only fall through on shutdown
893 0 : assert!(self.cancel.is_cancelled());
894 0 : }
895 :
896 0 : async fn process_aborts(
897 0 : &self,
898 0 : mut abort_rx: tokio::sync::mpsc::UnboundedReceiver<TenantShardSplitAbort>,
899 0 : ) {
900 : loop {
901 : // Wait for the next result, or for cancellation
902 0 : let op = tokio::select! {
903 : r = abort_rx.recv() => {
904 : match r {
905 : Some(op) => {op},
906 : None => {break;}
907 : }
908 : }
909 : _ = self.cancel.cancelled() => {
910 : break;
911 : }
912 : };
913 :
914 : // Retry until shutdown: we must keep this request object alive until it is properly
915 : // processed, as it holds a lock guard that prevents other operations trying to do things
916 : // to the tenant while it is in a weird part-split state.
917 0 : while !self.cancel.is_cancelled() {
918 0 : match self.abort_tenant_shard_split(&op).await {
919 0 : Ok(_) => break,
920 0 : Err(e) => {
921 0 : tracing::warn!(
922 0 : "Failed to abort shard split on {}, will retry: {e}",
923 : op.tenant_id
924 : );
925 :
926 : // If a node is unavailable, we hope that it has been properly marked Offline
927 : // when we retry, so that the abort op will succeed. If the abort op is failing
928 : // for some other reason, we will keep retrying forever, or until a human notices
929 : // and does something about it (either fixing a pageserver or restarting the controller).
930 0 : tokio::time::timeout(Duration::from_secs(5), self.cancel.cancelled())
931 0 : .await
932 0 : .ok();
933 : }
934 : }
935 : }
936 : }
937 0 : }
938 :
939 0 : pub async fn spawn(config: Config, persistence: Arc<Persistence>) -> anyhow::Result<Arc<Self>> {
940 0 : let (result_tx, result_rx) = tokio::sync::mpsc::unbounded_channel();
941 0 : let (abort_tx, abort_rx) = tokio::sync::mpsc::unbounded_channel();
942 0 :
943 0 : tracing::info!("Loading nodes from database...");
944 0 : let nodes = persistence
945 0 : .list_nodes()
946 0 : .await?
947 0 : .into_iter()
948 0 : .map(Node::from_persistent)
949 0 : .collect::<Vec<_>>();
950 0 : let nodes: HashMap<NodeId, Node> = nodes.into_iter().map(|n| (n.get_id(), n)).collect();
951 0 : tracing::info!("Loaded {} nodes from database.", nodes.len());
952 :
953 0 : tracing::info!("Loading shards from database...");
954 0 : let mut tenant_shard_persistence = persistence.list_tenant_shards().await?;
955 0 : tracing::info!(
956 0 : "Loaded {} shards from database.",
957 0 : tenant_shard_persistence.len()
958 : );
959 :
960 : // If any shard splits were in progress, reset the database state to abort them
961 0 : let mut tenant_shard_count_min_max: HashMap<TenantId, (ShardCount, ShardCount)> =
962 0 : HashMap::new();
963 0 : for tsp in &mut tenant_shard_persistence {
964 0 : let shard = tsp.get_shard_identity()?;
965 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
966 0 : let entry = tenant_shard_count_min_max
967 0 : .entry(tenant_shard_id.tenant_id)
968 0 : .or_insert_with(|| (shard.count, shard.count));
969 0 : entry.0 = std::cmp::min(entry.0, shard.count);
970 0 : entry.1 = std::cmp::max(entry.1, shard.count);
971 : }
972 :
973 0 : for (tenant_id, (count_min, count_max)) in tenant_shard_count_min_max {
974 0 : if count_min != count_max {
975 : // Aborting the split in the database and dropping the child shards is sufficient: the reconciliation in
976 : // [`Self::startup_reconcile`] will implicitly drop the child shards on remote pageservers, or they'll
977 : // be dropped later in [`Self::node_activate_reconcile`] if it isn't available right now.
978 0 : tracing::info!("Aborting shard split {tenant_id} {count_min:?} -> {count_max:?}");
979 0 : let abort_status = persistence.abort_shard_split(tenant_id, count_max).await?;
980 :
981 : // We may never see the Complete status here: if the split was complete, we wouldn't have
982 : // identified this tenant has having mismatching min/max counts.
983 0 : assert!(matches!(abort_status, AbortShardSplitStatus::Aborted));
984 :
985 : // Clear the splitting status in-memory, to reflect that we just aborted in the database
986 0 : tenant_shard_persistence.iter_mut().for_each(|tsp| {
987 0 : // Set idle split state on those shards that we will retain.
988 0 : let tsp_tenant_id = TenantId::from_str(tsp.tenant_id.as_str()).unwrap();
989 0 : if tsp_tenant_id == tenant_id
990 0 : && tsp.get_shard_identity().unwrap().count == count_min
991 0 : {
992 0 : tsp.splitting = SplitState::Idle;
993 0 : } else if tsp_tenant_id == tenant_id {
994 : // Leave the splitting state on the child shards: this will be used next to
995 : // drop them.
996 0 : tracing::info!(
997 0 : "Shard {tsp_tenant_id} will be dropped after shard split abort",
998 : );
999 0 : }
1000 0 : });
1001 0 :
1002 0 : // Drop shards for this tenant which we didn't just mark idle (i.e. child shards of the aborted split)
1003 0 : tenant_shard_persistence.retain(|tsp| {
1004 0 : TenantId::from_str(tsp.tenant_id.as_str()).unwrap() != tenant_id
1005 0 : || tsp.splitting == SplitState::Idle
1006 0 : });
1007 0 : }
1008 : }
1009 :
1010 0 : let mut tenants = BTreeMap::new();
1011 0 :
1012 0 : let mut scheduler = Scheduler::new(nodes.values());
1013 0 :
1014 0 : #[cfg(feature = "testing")]
1015 0 : {
1016 0 : // Hack: insert scheduler state for all nodes referenced by shards, as compatibility
1017 0 : // tests only store the shards, not the nodes. The nodes will be loaded shortly
1018 0 : // after when pageservers start up and register.
1019 0 : let mut node_ids = HashSet::new();
1020 0 : for tsp in &tenant_shard_persistence {
1021 0 : if let Some(node_id) = tsp.generation_pageserver {
1022 0 : node_ids.insert(node_id);
1023 0 : }
1024 : }
1025 0 : for node_id in node_ids {
1026 0 : tracing::info!("Creating node {} in scheduler for tests", node_id);
1027 0 : let node = Node::new(
1028 0 : NodeId(node_id as u64),
1029 0 : "".to_string(),
1030 0 : 123,
1031 0 : "".to_string(),
1032 0 : 123,
1033 0 : );
1034 0 :
1035 0 : scheduler.node_upsert(&node);
1036 : }
1037 : }
1038 0 : for tsp in tenant_shard_persistence {
1039 0 : let tenant_shard_id = tsp.get_tenant_shard_id()?;
1040 :
1041 : // We will populate intent properly later in [`Self::startup_reconcile`], initially populate
1042 : // it with what we can infer: the node for which a generation was most recently issued.
1043 0 : let mut intent = IntentState::new();
1044 0 : if let Some(generation_pageserver) = tsp.generation_pageserver {
1045 0 : intent.set_attached(&mut scheduler, Some(NodeId(generation_pageserver as u64)));
1046 0 : }
1047 0 : let new_tenant = TenantShard::from_persistent(tsp, intent)?;
1048 :
1049 0 : tenants.insert(tenant_shard_id, new_tenant);
1050 : }
1051 :
1052 0 : let (startup_completion, startup_complete) = utils::completion::channel();
1053 0 :
1054 0 : // This channel is continuously consumed by process_results, so doesn't need to be very large.
1055 0 : let (bg_compute_notify_result_tx, bg_compute_notify_result_rx) =
1056 0 : tokio::sync::mpsc::channel(512);
1057 0 :
1058 0 : let (delayed_reconcile_tx, delayed_reconcile_rx) =
1059 0 : tokio::sync::mpsc::channel(MAX_DELAYED_RECONCILES);
1060 0 :
1061 0 : let cancel = CancellationToken::new();
1062 0 : let heartbeater = Heartbeater::new(
1063 0 : config.jwt_token.clone(),
1064 0 : config.max_unavailable_interval,
1065 0 : cancel.clone(),
1066 0 : );
1067 0 : let this = Arc::new(Self {
1068 0 : inner: Arc::new(std::sync::RwLock::new(ServiceState::new(
1069 0 : nodes,
1070 0 : tenants,
1071 0 : scheduler,
1072 0 : delayed_reconcile_rx,
1073 0 : ))),
1074 0 : config: config.clone(),
1075 0 : persistence,
1076 0 : compute_hook: Arc::new(ComputeHook::new(config.clone())),
1077 0 : result_tx,
1078 0 : heartbeater,
1079 0 : reconciler_concurrency: Arc::new(tokio::sync::Semaphore::new(
1080 0 : config.reconciler_concurrency,
1081 0 : )),
1082 0 : delayed_reconcile_tx,
1083 0 : abort_tx,
1084 0 : startup_complete: startup_complete.clone(),
1085 0 : cancel,
1086 0 : gate: Gate::default(),
1087 0 : tenant_op_locks: Default::default(),
1088 0 : node_op_locks: Default::default(),
1089 0 : });
1090 0 :
1091 0 : let result_task_this = this.clone();
1092 0 : tokio::task::spawn(async move {
1093 : // Block shutdown until we're done (we must respect self.cancel)
1094 0 : if let Ok(_gate) = result_task_this.gate.enter() {
1095 0 : result_task_this
1096 0 : .process_results(result_rx, bg_compute_notify_result_rx)
1097 0 : .await
1098 0 : }
1099 0 : });
1100 0 :
1101 0 : tokio::task::spawn({
1102 0 : let this = this.clone();
1103 0 : async move {
1104 : // Block shutdown until we're done (we must respect self.cancel)
1105 0 : if let Ok(_gate) = this.gate.enter() {
1106 0 : this.process_aborts(abort_rx).await
1107 0 : }
1108 0 : }
1109 0 : });
1110 0 :
1111 0 : tokio::task::spawn({
1112 0 : let this = this.clone();
1113 0 : async move {
1114 0 : if let Ok(_gate) = this.gate.enter() {
1115 0 : loop {
1116 0 : tokio::select! {
1117 : _ = this.cancel.cancelled() => {
1118 : break;
1119 : },
1120 : _ = tokio::time::sleep(Duration::from_secs(60)) => {}
1121 0 : };
1122 0 : this.tenant_op_locks.housekeeping();
1123 0 : }
1124 0 : }
1125 0 : }
1126 0 : });
1127 0 :
1128 0 : tokio::task::spawn({
1129 0 : let this = this.clone();
1130 0 : // We will block the [`Service::startup_complete`] barrier until [`Self::startup_reconcile`]
1131 0 : // is done.
1132 0 : let startup_completion = startup_completion.clone();
1133 0 : async move {
1134 : // Block shutdown until we're done (we must respect self.cancel)
1135 0 : let Ok(_gate) = this.gate.enter() else {
1136 0 : return;
1137 : };
1138 :
1139 0 : this.startup_reconcile(bg_compute_notify_result_tx).await;
1140 0 : drop(startup_completion);
1141 0 : }
1142 0 : });
1143 0 :
1144 0 : tokio::task::spawn({
1145 0 : let this = this.clone();
1146 0 : let startup_complete = startup_complete.clone();
1147 0 : async move {
1148 0 : startup_complete.wait().await;
1149 0 : this.background_reconcile().await;
1150 0 : }
1151 0 : });
1152 0 :
1153 0 : tokio::task::spawn({
1154 0 : let this = this.clone();
1155 0 : let startup_complete = startup_complete.clone();
1156 0 : async move {
1157 0 : startup_complete.wait().await;
1158 0 : this.spawn_heartbeat_driver().await;
1159 0 : }
1160 0 : });
1161 0 :
1162 0 : Ok(this)
1163 0 : }
1164 :
1165 0 : pub(crate) async fn attach_hook(
1166 0 : &self,
1167 0 : attach_req: AttachHookRequest,
1168 0 : ) -> anyhow::Result<AttachHookResponse> {
1169 0 : // This is a test hook. To enable using it on tenants that were created directly with
1170 0 : // the pageserver API (not via this service), we will auto-create any missing tenant
1171 0 : // shards with default state.
1172 0 : let insert = {
1173 0 : let locked = self.inner.write().unwrap();
1174 0 : !locked.tenants.contains_key(&attach_req.tenant_shard_id)
1175 0 : };
1176 0 : if insert {
1177 0 : let tsp = TenantShardPersistence {
1178 0 : tenant_id: attach_req.tenant_shard_id.tenant_id.to_string(),
1179 0 : shard_number: attach_req.tenant_shard_id.shard_number.0 as i32,
1180 0 : shard_count: attach_req.tenant_shard_id.shard_count.literal() as i32,
1181 0 : shard_stripe_size: 0,
1182 0 : generation: Some(0),
1183 0 : generation_pageserver: None,
1184 0 : placement_policy: serde_json::to_string(&PlacementPolicy::Attached(0)).unwrap(),
1185 0 : config: serde_json::to_string(&TenantConfig::default()).unwrap(),
1186 0 : splitting: SplitState::default(),
1187 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1188 0 : .unwrap(),
1189 0 : };
1190 0 :
1191 0 : match self.persistence.insert_tenant_shards(vec![tsp]).await {
1192 0 : Err(e) => match e {
1193 : DatabaseError::Query(diesel::result::Error::DatabaseError(
1194 : DatabaseErrorKind::UniqueViolation,
1195 : _,
1196 : )) => {
1197 0 : tracing::info!(
1198 0 : "Raced with another request to insert tenant {}",
1199 : attach_req.tenant_shard_id
1200 : )
1201 : }
1202 0 : _ => return Err(e.into()),
1203 : },
1204 : Ok(()) => {
1205 0 : tracing::info!("Inserted shard {} in database", attach_req.tenant_shard_id);
1206 :
1207 0 : let mut locked = self.inner.write().unwrap();
1208 0 : locked.tenants.insert(
1209 0 : attach_req.tenant_shard_id,
1210 0 : TenantShard::new(
1211 0 : attach_req.tenant_shard_id,
1212 0 : ShardIdentity::unsharded(),
1213 0 : PlacementPolicy::Attached(0),
1214 0 : ),
1215 0 : );
1216 0 : tracing::info!("Inserted shard {} in memory", attach_req.tenant_shard_id);
1217 : }
1218 : }
1219 0 : }
1220 :
1221 0 : let new_generation = if let Some(req_node_id) = attach_req.node_id {
1222 0 : let maybe_tenant_conf = {
1223 0 : let locked = self.inner.write().unwrap();
1224 0 : locked
1225 0 : .tenants
1226 0 : .get(&attach_req.tenant_shard_id)
1227 0 : .map(|t| t.config.clone())
1228 0 : };
1229 0 :
1230 0 : match maybe_tenant_conf {
1231 0 : Some(conf) => {
1232 0 : let new_generation = self
1233 0 : .persistence
1234 0 : .increment_generation(attach_req.tenant_shard_id, req_node_id)
1235 0 : .await?;
1236 :
1237 : // Persist the placement policy update. This is required
1238 : // when we reattaching a detached tenant.
1239 0 : self.persistence
1240 0 : .update_tenant_shard(
1241 0 : TenantFilter::Shard(attach_req.tenant_shard_id),
1242 0 : Some(PlacementPolicy::Attached(0)),
1243 0 : Some(conf),
1244 0 : None,
1245 0 : None,
1246 0 : )
1247 0 : .await?;
1248 0 : Some(new_generation)
1249 : }
1250 : None => {
1251 0 : anyhow::bail!("Attach hook handling raced with tenant removal")
1252 : }
1253 : }
1254 : } else {
1255 0 : self.persistence.detach(attach_req.tenant_shard_id).await?;
1256 0 : None
1257 : };
1258 :
1259 0 : let mut locked = self.inner.write().unwrap();
1260 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
1261 0 :
1262 0 : let tenant_shard = tenants
1263 0 : .get_mut(&attach_req.tenant_shard_id)
1264 0 : .expect("Checked for existence above");
1265 :
1266 0 : if let Some(new_generation) = new_generation {
1267 0 : tenant_shard.generation = Some(new_generation);
1268 0 : tenant_shard.policy = PlacementPolicy::Attached(0);
1269 0 : } else {
1270 : // This is a detach notification. We must update placement policy to avoid re-attaching
1271 : // during background scheduling/reconciliation, or during storage controller restart.
1272 0 : assert!(attach_req.node_id.is_none());
1273 0 : tenant_shard.policy = PlacementPolicy::Detached;
1274 : }
1275 :
1276 0 : if let Some(attaching_pageserver) = attach_req.node_id.as_ref() {
1277 0 : tracing::info!(
1278 : tenant_id = %attach_req.tenant_shard_id,
1279 : ps_id = %attaching_pageserver,
1280 : generation = ?tenant_shard.generation,
1281 0 : "issuing",
1282 : );
1283 0 : } else if let Some(ps_id) = tenant_shard.intent.get_attached() {
1284 0 : tracing::info!(
1285 : tenant_id = %attach_req.tenant_shard_id,
1286 : %ps_id,
1287 : generation = ?tenant_shard.generation,
1288 0 : "dropping",
1289 : );
1290 : } else {
1291 0 : tracing::info!(
1292 : tenant_id = %attach_req.tenant_shard_id,
1293 0 : "no-op: tenant already has no pageserver");
1294 : }
1295 0 : tenant_shard
1296 0 : .intent
1297 0 : .set_attached(scheduler, attach_req.node_id);
1298 0 :
1299 0 : tracing::info!(
1300 0 : "attach_hook: tenant {} set generation {:?}, pageserver {}",
1301 0 : attach_req.tenant_shard_id,
1302 0 : tenant_shard.generation,
1303 0 : // TODO: this is an odd number of 0xf's
1304 0 : attach_req.node_id.unwrap_or(utils::id::NodeId(0xfffffff))
1305 : );
1306 :
1307 : // Trick the reconciler into not doing anything for this tenant: this helps
1308 : // tests that manually configure a tenant on the pagesrever, and then call this
1309 : // attach hook: they don't want background reconciliation to modify what they
1310 : // did to the pageserver.
1311 : #[cfg(feature = "testing")]
1312 : {
1313 0 : if let Some(node_id) = attach_req.node_id {
1314 0 : tenant_shard.observed.locations = HashMap::from([(
1315 0 : node_id,
1316 0 : ObservedStateLocation {
1317 0 : conf: Some(attached_location_conf(
1318 0 : tenant_shard.generation.unwrap(),
1319 0 : &tenant_shard.shard,
1320 0 : &tenant_shard.config,
1321 0 : false,
1322 0 : )),
1323 0 : },
1324 0 : )]);
1325 0 : } else {
1326 0 : tenant_shard.observed.locations.clear();
1327 0 : }
1328 : }
1329 :
1330 0 : Ok(AttachHookResponse {
1331 0 : gen: attach_req
1332 0 : .node_id
1333 0 : .map(|_| tenant_shard.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap()),
1334 0 : })
1335 0 : }
1336 :
1337 0 : pub(crate) fn inspect(&self, inspect_req: InspectRequest) -> InspectResponse {
1338 0 : let locked = self.inner.read().unwrap();
1339 0 :
1340 0 : let tenant_shard = locked.tenants.get(&inspect_req.tenant_shard_id);
1341 0 :
1342 0 : InspectResponse {
1343 0 : attachment: tenant_shard.and_then(|s| {
1344 0 : s.intent
1345 0 : .get_attached()
1346 0 : .map(|ps| (s.generation.expect("Test hook, not used on tenants that are mid-onboarding with a NULL generation").into().unwrap(), ps))
1347 0 : }),
1348 0 : }
1349 0 : }
1350 :
1351 : // When the availability state of a node transitions to active, we must do a full reconciliation
1352 : // of LocationConfigs on that node. This is because while a node was offline:
1353 : // - we might have proceeded through startup_reconcile without checking for extraneous LocationConfigs on this node
1354 : // - aborting a tenant shard split might have left rogue child shards behind on this node.
1355 : //
1356 : // This function must complete _before_ setting a `Node` to Active: once it is set to Active, other
1357 : // Reconcilers might communicate with the node, and these must not overlap with the work we do in
1358 : // this function.
1359 : //
1360 : // The reconciliation logic in here is very similar to what [`Self::startup_reconcile`] does, but
1361 : // for written for a single node rather than as a batch job for all nodes.
1362 0 : #[tracing::instrument(skip_all, fields(node_id=%node.get_id()))]
1363 : async fn node_activate_reconcile(
1364 : &self,
1365 : mut node: Node,
1366 : _lock: &WrappedWriteGuard<NodeOperations>,
1367 : ) -> Result<(), ApiError> {
1368 : // This Node is a mutable local copy: we will set it active so that we can use its
1369 : // API client to reconcile with the node. The Node in [`Self::nodes`] will get updated
1370 : // later.
1371 : node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
1372 :
1373 : let configs = match node
1374 : .with_client_retries(
1375 0 : |client| async move { client.list_location_config().await },
1376 : &self.config.jwt_token,
1377 : 1,
1378 : 5,
1379 : SHORT_RECONCILE_TIMEOUT,
1380 : &self.cancel,
1381 : )
1382 : .await
1383 : {
1384 : None => {
1385 : // We're shutting down (the Node's cancellation token can't have fired, because
1386 : // we're the only scope that has a reference to it, and we didn't fire it).
1387 : return Err(ApiError::ShuttingDown);
1388 : }
1389 : Some(Err(e)) => {
1390 : // This node didn't succeed listing its locations: it may not proceed to active state
1391 : // as it is apparently unavailable.
1392 : return Err(ApiError::PreconditionFailed(
1393 : format!("Failed to query node location configs, cannot activate ({e})").into(),
1394 : ));
1395 : }
1396 : Some(Ok(configs)) => configs,
1397 : };
1398 : tracing::info!("Loaded {} LocationConfigs", configs.tenant_shards.len());
1399 :
1400 : let mut cleanup = Vec::new();
1401 : {
1402 : let mut locked = self.inner.write().unwrap();
1403 :
1404 : for (tenant_shard_id, observed_loc) in configs.tenant_shards {
1405 : let Some(tenant_shard) = locked.tenants.get_mut(&tenant_shard_id) else {
1406 : cleanup.push(tenant_shard_id);
1407 : continue;
1408 : };
1409 : tenant_shard
1410 : .observed
1411 : .locations
1412 : .insert(node.get_id(), ObservedStateLocation { conf: observed_loc });
1413 : }
1414 : }
1415 :
1416 : for tenant_shard_id in cleanup {
1417 : tracing::info!("Detaching {tenant_shard_id}");
1418 : match node
1419 : .with_client_retries(
1420 0 : |client| async move {
1421 0 : let config = LocationConfig {
1422 0 : mode: LocationConfigMode::Detached,
1423 0 : generation: None,
1424 0 : secondary_conf: None,
1425 0 : shard_number: tenant_shard_id.shard_number.0,
1426 0 : shard_count: tenant_shard_id.shard_count.literal(),
1427 0 : shard_stripe_size: 0,
1428 0 : tenant_conf: models::TenantConfig::default(),
1429 0 : };
1430 0 : client
1431 0 : .location_config(tenant_shard_id, config, None, false)
1432 0 : .await
1433 0 : },
1434 : &self.config.jwt_token,
1435 : 1,
1436 : 5,
1437 : SHORT_RECONCILE_TIMEOUT,
1438 : &self.cancel,
1439 : )
1440 : .await
1441 : {
1442 : None => {
1443 : // We're shutting down (the Node's cancellation token can't have fired, because
1444 : // we're the only scope that has a reference to it, and we didn't fire it).
1445 : return Err(ApiError::ShuttingDown);
1446 : }
1447 : Some(Err(e)) => {
1448 : // Do not let the node proceed to Active state if it is not responsive to requests
1449 : // to detach. This could happen if e.g. a shutdown bug in the pageserver is preventing
1450 : // detach completing: we should not let this node back into the set of nodes considered
1451 : // okay for scheduling.
1452 : return Err(ApiError::Conflict(format!(
1453 : "Node {node} failed to detach {tenant_shard_id}: {e}"
1454 : )));
1455 : }
1456 : Some(Ok(_)) => {}
1457 : };
1458 : }
1459 :
1460 : Ok(())
1461 : }
1462 :
1463 0 : pub(crate) async fn re_attach(
1464 0 : &self,
1465 0 : reattach_req: ReAttachRequest,
1466 0 : ) -> Result<ReAttachResponse, ApiError> {
1467 0 : if let Some(register_req) = reattach_req.register {
1468 0 : self.node_register(register_req).await?;
1469 0 : }
1470 :
1471 : // Ordering: we must persist generation number updates before making them visible in the in-memory state
1472 0 : let incremented_generations = self.persistence.re_attach(reattach_req.node_id).await?;
1473 :
1474 0 : tracing::info!(
1475 : node_id=%reattach_req.node_id,
1476 0 : "Incremented {} tenant shards' generations",
1477 0 : incremented_generations.len()
1478 : );
1479 :
1480 : // Apply the updated generation to our in-memory state, and
1481 : // gather discover secondary locations.
1482 0 : let mut locked = self.inner.write().unwrap();
1483 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1484 0 :
1485 0 : let mut response = ReAttachResponse {
1486 0 : tenants: Vec::new(),
1487 0 : };
1488 :
1489 : // TODO: cancel/restart any running reconciliation for this tenant, it might be trying
1490 : // to call location_conf API with an old generation. Wait for cancellation to complete
1491 : // before responding to this request. Requires well implemented CancellationToken logic
1492 : // all the way to where we call location_conf. Even then, there can still be a location_conf
1493 : // request in flight over the network: TODO handle that by making location_conf API refuse
1494 : // to go backward in generations.
1495 :
1496 : // Scan through all shards, applying updates for ones where we updated generation
1497 : // and identifying shards that intend to have a secondary location on this node.
1498 0 : for (tenant_shard_id, shard) in tenants {
1499 0 : if let Some(new_gen) = incremented_generations.get(tenant_shard_id) {
1500 0 : let new_gen = *new_gen;
1501 0 : response.tenants.push(ReAttachResponseTenant {
1502 0 : id: *tenant_shard_id,
1503 0 : gen: Some(new_gen.into().unwrap()),
1504 0 : // A tenant is only put into multi or stale modes in the middle of a [`Reconciler::live_migrate`]
1505 0 : // execution. If a pageserver is restarted during that process, then the reconcile pass will
1506 0 : // fail, and start from scratch, so it doesn't make sense for us to try and preserve
1507 0 : // the stale/multi states at this point.
1508 0 : mode: LocationConfigMode::AttachedSingle,
1509 0 : });
1510 0 :
1511 0 : shard.generation = std::cmp::max(shard.generation, Some(new_gen));
1512 0 : if let Some(observed) = shard.observed.locations.get_mut(&reattach_req.node_id) {
1513 : // Why can we update `observed` even though we're not sure our response will be received
1514 : // by the pageserver? Because the pageserver will not proceed with startup until
1515 : // it has processed response: if it loses it, we'll see another request and increment
1516 : // generation again, avoiding any uncertainty about dirtiness of tenant's state.
1517 0 : if let Some(conf) = observed.conf.as_mut() {
1518 0 : conf.generation = new_gen.into();
1519 0 : }
1520 0 : } else {
1521 0 : // This node has no observed state for the shard: perhaps it was offline
1522 0 : // when the pageserver restarted. Insert a None, so that the Reconciler
1523 0 : // will be prompted to learn the location's state before it makes changes.
1524 0 : shard
1525 0 : .observed
1526 0 : .locations
1527 0 : .insert(reattach_req.node_id, ObservedStateLocation { conf: None });
1528 0 : }
1529 0 : } else if shard.intent.get_secondary().contains(&reattach_req.node_id) {
1530 0 : // Ordering: pageserver will not accept /location_config requests until it has
1531 0 : // finished processing the response from re-attach. So we can update our in-memory state
1532 0 : // now, and be confident that we are not stamping on the result of some later location config.
1533 0 : // TODO: however, we are not strictly ordered wrt ReconcileResults queue,
1534 0 : // so we might update observed state here, and then get over-written by some racing
1535 0 : // ReconcileResult. The impact is low however, since we have set state on pageserver something
1536 0 : // that matches intent, so worst case if we race then we end up doing a spurious reconcile.
1537 0 :
1538 0 : response.tenants.push(ReAttachResponseTenant {
1539 0 : id: *tenant_shard_id,
1540 0 : gen: None,
1541 0 : mode: LocationConfigMode::Secondary,
1542 0 : });
1543 0 :
1544 0 : // We must not update observed, because we have no guarantee that our
1545 0 : // response will be received by the pageserver. This could leave it
1546 0 : // falsely dirty, but the resulting reconcile should be idempotent.
1547 0 : }
1548 : }
1549 :
1550 : // We consider a node Active once we have composed a re-attach response, but we
1551 : // do not call [`Self::node_activate_reconcile`]: the handling of the re-attach response
1552 : // implicitly synchronizes the LocationConfigs on the node.
1553 : //
1554 : // Setting a node active unblocks any Reconcilers that might write to the location config API,
1555 : // but those requests will not be accepted by the node until it has finished processing
1556 : // the re-attach response.
1557 0 : if let Some(node) = nodes.get(&reattach_req.node_id) {
1558 0 : if !node.is_available() {
1559 0 : let mut new_nodes = (**nodes).clone();
1560 0 : if let Some(node) = new_nodes.get_mut(&reattach_req.node_id) {
1561 0 : node.set_availability(NodeAvailability::Active(UtilizationScore::worst()));
1562 0 : scheduler.node_upsert(node);
1563 0 : }
1564 0 : let new_nodes = Arc::new(new_nodes);
1565 0 : *nodes = new_nodes;
1566 0 : }
1567 0 : }
1568 :
1569 0 : Ok(response)
1570 0 : }
1571 :
1572 0 : pub(crate) fn validate(&self, validate_req: ValidateRequest) -> ValidateResponse {
1573 0 : let locked = self.inner.read().unwrap();
1574 0 :
1575 0 : let mut response = ValidateResponse {
1576 0 : tenants: Vec::new(),
1577 0 : };
1578 :
1579 0 : for req_tenant in validate_req.tenants {
1580 0 : if let Some(tenant_shard) = locked.tenants.get(&req_tenant.id) {
1581 0 : let valid = tenant_shard.generation == Some(Generation::new(req_tenant.gen));
1582 0 : tracing::info!(
1583 0 : "handle_validate: {}(gen {}): valid={valid} (latest {:?})",
1584 : req_tenant.id,
1585 : req_tenant.gen,
1586 : tenant_shard.generation
1587 : );
1588 0 : response.tenants.push(ValidateResponseTenant {
1589 0 : id: req_tenant.id,
1590 0 : valid,
1591 0 : });
1592 0 : } else {
1593 0 : // After tenant deletion, we may approve any validation. This avoids
1594 0 : // spurious warnings on the pageserver if it has pending LSN updates
1595 0 : // at the point a deletion happens.
1596 0 : response.tenants.push(ValidateResponseTenant {
1597 0 : id: req_tenant.id,
1598 0 : valid: true,
1599 0 : });
1600 0 : }
1601 : }
1602 0 : response
1603 0 : }
1604 :
1605 0 : pub(crate) async fn tenant_create(
1606 0 : &self,
1607 0 : create_req: TenantCreateRequest,
1608 0 : ) -> Result<TenantCreateResponse, ApiError> {
1609 0 : let tenant_id = create_req.new_tenant_id.tenant_id;
1610 :
1611 : // Exclude any concurrent attempts to create/access the same tenant ID
1612 0 : let _tenant_lock = trace_exclusive_lock(
1613 0 : &self.tenant_op_locks,
1614 0 : create_req.new_tenant_id.tenant_id,
1615 0 : TenantOperations::Create,
1616 0 : )
1617 0 : .await;
1618 0 : let (response, waiters) = self.do_tenant_create(create_req).await?;
1619 :
1620 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
1621 : // Avoid deadlock: reconcile may fail while notifying compute, if the cloud control plane refuses to
1622 : // accept compute notifications while it is in the process of creating. Reconciliation will
1623 : // be retried in the background.
1624 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while creating tenant ({e})");
1625 0 : }
1626 0 : Ok(response)
1627 0 : }
1628 :
1629 0 : pub(crate) async fn do_tenant_create(
1630 0 : &self,
1631 0 : create_req: TenantCreateRequest,
1632 0 : ) -> Result<(TenantCreateResponse, Vec<ReconcilerWaiter>), ApiError> {
1633 0 : let placement_policy = create_req
1634 0 : .placement_policy
1635 0 : .clone()
1636 0 : // As a default, zero secondaries is convenient for tests that don't choose a policy.
1637 0 : .unwrap_or(PlacementPolicy::Attached(0));
1638 :
1639 : // This service expects to handle sharding itself: it is an error to try and directly create
1640 : // a particular shard here.
1641 0 : let tenant_id = if !create_req.new_tenant_id.is_unsharded() {
1642 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
1643 0 : "Attempted to create a specific shard, this API is for creating the whole tenant"
1644 0 : )));
1645 : } else {
1646 0 : create_req.new_tenant_id.tenant_id
1647 0 : };
1648 0 :
1649 0 : tracing::info!(
1650 0 : "Creating tenant {}, shard_count={:?}",
1651 : create_req.new_tenant_id,
1652 : create_req.shard_parameters.count,
1653 : );
1654 :
1655 0 : let create_ids = (0..create_req.shard_parameters.count.count())
1656 0 : .map(|i| TenantShardId {
1657 0 : tenant_id,
1658 0 : shard_number: ShardNumber(i),
1659 0 : shard_count: create_req.shard_parameters.count,
1660 0 : })
1661 0 : .collect::<Vec<_>>();
1662 :
1663 : // If the caller specifies a None generation, it means "start from default". This is different
1664 : // to [`Self::tenant_location_config`], where a None generation is used to represent
1665 : // an incompletely-onboarded tenant.
1666 0 : let initial_generation = if matches!(placement_policy, PlacementPolicy::Secondary) {
1667 0 : tracing::info!(
1668 0 : "tenant_create: secondary mode, generation is_some={}",
1669 0 : create_req.generation.is_some()
1670 : );
1671 0 : create_req.generation.map(Generation::new)
1672 : } else {
1673 0 : tracing::info!(
1674 0 : "tenant_create: not secondary mode, generation is_some={}",
1675 0 : create_req.generation.is_some()
1676 : );
1677 0 : Some(
1678 0 : create_req
1679 0 : .generation
1680 0 : .map(Generation::new)
1681 0 : .unwrap_or(INITIAL_GENERATION),
1682 0 : )
1683 : };
1684 :
1685 : // Ordering: we persist tenant shards before creating them on the pageserver. This enables a caller
1686 : // to clean up after themselves by issuing a tenant deletion if something goes wrong and we restart
1687 : // during the creation, rather than risking leaving orphan objects in S3.
1688 0 : let persist_tenant_shards = create_ids
1689 0 : .iter()
1690 0 : .map(|tenant_shard_id| TenantShardPersistence {
1691 0 : tenant_id: tenant_shard_id.tenant_id.to_string(),
1692 0 : shard_number: tenant_shard_id.shard_number.0 as i32,
1693 0 : shard_count: tenant_shard_id.shard_count.literal() as i32,
1694 0 : shard_stripe_size: create_req.shard_parameters.stripe_size.0 as i32,
1695 0 : generation: initial_generation.map(|g| g.into().unwrap() as i32),
1696 0 : // The pageserver is not known until scheduling happens: we will set this column when
1697 0 : // incrementing the generation the first time we attach to a pageserver.
1698 0 : generation_pageserver: None,
1699 0 : placement_policy: serde_json::to_string(&placement_policy).unwrap(),
1700 0 : config: serde_json::to_string(&create_req.config).unwrap(),
1701 0 : splitting: SplitState::default(),
1702 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
1703 0 : .unwrap(),
1704 0 : })
1705 0 : .collect();
1706 0 :
1707 0 : match self
1708 0 : .persistence
1709 0 : .insert_tenant_shards(persist_tenant_shards)
1710 0 : .await
1711 : {
1712 0 : Ok(_) => {}
1713 : Err(DatabaseError::Query(diesel::result::Error::DatabaseError(
1714 : DatabaseErrorKind::UniqueViolation,
1715 : _,
1716 : ))) => {
1717 : // Unique key violation: this is probably a retry. Because the shard count is part of the unique key,
1718 : // if we see a unique key violation it means that the creation request's shard count matches the previous
1719 : // creation's shard count.
1720 0 : tracing::info!("Tenant shards already present in database, proceeding with idempotent creation...");
1721 : }
1722 : // Any other database error is unexpected and a bug.
1723 0 : Err(e) => return Err(ApiError::InternalServerError(anyhow::anyhow!(e))),
1724 : };
1725 :
1726 0 : let mut schedule_context = ScheduleContext::default();
1727 :
1728 0 : let (waiters, response_shards) = {
1729 0 : let mut locked = self.inner.write().unwrap();
1730 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
1731 0 :
1732 0 : let mut response_shards = Vec::new();
1733 0 : let mut schcedule_error = None;
1734 :
1735 0 : for tenant_shard_id in create_ids {
1736 0 : tracing::info!("Creating shard {tenant_shard_id}...");
1737 :
1738 : use std::collections::btree_map::Entry;
1739 0 : match tenants.entry(tenant_shard_id) {
1740 0 : Entry::Occupied(mut entry) => {
1741 0 : tracing::info!(
1742 0 : "Tenant shard {tenant_shard_id} already exists while creating"
1743 : );
1744 :
1745 : // TODO: schedule() should take an anti-affinity expression that pushes
1746 : // attached and secondary locations (independently) away frorm those
1747 : // pageservers also holding a shard for this tenant.
1748 :
1749 0 : entry
1750 0 : .get_mut()
1751 0 : .schedule(scheduler, &mut schedule_context)
1752 0 : .map_err(|e| {
1753 0 : ApiError::Conflict(format!(
1754 0 : "Failed to schedule shard {tenant_shard_id}: {e}"
1755 0 : ))
1756 0 : })?;
1757 :
1758 0 : if let Some(node_id) = entry.get().intent.get_attached() {
1759 0 : let generation = entry
1760 0 : .get()
1761 0 : .generation
1762 0 : .expect("Generation is set when in attached mode");
1763 0 : response_shards.push(TenantCreateResponseShard {
1764 0 : shard_id: tenant_shard_id,
1765 0 : node_id: *node_id,
1766 0 : generation: generation.into().unwrap(),
1767 0 : });
1768 0 : }
1769 :
1770 0 : continue;
1771 : }
1772 0 : Entry::Vacant(entry) => {
1773 0 : let state = entry.insert(TenantShard::new(
1774 0 : tenant_shard_id,
1775 0 : ShardIdentity::from_params(
1776 0 : tenant_shard_id.shard_number,
1777 0 : &create_req.shard_parameters,
1778 0 : ),
1779 0 : placement_policy.clone(),
1780 0 : ));
1781 0 :
1782 0 : state.generation = initial_generation;
1783 0 : state.config = create_req.config.clone();
1784 0 : if let Err(e) = state.schedule(scheduler, &mut schedule_context) {
1785 0 : schcedule_error = Some(e);
1786 0 : }
1787 :
1788 : // Only include shards in result if we are attaching: the purpose
1789 : // of the response is to tell the caller where the shards are attached.
1790 0 : if let Some(node_id) = state.intent.get_attached() {
1791 0 : let generation = state
1792 0 : .generation
1793 0 : .expect("Generation is set when in attached mode");
1794 0 : response_shards.push(TenantCreateResponseShard {
1795 0 : shard_id: tenant_shard_id,
1796 0 : node_id: *node_id,
1797 0 : generation: generation.into().unwrap(),
1798 0 : });
1799 0 : }
1800 : }
1801 : };
1802 : }
1803 :
1804 : // If we failed to schedule shards, then they are still created in the controller,
1805 : // but we return an error to the requester to avoid a silent failure when someone
1806 : // tries to e.g. create a tenant whose placement policy requires more nodes than
1807 : // are present in the system. We do this here rather than in the above loop, to
1808 : // avoid situations where we only create a subset of shards in the tenant.
1809 0 : if let Some(e) = schcedule_error {
1810 0 : return Err(ApiError::Conflict(format!(
1811 0 : "Failed to schedule shard(s): {e}"
1812 0 : )));
1813 0 : }
1814 0 :
1815 0 : let waiters = tenants
1816 0 : .range_mut(TenantShardId::tenant_range(tenant_id))
1817 0 : .filter_map(|(_shard_id, shard)| self.maybe_reconcile_shard(shard, nodes))
1818 0 : .collect::<Vec<_>>();
1819 0 : (waiters, response_shards)
1820 0 : };
1821 0 :
1822 0 : Ok((
1823 0 : TenantCreateResponse {
1824 0 : shards: response_shards,
1825 0 : },
1826 0 : waiters,
1827 0 : ))
1828 0 : }
1829 :
1830 : /// Helper for functions that reconcile a number of shards, and would like to do a timeout-bounded
1831 : /// wait for reconciliation to complete before responding.
1832 0 : async fn await_waiters(
1833 0 : &self,
1834 0 : waiters: Vec<ReconcilerWaiter>,
1835 0 : timeout: Duration,
1836 0 : ) -> Result<(), ReconcileWaitError> {
1837 0 : let deadline = Instant::now().checked_add(timeout).unwrap();
1838 0 : for waiter in waiters {
1839 0 : let timeout = deadline.duration_since(Instant::now());
1840 0 : waiter.wait_timeout(timeout).await?;
1841 : }
1842 :
1843 0 : Ok(())
1844 0 : }
1845 :
1846 : /// Part of [`Self::tenant_location_config`]: dissect an incoming location config request,
1847 : /// and transform it into either a tenant creation of a series of shard updates.
1848 : ///
1849 : /// If the incoming request makes no changes, a [`TenantCreateOrUpdate::Update`] result will
1850 : /// still be returned.
1851 0 : fn tenant_location_config_prepare(
1852 0 : &self,
1853 0 : tenant_id: TenantId,
1854 0 : req: TenantLocationConfigRequest,
1855 0 : ) -> TenantCreateOrUpdate {
1856 0 : let mut updates = Vec::new();
1857 0 : let mut locked = self.inner.write().unwrap();
1858 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
1859 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
1860 :
1861 : // Use location config mode as an indicator of policy.
1862 0 : let placement_policy = match req.config.mode {
1863 0 : LocationConfigMode::Detached => PlacementPolicy::Detached,
1864 0 : LocationConfigMode::Secondary => PlacementPolicy::Secondary,
1865 : LocationConfigMode::AttachedMulti
1866 : | LocationConfigMode::AttachedSingle
1867 : | LocationConfigMode::AttachedStale => {
1868 0 : if nodes.len() > 1 {
1869 0 : PlacementPolicy::Attached(1)
1870 : } else {
1871 : // Convenience for dev/test: if we just have one pageserver, import
1872 : // tenants into non-HA mode so that scheduling will succeed.
1873 0 : PlacementPolicy::Attached(0)
1874 : }
1875 : }
1876 : };
1877 :
1878 0 : let mut create = true;
1879 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
1880 : // Saw an existing shard: this is not a creation
1881 0 : create = false;
1882 :
1883 : // Shards may have initially been created by a Secondary request, where we
1884 : // would have left generation as None.
1885 : //
1886 : // We only update generation the first time we see an attached-mode request,
1887 : // and if there is no existing generation set. The caller is responsible for
1888 : // ensuring that no non-storage-controller pageserver ever uses a higher
1889 : // generation than they passed in here.
1890 : use LocationConfigMode::*;
1891 0 : let set_generation = match req.config.mode {
1892 0 : AttachedMulti | AttachedSingle | AttachedStale if shard.generation.is_none() => {
1893 0 : req.config.generation.map(Generation::new)
1894 : }
1895 0 : _ => None,
1896 : };
1897 :
1898 0 : updates.push(ShardUpdate {
1899 0 : tenant_shard_id: *shard_id,
1900 0 : placement_policy: placement_policy.clone(),
1901 0 : tenant_config: req.config.tenant_conf.clone(),
1902 0 : generation: set_generation,
1903 0 : });
1904 : }
1905 :
1906 0 : if create {
1907 : use LocationConfigMode::*;
1908 0 : let generation = match req.config.mode {
1909 0 : AttachedMulti | AttachedSingle | AttachedStale => req.config.generation,
1910 : // If a caller provided a generation in a non-attached request, ignore it
1911 : // and leave our generation as None: this enables a subsequent update to set
1912 : // the generation when setting an attached mode for the first time.
1913 0 : _ => None,
1914 : };
1915 :
1916 0 : TenantCreateOrUpdate::Create(
1917 0 : // Synthesize a creation request
1918 0 : TenantCreateRequest {
1919 0 : new_tenant_id: tenant_shard_id,
1920 0 : generation,
1921 0 : shard_parameters: ShardParameters {
1922 0 : count: tenant_shard_id.shard_count,
1923 0 : // We only import un-sharded or single-sharded tenants, so stripe
1924 0 : // size can be made up arbitrarily here.
1925 0 : stripe_size: ShardParameters::DEFAULT_STRIPE_SIZE,
1926 0 : },
1927 0 : placement_policy: Some(placement_policy),
1928 0 : config: req.config.tenant_conf,
1929 0 : },
1930 0 : )
1931 : } else {
1932 0 : assert!(!updates.is_empty());
1933 0 : TenantCreateOrUpdate::Update(updates)
1934 : }
1935 0 : }
1936 :
1937 : /// This API is used by the cloud control plane to migrate unsharded tenants that it created
1938 : /// directly with pageservers into this service.
1939 : ///
1940 : /// Cloud control plane MUST NOT continue issuing GENERATION NUMBERS for this tenant once it
1941 : /// has attempted to call this API. Failure to oblige to this rule may lead to S3 corruption.
1942 : /// Think of the first attempt to call this API as a transfer of absolute authority over the
1943 : /// tenant's source of generation numbers.
1944 : ///
1945 : /// The mode in this request coarse-grained control of tenants:
1946 : /// - Call with mode Attached* to upsert the tenant.
1947 : /// - Call with mode Secondary to either onboard a tenant without attaching it, or
1948 : /// to set an existing tenant to PolicyMode::Secondary
1949 : /// - Call with mode Detached to switch to PolicyMode::Detached
1950 0 : pub(crate) async fn tenant_location_config(
1951 0 : &self,
1952 0 : tenant_shard_id: TenantShardId,
1953 0 : req: TenantLocationConfigRequest,
1954 0 : ) -> Result<TenantLocationConfigResponse, ApiError> {
1955 : // We require an exclusive lock, because we are updating both persistent and in-memory state
1956 0 : let _tenant_lock = trace_exclusive_lock(
1957 0 : &self.tenant_op_locks,
1958 0 : tenant_shard_id.tenant_id,
1959 0 : TenantOperations::LocationConfig,
1960 0 : )
1961 0 : .await;
1962 :
1963 0 : if !tenant_shard_id.is_unsharded() {
1964 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
1965 0 : "This API is for importing single-sharded or unsharded tenants"
1966 0 : )));
1967 0 : }
1968 0 :
1969 0 : // First check if this is a creation or an update
1970 0 : let create_or_update = self.tenant_location_config_prepare(tenant_shard_id.tenant_id, req);
1971 0 :
1972 0 : let mut result = TenantLocationConfigResponse {
1973 0 : shards: Vec::new(),
1974 0 : stripe_size: None,
1975 0 : };
1976 0 : let waiters = match create_or_update {
1977 0 : TenantCreateOrUpdate::Create(create_req) => {
1978 0 : let (create_resp, waiters) = self.do_tenant_create(create_req).await?;
1979 0 : result.shards = create_resp
1980 0 : .shards
1981 0 : .into_iter()
1982 0 : .map(|s| TenantShardLocation {
1983 0 : node_id: s.node_id,
1984 0 : shard_id: s.shard_id,
1985 0 : })
1986 0 : .collect();
1987 0 : waiters
1988 : }
1989 0 : TenantCreateOrUpdate::Update(updates) => {
1990 0 : // Persist updates
1991 0 : // Ordering: write to the database before applying changes in-memory, so that
1992 0 : // we will not appear time-travel backwards on a restart.
1993 0 : let mut schedule_context = ScheduleContext::default();
1994 : for ShardUpdate {
1995 0 : tenant_shard_id,
1996 0 : placement_policy,
1997 0 : tenant_config,
1998 0 : generation,
1999 0 : } in &updates
2000 : {
2001 0 : self.persistence
2002 0 : .update_tenant_shard(
2003 0 : TenantFilter::Shard(*tenant_shard_id),
2004 0 : Some(placement_policy.clone()),
2005 0 : Some(tenant_config.clone()),
2006 0 : *generation,
2007 0 : None,
2008 0 : )
2009 0 : .await?;
2010 : }
2011 :
2012 : // Apply updates in-memory
2013 0 : let mut waiters = Vec::new();
2014 0 : {
2015 0 : let mut locked = self.inner.write().unwrap();
2016 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2017 :
2018 : for ShardUpdate {
2019 0 : tenant_shard_id,
2020 0 : placement_policy,
2021 0 : tenant_config,
2022 0 : generation: update_generation,
2023 0 : } in updates
2024 : {
2025 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
2026 0 : tracing::warn!("Shard {tenant_shard_id} removed while updating");
2027 0 : continue;
2028 : };
2029 :
2030 : // Update stripe size
2031 0 : if result.stripe_size.is_none() && shard.shard.count.count() > 1 {
2032 0 : result.stripe_size = Some(shard.shard.stripe_size);
2033 0 : }
2034 :
2035 0 : shard.policy = placement_policy;
2036 0 : shard.config = tenant_config;
2037 0 : if let Some(generation) = update_generation {
2038 0 : shard.generation = Some(generation);
2039 0 : }
2040 :
2041 0 : shard.schedule(scheduler, &mut schedule_context)?;
2042 :
2043 0 : let maybe_waiter = self.maybe_reconcile_shard(shard, nodes);
2044 0 : if let Some(waiter) = maybe_waiter {
2045 0 : waiters.push(waiter);
2046 0 : }
2047 :
2048 0 : if let Some(node_id) = shard.intent.get_attached() {
2049 0 : result.shards.push(TenantShardLocation {
2050 0 : shard_id: tenant_shard_id,
2051 0 : node_id: *node_id,
2052 0 : })
2053 0 : }
2054 : }
2055 : }
2056 0 : waiters
2057 : }
2058 : };
2059 :
2060 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2061 : // Do not treat a reconcile error as fatal: we have already applied any requested
2062 : // Intent changes, and the reconcile can fail for external reasons like unavailable
2063 : // compute notification API. In these cases, it is important that we do not
2064 : // cause the cloud control plane to retry forever on this API.
2065 0 : tracing::warn!(
2066 0 : "Failed to reconcile after /location_config: {e}, returning success anyway"
2067 : );
2068 0 : }
2069 :
2070 : // Logging the full result is useful because it lets us cross-check what the cloud control
2071 : // plane's tenant_shards table should contain.
2072 0 : tracing::info!("Complete, returning {result:?}");
2073 :
2074 0 : Ok(result)
2075 0 : }
2076 :
2077 0 : pub(crate) async fn tenant_config_set(&self, req: TenantConfigRequest) -> Result<(), ApiError> {
2078 : // We require an exclusive lock, because we are updating persistent and in-memory state
2079 0 : let _tenant_lock = trace_exclusive_lock(
2080 0 : &self.tenant_op_locks,
2081 0 : req.tenant_id,
2082 0 : TenantOperations::ConfigSet,
2083 0 : )
2084 0 : .await;
2085 :
2086 0 : let tenant_id = req.tenant_id;
2087 0 : let config = req.config;
2088 0 :
2089 0 : self.persistence
2090 0 : .update_tenant_shard(
2091 0 : TenantFilter::Tenant(req.tenant_id),
2092 0 : None,
2093 0 : Some(config.clone()),
2094 0 : None,
2095 0 : None,
2096 0 : )
2097 0 : .await?;
2098 :
2099 0 : let waiters = {
2100 0 : let mut waiters = Vec::new();
2101 0 : let mut locked = self.inner.write().unwrap();
2102 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
2103 0 : for (_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2104 0 : shard.config = config.clone();
2105 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
2106 0 : waiters.push(waiter);
2107 0 : }
2108 : }
2109 0 : waiters
2110 : };
2111 :
2112 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
2113 : // Treat this as success because we have stored the configuration. If e.g.
2114 : // a node was unavailable at this time, it should not stop us accepting a
2115 : // configuration change.
2116 0 : tracing::warn!(%tenant_id, "Accepted configuration update but reconciliation failed: {e}");
2117 0 : }
2118 :
2119 0 : Ok(())
2120 0 : }
2121 :
2122 0 : pub(crate) fn tenant_config_get(
2123 0 : &self,
2124 0 : tenant_id: TenantId,
2125 0 : ) -> Result<HashMap<&str, serde_json::Value>, ApiError> {
2126 0 : let config = {
2127 0 : let locked = self.inner.read().unwrap();
2128 0 :
2129 0 : match locked
2130 0 : .tenants
2131 0 : .range(TenantShardId::tenant_range(tenant_id))
2132 0 : .next()
2133 : {
2134 0 : Some((_tenant_shard_id, shard)) => shard.config.clone(),
2135 : None => {
2136 0 : return Err(ApiError::NotFound(
2137 0 : anyhow::anyhow!("Tenant not found").into(),
2138 0 : ))
2139 : }
2140 : }
2141 : };
2142 :
2143 : // Unlike the pageserver, we do not have a set of global defaults: the config is
2144 : // entirely per-tenant. Therefore the distinction between `tenant_specific_overrides`
2145 : // and `effective_config` in the response is meaningless, but we retain that syntax
2146 : // in order to remain compatible with the pageserver API.
2147 :
2148 0 : let response = HashMap::from([
2149 : (
2150 : "tenant_specific_overrides",
2151 0 : serde_json::to_value(&config)
2152 0 : .context("serializing tenant specific overrides")
2153 0 : .map_err(ApiError::InternalServerError)?,
2154 : ),
2155 : (
2156 0 : "effective_config",
2157 0 : serde_json::to_value(&config)
2158 0 : .context("serializing effective config")
2159 0 : .map_err(ApiError::InternalServerError)?,
2160 : ),
2161 : ]);
2162 :
2163 0 : Ok(response)
2164 0 : }
2165 :
2166 0 : pub(crate) async fn tenant_time_travel_remote_storage(
2167 0 : &self,
2168 0 : time_travel_req: &TenantTimeTravelRequest,
2169 0 : tenant_id: TenantId,
2170 0 : timestamp: Cow<'_, str>,
2171 0 : done_if_after: Cow<'_, str>,
2172 0 : ) -> Result<(), ApiError> {
2173 0 : let _tenant_lock = trace_exclusive_lock(
2174 0 : &self.tenant_op_locks,
2175 0 : tenant_id,
2176 0 : TenantOperations::TimeTravelRemoteStorage,
2177 0 : )
2178 0 : .await;
2179 :
2180 0 : let node = {
2181 0 : let locked = self.inner.read().unwrap();
2182 : // Just a sanity check to prevent misuse: the API expects that the tenant is fully
2183 : // detached everywhere, and nothing writes to S3 storage. Here, we verify that,
2184 : // but only at the start of the process, so it's really just to prevent operator
2185 : // mistakes.
2186 0 : for (shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
2187 0 : if shard.intent.get_attached().is_some() || !shard.intent.get_secondary().is_empty()
2188 : {
2189 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2190 0 : "We want tenant to be attached in shard with tenant_shard_id={shard_id}"
2191 0 : )));
2192 0 : }
2193 0 : let maybe_attached = shard
2194 0 : .observed
2195 0 : .locations
2196 0 : .iter()
2197 0 : .filter_map(|(node_id, observed_location)| {
2198 0 : observed_location
2199 0 : .conf
2200 0 : .as_ref()
2201 0 : .map(|loc| (node_id, observed_location, loc.mode))
2202 0 : })
2203 0 : .find(|(_, _, mode)| *mode != LocationConfigMode::Detached);
2204 0 : if let Some((node_id, _observed_location, mode)) = maybe_attached {
2205 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!("We observed attached={mode:?} tenant in node_id={node_id} shard with tenant_shard_id={shard_id}")));
2206 0 : }
2207 : }
2208 0 : let scheduler = &locked.scheduler;
2209 : // Right now we only perform the operation on a single node without parallelization
2210 : // TODO fan out the operation to multiple nodes for better performance
2211 0 : let node_id = scheduler.schedule_shard(&[], &ScheduleContext::default())?;
2212 0 : let node = locked
2213 0 : .nodes
2214 0 : .get(&node_id)
2215 0 : .expect("Pageservers may not be deleted while lock is active");
2216 0 : node.clone()
2217 0 : };
2218 0 :
2219 0 : // The shard count is encoded in the remote storage's URL, so we need to handle all historically used shard counts
2220 0 : let mut counts = time_travel_req
2221 0 : .shard_counts
2222 0 : .iter()
2223 0 : .copied()
2224 0 : .collect::<HashSet<_>>()
2225 0 : .into_iter()
2226 0 : .collect::<Vec<_>>();
2227 0 : counts.sort_unstable();
2228 :
2229 0 : for count in counts {
2230 0 : let shard_ids = (0..count.count())
2231 0 : .map(|i| TenantShardId {
2232 0 : tenant_id,
2233 0 : shard_number: ShardNumber(i),
2234 0 : shard_count: count,
2235 0 : })
2236 0 : .collect::<Vec<_>>();
2237 0 : for tenant_shard_id in shard_ids {
2238 0 : let client = PageserverClient::new(
2239 0 : node.get_id(),
2240 0 : node.base_url(),
2241 0 : self.config.jwt_token.as_deref(),
2242 0 : );
2243 0 :
2244 0 : tracing::info!("Doing time travel recovery for shard {tenant_shard_id}",);
2245 :
2246 0 : client
2247 0 : .tenant_time_travel_remote_storage(
2248 0 : tenant_shard_id,
2249 0 : ×tamp,
2250 0 : &done_if_after,
2251 0 : )
2252 0 : .await
2253 0 : .map_err(|e| {
2254 0 : ApiError::InternalServerError(anyhow::anyhow!(
2255 0 : "Error doing time travel recovery for shard {tenant_shard_id} on node {}: {e}",
2256 0 : node
2257 0 : ))
2258 0 : })?;
2259 : }
2260 : }
2261 0 : Ok(())
2262 0 : }
2263 :
2264 0 : pub(crate) async fn tenant_secondary_download(
2265 0 : &self,
2266 0 : tenant_id: TenantId,
2267 0 : wait: Option<Duration>,
2268 0 : ) -> Result<(StatusCode, SecondaryProgress), ApiError> {
2269 0 : let _tenant_lock = trace_shared_lock(
2270 0 : &self.tenant_op_locks,
2271 0 : tenant_id,
2272 0 : TenantOperations::SecondaryDownload,
2273 0 : )
2274 0 : .await;
2275 :
2276 : // Acquire lock and yield the collection of shard-node tuples which we will send requests onward to
2277 0 : let targets = {
2278 0 : let locked = self.inner.read().unwrap();
2279 0 : let mut targets = Vec::new();
2280 :
2281 0 : for (tenant_shard_id, shard) in
2282 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2283 : {
2284 0 : for node_id in shard.intent.get_secondary() {
2285 0 : let node = locked
2286 0 : .nodes
2287 0 : .get(node_id)
2288 0 : .expect("Pageservers may not be deleted while referenced");
2289 0 :
2290 0 : targets.push((*tenant_shard_id, node.clone()));
2291 0 : }
2292 : }
2293 0 : targets
2294 0 : };
2295 0 :
2296 0 : // Issue concurrent requests to all shards' locations
2297 0 : let mut futs = FuturesUnordered::new();
2298 0 : for (tenant_shard_id, node) in targets {
2299 0 : let client = PageserverClient::new(
2300 0 : node.get_id(),
2301 0 : node.base_url(),
2302 0 : self.config.jwt_token.as_deref(),
2303 0 : );
2304 0 : futs.push(async move {
2305 0 : let result = client
2306 0 : .tenant_secondary_download(tenant_shard_id, wait)
2307 0 : .await;
2308 0 : (result, node, tenant_shard_id)
2309 0 : })
2310 : }
2311 :
2312 : // Handle any errors returned by pageservers. This includes cases like this request racing with
2313 : // a scheduling operation, such that the tenant shard we're calling doesn't exist on that pageserver any more, as
2314 : // well as more general cases like 503s, 500s, or timeouts.
2315 0 : let mut aggregate_progress = SecondaryProgress::default();
2316 0 : let mut aggregate_status: Option<StatusCode> = None;
2317 0 : let mut error: Option<mgmt_api::Error> = None;
2318 0 : while let Some((result, node, tenant_shard_id)) = futs.next().await {
2319 0 : match result {
2320 0 : Err(e) => {
2321 0 : // Secondary downloads are always advisory: if something fails, we nevertheless report success, so that whoever
2322 0 : // is calling us will proceed with whatever migration they're doing, albeit with a slightly less warm cache
2323 0 : // than they had hoped for.
2324 0 : tracing::warn!("Secondary download error from pageserver {node}: {e}",);
2325 0 : error = Some(e)
2326 : }
2327 0 : Ok((status_code, progress)) => {
2328 0 : tracing::info!(%tenant_shard_id, "Shard status={status_code} progress: {progress:?}");
2329 0 : aggregate_progress.layers_downloaded += progress.layers_downloaded;
2330 0 : aggregate_progress.layers_total += progress.layers_total;
2331 0 : aggregate_progress.bytes_downloaded += progress.bytes_downloaded;
2332 0 : aggregate_progress.bytes_total += progress.bytes_total;
2333 0 : aggregate_progress.heatmap_mtime =
2334 0 : std::cmp::max(aggregate_progress.heatmap_mtime, progress.heatmap_mtime);
2335 0 : aggregate_status = match aggregate_status {
2336 0 : None => Some(status_code),
2337 0 : Some(StatusCode::OK) => Some(status_code),
2338 0 : Some(cur) => {
2339 0 : // Other status codes (e.g. 202) -- do not overwrite.
2340 0 : Some(cur)
2341 : }
2342 : };
2343 : }
2344 : }
2345 : }
2346 :
2347 : // If any of the shards return 202, indicate our result as 202.
2348 0 : match aggregate_status {
2349 : None => {
2350 0 : match error {
2351 0 : Some(e) => {
2352 0 : // No successes, and an error: surface it
2353 0 : Err(ApiError::Conflict(format!("Error from pageserver: {e}")))
2354 : }
2355 : None => {
2356 : // No shards found
2357 0 : Err(ApiError::NotFound(
2358 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
2359 0 : ))
2360 : }
2361 : }
2362 : }
2363 0 : Some(aggregate_status) => Ok((aggregate_status, aggregate_progress)),
2364 : }
2365 0 : }
2366 :
2367 0 : pub(crate) async fn tenant_delete(&self, tenant_id: TenantId) -> Result<StatusCode, ApiError> {
2368 0 : let _tenant_lock =
2369 0 : trace_exclusive_lock(&self.tenant_op_locks, tenant_id, TenantOperations::Delete).await;
2370 :
2371 0 : self.ensure_attached_wait(tenant_id).await?;
2372 :
2373 : // TODO: refactor into helper
2374 0 : let targets = {
2375 0 : let locked = self.inner.read().unwrap();
2376 0 : let mut targets = Vec::new();
2377 :
2378 0 : for (tenant_shard_id, shard) in
2379 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2380 0 : {
2381 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2382 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2383 0 : })?;
2384 0 : let node = locked
2385 0 : .nodes
2386 0 : .get(&node_id)
2387 0 : .expect("Pageservers may not be deleted while referenced");
2388 0 :
2389 0 : targets.push((*tenant_shard_id, node.clone()));
2390 : }
2391 0 : targets
2392 0 : };
2393 0 :
2394 0 : // Phase 1: delete on the pageservers
2395 0 : let mut any_pending = false;
2396 0 : for (tenant_shard_id, node) in targets {
2397 0 : let client = PageserverClient::new(
2398 0 : node.get_id(),
2399 0 : node.base_url(),
2400 0 : self.config.jwt_token.as_deref(),
2401 0 : );
2402 : // TODO: this, like many other places, requires proper retry handling for 503, timeout: those should not
2403 : // surface immediately as an error to our caller.
2404 0 : let status = client.tenant_delete(tenant_shard_id).await.map_err(|e| {
2405 0 : ApiError::InternalServerError(anyhow::anyhow!(
2406 0 : "Error deleting shard {tenant_shard_id} on node {node}: {e}",
2407 0 : ))
2408 0 : })?;
2409 0 : tracing::info!(
2410 0 : "Shard {tenant_shard_id} on node {node}, delete returned {}",
2411 : status
2412 : );
2413 0 : if status == StatusCode::ACCEPTED {
2414 0 : any_pending = true;
2415 0 : }
2416 : }
2417 :
2418 0 : if any_pending {
2419 : // Caller should call us again later. When we eventually see 404s from
2420 : // all the shards, we may proceed to delete our records of the tenant.
2421 0 : tracing::info!(
2422 0 : "Tenant {} has some shards pending deletion, returning 202",
2423 : tenant_id
2424 : );
2425 0 : return Ok(StatusCode::ACCEPTED);
2426 0 : }
2427 0 :
2428 0 : // Fall through: deletion of the tenant on pageservers is complete, we may proceed to drop
2429 0 : // our in-memory state and database state.
2430 0 :
2431 0 : // Ordering: we delete persistent state first: if we then
2432 0 : // crash, we will drop the in-memory state.
2433 0 :
2434 0 : // Drop persistent state.
2435 0 : self.persistence.delete_tenant(tenant_id).await?;
2436 :
2437 : // Drop in-memory state
2438 : {
2439 0 : let mut locked = self.inner.write().unwrap();
2440 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
2441 :
2442 : // Dereference Scheduler from shards before dropping them
2443 0 : for (_tenant_shard_id, shard) in
2444 0 : tenants.range_mut(TenantShardId::tenant_range(tenant_id))
2445 0 : {
2446 0 : shard.intent.clear(scheduler);
2447 0 : }
2448 :
2449 0 : tenants.retain(|tenant_shard_id, _shard| tenant_shard_id.tenant_id != tenant_id);
2450 0 : tracing::info!(
2451 0 : "Deleted tenant {tenant_id}, now have {} tenants",
2452 0 : locked.tenants.len()
2453 : );
2454 : };
2455 :
2456 : // Success is represented as 404, to imitate the existing pageserver deletion API
2457 0 : Ok(StatusCode::NOT_FOUND)
2458 0 : }
2459 :
2460 : /// Naming: this configures the storage controller's policies for a tenant, whereas [`Self::tenant_config_set`] is "set the TenantConfig"
2461 : /// for a tenant. The TenantConfig is passed through to pageservers, whereas this function modifies
2462 : /// the tenant's policies (configuration) within the storage controller
2463 0 : pub(crate) async fn tenant_update_policy(
2464 0 : &self,
2465 0 : tenant_id: TenantId,
2466 0 : req: TenantPolicyRequest,
2467 0 : ) -> Result<(), ApiError> {
2468 : // We require an exclusive lock, because we are updating persistent and in-memory state
2469 0 : let _tenant_lock = trace_exclusive_lock(
2470 0 : &self.tenant_op_locks,
2471 0 : tenant_id,
2472 0 : TenantOperations::UpdatePolicy,
2473 0 : )
2474 0 : .await;
2475 :
2476 0 : failpoint_support::sleep_millis_async!("tenant-update-policy-exclusive-lock");
2477 :
2478 : let TenantPolicyRequest {
2479 0 : placement,
2480 0 : scheduling,
2481 0 : } = req;
2482 0 :
2483 0 : self.persistence
2484 0 : .update_tenant_shard(
2485 0 : TenantFilter::Tenant(tenant_id),
2486 0 : placement.clone(),
2487 0 : None,
2488 0 : None,
2489 0 : scheduling,
2490 0 : )
2491 0 : .await?;
2492 :
2493 0 : let mut schedule_context = ScheduleContext::default();
2494 0 : let mut locked = self.inner.write().unwrap();
2495 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
2496 0 : for (shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
2497 0 : if let Some(placement) = &placement {
2498 0 : shard.policy = placement.clone();
2499 0 :
2500 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2501 0 : "Updated placement policy to {placement:?}");
2502 0 : }
2503 :
2504 0 : if let Some(scheduling) = &scheduling {
2505 0 : shard.set_scheduling_policy(*scheduling);
2506 0 :
2507 0 : tracing::info!(tenant_id=%shard_id.tenant_id, shard_id=%shard_id.shard_slug(),
2508 0 : "Updated scheduling policy to {scheduling:?}");
2509 0 : }
2510 :
2511 : // In case scheduling is being switched back on, try it now.
2512 0 : shard.schedule(scheduler, &mut schedule_context).ok();
2513 0 : self.maybe_reconcile_shard(shard, nodes);
2514 : }
2515 :
2516 0 : Ok(())
2517 0 : }
2518 :
2519 0 : pub(crate) async fn tenant_timeline_create(
2520 0 : &self,
2521 0 : tenant_id: TenantId,
2522 0 : mut create_req: TimelineCreateRequest,
2523 0 : ) -> Result<TimelineInfo, ApiError> {
2524 0 : tracing::info!(
2525 0 : "Creating timeline {}/{}",
2526 : tenant_id,
2527 : create_req.new_timeline_id,
2528 : );
2529 :
2530 0 : let _tenant_lock = trace_shared_lock(
2531 0 : &self.tenant_op_locks,
2532 0 : tenant_id,
2533 0 : TenantOperations::TimelineCreate,
2534 0 : )
2535 0 : .await;
2536 :
2537 0 : self.ensure_attached_wait(tenant_id).await?;
2538 :
2539 0 : let mut targets = {
2540 0 : let locked = self.inner.read().unwrap();
2541 0 : let mut targets = Vec::new();
2542 :
2543 0 : for (tenant_shard_id, shard) in
2544 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2545 0 : {
2546 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2547 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2548 0 : })?;
2549 0 : let node = locked
2550 0 : .nodes
2551 0 : .get(&node_id)
2552 0 : .expect("Pageservers may not be deleted while referenced");
2553 0 :
2554 0 : targets.push((*tenant_shard_id, node.clone()));
2555 : }
2556 0 : targets
2557 0 : };
2558 0 :
2559 0 : if targets.is_empty() {
2560 0 : return Err(ApiError::NotFound(
2561 0 : anyhow::anyhow!("Tenant not found").into(),
2562 0 : ));
2563 0 : };
2564 0 : let shard_zero = targets.remove(0);
2565 :
2566 0 : async fn create_one(
2567 0 : tenant_shard_id: TenantShardId,
2568 0 : node: Node,
2569 0 : jwt: Option<String>,
2570 0 : create_req: TimelineCreateRequest,
2571 0 : ) -> Result<TimelineInfo, ApiError> {
2572 0 : tracing::info!(
2573 0 : "Creating timeline on shard {}/{}, attached to node {node}",
2574 : tenant_shard_id,
2575 : create_req.new_timeline_id,
2576 : );
2577 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2578 0 :
2579 0 : client
2580 0 : .timeline_create(tenant_shard_id, &create_req)
2581 0 : .await
2582 0 : .map_err(|e| passthrough_api_error(&node, e))
2583 0 : }
2584 :
2585 : // Because the caller might not provide an explicit LSN, we must do the creation first on a single shard, and then
2586 : // use whatever LSN that shard picked when creating on subsequent shards. We arbitrarily use shard zero as the shard
2587 : // that will get the first creation request, and propagate the LSN to all the >0 shards.
2588 0 : let timeline_info = create_one(
2589 0 : shard_zero.0,
2590 0 : shard_zero.1,
2591 0 : self.config.jwt_token.clone(),
2592 0 : create_req.clone(),
2593 0 : )
2594 0 : .await?;
2595 :
2596 : // Propagate the LSN that shard zero picked, if caller didn't provide one
2597 0 : if create_req.ancestor_timeline_id.is_some() && create_req.ancestor_start_lsn.is_none() {
2598 0 : create_req.ancestor_start_lsn = timeline_info.ancestor_lsn;
2599 0 : }
2600 :
2601 : // Create timeline on remaining shards with number >0
2602 0 : if !targets.is_empty() {
2603 : // If we had multiple shards, issue requests for the remainder now.
2604 0 : let jwt = self.config.jwt_token.clone();
2605 0 : self.tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
2606 0 : let create_req = create_req.clone();
2607 0 : Box::pin(create_one(tenant_shard_id, node, jwt.clone(), create_req))
2608 0 : })
2609 0 : .await?;
2610 0 : }
2611 :
2612 0 : Ok(timeline_info)
2613 0 : }
2614 :
2615 : /// Helper for concurrently calling a pageserver API on a number of shards, such as timeline creation.
2616 : ///
2617 : /// On success, the returned vector contains exactly the same number of elements as the input `locations`.
2618 0 : async fn tenant_for_shards<F, R>(
2619 0 : &self,
2620 0 : locations: Vec<(TenantShardId, Node)>,
2621 0 : mut req_fn: F,
2622 0 : ) -> Result<Vec<R>, ApiError>
2623 0 : where
2624 0 : F: FnMut(
2625 0 : TenantShardId,
2626 0 : Node,
2627 0 : )
2628 0 : -> std::pin::Pin<Box<dyn futures::Future<Output = Result<R, ApiError>> + Send>>,
2629 0 : {
2630 0 : let mut futs = FuturesUnordered::new();
2631 0 : let mut results = Vec::with_capacity(locations.len());
2632 :
2633 0 : for (tenant_shard_id, node) in locations {
2634 0 : futs.push(req_fn(tenant_shard_id, node));
2635 0 : }
2636 :
2637 0 : while let Some(r) = futs.next().await {
2638 0 : results.push(r?);
2639 : }
2640 :
2641 0 : Ok(results)
2642 0 : }
2643 :
2644 : /// Concurrently invoke a pageserver API call on many shards at once
2645 0 : pub(crate) async fn tenant_for_shards_api<T, O, F>(
2646 0 : &self,
2647 0 : locations: Vec<(TenantShardId, Node)>,
2648 0 : op: O,
2649 0 : warn_threshold: u32,
2650 0 : max_retries: u32,
2651 0 : timeout: Duration,
2652 0 : cancel: &CancellationToken,
2653 0 : ) -> Vec<mgmt_api::Result<T>>
2654 0 : where
2655 0 : O: Fn(TenantShardId, PageserverClient) -> F + Copy,
2656 0 : F: std::future::Future<Output = mgmt_api::Result<T>>,
2657 0 : {
2658 0 : let mut futs = FuturesUnordered::new();
2659 0 : let mut results = Vec::with_capacity(locations.len());
2660 :
2661 0 : for (tenant_shard_id, node) in locations {
2662 0 : futs.push(async move {
2663 0 : node.with_client_retries(
2664 0 : |client| op(tenant_shard_id, client),
2665 0 : &self.config.jwt_token,
2666 0 : warn_threshold,
2667 0 : max_retries,
2668 0 : timeout,
2669 0 : cancel,
2670 0 : )
2671 0 : .await
2672 0 : });
2673 0 : }
2674 :
2675 0 : while let Some(r) = futs.next().await {
2676 0 : let r = r.unwrap_or(Err(mgmt_api::Error::Cancelled));
2677 0 : results.push(r);
2678 0 : }
2679 :
2680 0 : results
2681 0 : }
2682 :
2683 0 : pub(crate) async fn tenant_timeline_delete(
2684 0 : &self,
2685 0 : tenant_id: TenantId,
2686 0 : timeline_id: TimelineId,
2687 0 : ) -> Result<StatusCode, ApiError> {
2688 0 : tracing::info!("Deleting timeline {}/{}", tenant_id, timeline_id,);
2689 0 : let _tenant_lock = trace_shared_lock(
2690 0 : &self.tenant_op_locks,
2691 0 : tenant_id,
2692 0 : TenantOperations::TimelineDelete,
2693 0 : )
2694 0 : .await;
2695 :
2696 0 : self.ensure_attached_wait(tenant_id).await?;
2697 :
2698 0 : let mut targets = {
2699 0 : let locked = self.inner.read().unwrap();
2700 0 : let mut targets = Vec::new();
2701 :
2702 0 : for (tenant_shard_id, shard) in
2703 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2704 0 : {
2705 0 : let node_id = shard.intent.get_attached().ok_or_else(|| {
2706 0 : ApiError::InternalServerError(anyhow::anyhow!("Shard not scheduled"))
2707 0 : })?;
2708 0 : let node = locked
2709 0 : .nodes
2710 0 : .get(&node_id)
2711 0 : .expect("Pageservers may not be deleted while referenced");
2712 0 :
2713 0 : targets.push((*tenant_shard_id, node.clone()));
2714 : }
2715 0 : targets
2716 0 : };
2717 0 :
2718 0 : if targets.is_empty() {
2719 0 : return Err(ApiError::NotFound(
2720 0 : anyhow::anyhow!("Tenant not found").into(),
2721 0 : ));
2722 0 : }
2723 0 : let shard_zero = targets.remove(0);
2724 :
2725 0 : async fn delete_one(
2726 0 : tenant_shard_id: TenantShardId,
2727 0 : timeline_id: TimelineId,
2728 0 : node: Node,
2729 0 : jwt: Option<String>,
2730 0 : ) -> Result<StatusCode, ApiError> {
2731 0 : tracing::info!(
2732 0 : "Deleting timeline on shard {tenant_shard_id}/{timeline_id}, attached to node {node}",
2733 : );
2734 :
2735 0 : let client = PageserverClient::new(node.get_id(), node.base_url(), jwt.as_deref());
2736 0 : client
2737 0 : .timeline_delete(tenant_shard_id, timeline_id)
2738 0 : .await
2739 0 : .map_err(|e| {
2740 0 : ApiError::InternalServerError(anyhow::anyhow!(
2741 0 : "Error deleting timeline {timeline_id} on {tenant_shard_id} on node {node}: {e}",
2742 0 : ))
2743 0 : })
2744 0 : }
2745 :
2746 0 : let statuses = self
2747 0 : .tenant_for_shards(targets, |tenant_shard_id: TenantShardId, node: Node| {
2748 0 : Box::pin(delete_one(
2749 0 : tenant_shard_id,
2750 0 : timeline_id,
2751 0 : node,
2752 0 : self.config.jwt_token.clone(),
2753 0 : ))
2754 0 : })
2755 0 : .await?;
2756 :
2757 : // If any shards >0 haven't finished deletion yet, don't start deletion on shard zero
2758 0 : if statuses.iter().any(|s| s != &StatusCode::NOT_FOUND) {
2759 0 : return Ok(StatusCode::ACCEPTED);
2760 0 : }
2761 :
2762 : // Delete shard zero last: this is not strictly necessary, but since a caller's GET on a timeline will be routed
2763 : // to shard zero, it gives a more obvious behavior that a GET returns 404 once the deletion is done.
2764 0 : let shard_zero_status = delete_one(
2765 0 : shard_zero.0,
2766 0 : timeline_id,
2767 0 : shard_zero.1,
2768 0 : self.config.jwt_token.clone(),
2769 0 : )
2770 0 : .await?;
2771 :
2772 0 : Ok(shard_zero_status)
2773 0 : }
2774 :
2775 : /// When you need to send an HTTP request to the pageserver that holds shard0 of a tenant, this
2776 : /// function looks up and returns node. If the tenant isn't found, returns Err(ApiError::NotFound)
2777 0 : pub(crate) fn tenant_shard0_node(
2778 0 : &self,
2779 0 : tenant_id: TenantId,
2780 0 : ) -> Result<(Node, TenantShardId), ApiError> {
2781 0 : let locked = self.inner.read().unwrap();
2782 0 : let Some((tenant_shard_id, shard)) = locked
2783 0 : .tenants
2784 0 : .range(TenantShardId::tenant_range(tenant_id))
2785 0 : .next()
2786 : else {
2787 0 : return Err(ApiError::NotFound(
2788 0 : anyhow::anyhow!("Tenant {tenant_id} not found").into(),
2789 0 : ));
2790 : };
2791 :
2792 : // TODO: should use the ID last published to compute_hook, rather than the intent: the intent might
2793 : // point to somewhere we haven't attached yet.
2794 0 : let Some(node_id) = shard.intent.get_attached() else {
2795 0 : tracing::warn!(
2796 0 : tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
2797 0 : "Shard not scheduled (policy {:?}), cannot generate pass-through URL",
2798 : shard.policy
2799 : );
2800 0 : return Err(ApiError::Conflict(
2801 0 : "Cannot call timeline API on non-attached tenant".to_string(),
2802 0 : ));
2803 : };
2804 :
2805 0 : let Some(node) = locked.nodes.get(node_id) else {
2806 : // This should never happen
2807 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2808 0 : "Shard refers to nonexistent node"
2809 0 : )));
2810 : };
2811 :
2812 0 : Ok((node.clone(), *tenant_shard_id))
2813 0 : }
2814 :
2815 0 : pub(crate) fn tenant_locate(
2816 0 : &self,
2817 0 : tenant_id: TenantId,
2818 0 : ) -> Result<TenantLocateResponse, ApiError> {
2819 0 : let locked = self.inner.read().unwrap();
2820 0 : tracing::info!("Locating shards for tenant {tenant_id}");
2821 :
2822 0 : let mut result = Vec::new();
2823 0 : let mut shard_params: Option<ShardParameters> = None;
2824 :
2825 0 : for (tenant_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id))
2826 : {
2827 0 : let node_id =
2828 0 : shard
2829 0 : .intent
2830 0 : .get_attached()
2831 0 : .ok_or(ApiError::BadRequest(anyhow::anyhow!(
2832 0 : "Cannot locate a tenant that is not attached"
2833 0 : )))?;
2834 :
2835 0 : let node = locked
2836 0 : .nodes
2837 0 : .get(&node_id)
2838 0 : .expect("Pageservers may not be deleted while referenced");
2839 0 :
2840 0 : result.push(node.shard_location(*tenant_shard_id));
2841 0 :
2842 0 : match &shard_params {
2843 0 : None => {
2844 0 : shard_params = Some(ShardParameters {
2845 0 : stripe_size: shard.shard.stripe_size,
2846 0 : count: shard.shard.count,
2847 0 : });
2848 0 : }
2849 0 : Some(params) => {
2850 0 : if params.stripe_size != shard.shard.stripe_size {
2851 : // This should never happen. We enforce at runtime because it's simpler than
2852 : // adding an extra per-tenant data structure to store the things that should be the same
2853 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
2854 0 : "Inconsistent shard stripe size parameters!"
2855 0 : )));
2856 0 : }
2857 : }
2858 : }
2859 : }
2860 :
2861 0 : if result.is_empty() {
2862 0 : return Err(ApiError::NotFound(
2863 0 : anyhow::anyhow!("No shards for this tenant ID found").into(),
2864 0 : ));
2865 0 : }
2866 0 : let shard_params = shard_params.expect("result is non-empty, therefore this is set");
2867 0 : tracing::info!(
2868 0 : "Located tenant {} with params {:?} on shards {}",
2869 0 : tenant_id,
2870 0 : shard_params,
2871 0 : result
2872 0 : .iter()
2873 0 : .map(|s| format!("{:?}", s))
2874 0 : .collect::<Vec<_>>()
2875 0 : .join(",")
2876 : );
2877 :
2878 0 : Ok(TenantLocateResponse {
2879 0 : shards: result,
2880 0 : shard_params,
2881 0 : })
2882 0 : }
2883 :
2884 : /// Returns None if the input iterator of shards does not include a shard with number=0
2885 0 : fn tenant_describe_impl<'a>(
2886 0 : &self,
2887 0 : shards: impl Iterator<Item = &'a TenantShard>,
2888 0 : ) -> Option<TenantDescribeResponse> {
2889 0 : let mut shard_zero = None;
2890 0 : let mut describe_shards = Vec::new();
2891 :
2892 0 : for shard in shards {
2893 0 : if shard.tenant_shard_id.is_shard_zero() {
2894 0 : shard_zero = Some(shard);
2895 0 : }
2896 :
2897 0 : describe_shards.push(TenantDescribeResponseShard {
2898 0 : tenant_shard_id: shard.tenant_shard_id,
2899 0 : node_attached: *shard.intent.get_attached(),
2900 0 : node_secondary: shard.intent.get_secondary().to_vec(),
2901 0 : last_error: shard
2902 0 : .last_error
2903 0 : .lock()
2904 0 : .unwrap()
2905 0 : .as_ref()
2906 0 : .map(|e| format!("{e}"))
2907 0 : .unwrap_or("".to_string())
2908 0 : .clone(),
2909 0 : is_reconciling: shard.reconciler.is_some(),
2910 0 : is_pending_compute_notification: shard.pending_compute_notification,
2911 0 : is_splitting: matches!(shard.splitting, SplitState::Splitting),
2912 0 : scheduling_policy: *shard.get_scheduling_policy(),
2913 : })
2914 : }
2915 :
2916 0 : let shard_zero = shard_zero?;
2917 :
2918 0 : Some(TenantDescribeResponse {
2919 0 : tenant_id: shard_zero.tenant_shard_id.tenant_id,
2920 0 : shards: describe_shards,
2921 0 : stripe_size: shard_zero.shard.stripe_size,
2922 0 : policy: shard_zero.policy.clone(),
2923 0 : config: shard_zero.config.clone(),
2924 0 : })
2925 0 : }
2926 :
2927 0 : pub(crate) fn tenant_describe(
2928 0 : &self,
2929 0 : tenant_id: TenantId,
2930 0 : ) -> Result<TenantDescribeResponse, ApiError> {
2931 0 : let locked = self.inner.read().unwrap();
2932 0 :
2933 0 : self.tenant_describe_impl(
2934 0 : locked
2935 0 : .tenants
2936 0 : .range(TenantShardId::tenant_range(tenant_id))
2937 0 : .map(|(_k, v)| v),
2938 0 : )
2939 0 : .ok_or_else(|| ApiError::NotFound(anyhow::anyhow!("Tenant {tenant_id} not found").into()))
2940 0 : }
2941 :
2942 0 : pub(crate) fn tenant_list(&self) -> Vec<TenantDescribeResponse> {
2943 0 : let locked = self.inner.read().unwrap();
2944 0 :
2945 0 : let mut result = Vec::new();
2946 0 : for (_tenant_id, tenant_shards) in
2947 0 : &locked.tenants.iter().group_by(|(id, _shard)| id.tenant_id)
2948 0 : {
2949 0 : result.push(
2950 0 : self.tenant_describe_impl(tenant_shards.map(|(_k, v)| v))
2951 0 : .expect("Groups are always non-empty"),
2952 0 : );
2953 0 : }
2954 :
2955 0 : result
2956 0 : }
2957 :
2958 0 : #[instrument(skip_all, fields(tenant_id=%op.tenant_id))]
2959 : async fn abort_tenant_shard_split(
2960 : &self,
2961 : op: &TenantShardSplitAbort,
2962 : ) -> Result<(), TenantShardSplitAbortError> {
2963 : // Cleaning up a split:
2964 : // - Parent shards are not destroyed during a split, just detached.
2965 : // - Failed pageserver split API calls can leave the remote node with just the parent attached,
2966 : // just the children attached, or both.
2967 : //
2968 : // Therefore our work to do is to:
2969 : // 1. Clean up storage controller's internal state to just refer to parents, no children
2970 : // 2. Call out to pageservers to ensure that children are detached
2971 : // 3. Call out to pageservers to ensure that parents are attached.
2972 : //
2973 : // Crash safety:
2974 : // - If the storage controller stops running during this cleanup *after* clearing the splitting state
2975 : // from our database, then [`Self::startup_reconcile`] will regard child attachments as garbage
2976 : // and detach them.
2977 : // - TODO: If the storage controller stops running during this cleanup *before* clearing the splitting state
2978 : // from our database, then we will re-enter this cleanup routine on startup.
2979 :
2980 : let TenantShardSplitAbort {
2981 : tenant_id,
2982 : new_shard_count,
2983 : new_stripe_size,
2984 : ..
2985 : } = op;
2986 :
2987 : // First abort persistent state, if any exists.
2988 : match self
2989 : .persistence
2990 : .abort_shard_split(*tenant_id, *new_shard_count)
2991 : .await?
2992 : {
2993 : AbortShardSplitStatus::Aborted => {
2994 : // Proceed to roll back any child shards created on pageservers
2995 : }
2996 : AbortShardSplitStatus::Complete => {
2997 : // The split completed (we might hit that path if e.g. our database transaction
2998 : // to write the completion landed in the database, but we dropped connection
2999 : // before seeing the result).
3000 : //
3001 : // We must update in-memory state to reflect the successful split.
3002 : self.tenant_shard_split_commit_inmem(
3003 : *tenant_id,
3004 : *new_shard_count,
3005 : *new_stripe_size,
3006 : );
3007 : return Ok(());
3008 : }
3009 : }
3010 :
3011 : // Clean up in-memory state, and accumulate the list of child locations that need detaching
3012 : let detach_locations: Vec<(Node, TenantShardId)> = {
3013 : let mut detach_locations = Vec::new();
3014 : let mut locked = self.inner.write().unwrap();
3015 : let (nodes, tenants, scheduler) = locked.parts_mut();
3016 :
3017 : for (tenant_shard_id, shard) in
3018 : tenants.range_mut(TenantShardId::tenant_range(op.tenant_id))
3019 : {
3020 : if shard.shard.count == op.new_shard_count {
3021 : // Surprising: the phase of [`Self::do_tenant_shard_split`] which inserts child shards in-memory
3022 : // is infallible, so if we got an error we shouldn't have got that far.
3023 : tracing::warn!(
3024 : "During split abort, child shard {tenant_shard_id} found in-memory"
3025 : );
3026 : continue;
3027 : }
3028 :
3029 : // Add the children of this shard to this list of things to detach
3030 : if let Some(node_id) = shard.intent.get_attached() {
3031 : for child_id in tenant_shard_id.split(*new_shard_count) {
3032 : detach_locations.push((
3033 : nodes
3034 : .get(node_id)
3035 : .expect("Intent references nonexistent node")
3036 : .clone(),
3037 : child_id,
3038 : ));
3039 : }
3040 : } else {
3041 : tracing::warn!(
3042 : "During split abort, shard {tenant_shard_id} has no attached location"
3043 : );
3044 : }
3045 :
3046 : tracing::info!("Restoring parent shard {tenant_shard_id}");
3047 : shard.splitting = SplitState::Idle;
3048 : if let Err(e) = shard.schedule(scheduler, &mut ScheduleContext::default()) {
3049 : // If this shard can't be scheduled now (perhaps due to offline nodes or
3050 : // capacity issues), that must not prevent us rolling back a split. In this
3051 : // case it should be eventually scheduled in the background.
3052 : tracing::warn!("Failed to schedule {tenant_shard_id} during shard abort: {e}")
3053 : }
3054 :
3055 : self.maybe_reconcile_shard(shard, nodes);
3056 : }
3057 :
3058 : // We don't expect any new_shard_count shards to exist here, but drop them just in case
3059 0 : tenants.retain(|_id, s| s.shard.count != *new_shard_count);
3060 :
3061 : detach_locations
3062 : };
3063 :
3064 : for (node, child_id) in detach_locations {
3065 : if !node.is_available() {
3066 : // An unavailable node cannot be cleaned up now: to avoid blocking forever, we will permit this, and
3067 : // rely on the reconciliation that happens when a node transitions to Active to clean up. Since we have
3068 : // removed child shards from our in-memory state and database, the reconciliation will implicitly remove
3069 : // them from the node.
3070 : tracing::warn!("Node {node} unavailable, can't clean up during split abort. It will be cleaned up when it is reactivated.");
3071 : continue;
3072 : }
3073 :
3074 : // Detach the remote child. If the pageserver split API call is still in progress, this call will get
3075 : // a 503 and retry, up to our limit.
3076 : tracing::info!("Detaching {child_id} on {node}...");
3077 : match node
3078 : .with_client_retries(
3079 0 : |client| async move {
3080 0 : let config = LocationConfig {
3081 0 : mode: LocationConfigMode::Detached,
3082 0 : generation: None,
3083 0 : secondary_conf: None,
3084 0 : shard_number: child_id.shard_number.0,
3085 0 : shard_count: child_id.shard_count.literal(),
3086 0 : // Stripe size and tenant config don't matter when detaching
3087 0 : shard_stripe_size: 0,
3088 0 : tenant_conf: TenantConfig::default(),
3089 0 : };
3090 0 :
3091 0 : client.location_config(child_id, config, None, false).await
3092 0 : },
3093 : &self.config.jwt_token,
3094 : 1,
3095 : 10,
3096 : Duration::from_secs(5),
3097 : &self.cancel,
3098 : )
3099 : .await
3100 : {
3101 : Some(Ok(_)) => {}
3102 : Some(Err(e)) => {
3103 : // We failed to communicate with the remote node. This is problematic: we may be
3104 : // leaving it with a rogue child shard.
3105 : tracing::warn!(
3106 : "Failed to detach child {child_id} from node {node} during abort"
3107 : );
3108 : return Err(e.into());
3109 : }
3110 : None => {
3111 : // Cancellation: we were shutdown or the node went offline. Shutdown is fine, we'll
3112 : // clean up on restart. The node going offline requires a retry.
3113 : return Err(TenantShardSplitAbortError::Unavailable);
3114 : }
3115 : };
3116 : }
3117 :
3118 : tracing::info!("Successfully aborted split");
3119 : Ok(())
3120 : }
3121 :
3122 : /// Infallible final stage of [`Self::tenant_shard_split`]: update the contents
3123 : /// of the tenant map to reflect the child shards that exist after the split.
3124 0 : fn tenant_shard_split_commit_inmem(
3125 0 : &self,
3126 0 : tenant_id: TenantId,
3127 0 : new_shard_count: ShardCount,
3128 0 : new_stripe_size: Option<ShardStripeSize>,
3129 0 : ) -> (
3130 0 : TenantShardSplitResponse,
3131 0 : Vec<(TenantShardId, NodeId, ShardStripeSize)>,
3132 0 : Vec<ReconcilerWaiter>,
3133 0 : ) {
3134 0 : let mut response = TenantShardSplitResponse {
3135 0 : new_shards: Vec::new(),
3136 0 : };
3137 0 : let mut child_locations = Vec::new();
3138 0 : let mut waiters = Vec::new();
3139 0 :
3140 0 : {
3141 0 : let mut locked = self.inner.write().unwrap();
3142 0 :
3143 0 : let parent_ids = locked
3144 0 : .tenants
3145 0 : .range(TenantShardId::tenant_range(tenant_id))
3146 0 : .map(|(shard_id, _)| *shard_id)
3147 0 : .collect::<Vec<_>>();
3148 0 :
3149 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3150 0 : for parent_id in parent_ids {
3151 0 : let child_ids = parent_id.split(new_shard_count);
3152 :
3153 0 : let (pageserver, generation, policy, parent_ident, config) = {
3154 0 : let mut old_state = tenants
3155 0 : .remove(&parent_id)
3156 0 : .expect("It was present, we just split it");
3157 0 :
3158 0 : // A non-splitting state is impossible, because [`Self::tenant_shard_split`] holds
3159 0 : // a TenantId lock and passes it through to [`TenantShardSplitAbort`] in case of cleanup:
3160 0 : // nothing else can clear this.
3161 0 : assert!(matches!(old_state.splitting, SplitState::Splitting));
3162 :
3163 0 : let old_attached = old_state.intent.get_attached().unwrap();
3164 0 : old_state.intent.clear(scheduler);
3165 0 : let generation = old_state.generation.expect("Shard must have been attached");
3166 0 : (
3167 0 : old_attached,
3168 0 : generation,
3169 0 : old_state.policy,
3170 0 : old_state.shard,
3171 0 : old_state.config,
3172 0 : )
3173 0 : };
3174 0 :
3175 0 : let mut schedule_context = ScheduleContext::default();
3176 0 : for child in child_ids {
3177 0 : let mut child_shard = parent_ident;
3178 0 : child_shard.number = child.shard_number;
3179 0 : child_shard.count = child.shard_count;
3180 0 : if let Some(stripe_size) = new_stripe_size {
3181 0 : child_shard.stripe_size = stripe_size;
3182 0 : }
3183 :
3184 0 : let mut child_observed: HashMap<NodeId, ObservedStateLocation> = HashMap::new();
3185 0 : child_observed.insert(
3186 0 : pageserver,
3187 0 : ObservedStateLocation {
3188 0 : conf: Some(attached_location_conf(
3189 0 : generation,
3190 0 : &child_shard,
3191 0 : &config,
3192 0 : matches!(policy, PlacementPolicy::Attached(n) if n > 0),
3193 : )),
3194 : },
3195 : );
3196 :
3197 0 : let mut child_state = TenantShard::new(child, child_shard, policy.clone());
3198 0 : child_state.intent = IntentState::single(scheduler, Some(pageserver));
3199 0 : child_state.observed = ObservedState {
3200 0 : locations: child_observed,
3201 0 : };
3202 0 : child_state.generation = Some(generation);
3203 0 : child_state.config = config.clone();
3204 0 :
3205 0 : // The child's TenantShard::splitting is intentionally left at the default value of Idle,
3206 0 : // as at this point in the split process we have succeeded and this part is infallible:
3207 0 : // we will never need to do any special recovery from this state.
3208 0 :
3209 0 : child_locations.push((child, pageserver, child_shard.stripe_size));
3210 :
3211 0 : if let Err(e) = child_state.schedule(scheduler, &mut schedule_context) {
3212 : // This is not fatal, because we've implicitly already got an attached
3213 : // location for the child shard. Failure here just means we couldn't
3214 : // find a secondary (e.g. because cluster is overloaded).
3215 0 : tracing::warn!("Failed to schedule child shard {child}: {e}");
3216 0 : }
3217 : // In the background, attach secondary locations for the new shards
3218 0 : if let Some(waiter) = self.maybe_reconcile_shard(&mut child_state, nodes) {
3219 0 : waiters.push(waiter);
3220 0 : }
3221 :
3222 0 : tenants.insert(child, child_state);
3223 0 : response.new_shards.push(child);
3224 : }
3225 : }
3226 0 : (response, child_locations, waiters)
3227 0 : }
3228 0 : }
3229 :
3230 0 : async fn tenant_shard_split_start_secondaries(
3231 0 : &self,
3232 0 : tenant_id: TenantId,
3233 0 : waiters: Vec<ReconcilerWaiter>,
3234 0 : ) {
3235 : // Wait for initial reconcile of child shards, this creates the secondary locations
3236 0 : if let Err(e) = self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
3237 : // This is not a failure to split: it's some issue reconciling the new child shards, perhaps
3238 : // their secondaries couldn't be attached.
3239 0 : tracing::warn!("Failed to reconcile after split: {e}");
3240 0 : return;
3241 0 : }
3242 :
3243 : // Take the state lock to discover the attached & secondary intents for all shards
3244 0 : let (attached, secondary) = {
3245 0 : let locked = self.inner.read().unwrap();
3246 0 : let mut attached = Vec::new();
3247 0 : let mut secondary = Vec::new();
3248 :
3249 0 : for (tenant_shard_id, shard) in
3250 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3251 : {
3252 0 : let Some(node_id) = shard.intent.get_attached() else {
3253 : // Unexpected. Race with a PlacementPolicy change?
3254 0 : tracing::warn!(
3255 0 : "No attached node on {tenant_shard_id} immediately after shard split!"
3256 : );
3257 0 : continue;
3258 : };
3259 :
3260 0 : let Some(secondary_node_id) = shard.intent.get_secondary().first() else {
3261 : // No secondary location. Nothing for us to do.
3262 0 : continue;
3263 : };
3264 :
3265 0 : let attached_node = locked
3266 0 : .nodes
3267 0 : .get(node_id)
3268 0 : .expect("Pageservers may not be deleted while referenced");
3269 0 :
3270 0 : let secondary_node = locked
3271 0 : .nodes
3272 0 : .get(secondary_node_id)
3273 0 : .expect("Pageservers may not be deleted while referenced");
3274 0 :
3275 0 : attached.push((*tenant_shard_id, attached_node.clone()));
3276 0 : secondary.push((*tenant_shard_id, secondary_node.clone()));
3277 : }
3278 0 : (attached, secondary)
3279 0 : };
3280 0 :
3281 0 : if secondary.is_empty() {
3282 : // No secondary locations; nothing for us to do
3283 0 : return;
3284 0 : }
3285 :
3286 0 : for result in self
3287 0 : .tenant_for_shards_api(
3288 0 : attached,
3289 0 : |tenant_shard_id, client| async move {
3290 0 : client.tenant_heatmap_upload(tenant_shard_id).await
3291 0 : },
3292 0 : 1,
3293 0 : 1,
3294 0 : SHORT_RECONCILE_TIMEOUT,
3295 0 : &self.cancel,
3296 0 : )
3297 0 : .await
3298 : {
3299 0 : if let Err(e) = result {
3300 0 : tracing::warn!("Error calling heatmap upload after shard split: {e}");
3301 0 : return;
3302 0 : }
3303 : }
3304 :
3305 0 : for result in self
3306 0 : .tenant_for_shards_api(
3307 0 : secondary,
3308 0 : |tenant_shard_id, client| async move {
3309 0 : client
3310 0 : .tenant_secondary_download(tenant_shard_id, Some(Duration::ZERO))
3311 0 : .await
3312 0 : },
3313 0 : 1,
3314 0 : 1,
3315 0 : SHORT_RECONCILE_TIMEOUT,
3316 0 : &self.cancel,
3317 0 : )
3318 0 : .await
3319 : {
3320 0 : if let Err(e) = result {
3321 0 : tracing::warn!("Error calling secondary download after shard split: {e}");
3322 0 : return;
3323 0 : }
3324 : }
3325 0 : }
3326 :
3327 0 : pub(crate) async fn tenant_shard_split(
3328 0 : &self,
3329 0 : tenant_id: TenantId,
3330 0 : split_req: TenantShardSplitRequest,
3331 0 : ) -> Result<TenantShardSplitResponse, ApiError> {
3332 : // TODO: return 503 if we get stuck waiting for this lock
3333 : // (issue https://github.com/neondatabase/neon/issues/7108)
3334 0 : let _tenant_lock = trace_exclusive_lock(
3335 0 : &self.tenant_op_locks,
3336 0 : tenant_id,
3337 0 : TenantOperations::ShardSplit,
3338 0 : )
3339 0 : .await;
3340 :
3341 0 : let new_shard_count = ShardCount::new(split_req.new_shard_count);
3342 0 : let new_stripe_size = split_req.new_stripe_size;
3343 :
3344 : // Validate the request and construct parameters. This phase is fallible, but does not require
3345 : // rollback on errors, as it does no I/O and mutates no state.
3346 0 : let shard_split_params = match self.prepare_tenant_shard_split(tenant_id, split_req)? {
3347 0 : ShardSplitAction::NoOp(resp) => return Ok(resp),
3348 0 : ShardSplitAction::Split(params) => params,
3349 : };
3350 :
3351 : // Execute this split: this phase mutates state and does remote I/O on pageservers. If it fails,
3352 : // we must roll back.
3353 0 : let r = self
3354 0 : .do_tenant_shard_split(tenant_id, shard_split_params)
3355 0 : .await;
3356 :
3357 0 : let (response, waiters) = match r {
3358 0 : Ok(r) => r,
3359 0 : Err(e) => {
3360 0 : // Split might be part-done, we must do work to abort it.
3361 0 : tracing::warn!("Enqueuing background abort of split on {tenant_id}");
3362 0 : self.abort_tx
3363 0 : .send(TenantShardSplitAbort {
3364 0 : tenant_id,
3365 0 : new_shard_count,
3366 0 : new_stripe_size,
3367 0 : _tenant_lock,
3368 0 : })
3369 0 : // Ignore error sending: that just means we're shutting down: aborts are ephemeral so it's fine to drop it.
3370 0 : .ok();
3371 0 : return Err(e);
3372 : }
3373 : };
3374 :
3375 : // The split is now complete. As an optimization, we will trigger all the child shards to upload
3376 : // a heatmap immediately, and all their secondary locations to start downloading: this avoids waiting
3377 : // for the background heatmap/download interval before secondaries get warm enough to migrate shards
3378 : // in [`Self::optimize_all`]
3379 0 : self.tenant_shard_split_start_secondaries(tenant_id, waiters)
3380 0 : .await;
3381 0 : Ok(response)
3382 0 : }
3383 :
3384 0 : fn prepare_tenant_shard_split(
3385 0 : &self,
3386 0 : tenant_id: TenantId,
3387 0 : split_req: TenantShardSplitRequest,
3388 0 : ) -> Result<ShardSplitAction, ApiError> {
3389 0 : fail::fail_point!("shard-split-validation", |_| Err(ApiError::BadRequest(
3390 0 : anyhow::anyhow!("failpoint")
3391 0 : )));
3392 :
3393 0 : let mut policy = None;
3394 0 : let mut config = None;
3395 0 : let mut shard_ident = None;
3396 : // Validate input, and calculate which shards we will create
3397 0 : let (old_shard_count, targets) =
3398 : {
3399 0 : let locked = self.inner.read().unwrap();
3400 0 :
3401 0 : let pageservers = locked.nodes.clone();
3402 0 :
3403 0 : let mut targets = Vec::new();
3404 0 :
3405 0 : // In case this is a retry, count how many already-split shards we found
3406 0 : let mut children_found = Vec::new();
3407 0 : let mut old_shard_count = None;
3408 :
3409 0 : for (tenant_shard_id, shard) in
3410 0 : locked.tenants.range(TenantShardId::tenant_range(tenant_id))
3411 : {
3412 0 : match shard.shard.count.count().cmp(&split_req.new_shard_count) {
3413 : Ordering::Equal => {
3414 : // Already split this
3415 0 : children_found.push(*tenant_shard_id);
3416 0 : continue;
3417 : }
3418 : Ordering::Greater => {
3419 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3420 0 : "Requested count {} but already have shards at count {}",
3421 0 : split_req.new_shard_count,
3422 0 : shard.shard.count.count()
3423 0 : )));
3424 : }
3425 0 : Ordering::Less => {
3426 0 : // Fall through: this shard has lower count than requested,
3427 0 : // is a candidate for splitting.
3428 0 : }
3429 0 : }
3430 0 :
3431 0 : match old_shard_count {
3432 0 : None => old_shard_count = Some(shard.shard.count),
3433 0 : Some(old_shard_count) => {
3434 0 : if old_shard_count != shard.shard.count {
3435 : // We may hit this case if a caller asked for two splits to
3436 : // different sizes, before the first one is complete.
3437 : // e.g. 1->2, 2->4, where the 4 call comes while we have a mixture
3438 : // of shard_count=1 and shard_count=2 shards in the map.
3439 0 : return Err(ApiError::Conflict(
3440 0 : "Cannot split, currently mid-split".to_string(),
3441 0 : ));
3442 0 : }
3443 : }
3444 : }
3445 0 : if policy.is_none() {
3446 0 : policy = Some(shard.policy.clone());
3447 0 : }
3448 0 : if shard_ident.is_none() {
3449 0 : shard_ident = Some(shard.shard);
3450 0 : }
3451 0 : if config.is_none() {
3452 0 : config = Some(shard.config.clone());
3453 0 : }
3454 :
3455 0 : if tenant_shard_id.shard_count.count() == split_req.new_shard_count {
3456 0 : tracing::info!(
3457 0 : "Tenant shard {} already has shard count {}",
3458 : tenant_shard_id,
3459 : split_req.new_shard_count
3460 : );
3461 0 : continue;
3462 0 : }
3463 :
3464 0 : let node_id = shard.intent.get_attached().ok_or(ApiError::BadRequest(
3465 0 : anyhow::anyhow!("Cannot split a tenant that is not attached"),
3466 0 : ))?;
3467 :
3468 0 : let node = pageservers
3469 0 : .get(&node_id)
3470 0 : .expect("Pageservers may not be deleted while referenced");
3471 0 :
3472 0 : targets.push(ShardSplitTarget {
3473 0 : parent_id: *tenant_shard_id,
3474 0 : node: node.clone(),
3475 0 : child_ids: tenant_shard_id
3476 0 : .split(ShardCount::new(split_req.new_shard_count)),
3477 0 : });
3478 : }
3479 :
3480 0 : if targets.is_empty() {
3481 0 : if children_found.len() == split_req.new_shard_count as usize {
3482 0 : return Ok(ShardSplitAction::NoOp(TenantShardSplitResponse {
3483 0 : new_shards: children_found,
3484 0 : }));
3485 : } else {
3486 : // No shards found to split, and no existing children found: the
3487 : // tenant doesn't exist at all.
3488 0 : return Err(ApiError::NotFound(
3489 0 : anyhow::anyhow!("Tenant {} not found", tenant_id).into(),
3490 0 : ));
3491 : }
3492 0 : }
3493 0 :
3494 0 : (old_shard_count, targets)
3495 0 : };
3496 0 :
3497 0 : // unwrap safety: we would have returned above if we didn't find at least one shard to split
3498 0 : let old_shard_count = old_shard_count.unwrap();
3499 0 : let shard_ident = if let Some(new_stripe_size) = split_req.new_stripe_size {
3500 : // This ShardIdentity will be used as the template for all children, so this implicitly
3501 : // applies the new stripe size to the children.
3502 0 : let mut shard_ident = shard_ident.unwrap();
3503 0 : if shard_ident.count.count() > 1 && shard_ident.stripe_size != new_stripe_size {
3504 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("Attempted to change stripe size ({:?}->{new_stripe_size:?}) on a tenant with multiple shards", shard_ident.stripe_size)));
3505 0 : }
3506 0 :
3507 0 : shard_ident.stripe_size = new_stripe_size;
3508 0 : tracing::info!("applied stripe size {}", shard_ident.stripe_size.0);
3509 0 : shard_ident
3510 : } else {
3511 0 : shard_ident.unwrap()
3512 : };
3513 0 : let policy = policy.unwrap();
3514 0 : let config = config.unwrap();
3515 0 :
3516 0 : Ok(ShardSplitAction::Split(ShardSplitParams {
3517 0 : old_shard_count,
3518 0 : new_shard_count: ShardCount::new(split_req.new_shard_count),
3519 0 : new_stripe_size: split_req.new_stripe_size,
3520 0 : targets,
3521 0 : policy,
3522 0 : config,
3523 0 : shard_ident,
3524 0 : }))
3525 0 : }
3526 :
3527 0 : async fn do_tenant_shard_split(
3528 0 : &self,
3529 0 : tenant_id: TenantId,
3530 0 : params: ShardSplitParams,
3531 0 : ) -> Result<(TenantShardSplitResponse, Vec<ReconcilerWaiter>), ApiError> {
3532 0 : // FIXME: we have dropped self.inner lock, and not yet written anything to the database: another
3533 0 : // request could occur here, deleting or mutating the tenant. begin_shard_split checks that the
3534 0 : // parent shards exist as expected, but it would be neater to do the above pre-checks within the
3535 0 : // same database transaction rather than pre-check in-memory and then maybe-fail the database write.
3536 0 : // (https://github.com/neondatabase/neon/issues/6676)
3537 0 :
3538 0 : let ShardSplitParams {
3539 0 : old_shard_count,
3540 0 : new_shard_count,
3541 0 : new_stripe_size,
3542 0 : mut targets,
3543 0 : policy,
3544 0 : config,
3545 0 : shard_ident,
3546 0 : } = params;
3547 :
3548 : // Drop any secondary locations: pageservers do not support splitting these, and in any case the
3549 : // end-state for a split tenant will usually be to have secondary locations on different nodes.
3550 : // The reconciliation calls in this block also implicitly cancel+barrier wrt any ongoing reconciliation
3551 : // at the time of split.
3552 0 : let waiters = {
3553 0 : let mut locked = self.inner.write().unwrap();
3554 0 : let mut waiters = Vec::new();
3555 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3556 0 : for target in &mut targets {
3557 0 : let Some(shard) = tenants.get_mut(&target.parent_id) else {
3558 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
3559 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3560 0 : "Shard {} not found",
3561 0 : target.parent_id
3562 0 : )));
3563 : };
3564 :
3565 0 : if shard.intent.get_attached() != &Some(target.node.get_id()) {
3566 : // Paranoia check: this shouldn't happen: we have the oplock for this tenant ID.
3567 0 : return Err(ApiError::Conflict(format!(
3568 0 : "Shard {} unexpectedly rescheduled during split",
3569 0 : target.parent_id
3570 0 : )));
3571 0 : }
3572 0 :
3573 0 : // Irrespective of PlacementPolicy, clear secondary locations from intent
3574 0 : shard.intent.clear_secondary(scheduler);
3575 :
3576 : // Run Reconciler to execute detach fo secondary locations.
3577 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
3578 0 : waiters.push(waiter);
3579 0 : }
3580 : }
3581 0 : waiters
3582 0 : };
3583 0 : self.await_waiters(waiters, RECONCILE_TIMEOUT).await?;
3584 :
3585 : // Before creating any new child shards in memory or on the pageservers, persist them: this
3586 : // enables us to ensure that we will always be able to clean up if something goes wrong. This also
3587 : // acts as the protection against two concurrent attempts to split: one of them will get a database
3588 : // error trying to insert the child shards.
3589 0 : let mut child_tsps = Vec::new();
3590 0 : for target in &targets {
3591 0 : let mut this_child_tsps = Vec::new();
3592 0 : for child in &target.child_ids {
3593 0 : let mut child_shard = shard_ident;
3594 0 : child_shard.number = child.shard_number;
3595 0 : child_shard.count = child.shard_count;
3596 0 :
3597 0 : tracing::info!(
3598 0 : "Create child shard persistence with stripe size {}",
3599 : shard_ident.stripe_size.0
3600 : );
3601 :
3602 0 : this_child_tsps.push(TenantShardPersistence {
3603 0 : tenant_id: child.tenant_id.to_string(),
3604 0 : shard_number: child.shard_number.0 as i32,
3605 0 : shard_count: child.shard_count.literal() as i32,
3606 0 : shard_stripe_size: shard_ident.stripe_size.0 as i32,
3607 0 : // Note: this generation is a placeholder, [`Persistence::begin_shard_split`] will
3608 0 : // populate the correct generation as part of its transaction, to protect us
3609 0 : // against racing with changes in the state of the parent.
3610 0 : generation: None,
3611 0 : generation_pageserver: Some(target.node.get_id().0 as i64),
3612 0 : placement_policy: serde_json::to_string(&policy).unwrap(),
3613 0 : config: serde_json::to_string(&config).unwrap(),
3614 0 : splitting: SplitState::Splitting,
3615 0 :
3616 0 : // Scheduling policies do not carry through to children
3617 0 : scheduling_policy: serde_json::to_string(&ShardSchedulingPolicy::default())
3618 0 : .unwrap(),
3619 0 : });
3620 : }
3621 :
3622 0 : child_tsps.push((target.parent_id, this_child_tsps));
3623 : }
3624 :
3625 0 : if let Err(e) = self
3626 0 : .persistence
3627 0 : .begin_shard_split(old_shard_count, tenant_id, child_tsps)
3628 0 : .await
3629 : {
3630 0 : match e {
3631 : DatabaseError::Query(diesel::result::Error::DatabaseError(
3632 : DatabaseErrorKind::UniqueViolation,
3633 : _,
3634 : )) => {
3635 : // Inserting a child shard violated a unique constraint: we raced with another call to
3636 : // this function
3637 0 : tracing::warn!("Conflicting attempt to split {tenant_id}: {e}");
3638 0 : return Err(ApiError::Conflict("Tenant is already splitting".into()));
3639 : }
3640 0 : _ => return Err(ApiError::InternalServerError(e.into())),
3641 : }
3642 0 : }
3643 0 : fail::fail_point!("shard-split-post-begin", |_| Err(
3644 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
3645 0 : ));
3646 :
3647 : // Now that I have persisted the splitting state, apply it in-memory. This is infallible, so
3648 : // callers may assume that if splitting is set in memory, then it was persisted, and if splitting
3649 : // is not set in memory, then it was not persisted.
3650 : {
3651 0 : let mut locked = self.inner.write().unwrap();
3652 0 : for target in &targets {
3653 0 : if let Some(parent_shard) = locked.tenants.get_mut(&target.parent_id) {
3654 0 : parent_shard.splitting = SplitState::Splitting;
3655 0 : // Put the observed state to None, to reflect that it is indeterminate once we start the
3656 0 : // split operation.
3657 0 : parent_shard
3658 0 : .observed
3659 0 : .locations
3660 0 : .insert(target.node.get_id(), ObservedStateLocation { conf: None });
3661 0 : }
3662 : }
3663 : }
3664 :
3665 : // TODO: issue split calls concurrently (this only matters once we're splitting
3666 : // N>1 shards into M shards -- initially we're usually splitting 1 shard into N).
3667 :
3668 0 : for target in &targets {
3669 : let ShardSplitTarget {
3670 0 : parent_id,
3671 0 : node,
3672 0 : child_ids,
3673 0 : } = target;
3674 0 : let client = PageserverClient::new(
3675 0 : node.get_id(),
3676 0 : node.base_url(),
3677 0 : self.config.jwt_token.as_deref(),
3678 0 : );
3679 0 : let response = client
3680 0 : .tenant_shard_split(
3681 0 : *parent_id,
3682 0 : TenantShardSplitRequest {
3683 0 : new_shard_count: new_shard_count.literal(),
3684 0 : new_stripe_size,
3685 0 : },
3686 0 : )
3687 0 : .await
3688 0 : .map_err(|e| ApiError::Conflict(format!("Failed to split {}: {}", parent_id, e)))?;
3689 :
3690 0 : fail::fail_point!("shard-split-post-remote", |_| Err(ApiError::Conflict(
3691 0 : "failpoint".to_string()
3692 0 : )));
3693 :
3694 0 : tracing::info!(
3695 0 : "Split {} into {}",
3696 0 : parent_id,
3697 0 : response
3698 0 : .new_shards
3699 0 : .iter()
3700 0 : .map(|s| format!("{:?}", s))
3701 0 : .collect::<Vec<_>>()
3702 0 : .join(",")
3703 : );
3704 :
3705 0 : if &response.new_shards != child_ids {
3706 : // This should never happen: the pageserver should agree with us on how shard splits work.
3707 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3708 0 : "Splitting shard {} resulted in unexpected IDs: {:?} (expected {:?})",
3709 0 : parent_id,
3710 0 : response.new_shards,
3711 0 : child_ids
3712 0 : )));
3713 0 : }
3714 : }
3715 :
3716 : // TODO: if the pageserver restarted concurrently with our split API call,
3717 : // the actual generation of the child shard might differ from the generation
3718 : // we expect it to have. In order for our in-database generation to end up
3719 : // correct, we should carry the child generation back in the response and apply it here
3720 : // in complete_shard_split (and apply the correct generation in memory)
3721 : // (or, we can carry generation in the request and reject the request if
3722 : // it doesn't match, but that requires more retry logic on this side)
3723 :
3724 0 : self.persistence
3725 0 : .complete_shard_split(tenant_id, old_shard_count)
3726 0 : .await?;
3727 :
3728 0 : fail::fail_point!("shard-split-post-complete", |_| Err(
3729 0 : ApiError::InternalServerError(anyhow::anyhow!("failpoint"))
3730 0 : ));
3731 :
3732 : // Replace all the shards we just split with their children: this phase is infallible.
3733 0 : let (response, child_locations, waiters) =
3734 0 : self.tenant_shard_split_commit_inmem(tenant_id, new_shard_count, new_stripe_size);
3735 0 :
3736 0 : // Send compute notifications for all the new shards
3737 0 : let mut failed_notifications = Vec::new();
3738 0 : for (child_id, child_ps, stripe_size) in child_locations {
3739 0 : if let Err(e) = self
3740 0 : .compute_hook
3741 0 : .notify(child_id, child_ps, stripe_size, &self.cancel)
3742 0 : .await
3743 : {
3744 0 : tracing::warn!("Failed to update compute of {}->{} during split, proceeding anyway to complete split ({e})",
3745 : child_id, child_ps);
3746 0 : failed_notifications.push(child_id);
3747 0 : }
3748 : }
3749 :
3750 : // If we failed any compute notifications, make a note to retry later.
3751 0 : if !failed_notifications.is_empty() {
3752 0 : let mut locked = self.inner.write().unwrap();
3753 0 : for failed in failed_notifications {
3754 0 : if let Some(shard) = locked.tenants.get_mut(&failed) {
3755 0 : shard.pending_compute_notification = true;
3756 0 : }
3757 : }
3758 0 : }
3759 :
3760 0 : Ok((response, waiters))
3761 0 : }
3762 :
3763 0 : pub(crate) async fn tenant_shard_migrate(
3764 0 : &self,
3765 0 : tenant_shard_id: TenantShardId,
3766 0 : migrate_req: TenantShardMigrateRequest,
3767 0 : ) -> Result<TenantShardMigrateResponse, ApiError> {
3768 0 : let waiter = {
3769 0 : let mut locked = self.inner.write().unwrap();
3770 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
3771 :
3772 0 : let Some(node) = nodes.get(&migrate_req.node_id) else {
3773 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3774 0 : "Node {} not found",
3775 0 : migrate_req.node_id
3776 0 : )));
3777 : };
3778 :
3779 0 : if !node.is_available() {
3780 : // Warn but proceed: the caller may intend to manually adjust the placement of
3781 : // a shard even if the node is down, e.g. if intervening during an incident.
3782 0 : tracing::warn!("Migrating to unavailable node {node}");
3783 0 : }
3784 :
3785 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
3786 0 : return Err(ApiError::NotFound(
3787 0 : anyhow::anyhow!("Tenant shard not found").into(),
3788 0 : ));
3789 : };
3790 :
3791 0 : if shard.intent.get_attached() == &Some(migrate_req.node_id) {
3792 : // No-op case: we will still proceed to wait for reconciliation in case it is
3793 : // incomplete from an earlier update to the intent.
3794 0 : tracing::info!("Migrating: intent is unchanged {:?}", shard.intent);
3795 : } else {
3796 0 : let old_attached = *shard.intent.get_attached();
3797 0 :
3798 0 : match shard.policy {
3799 0 : PlacementPolicy::Attached(n) => {
3800 0 : // If our new attached node was a secondary, it no longer should be.
3801 0 : shard.intent.remove_secondary(scheduler, migrate_req.node_id);
3802 :
3803 : // If we were already attached to something, demote that to a secondary
3804 0 : if let Some(old_attached) = old_attached {
3805 0 : if n > 0 {
3806 : // Remove other secondaries to make room for the location we'll demote
3807 0 : while shard.intent.get_secondary().len() >= n {
3808 0 : shard.intent.pop_secondary(scheduler);
3809 0 : }
3810 :
3811 0 : shard.intent.push_secondary(scheduler, old_attached);
3812 0 : }
3813 0 : }
3814 :
3815 0 : shard.intent.set_attached(scheduler, Some(migrate_req.node_id));
3816 : }
3817 0 : PlacementPolicy::Secondary => {
3818 0 : shard.intent.clear(scheduler);
3819 0 : shard.intent.push_secondary(scheduler, migrate_req.node_id);
3820 0 : }
3821 : PlacementPolicy::Detached => {
3822 0 : return Err(ApiError::BadRequest(anyhow::anyhow!(
3823 0 : "Cannot migrate a tenant that is PlacementPolicy::Detached: configure it to an attached policy first"
3824 0 : )))
3825 : }
3826 : }
3827 :
3828 0 : tracing::info!("Migrating: new intent {:?}", shard.intent);
3829 0 : shard.sequence = shard.sequence.next();
3830 : }
3831 :
3832 0 : self.maybe_reconcile_shard(shard, nodes)
3833 : };
3834 :
3835 0 : if let Some(waiter) = waiter {
3836 0 : waiter.wait_timeout(RECONCILE_TIMEOUT).await?;
3837 : } else {
3838 0 : tracing::info!("Migration is a no-op");
3839 : }
3840 :
3841 0 : Ok(TenantShardMigrateResponse {})
3842 0 : }
3843 :
3844 : /// This is for debug/support only: we simply drop all state for a tenant, without
3845 : /// detaching or deleting it on pageservers.
3846 0 : pub(crate) async fn tenant_drop(&self, tenant_id: TenantId) -> Result<(), ApiError> {
3847 0 : self.persistence.delete_tenant(tenant_id).await?;
3848 :
3849 0 : let mut locked = self.inner.write().unwrap();
3850 0 : let (_nodes, tenants, scheduler) = locked.parts_mut();
3851 0 : let mut shards = Vec::new();
3852 0 : for (tenant_shard_id, _) in tenants.range(TenantShardId::tenant_range(tenant_id)) {
3853 0 : shards.push(*tenant_shard_id);
3854 0 : }
3855 :
3856 0 : for shard_id in shards {
3857 0 : if let Some(mut shard) = tenants.remove(&shard_id) {
3858 0 : shard.intent.clear(scheduler);
3859 0 : }
3860 : }
3861 :
3862 0 : Ok(())
3863 0 : }
3864 :
3865 : /// This is for debug/support only: assuming tenant data is already present in S3, we "create" a
3866 : /// tenant with a very high generation number so that it will see the existing data.
3867 0 : pub(crate) async fn tenant_import(
3868 0 : &self,
3869 0 : tenant_id: TenantId,
3870 0 : ) -> Result<TenantCreateResponse, ApiError> {
3871 0 : // Pick an arbitrary available pageserver to use for scanning the tenant in remote storage
3872 0 : let maybe_node = {
3873 0 : self.inner
3874 0 : .read()
3875 0 : .unwrap()
3876 0 : .nodes
3877 0 : .values()
3878 0 : .find(|n| n.is_available())
3879 0 : .cloned()
3880 : };
3881 0 : let Some(node) = maybe_node else {
3882 0 : return Err(ApiError::BadRequest(anyhow::anyhow!("No nodes available")));
3883 : };
3884 :
3885 0 : let client = PageserverClient::new(
3886 0 : node.get_id(),
3887 0 : node.base_url(),
3888 0 : self.config.jwt_token.as_deref(),
3889 0 : );
3890 :
3891 0 : let scan_result = client
3892 0 : .tenant_scan_remote_storage(tenant_id)
3893 0 : .await
3894 0 : .map_err(|e| passthrough_api_error(&node, e))?;
3895 :
3896 : // A post-split tenant may contain a mixture of shard counts in remote storage: pick the highest count.
3897 0 : let Some(shard_count) = scan_result
3898 0 : .shards
3899 0 : .iter()
3900 0 : .map(|s| s.tenant_shard_id.shard_count)
3901 0 : .max()
3902 : else {
3903 0 : return Err(ApiError::NotFound(
3904 0 : anyhow::anyhow!("No shards found").into(),
3905 0 : ));
3906 : };
3907 :
3908 : // Ideally we would set each newly imported shard's generation independently, but for correctness it is sufficient
3909 : // to
3910 0 : let generation = scan_result
3911 0 : .shards
3912 0 : .iter()
3913 0 : .map(|s| s.generation)
3914 0 : .max()
3915 0 : .expect("We already validated >0 shards");
3916 0 :
3917 0 : // FIXME: we have no way to recover the shard stripe size from contents of remote storage: this will
3918 0 : // only work if they were using the default stripe size.
3919 0 : let stripe_size = ShardParameters::DEFAULT_STRIPE_SIZE;
3920 :
3921 0 : let (response, waiters) = self
3922 0 : .do_tenant_create(TenantCreateRequest {
3923 0 : new_tenant_id: TenantShardId::unsharded(tenant_id),
3924 0 : generation,
3925 0 :
3926 0 : shard_parameters: ShardParameters {
3927 0 : count: shard_count,
3928 0 : stripe_size,
3929 0 : },
3930 0 : placement_policy: Some(PlacementPolicy::Attached(0)), // No secondaries, for convenient debug/hacking
3931 0 :
3932 0 : // There is no way to know what the tenant's config was: revert to defaults
3933 0 : config: TenantConfig::default(),
3934 0 : })
3935 0 : .await?;
3936 :
3937 0 : if let Err(e) = self.await_waiters(waiters, SHORT_RECONCILE_TIMEOUT).await {
3938 : // Since this is a debug/support operation, all kinds of weird issues are possible (e.g. this
3939 : // tenant doesn't exist in the control plane), so don't fail the request if it can't fully
3940 : // reconcile, as reconciliation includes notifying compute.
3941 0 : tracing::warn!(%tenant_id, "Reconcile not done yet while importing tenant ({e})");
3942 0 : }
3943 :
3944 0 : Ok(response)
3945 0 : }
3946 :
3947 : /// For debug/support: a full JSON dump of TenantShards. Returns a response so that
3948 : /// we don't have to make TenantShard clonable in the return path.
3949 0 : pub(crate) fn tenants_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
3950 0 : let serialized = {
3951 0 : let locked = self.inner.read().unwrap();
3952 0 : let result = locked.tenants.values().collect::<Vec<_>>();
3953 0 : serde_json::to_string(&result).map_err(|e| ApiError::InternalServerError(e.into()))?
3954 : };
3955 :
3956 0 : hyper::Response::builder()
3957 0 : .status(hyper::StatusCode::OK)
3958 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
3959 0 : .body(hyper::Body::from(serialized))
3960 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
3961 0 : }
3962 :
3963 : /// Check the consistency of in-memory state vs. persistent state, and check that the
3964 : /// scheduler's statistics are up to date.
3965 : ///
3966 : /// These consistency checks expect an **idle** system. If changes are going on while
3967 : /// we run, then we can falsely indicate a consistency issue. This is sufficient for end-of-test
3968 : /// checks, but not suitable for running continuously in the background in the field.
3969 0 : pub(crate) async fn consistency_check(&self) -> Result<(), ApiError> {
3970 0 : let (mut expect_nodes, mut expect_shards) = {
3971 0 : let locked = self.inner.read().unwrap();
3972 0 :
3973 0 : locked
3974 0 : .scheduler
3975 0 : .consistency_check(locked.nodes.values(), locked.tenants.values())
3976 0 : .context("Scheduler checks")
3977 0 : .map_err(ApiError::InternalServerError)?;
3978 :
3979 0 : let expect_nodes = locked
3980 0 : .nodes
3981 0 : .values()
3982 0 : .map(|n| n.to_persistent())
3983 0 : .collect::<Vec<_>>();
3984 0 :
3985 0 : let expect_shards = locked
3986 0 : .tenants
3987 0 : .values()
3988 0 : .map(|t| t.to_persistent())
3989 0 : .collect::<Vec<_>>();
3990 :
3991 : // This method can only validate the state of an idle system: if a reconcile is in
3992 : // progress, fail out early to avoid giving false errors on state that won't match
3993 : // between database and memory under a ReconcileResult is processed.
3994 0 : for t in locked.tenants.values() {
3995 0 : if t.reconciler.is_some() {
3996 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
3997 0 : "Shard {} reconciliation in progress",
3998 0 : t.tenant_shard_id
3999 0 : )));
4000 0 : }
4001 : }
4002 :
4003 0 : (expect_nodes, expect_shards)
4004 : };
4005 :
4006 0 : let mut nodes = self.persistence.list_nodes().await?;
4007 0 : expect_nodes.sort_by_key(|n| n.node_id);
4008 0 : nodes.sort_by_key(|n| n.node_id);
4009 0 :
4010 0 : if nodes != expect_nodes {
4011 0 : tracing::error!("Consistency check failed on nodes.");
4012 0 : tracing::error!(
4013 0 : "Nodes in memory: {}",
4014 0 : serde_json::to_string(&expect_nodes)
4015 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4016 : );
4017 0 : tracing::error!(
4018 0 : "Nodes in database: {}",
4019 0 : serde_json::to_string(&nodes)
4020 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4021 : );
4022 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4023 0 : "Node consistency failure"
4024 0 : )));
4025 0 : }
4026 :
4027 0 : let mut shards = self.persistence.list_tenant_shards().await?;
4028 0 : shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4029 0 : expect_shards.sort_by_key(|tsp| (tsp.tenant_id.clone(), tsp.shard_number, tsp.shard_count));
4030 0 :
4031 0 : if shards != expect_shards {
4032 0 : tracing::error!("Consistency check failed on shards.");
4033 0 : tracing::error!(
4034 0 : "Shards in memory: {}",
4035 0 : serde_json::to_string(&expect_shards)
4036 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4037 : );
4038 0 : tracing::error!(
4039 0 : "Shards in database: {}",
4040 0 : serde_json::to_string(&shards)
4041 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4042 : );
4043 0 : return Err(ApiError::InternalServerError(anyhow::anyhow!(
4044 0 : "Shard consistency failure"
4045 0 : )));
4046 0 : }
4047 0 :
4048 0 : Ok(())
4049 0 : }
4050 :
4051 : /// For debug/support: a JSON dump of the [`Scheduler`]. Returns a response so that
4052 : /// we don't have to make TenantShard clonable in the return path.
4053 0 : pub(crate) fn scheduler_dump(&self) -> Result<hyper::Response<hyper::Body>, ApiError> {
4054 0 : let serialized = {
4055 0 : let locked = self.inner.read().unwrap();
4056 0 : serde_json::to_string(&locked.scheduler)
4057 0 : .map_err(|e| ApiError::InternalServerError(e.into()))?
4058 : };
4059 :
4060 0 : hyper::Response::builder()
4061 0 : .status(hyper::StatusCode::OK)
4062 0 : .header(hyper::header::CONTENT_TYPE, "application/json")
4063 0 : .body(hyper::Body::from(serialized))
4064 0 : .map_err(|e| ApiError::InternalServerError(e.into()))
4065 0 : }
4066 :
4067 : /// This is for debug/support only: we simply drop all state for a tenant, without
4068 : /// detaching or deleting it on pageservers. We do not try and re-schedule any
4069 : /// tenants that were on this node.
4070 : ///
4071 : /// TODO: proper node deletion API that unhooks things more gracefully
4072 0 : pub(crate) async fn node_drop(&self, node_id: NodeId) -> Result<(), ApiError> {
4073 0 : self.persistence.delete_node(node_id).await?;
4074 :
4075 0 : let mut locked = self.inner.write().unwrap();
4076 :
4077 0 : for shard in locked.tenants.values_mut() {
4078 0 : shard.deref_node(node_id);
4079 0 : }
4080 :
4081 0 : let mut nodes = (*locked.nodes).clone();
4082 0 : nodes.remove(&node_id);
4083 0 : locked.nodes = Arc::new(nodes);
4084 0 :
4085 0 : locked.scheduler.node_remove(node_id);
4086 0 :
4087 0 : Ok(())
4088 0 : }
4089 :
4090 0 : pub(crate) async fn node_list(&self) -> Result<Vec<Node>, ApiError> {
4091 0 : let nodes = {
4092 0 : self.inner
4093 0 : .read()
4094 0 : .unwrap()
4095 0 : .nodes
4096 0 : .values()
4097 0 : .cloned()
4098 0 : .collect::<Vec<_>>()
4099 0 : };
4100 0 :
4101 0 : Ok(nodes)
4102 0 : }
4103 :
4104 0 : pub(crate) async fn node_register(
4105 0 : &self,
4106 0 : register_req: NodeRegisterRequest,
4107 0 : ) -> Result<(), ApiError> {
4108 0 : let _node_lock = trace_exclusive_lock(
4109 0 : &self.node_op_locks,
4110 0 : register_req.node_id,
4111 0 : NodeOperations::Register,
4112 0 : )
4113 0 : .await;
4114 :
4115 : {
4116 0 : let locked = self.inner.read().unwrap();
4117 0 : if let Some(node) = locked.nodes.get(®ister_req.node_id) {
4118 : // Note that we do not do a total equality of the struct, because we don't require
4119 : // the availability/scheduling states to agree for a POST to be idempotent.
4120 0 : if node.registration_match(®ister_req) {
4121 0 : tracing::info!(
4122 0 : "Node {} re-registered with matching address",
4123 : register_req.node_id
4124 : );
4125 0 : return Ok(());
4126 : } else {
4127 : // TODO: decide if we want to allow modifying node addresses without removing and re-adding
4128 : // the node. Safest/simplest thing is to refuse it, and usually we deploy with
4129 : // a fixed address through the lifetime of a node.
4130 0 : tracing::warn!(
4131 0 : "Node {} tried to register with different address",
4132 : register_req.node_id
4133 : );
4134 0 : return Err(ApiError::Conflict(
4135 0 : "Node is already registered with different address".to_string(),
4136 0 : ));
4137 : }
4138 0 : }
4139 0 : }
4140 0 :
4141 0 : // We do not require that a node is actually online when registered (it will start life
4142 0 : // with it's availability set to Offline), but we _do_ require that its DNS record exists. We're
4143 0 : // therefore not immune to asymmetric L3 connectivity issues, but we are protected against nodes
4144 0 : // that register themselves with a broken DNS config. We check only the HTTP hostname, because
4145 0 : // the postgres hostname might only be resolvable to clients (e.g. if we're on a different VPC than clients).
4146 0 : if tokio::net::lookup_host(format!(
4147 0 : "{}:{}",
4148 0 : register_req.listen_http_addr, register_req.listen_http_port
4149 0 : ))
4150 0 : .await
4151 0 : .is_err()
4152 : {
4153 : // If we have a transient DNS issue, it's up to the caller to retry their registration. Because
4154 : // we can't robustly distinguish between an intermittent issue and a totally bogus DNS situation,
4155 : // we return a soft 503 error, to encourage callers to retry past transient issues.
4156 0 : return Err(ApiError::ResourceUnavailable(
4157 0 : format!(
4158 0 : "Node {} tried to register with unknown DNS name '{}'",
4159 0 : register_req.node_id, register_req.listen_http_addr
4160 0 : )
4161 0 : .into(),
4162 0 : ));
4163 0 : }
4164 0 :
4165 0 : // Ordering: we must persist the new node _before_ adding it to in-memory state.
4166 0 : // This ensures that before we use it for anything or expose it via any external
4167 0 : // API, it is guaranteed to be available after a restart.
4168 0 : let new_node = Node::new(
4169 0 : register_req.node_id,
4170 0 : register_req.listen_http_addr,
4171 0 : register_req.listen_http_port,
4172 0 : register_req.listen_pg_addr,
4173 0 : register_req.listen_pg_port,
4174 0 : );
4175 0 :
4176 0 : // TODO: idempotency if the node already exists in the database
4177 0 : self.persistence.insert_node(&new_node).await?;
4178 :
4179 0 : let mut locked = self.inner.write().unwrap();
4180 0 : let mut new_nodes = (*locked.nodes).clone();
4181 0 :
4182 0 : locked.scheduler.node_upsert(&new_node);
4183 0 : new_nodes.insert(register_req.node_id, new_node);
4184 0 :
4185 0 : locked.nodes = Arc::new(new_nodes);
4186 0 :
4187 0 : tracing::info!(
4188 0 : "Registered pageserver {}, now have {} pageservers",
4189 0 : register_req.node_id,
4190 0 : locked.nodes.len()
4191 : );
4192 0 : Ok(())
4193 0 : }
4194 :
4195 0 : pub(crate) async fn node_configure(
4196 0 : &self,
4197 0 : node_id: NodeId,
4198 0 : availability: Option<NodeAvailability>,
4199 0 : scheduling: Option<NodeSchedulingPolicy>,
4200 0 : ) -> Result<(), ApiError> {
4201 0 : let _node_lock =
4202 0 : trace_exclusive_lock(&self.node_op_locks, node_id, NodeOperations::Configure).await;
4203 :
4204 0 : if let Some(scheduling) = scheduling {
4205 : // Scheduling is a persistent part of Node: we must write updates to the database before
4206 : // applying them in memory
4207 0 : self.persistence.update_node(node_id, scheduling).await?;
4208 0 : }
4209 :
4210 : // If we're activating a node, then before setting it active we must reconcile any shard locations
4211 : // on that node, in case it is out of sync, e.g. due to being unavailable during controller startup,
4212 : // by calling [`Self::node_activate_reconcile`]
4213 : //
4214 : // The transition we calculate here remains valid later in the function because we hold the op lock on the node:
4215 : // nothing else can mutate its availability while we run.
4216 0 : let availability_transition = if let Some(input_availability) = availability {
4217 0 : let (activate_node, availability_transition) = {
4218 0 : let locked = self.inner.read().unwrap();
4219 0 : let Some(node) = locked.nodes.get(&node_id) else {
4220 0 : return Err(ApiError::NotFound(
4221 0 : anyhow::anyhow!("Node {} not registered", node_id).into(),
4222 0 : ));
4223 : };
4224 :
4225 0 : (
4226 0 : node.clone(),
4227 0 : node.get_availability_transition(input_availability),
4228 0 : )
4229 : };
4230 :
4231 0 : if matches!(availability_transition, AvailabilityTransition::ToActive) {
4232 0 : self.node_activate_reconcile(activate_node, &_node_lock)
4233 0 : .await?;
4234 0 : }
4235 0 : availability_transition
4236 : } else {
4237 0 : AvailabilityTransition::Unchanged
4238 : };
4239 :
4240 : // Apply changes from the request to our in-memory state for the Node
4241 0 : let mut locked = self.inner.write().unwrap();
4242 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4243 0 :
4244 0 : let mut new_nodes = (**nodes).clone();
4245 :
4246 0 : let Some(node) = new_nodes.get_mut(&node_id) else {
4247 0 : return Err(ApiError::NotFound(
4248 0 : anyhow::anyhow!("Node not registered").into(),
4249 0 : ));
4250 : };
4251 :
4252 0 : if let Some(availability) = &availability {
4253 0 : node.set_availability(*availability);
4254 0 : }
4255 :
4256 0 : if let Some(scheduling) = scheduling {
4257 0 : node.set_scheduling(scheduling);
4258 0 :
4259 0 : // TODO: once we have a background scheduling ticker for fill/drain, kick it
4260 0 : // to wake up and start working.
4261 0 : }
4262 :
4263 : // Update the scheduler, in case the elegibility of the node for new shards has changed
4264 0 : scheduler.node_upsert(node);
4265 0 :
4266 0 : let new_nodes = Arc::new(new_nodes);
4267 0 :
4268 0 : // Modify scheduling state for any Tenants that are affected by a change in the node's availability state.
4269 0 : match availability_transition {
4270 : AvailabilityTransition::ToOffline => {
4271 0 : tracing::info!("Node {} transition to offline", node_id);
4272 0 : let mut tenants_affected: usize = 0;
4273 :
4274 0 : for (tenant_shard_id, tenant_shard) in tenants {
4275 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
4276 0 : // When a node goes offline, we set its observed configuration to None, indicating unknown: we will
4277 0 : // not assume our knowledge of the node's configuration is accurate until it comes back online
4278 0 : observed_loc.conf = None;
4279 0 : }
4280 :
4281 0 : if new_nodes.len() == 1 {
4282 : // Special case for single-node cluster: there is no point trying to reschedule
4283 : // any tenant shards: avoid doing so, in order to avoid spewing warnings about
4284 : // failures to schedule them.
4285 0 : continue;
4286 0 : }
4287 0 :
4288 0 : if tenant_shard.intent.demote_attached(node_id) {
4289 0 : tenant_shard.sequence = tenant_shard.sequence.next();
4290 0 :
4291 0 : // TODO: populate a ScheduleContext including all shards in the same tenant_id (only matters
4292 0 : // for tenants without secondary locations: if they have a secondary location, then this
4293 0 : // schedule() call is just promoting an existing secondary)
4294 0 : let mut schedule_context = ScheduleContext::default();
4295 0 :
4296 0 : match tenant_shard.schedule(scheduler, &mut schedule_context) {
4297 0 : Err(e) => {
4298 0 : // It is possible that some tenants will become unschedulable when too many pageservers
4299 0 : // go offline: in this case there isn't much we can do other than make the issue observable.
4300 0 : // TODO: give TenantShard a scheduling error attribute to be queried later.
4301 0 : tracing::warn!(%tenant_shard_id, "Scheduling error when marking pageserver {} offline: {e}", node_id);
4302 : }
4303 : Ok(()) => {
4304 0 : if self
4305 0 : .maybe_reconcile_shard(tenant_shard, &new_nodes)
4306 0 : .is_some()
4307 0 : {
4308 0 : tenants_affected += 1;
4309 0 : };
4310 : }
4311 : }
4312 0 : }
4313 : }
4314 0 : tracing::info!(
4315 0 : "Launched {} reconciler tasks for tenants affected by node {} going offline",
4316 : tenants_affected,
4317 : node_id
4318 : )
4319 : }
4320 : AvailabilityTransition::ToActive => {
4321 0 : tracing::info!("Node {} transition to active", node_id);
4322 : // When a node comes back online, we must reconcile any tenant that has a None observed
4323 : // location on the node.
4324 0 : for tenant_shard in locked.tenants.values_mut() {
4325 0 : if let Some(observed_loc) = tenant_shard.observed.locations.get_mut(&node_id) {
4326 0 : if observed_loc.conf.is_none() {
4327 0 : self.maybe_reconcile_shard(tenant_shard, &new_nodes);
4328 0 : }
4329 0 : }
4330 : }
4331 :
4332 : // TODO: in the background, we should balance work back onto this pageserver
4333 : }
4334 : AvailabilityTransition::Unchanged => {
4335 0 : tracing::debug!("Node {} no change during config", node_id);
4336 : }
4337 : }
4338 :
4339 0 : locked.nodes = new_nodes;
4340 0 :
4341 0 : Ok(())
4342 0 : }
4343 :
4344 : /// Helper for methods that will try and call pageserver APIs for
4345 : /// a tenant, such as timeline CRUD: they cannot proceed unless the tenant
4346 : /// is attached somewhere.
4347 0 : fn ensure_attached_schedule(
4348 0 : &self,
4349 0 : mut locked: std::sync::RwLockWriteGuard<'_, ServiceState>,
4350 0 : tenant_id: TenantId,
4351 0 : ) -> Result<Vec<ReconcilerWaiter>, anyhow::Error> {
4352 0 : let mut waiters = Vec::new();
4353 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4354 0 :
4355 0 : let mut schedule_context = ScheduleContext::default();
4356 0 : for (tenant_shard_id, shard) in tenants.range_mut(TenantShardId::tenant_range(tenant_id)) {
4357 0 : shard.schedule(scheduler, &mut schedule_context)?;
4358 :
4359 : // The shard's policies may not result in an attached location being scheduled: this
4360 : // is an error because our caller needs it attached somewhere.
4361 0 : if shard.intent.get_attached().is_none() {
4362 0 : return Err(anyhow::anyhow!(
4363 0 : "Tenant {tenant_id} not scheduled to be attached"
4364 0 : ));
4365 0 : };
4366 0 :
4367 0 : if shard.stably_attached().is_some() {
4368 : // We do not require the shard to be totally up to date on reconciliation: we just require
4369 : // that it has been attached on the intended node. Other dirty state such as unattached secondary
4370 : // locations, or compute hook notifications can be ignored.
4371 0 : continue;
4372 0 : }
4373 :
4374 0 : if let Some(waiter) = self.maybe_reconcile_shard(shard, nodes) {
4375 0 : tracing::info!("Waiting for shard {tenant_shard_id} to reconcile, in order to ensure it is attached");
4376 0 : waiters.push(waiter);
4377 0 : }
4378 : }
4379 0 : Ok(waiters)
4380 0 : }
4381 :
4382 0 : async fn ensure_attached_wait(&self, tenant_id: TenantId) -> Result<(), ApiError> {
4383 0 : let ensure_waiters = {
4384 0 : let locked = self.inner.write().unwrap();
4385 :
4386 : // Check if the tenant is splitting: in this case, even if it is attached,
4387 : // we must act as if it is not: this blocks e.g. timeline creation/deletion
4388 : // operations during the split.
4389 0 : for (_shard_id, shard) in locked.tenants.range(TenantShardId::tenant_range(tenant_id)) {
4390 0 : if !matches!(shard.splitting, SplitState::Idle) {
4391 0 : return Err(ApiError::ResourceUnavailable(
4392 0 : "Tenant shards are currently splitting".into(),
4393 0 : ));
4394 0 : }
4395 : }
4396 :
4397 0 : self.ensure_attached_schedule(locked, tenant_id)
4398 0 : .map_err(ApiError::InternalServerError)?
4399 : };
4400 :
4401 0 : let deadline = Instant::now().checked_add(Duration::from_secs(5)).unwrap();
4402 0 : for waiter in ensure_waiters {
4403 0 : let timeout = deadline.duration_since(Instant::now());
4404 0 : waiter.wait_timeout(timeout).await?;
4405 : }
4406 :
4407 0 : Ok(())
4408 0 : }
4409 :
4410 : /// Wrap [`TenantShard`] reconciliation methods with acquisition of [`Gate`] and [`ReconcileUnits`],
4411 0 : fn maybe_reconcile_shard(
4412 0 : &self,
4413 0 : shard: &mut TenantShard,
4414 0 : nodes: &Arc<HashMap<NodeId, Node>>,
4415 0 : ) -> Option<ReconcilerWaiter> {
4416 0 : let reconcile_needed = shard.get_reconcile_needed(nodes);
4417 0 :
4418 0 : match reconcile_needed {
4419 0 : ReconcileNeeded::No => return None,
4420 0 : ReconcileNeeded::WaitExisting(waiter) => return Some(waiter),
4421 0 : ReconcileNeeded::Yes => {
4422 0 : // Fall through to try and acquire units for spawning reconciler
4423 0 : }
4424 : };
4425 :
4426 0 : let units = match self.reconciler_concurrency.clone().try_acquire_owned() {
4427 0 : Ok(u) => ReconcileUnits::new(u),
4428 : Err(_) => {
4429 0 : tracing::info!(tenant_id=%shard.tenant_shard_id.tenant_id, shard_id=%shard.tenant_shard_id.shard_slug(),
4430 0 : "Concurrency limited: enqueued for reconcile later");
4431 0 : if !shard.delayed_reconcile {
4432 0 : match self.delayed_reconcile_tx.try_send(shard.tenant_shard_id) {
4433 0 : Err(TrySendError::Closed(_)) => {
4434 0 : // Weird mid-shutdown case?
4435 0 : }
4436 : Err(TrySendError::Full(_)) => {
4437 : // It is safe to skip sending our ID in the channel: we will eventually get retried by the background reconcile task.
4438 0 : tracing::warn!(
4439 0 : "Many shards are waiting to reconcile: delayed_reconcile queue is full"
4440 : );
4441 : }
4442 0 : Ok(()) => {
4443 0 : shard.delayed_reconcile = true;
4444 0 : }
4445 : }
4446 0 : }
4447 :
4448 : // We won't spawn a reconciler, but we will construct a waiter that waits for the shard's sequence
4449 : // number to advance. When this function is eventually called again and succeeds in getting units,
4450 : // it will spawn a reconciler that makes this waiter complete.
4451 0 : return Some(shard.future_reconcile_waiter());
4452 : }
4453 : };
4454 :
4455 0 : let Ok(gate_guard) = self.gate.enter() else {
4456 : // Gate closed: we're shutting down, drop out.
4457 0 : return None;
4458 : };
4459 :
4460 0 : shard.spawn_reconciler(
4461 0 : &self.result_tx,
4462 0 : nodes,
4463 0 : &self.compute_hook,
4464 0 : &self.config,
4465 0 : &self.persistence,
4466 0 : units,
4467 0 : gate_guard,
4468 0 : &self.cancel,
4469 0 : )
4470 0 : }
4471 :
4472 : /// Check all tenants for pending reconciliation work, and reconcile those in need.
4473 : /// Additionally, reschedule tenants that require it.
4474 : ///
4475 : /// Returns how many reconciliation tasks were started, or `1` if no reconciles were
4476 : /// spawned but some _would_ have been spawned if `reconciler_concurrency` units where
4477 : /// available. A return value of 0 indicates that everything is fully reconciled already.
4478 0 : fn reconcile_all(&self) -> usize {
4479 0 : let mut locked = self.inner.write().unwrap();
4480 0 : let (nodes, tenants, _scheduler) = locked.parts_mut();
4481 0 : let pageservers = nodes.clone();
4482 0 :
4483 0 : let mut schedule_context = ScheduleContext::default();
4484 0 :
4485 0 : let mut reconciles_spawned = 0;
4486 0 : for (tenant_shard_id, shard) in tenants.iter_mut() {
4487 0 : if tenant_shard_id.is_shard_zero() {
4488 0 : schedule_context = ScheduleContext::default();
4489 0 : }
4490 :
4491 : // Skip checking if this shard is already enqueued for reconciliation
4492 0 : if shard.delayed_reconcile && self.reconciler_concurrency.available_permits() == 0 {
4493 : // If there is something delayed, then return a nonzero count so that
4494 : // callers like reconcile_all_now do not incorrectly get the impression
4495 : // that the system is in a quiescent state.
4496 0 : reconciles_spawned = std::cmp::max(1, reconciles_spawned);
4497 0 : continue;
4498 0 : }
4499 0 :
4500 0 : // Eventual consistency: if an earlier reconcile job failed, and the shard is still
4501 0 : // dirty, spawn another rone
4502 0 : if self.maybe_reconcile_shard(shard, &pageservers).is_some() {
4503 0 : reconciles_spawned += 1;
4504 0 : }
4505 :
4506 0 : schedule_context.avoid(&shard.intent.all_pageservers());
4507 : }
4508 :
4509 0 : reconciles_spawned
4510 0 : }
4511 :
4512 : /// `optimize` in this context means identifying shards which have valid scheduled locations, but
4513 : /// could be scheduled somewhere better:
4514 : /// - Cutting over to a secondary if the node with the secondary is more lightly loaded
4515 : /// * e.g. after a node fails then recovers, to move some work back to it
4516 : /// - Cutting over to a secondary if it improves the spread of shard attachments within a tenant
4517 : /// * e.g. after a shard split, the initial attached locations will all be on the node where
4518 : /// we did the split, but are probably better placed elsewhere.
4519 : /// - Creating new secondary locations if it improves the spreading of a sharded tenant
4520 : /// * e.g. after a shard split, some locations will be on the same node (where the split
4521 : /// happened), and will probably be better placed elsewhere.
4522 : ///
4523 : /// To put it more briefly: whereas the scheduler respects soft constraints in a ScheduleContext at
4524 : /// the time of scheduling, this function looks for cases where a better-scoring location is available
4525 : /// according to those same soft constraints.
4526 0 : async fn optimize_all(&self) -> usize {
4527 0 : // Limit on how many shards' optmizations each call to this function will execute. Combined
4528 0 : // with the frequency of background calls, this acts as an implicit rate limit that runs a small
4529 0 : // trickle of optimizations in the background, rather than executing a large number in parallel
4530 0 : // when a change occurs.
4531 0 : const MAX_OPTIMIZATIONS_EXEC_PER_PASS: usize = 2;
4532 0 :
4533 0 : // Synchronous prepare: scan shards for possible scheduling optimizations
4534 0 : let candidate_work = self.optimize_all_plan();
4535 0 : let candidate_work_len = candidate_work.len();
4536 :
4537 : // Asynchronous validate: I/O to pageservers to make sure shards are in a good state to apply validation
4538 0 : let validated_work = self.optimize_all_validate(candidate_work).await;
4539 :
4540 0 : let was_work_filtered = validated_work.len() != candidate_work_len;
4541 0 :
4542 0 : // Synchronous apply: update the shards' intent states according to validated optimisations
4543 0 : let mut reconciles_spawned = 0;
4544 0 : let mut optimizations_applied = 0;
4545 0 : let mut locked = self.inner.write().unwrap();
4546 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4547 0 : for (tenant_shard_id, optimization) in validated_work {
4548 0 : let Some(shard) = tenants.get_mut(&tenant_shard_id) else {
4549 : // Shard was dropped between planning and execution;
4550 0 : continue;
4551 : };
4552 0 : if shard.apply_optimization(scheduler, optimization) {
4553 0 : optimizations_applied += 1;
4554 0 : if self.maybe_reconcile_shard(shard, nodes).is_some() {
4555 0 : reconciles_spawned += 1;
4556 0 : }
4557 0 : }
4558 :
4559 0 : if optimizations_applied >= MAX_OPTIMIZATIONS_EXEC_PER_PASS {
4560 0 : break;
4561 0 : }
4562 : }
4563 :
4564 0 : if was_work_filtered {
4565 0 : // If we filtered any work out during validation, ensure we return a nonzero value to indicate
4566 0 : // to callers that the system is not in a truly quiet state, it's going to do some work as soon
4567 0 : // as these validations start passing.
4568 0 : reconciles_spawned = std::cmp::max(reconciles_spawned, 1);
4569 0 : }
4570 :
4571 0 : reconciles_spawned
4572 0 : }
4573 :
4574 0 : fn optimize_all_plan(&self) -> Vec<(TenantShardId, ScheduleOptimization)> {
4575 0 : let mut schedule_context = ScheduleContext::default();
4576 0 :
4577 0 : let mut tenant_shards: Vec<&TenantShard> = Vec::new();
4578 0 :
4579 0 : // How many candidate optimizations we will generate, before evaluating them for readniess: setting
4580 0 : // this higher than the execution limit gives us a chance to execute some work even if the first
4581 0 : // few optimizations we find are not ready.
4582 0 : const MAX_OPTIMIZATIONS_PLAN_PER_PASS: usize = 8;
4583 0 :
4584 0 : let mut work = Vec::new();
4585 0 :
4586 0 : let mut locked = self.inner.write().unwrap();
4587 0 : let (nodes, tenants, scheduler) = locked.parts_mut();
4588 0 : for (tenant_shard_id, shard) in tenants.iter() {
4589 0 : if tenant_shard_id.is_shard_zero() {
4590 0 : // Reset accumulators on the first shard in a tenant
4591 0 : schedule_context = ScheduleContext::default();
4592 0 : schedule_context.mode = ScheduleMode::Speculative;
4593 0 : tenant_shards.clear();
4594 0 : }
4595 :
4596 0 : if work.len() >= MAX_OPTIMIZATIONS_PLAN_PER_PASS {
4597 0 : break;
4598 0 : }
4599 0 :
4600 0 : match shard.get_scheduling_policy() {
4601 0 : ShardSchedulingPolicy::Active => {
4602 0 : // Ok to do optimization
4603 0 : }
4604 : ShardSchedulingPolicy::Essential
4605 : | ShardSchedulingPolicy::Pause
4606 : | ShardSchedulingPolicy::Stop => {
4607 : // Policy prevents optimizing this shard.
4608 0 : continue;
4609 : }
4610 : }
4611 :
4612 : // Accumulate the schedule context for all the shards in a tenant: we must have
4613 : // the total view of all shards before we can try to optimize any of them.
4614 0 : schedule_context.avoid(&shard.intent.all_pageservers());
4615 0 : if let Some(attached) = shard.intent.get_attached() {
4616 0 : schedule_context.push_attached(*attached);
4617 0 : }
4618 0 : tenant_shards.push(shard);
4619 0 :
4620 0 : // Once we have seen the last shard in the tenant, proceed to search across all shards
4621 0 : // in the tenant for optimizations
4622 0 : if shard.shard.number.0 == shard.shard.count.count() - 1 {
4623 0 : if tenant_shards.iter().any(|s| s.reconciler.is_some()) {
4624 : // Do not start any optimizations while another change to the tenant is ongoing: this
4625 : // is not necessary for correctness, but simplifies operations and implicitly throttles
4626 : // optimization changes to happen in a "trickle" over time.
4627 0 : continue;
4628 0 : }
4629 0 :
4630 0 : if tenant_shards.iter().any(|s| {
4631 0 : !matches!(s.splitting, SplitState::Idle)
4632 0 : || matches!(s.policy, PlacementPolicy::Detached)
4633 0 : }) {
4634 : // Never attempt to optimize a tenant that is currently being split, or
4635 : // a tenant that is meant to be detached
4636 0 : continue;
4637 0 : }
4638 :
4639 : // TODO: optimization calculations are relatively expensive: create some fast-path for
4640 : // the common idle case (avoiding the search on tenants that we have recently checked)
4641 :
4642 0 : for shard in &tenant_shards {
4643 0 : if let Some(optimization) =
4644 : // If idle, maybe ptimize attachments: if a shard has a secondary location that is preferable to
4645 : // its primary location based on soft constraints, cut it over.
4646 0 : shard.optimize_attachment(nodes, &schedule_context)
4647 : {
4648 0 : work.push((shard.tenant_shard_id, optimization));
4649 0 : break;
4650 0 : } else if let Some(optimization) =
4651 : // If idle, maybe optimize secondary locations: if a shard has a secondary location that would be
4652 : // better placed on another node, based on ScheduleContext, then adjust it. This
4653 : // covers cases like after a shard split, where we might have too many shards
4654 : // in the same tenant with secondary locations on the node where they originally split.
4655 0 : shard.optimize_secondary(scheduler, &schedule_context)
4656 : {
4657 0 : work.push((shard.tenant_shard_id, optimization));
4658 0 : break;
4659 0 : }
4660 :
4661 : // TODO: extend this mechanism to prefer attaching on nodes with fewer attached
4662 : // tenants (i.e. extend schedule state to distinguish attached from secondary counts),
4663 : // for the total number of attachments on a node (not just within a tenant.)
4664 : }
4665 0 : }
4666 : }
4667 :
4668 0 : work
4669 0 : }
4670 :
4671 0 : async fn optimize_all_validate(
4672 0 : &self,
4673 0 : candidate_work: Vec<(TenantShardId, ScheduleOptimization)>,
4674 0 : ) -> Vec<(TenantShardId, ScheduleOptimization)> {
4675 0 : // Take a clone of the node map to use outside the lock in async validation phase
4676 0 : let validation_nodes = { self.inner.read().unwrap().nodes.clone() };
4677 0 :
4678 0 : let mut want_secondary_status = Vec::new();
4679 0 :
4680 0 : // Validate our plans: this is an async phase where we may do I/O to pageservers to
4681 0 : // check that the state of locations is acceptable to run the optimization, such as
4682 0 : // checking that a secondary location is sufficiently warmed-up to cleanly cut over
4683 0 : // in a live migration.
4684 0 : let mut validated_work = Vec::new();
4685 0 : for (tenant_shard_id, optimization) in candidate_work {
4686 0 : match optimization.action {
4687 : ScheduleOptimizationAction::MigrateAttachment(MigrateAttachment {
4688 : old_attached_node_id: _,
4689 0 : new_attached_node_id,
4690 0 : }) => {
4691 0 : match validation_nodes.get(&new_attached_node_id) {
4692 0 : None => {
4693 0 : // Node was dropped between planning and validation
4694 0 : }
4695 0 : Some(node) => {
4696 0 : if !node.is_available() {
4697 0 : tracing::info!("Skipping optimization migration of {tenant_shard_id} to {new_attached_node_id} because node unavailable");
4698 0 : } else {
4699 0 : // Accumulate optimizations that require fetching secondary status, so that we can execute these
4700 0 : // remote API requests concurrently.
4701 0 : want_secondary_status.push((
4702 0 : tenant_shard_id,
4703 0 : node.clone(),
4704 0 : optimization,
4705 0 : ));
4706 0 : }
4707 : }
4708 : }
4709 : }
4710 : ScheduleOptimizationAction::ReplaceSecondary(_) => {
4711 : // No extra checks needed to replace a secondary: this does not interrupt client access
4712 0 : validated_work.push((tenant_shard_id, optimization))
4713 : }
4714 : };
4715 : }
4716 :
4717 : // Call into pageserver API to find out if the destination secondary location is warm enough for a reasonably smooth migration: we
4718 : // do this so that we avoid spawning a Reconciler that would have to wait minutes/hours for a destination to warm up: that reconciler
4719 : // would hold a precious reconcile semaphore unit the whole time it was waiting for the destination to warm up.
4720 0 : let results = self
4721 0 : .tenant_for_shards_api(
4722 0 : want_secondary_status
4723 0 : .iter()
4724 0 : .map(|i| (i.0, i.1.clone()))
4725 0 : .collect(),
4726 0 : |tenant_shard_id, client| async move {
4727 0 : client.tenant_secondary_status(tenant_shard_id).await
4728 0 : },
4729 0 : 1,
4730 0 : 1,
4731 0 : SHORT_RECONCILE_TIMEOUT,
4732 0 : &self.cancel,
4733 0 : )
4734 0 : .await;
4735 :
4736 0 : for ((tenant_shard_id, node, optimization), secondary_status) in
4737 0 : want_secondary_status.into_iter().zip(results.into_iter())
4738 : {
4739 0 : match secondary_status {
4740 0 : Err(e) => {
4741 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node}, error querying secondary: {e}");
4742 : }
4743 0 : Ok(progress) => {
4744 0 : // We require secondary locations to have less than 10GiB of downloads pending before we will use
4745 0 : // them in an optimization
4746 0 : const DOWNLOAD_FRESHNESS_THRESHOLD: u64 = 10 * 1024 * 1024 * 1024;
4747 0 :
4748 0 : if progress.heatmap_mtime.is_none()
4749 0 : || progress.bytes_total < DOWNLOAD_FRESHNESS_THRESHOLD
4750 0 : && progress.bytes_downloaded != progress.bytes_total
4751 0 : || progress.bytes_total - progress.bytes_downloaded
4752 0 : > DOWNLOAD_FRESHNESS_THRESHOLD
4753 : {
4754 0 : tracing::info!("Skipping migration of {tenant_shard_id} to {node} because secondary isn't ready: {progress:?}");
4755 : } else {
4756 : // Location looks ready: proceed
4757 0 : tracing::info!(
4758 0 : "{tenant_shard_id} secondary on {node} is warm enough for migration: {progress:?}"
4759 : );
4760 0 : validated_work.push((tenant_shard_id, optimization))
4761 : }
4762 : }
4763 : }
4764 : }
4765 :
4766 0 : validated_work
4767 0 : }
4768 :
4769 : /// Useful for tests: run whatever work a background [`Self::reconcile_all`] would have done, but
4770 : /// also wait for any generated Reconcilers to complete. Calling this until it returns zero should
4771 : /// put the system into a quiescent state where future background reconciliations won't do anything.
4772 0 : pub(crate) async fn reconcile_all_now(&self) -> Result<usize, ReconcileWaitError> {
4773 0 : let reconciles_spawned = self.reconcile_all();
4774 0 : let reconciles_spawned = if reconciles_spawned == 0 {
4775 : // Only optimize when we are otherwise idle
4776 0 : self.optimize_all().await
4777 : } else {
4778 0 : reconciles_spawned
4779 : };
4780 :
4781 0 : let waiters = {
4782 0 : let mut waiters = Vec::new();
4783 0 : let locked = self.inner.read().unwrap();
4784 0 : for (_tenant_shard_id, shard) in locked.tenants.iter() {
4785 0 : if let Some(waiter) = shard.get_waiter() {
4786 0 : waiters.push(waiter);
4787 0 : }
4788 : }
4789 0 : waiters
4790 0 : };
4791 0 :
4792 0 : let waiter_count = waiters.len();
4793 0 : match self.await_waiters(waiters, RECONCILE_TIMEOUT).await {
4794 0 : Ok(()) => {}
4795 0 : Err(ReconcileWaitError::Failed(_, reconcile_error))
4796 0 : if matches!(*reconcile_error, ReconcileError::Cancel) =>
4797 0 : {
4798 0 : // Ignore reconciler cancel errors: this reconciler might have shut down
4799 0 : // because some other change superceded it. We will return a nonzero number,
4800 0 : // so the caller knows they might have to call again to quiesce the system.
4801 0 : }
4802 0 : Err(e) => {
4803 0 : return Err(e);
4804 : }
4805 : };
4806 :
4807 0 : tracing::info!(
4808 0 : "{} reconciles in reconcile_all, {} waiters",
4809 : reconciles_spawned,
4810 : waiter_count
4811 : );
4812 :
4813 0 : Ok(std::cmp::max(waiter_count, reconciles_spawned))
4814 0 : }
4815 :
4816 0 : pub async fn shutdown(&self) {
4817 0 : // Note that this already stops processing any results from reconciles: so
4818 0 : // we do not expect that our [`TenantShard`] objects will reach a neat
4819 0 : // final state.
4820 0 : self.cancel.cancel();
4821 0 :
4822 0 : // The cancellation tokens in [`crate::reconciler::Reconciler`] are children
4823 0 : // of our cancellation token, so we do not need to explicitly cancel each of
4824 0 : // them.
4825 0 :
4826 0 : // Background tasks and reconcilers hold gate guards: this waits for them all
4827 0 : // to complete.
4828 0 : self.gate.close().await;
4829 0 : }
4830 : }
|