Line data Source code
1 : //! This module acts as a switchboard to access different repositories managed by this
2 : //! page server.
3 :
4 : use camino::{Utf8DirEntry, Utf8Path, Utf8PathBuf};
5 : use futures::StreamExt;
6 : use itertools::Itertools;
7 : use pageserver_api::key::Key;
8 : use pageserver_api::models::LocationConfigMode;
9 : use pageserver_api::shard::{
10 : ShardCount, ShardIdentity, ShardNumber, ShardStripeSize, TenantShardId,
11 : };
12 : use pageserver_api::upcall_api::ReAttachResponseTenant;
13 : use rand::{distributions::Alphanumeric, Rng};
14 : use std::borrow::Cow;
15 : use std::cmp::Ordering;
16 : use std::collections::{BTreeMap, HashMap};
17 : use std::ops::Deref;
18 : use std::sync::Arc;
19 : use std::time::{Duration, Instant};
20 : use sysinfo::SystemExt;
21 : use tokio::fs;
22 : use utils::timeout::{timeout_cancellable, TimeoutCancellableError};
23 :
24 : use anyhow::Context;
25 : use once_cell::sync::Lazy;
26 : use tokio::task::JoinSet;
27 : use tokio_util::sync::CancellationToken;
28 : use tracing::*;
29 :
30 : use remote_storage::GenericRemoteStorage;
31 : use utils::{completion, crashsafe};
32 :
33 : use crate::config::PageServerConf;
34 : use crate::context::{DownloadBehavior, RequestContext};
35 : use crate::control_plane_client::{
36 : ControlPlaneClient, ControlPlaneGenerationsApi, RetryForeverError,
37 : };
38 : use crate::deletion_queue::DeletionQueueClient;
39 : use crate::http::routes::ACTIVE_TENANT_TIMEOUT;
40 : use crate::metrics::{TENANT, TENANT_MANAGER as METRICS};
41 : use crate::task_mgr::{self, TaskKind};
42 : use crate::tenant::config::{
43 : AttachedLocationConfig, AttachmentMode, LocationConf, LocationMode, SecondaryLocationConfig,
44 : };
45 : use crate::tenant::delete::DeleteTenantFlow;
46 : use crate::tenant::span::debug_assert_current_span_has_tenant_id;
47 : use crate::tenant::storage_layer::inmemory_layer;
48 : use crate::tenant::timeline::ShutdownMode;
49 : use crate::tenant::{AttachedTenantConf, SpawnMode, Tenant, TenantState};
50 : use crate::{InitializationOrder, IGNORED_TENANT_FILE_NAME, METADATA_FILE_NAME, TEMP_FILE_SUFFIX};
51 :
52 : use utils::crashsafe::path_with_suffix_extension;
53 : use utils::fs_ext::PathExt;
54 : use utils::generation::Generation;
55 : use utils::id::{TenantId, TimelineId};
56 :
57 : use super::delete::DeleteTenantError;
58 : use super::secondary::SecondaryTenant;
59 : use super::timeline::detach_ancestor::PreparedTimelineDetach;
60 : use super::TenantSharedResources;
61 :
62 : /// For a tenant that appears in TenantsMap, it may either be
63 : /// - `Attached`: has a full Tenant object, is elegible to service
64 : /// reads and ingest WAL.
65 : /// - `Secondary`: is only keeping a local cache warm.
66 : ///
67 : /// Secondary is a totally distinct state rather than being a mode of a `Tenant`, because
68 : /// that way we avoid having to carefully switch a tenant's ingestion etc on and off during
69 : /// its lifetime, and we can preserve some important safety invariants like `Tenant` always
70 : /// having a properly acquired generation (Secondary doesn't need a generation)
71 : #[derive(Clone)]
72 : pub(crate) enum TenantSlot {
73 : Attached(Arc<Tenant>),
74 : Secondary(Arc<SecondaryTenant>),
75 : /// In this state, other administrative operations acting on the TenantId should
76 : /// block, or return a retry indicator equivalent to HTTP 503.
77 : InProgress(utils::completion::Barrier),
78 : }
79 :
80 : impl std::fmt::Debug for TenantSlot {
81 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
82 0 : match self {
83 0 : Self::Attached(tenant) => write!(f, "Attached({})", tenant.current_state()),
84 0 : Self::Secondary(_) => write!(f, "Secondary"),
85 0 : Self::InProgress(_) => write!(f, "InProgress"),
86 : }
87 0 : }
88 : }
89 :
90 : impl TenantSlot {
91 : /// Return the `Tenant` in this slot if attached, else None
92 0 : fn get_attached(&self) -> Option<&Arc<Tenant>> {
93 0 : match self {
94 0 : Self::Attached(t) => Some(t),
95 0 : Self::Secondary(_) => None,
96 0 : Self::InProgress(_) => None,
97 : }
98 0 : }
99 : }
100 :
101 : /// The tenants known to the pageserver.
102 : /// The enum variants are used to distinguish the different states that the pageserver can be in.
103 : pub(crate) enum TenantsMap {
104 : /// [`init_tenant_mgr`] is not done yet.
105 : Initializing,
106 : /// [`init_tenant_mgr`] is done, all on-disk tenants have been loaded.
107 : /// New tenants can be added using [`tenant_map_acquire_slot`].
108 : Open(BTreeMap<TenantShardId, TenantSlot>),
109 : /// The pageserver has entered shutdown mode via [`TenantManager::shutdown`].
110 : /// Existing tenants are still accessible, but no new tenants can be created.
111 : ShuttingDown(BTreeMap<TenantShardId, TenantSlot>),
112 : }
113 :
114 : pub(crate) enum TenantsMapRemoveResult {
115 : Occupied(TenantSlot),
116 : Vacant,
117 : InProgress(utils::completion::Barrier),
118 : }
119 :
120 : /// When resolving a TenantId to a shard, we may be looking for the 0th
121 : /// shard, or we might be looking for whichever shard holds a particular page.
122 : pub(crate) enum ShardSelector {
123 : /// Only return the 0th shard, if it is present. If a non-0th shard is present,
124 : /// ignore it.
125 : Zero,
126 : /// Pick the first shard we find for the TenantId
127 : First,
128 : /// Pick the shard that holds this key
129 : Page(Key),
130 : }
131 :
132 : /// A convenience for use with the re_attach ControlPlaneClient function: rather
133 : /// than the serializable struct, we build this enum that encapsulates
134 : /// the invariant that attached tenants always have generations.
135 : ///
136 : /// This represents the subset of a LocationConfig that we receive during re-attach.
137 : pub(crate) enum TenantStartupMode {
138 : Attached((AttachmentMode, Generation)),
139 : Secondary,
140 : }
141 :
142 : impl TenantStartupMode {
143 : /// Return the generation & mode that should be used when starting
144 : /// this tenant.
145 : ///
146 : /// If this returns None, the re-attach struct is in an invalid state and
147 : /// should be ignored in the response.
148 0 : fn from_reattach_tenant(rart: ReAttachResponseTenant) -> Option<Self> {
149 0 : match (rart.mode, rart.gen) {
150 0 : (LocationConfigMode::Detached, _) => None,
151 0 : (LocationConfigMode::Secondary, _) => Some(Self::Secondary),
152 0 : (LocationConfigMode::AttachedMulti, Some(g)) => {
153 0 : Some(Self::Attached((AttachmentMode::Multi, Generation::new(g))))
154 : }
155 0 : (LocationConfigMode::AttachedSingle, Some(g)) => {
156 0 : Some(Self::Attached((AttachmentMode::Single, Generation::new(g))))
157 : }
158 0 : (LocationConfigMode::AttachedStale, Some(g)) => {
159 0 : Some(Self::Attached((AttachmentMode::Stale, Generation::new(g))))
160 : }
161 : _ => {
162 0 : tracing::warn!(
163 0 : "Received invalid re-attach state for tenant {}: {rart:?}",
164 : rart.id
165 : );
166 0 : None
167 : }
168 : }
169 0 : }
170 : }
171 :
172 : impl TenantsMap {
173 : /// Convenience function for typical usage, where we want to get a `Tenant` object, for
174 : /// working with attached tenants. If the TenantId is in the map but in Secondary state,
175 : /// None is returned.
176 0 : pub(crate) fn get(&self, tenant_shard_id: &TenantShardId) -> Option<&Arc<Tenant>> {
177 0 : match self {
178 0 : TenantsMap::Initializing => None,
179 0 : TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => {
180 0 : m.get(tenant_shard_id).and_then(|slot| slot.get_attached())
181 : }
182 : }
183 0 : }
184 :
185 : /// A page service client sends a TenantId, and to look up the correct Tenant we must
186 : /// resolve this to a fully qualified TenantShardId.
187 0 : fn resolve_attached_shard(
188 0 : &self,
189 0 : tenant_id: &TenantId,
190 0 : selector: ShardSelector,
191 0 : ) -> Option<TenantShardId> {
192 0 : let mut want_shard = None;
193 0 : match self {
194 0 : TenantsMap::Initializing => None,
195 0 : TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => {
196 0 : for slot in m.range(TenantShardId::tenant_range(*tenant_id)) {
197 : // Ignore all slots that don't contain an attached tenant
198 0 : let tenant = match &slot.1 {
199 0 : TenantSlot::Attached(t) => t,
200 0 : _ => continue,
201 : };
202 :
203 0 : match selector {
204 0 : ShardSelector::First => return Some(*slot.0),
205 0 : ShardSelector::Zero if slot.0.shard_number == ShardNumber(0) => {
206 0 : return Some(*slot.0)
207 : }
208 0 : ShardSelector::Page(key) => {
209 0 : // First slot we see for this tenant, calculate the expected shard number
210 0 : // for the key: we will use this for checking if this and subsequent
211 0 : // slots contain the key, rather than recalculating the hash each time.
212 0 : if want_shard.is_none() {
213 0 : want_shard = Some(tenant.shard_identity.get_shard_number(&key));
214 0 : }
215 :
216 0 : if Some(tenant.shard_identity.number) == want_shard {
217 0 : return Some(*slot.0);
218 0 : }
219 : }
220 0 : _ => continue,
221 : }
222 : }
223 :
224 : // Fall through: we didn't find an acceptable shard
225 0 : None
226 : }
227 : }
228 0 : }
229 :
230 : /// Only for use from DeleteTenantFlow. This method directly removes a TenantSlot from the map.
231 : ///
232 : /// The normal way to remove a tenant is using a SlotGuard, which will gracefully remove the guarded
233 : /// slot if the enclosed tenant is shutdown.
234 0 : pub(crate) fn remove(&mut self, tenant_shard_id: TenantShardId) -> TenantsMapRemoveResult {
235 0 : use std::collections::btree_map::Entry;
236 0 : match self {
237 0 : TenantsMap::Initializing => TenantsMapRemoveResult::Vacant,
238 0 : TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => match m.entry(tenant_shard_id) {
239 0 : Entry::Occupied(entry) => match entry.get() {
240 0 : TenantSlot::InProgress(barrier) => {
241 0 : TenantsMapRemoveResult::InProgress(barrier.clone())
242 : }
243 0 : _ => TenantsMapRemoveResult::Occupied(entry.remove()),
244 : },
245 0 : Entry::Vacant(_entry) => TenantsMapRemoveResult::Vacant,
246 : },
247 : }
248 0 : }
249 :
250 : #[cfg(all(debug_assertions, not(test)))]
251 0 : pub(crate) fn len(&self) -> usize {
252 0 : match self {
253 0 : TenantsMap::Initializing => 0,
254 0 : TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m.len(),
255 : }
256 0 : }
257 : }
258 :
259 : /// Precursor to deletion of a tenant dir: we do a fast rename to a tmp path, and then
260 : /// the slower actual deletion in the background.
261 : ///
262 : /// This is "safe" in that that it won't leave behind a partially deleted directory
263 : /// at the original path, because we rename with TEMP_FILE_SUFFIX before starting deleting
264 : /// the contents.
265 : ///
266 : /// This is pageserver-specific, as it relies on future processes after a crash to check
267 : /// for TEMP_FILE_SUFFIX when loading things.
268 0 : async fn safe_rename_tenant_dir(path: impl AsRef<Utf8Path>) -> std::io::Result<Utf8PathBuf> {
269 0 : let parent = path
270 0 : .as_ref()
271 0 : .parent()
272 0 : // It is invalid to call this function with a relative path. Tenant directories
273 0 : // should always have a parent.
274 0 : .ok_or(std::io::Error::new(
275 0 : std::io::ErrorKind::InvalidInput,
276 0 : "Path must be absolute",
277 0 : ))?;
278 0 : let rand_suffix = rand::thread_rng()
279 0 : .sample_iter(&Alphanumeric)
280 0 : .take(8)
281 0 : .map(char::from)
282 0 : .collect::<String>()
283 0 : + TEMP_FILE_SUFFIX;
284 0 : let tmp_path = path_with_suffix_extension(&path, &rand_suffix);
285 0 : fs::rename(path.as_ref(), &tmp_path).await?;
286 0 : fs::File::open(parent).await?.sync_all().await?;
287 0 : Ok(tmp_path)
288 0 : }
289 :
290 : /// When we have moved a tenant's content to a temporary directory, we may delete it lazily in
291 : /// the background, and thereby avoid blocking any API requests on this deletion completing.
292 0 : fn spawn_background_purge(tmp_path: Utf8PathBuf) {
293 0 : // Although we are cleaning up the tenant, this task is not meant to be bound by the lifetime of the tenant in memory.
294 0 : // After a tenant is detached, there are no more task_mgr tasks for that tenant_id.
295 0 : let task_tenant_id = None;
296 0 :
297 0 : task_mgr::spawn(
298 0 : task_mgr::BACKGROUND_RUNTIME.handle(),
299 0 : TaskKind::MgmtRequest,
300 0 : task_tenant_id,
301 0 : None,
302 0 : "tenant_files_delete",
303 0 : false,
304 0 : async move {
305 0 : fs::remove_dir_all(tmp_path.as_path())
306 0 : .await
307 0 : .with_context(|| format!("tenant directory {:?} deletion", tmp_path))
308 0 : },
309 0 : );
310 0 : }
311 :
312 : static TENANTS: Lazy<std::sync::RwLock<TenantsMap>> =
313 2 : Lazy::new(|| std::sync::RwLock::new(TenantsMap::Initializing));
314 :
315 : /// The TenantManager is responsible for storing and mutating the collection of all tenants
316 : /// that this pageserver process has state for. Every Tenant and SecondaryTenant instance
317 : /// lives inside the TenantManager.
318 : ///
319 : /// The most important role of the TenantManager is to prevent conflicts: e.g. trying to attach
320 : /// the same tenant twice concurrently, or trying to configure the same tenant into secondary
321 : /// and attached modes concurrently.
322 : pub struct TenantManager {
323 : conf: &'static PageServerConf,
324 : // TODO: currently this is a &'static pointing to TENANTs. When we finish refactoring
325 : // out of that static variable, the TenantManager can own this.
326 : // See https://github.com/neondatabase/neon/issues/5796
327 : tenants: &'static std::sync::RwLock<TenantsMap>,
328 : resources: TenantSharedResources,
329 :
330 : // Long-running operations that happen outside of a [`Tenant`] lifetime should respect this token.
331 : // This is for edge cases like tenant deletion. In normal cases (within a Tenant lifetime),
332 : // tenants have their own cancellation tokens, which we fire individually in [`Self::shutdown`], or
333 : // when the tenant detaches.
334 : cancel: CancellationToken,
335 : }
336 :
337 0 : fn emergency_generations(
338 0 : tenant_confs: &HashMap<TenantShardId, anyhow::Result<LocationConf>>,
339 0 : ) -> HashMap<TenantShardId, TenantStartupMode> {
340 0 : tenant_confs
341 0 : .iter()
342 0 : .filter_map(|(tid, lc)| {
343 0 : let lc = match lc {
344 0 : Ok(lc) => lc,
345 0 : Err(_) => return None,
346 : };
347 : Some((
348 0 : *tid,
349 0 : match &lc.mode {
350 0 : LocationMode::Attached(alc) => {
351 0 : TenantStartupMode::Attached((alc.attach_mode, alc.generation))
352 : }
353 0 : LocationMode::Secondary(_) => TenantStartupMode::Secondary,
354 : },
355 : ))
356 0 : })
357 0 : .collect()
358 0 : }
359 :
360 0 : async fn init_load_generations(
361 0 : conf: &'static PageServerConf,
362 0 : tenant_confs: &HashMap<TenantShardId, anyhow::Result<LocationConf>>,
363 0 : resources: &TenantSharedResources,
364 0 : cancel: &CancellationToken,
365 0 : ) -> anyhow::Result<Option<HashMap<TenantShardId, TenantStartupMode>>> {
366 0 : let generations = if conf.control_plane_emergency_mode {
367 0 : error!(
368 0 : "Emergency mode! Tenants will be attached unsafely using their last known generation"
369 : );
370 0 : emergency_generations(tenant_confs)
371 0 : } else if let Some(client) = ControlPlaneClient::new(conf, cancel) {
372 0 : info!("Calling control plane API to re-attach tenants");
373 : // If we are configured to use the control plane API, then it is the source of truth for what tenants to load.
374 0 : match client.re_attach(conf).await {
375 0 : Ok(tenants) => tenants
376 0 : .into_iter()
377 0 : .flat_map(|(id, rart)| {
378 0 : TenantStartupMode::from_reattach_tenant(rart).map(|tsm| (id, tsm))
379 0 : })
380 0 : .collect(),
381 : Err(RetryForeverError::ShuttingDown) => {
382 0 : anyhow::bail!("Shut down while waiting for control plane re-attach response")
383 : }
384 : }
385 : } else {
386 0 : info!("Control plane API not configured, tenant generations are disabled");
387 0 : return Ok(None);
388 : };
389 :
390 : // The deletion queue needs to know about the startup attachment state to decide which (if any) stored
391 : // deletion list entries may still be valid. We provide that by pushing a recovery operation into
392 : // the queue. Sequential processing of te queue ensures that recovery is done before any new tenant deletions
393 : // are processed, even though we don't block on recovery completing here.
394 : //
395 : // Must only do this if remote storage is enabled, otherwise deletion queue
396 : // is not running and channel push will fail.
397 0 : if resources.remote_storage.is_some() {
398 0 : let attached_tenants = generations
399 0 : .iter()
400 0 : .flat_map(|(id, start_mode)| {
401 0 : match start_mode {
402 0 : TenantStartupMode::Attached((_mode, generation)) => Some(generation),
403 0 : TenantStartupMode::Secondary => None,
404 : }
405 0 : .map(|gen| (*id, *gen))
406 0 : })
407 0 : .collect();
408 0 : resources.deletion_queue_client.recover(attached_tenants)?;
409 0 : }
410 :
411 0 : Ok(Some(generations))
412 0 : }
413 :
414 : /// Given a directory discovered in the pageserver's tenants/ directory, attempt
415 : /// to load a tenant config from it.
416 : ///
417 : /// If file is missing, return Ok(None)
418 0 : fn load_tenant_config(
419 0 : conf: &'static PageServerConf,
420 0 : dentry: Utf8DirEntry,
421 0 : ) -> anyhow::Result<Option<(TenantShardId, anyhow::Result<LocationConf>)>> {
422 0 : let tenant_dir_path = dentry.path().to_path_buf();
423 0 : if crate::is_temporary(&tenant_dir_path) {
424 0 : info!("Found temporary tenant directory, removing: {tenant_dir_path}");
425 : // No need to use safe_remove_tenant_dir_all because this is already
426 : // a temporary path
427 0 : if let Err(e) = std::fs::remove_dir_all(&tenant_dir_path) {
428 0 : error!(
429 0 : "Failed to remove temporary directory '{}': {:?}",
430 : tenant_dir_path, e
431 : );
432 0 : }
433 0 : return Ok(None);
434 0 : }
435 :
436 : // This case happens if we crash during attachment before writing a config into the dir
437 0 : let is_empty = tenant_dir_path
438 0 : .is_empty_dir()
439 0 : .with_context(|| format!("Failed to check whether {tenant_dir_path:?} is an empty dir"))?;
440 0 : if is_empty {
441 0 : info!("removing empty tenant directory {tenant_dir_path:?}");
442 0 : if let Err(e) = std::fs::remove_dir(&tenant_dir_path) {
443 0 : error!(
444 0 : "Failed to remove empty tenant directory '{}': {e:#}",
445 : tenant_dir_path
446 : )
447 0 : }
448 0 : return Ok(None);
449 0 : }
450 :
451 0 : let tenant_shard_id = match tenant_dir_path
452 0 : .file_name()
453 0 : .unwrap_or_default()
454 0 : .parse::<TenantShardId>()
455 : {
456 0 : Ok(id) => id,
457 : Err(_) => {
458 0 : warn!("Invalid tenant path (garbage in our repo directory?): {tenant_dir_path}",);
459 0 : return Ok(None);
460 : }
461 : };
462 :
463 : // Clean up legacy `metadata` files.
464 : // Doing it here because every single tenant directory is visited here.
465 : // In any later code, there's different treatment of tenant dirs
466 : // ... depending on whether the tenant is in re-attach response or not
467 : // ... epending on whether the tenant is ignored or not
468 0 : assert_eq!(
469 0 : &conf.tenant_path(&tenant_shard_id),
470 0 : &tenant_dir_path,
471 0 : "later use of conf....path() methods would be dubious"
472 : );
473 0 : let timelines: Vec<TimelineId> = match conf.timelines_path(&tenant_shard_id).read_dir_utf8() {
474 0 : Ok(iter) => {
475 0 : let mut timelines = Vec::new();
476 0 : for res in iter {
477 0 : let p = res?;
478 0 : let Some(timeline_id) = p.file_name().parse::<TimelineId>().ok() else {
479 : // skip any entries that aren't TimelineId, such as
480 : // - *.___temp dirs
481 : // - unfinished initdb uploads (test_non_uploaded_root_timeline_is_deleted_after_restart)
482 0 : continue;
483 : };
484 0 : timelines.push(timeline_id);
485 : }
486 0 : timelines
487 : }
488 0 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => vec![],
489 0 : Err(e) => return Err(anyhow::anyhow!(e)),
490 : };
491 0 : for timeline_id in timelines {
492 0 : let timeline_path = &conf.timeline_path(&tenant_shard_id, &timeline_id);
493 0 : let metadata_path = timeline_path.join(METADATA_FILE_NAME);
494 0 : match std::fs::remove_file(&metadata_path) {
495 : Ok(()) => {
496 0 : crashsafe::fsync(timeline_path)
497 0 : .context("fsync timeline dir after removing legacy metadata file")?;
498 0 : info!("removed legacy metadata file at {metadata_path}");
499 : }
500 0 : Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
501 0 : // something removed the file earlier, or it was never there
502 0 : // We don't care, this software version doesn't write it again, so, we're good.
503 0 : }
504 0 : Err(e) => {
505 0 : anyhow::bail!("remove legacy metadata file: {e}: {metadata_path}");
506 : }
507 : }
508 : }
509 :
510 0 : let tenant_ignore_mark_file = tenant_dir_path.join(IGNORED_TENANT_FILE_NAME);
511 0 : if tenant_ignore_mark_file.exists() {
512 0 : info!("Found an ignore mark file {tenant_ignore_mark_file:?}, skipping the tenant");
513 0 : return Ok(None);
514 0 : }
515 0 :
516 0 : Ok(Some((
517 0 : tenant_shard_id,
518 0 : Tenant::load_tenant_config(conf, &tenant_shard_id),
519 0 : )))
520 0 : }
521 :
522 : /// Initial stage of load: walk the local tenants directory, clean up any temp files,
523 : /// and load configurations for the tenants we found.
524 : ///
525 : /// Do this in parallel, because we expect 10k+ tenants, so serial execution can take
526 : /// seconds even on reasonably fast drives.
527 0 : async fn init_load_tenant_configs(
528 0 : conf: &'static PageServerConf,
529 0 : ) -> anyhow::Result<HashMap<TenantShardId, anyhow::Result<LocationConf>>> {
530 0 : let tenants_dir = conf.tenants_path();
531 :
532 0 : let dentries = tokio::task::spawn_blocking(move || -> anyhow::Result<Vec<Utf8DirEntry>> {
533 0 : let dir_entries = tenants_dir
534 0 : .read_dir_utf8()
535 0 : .with_context(|| format!("Failed to list tenants dir {tenants_dir:?}"))?;
536 :
537 0 : Ok(dir_entries.collect::<Result<Vec<_>, std::io::Error>>()?)
538 0 : })
539 0 : .await??;
540 :
541 0 : let mut configs = HashMap::new();
542 0 :
543 0 : let mut join_set = JoinSet::new();
544 0 : for dentry in dentries {
545 0 : join_set.spawn_blocking(move || load_tenant_config(conf, dentry));
546 0 : }
547 :
548 0 : while let Some(r) = join_set.join_next().await {
549 0 : if let Some((tenant_id, tenant_config)) = r?? {
550 0 : configs.insert(tenant_id, tenant_config);
551 0 : }
552 : }
553 :
554 0 : Ok(configs)
555 0 : }
556 :
557 : /// Initialize repositories with locally available timelines.
558 : /// Timelines that are only partially available locally (remote storage has more data than this pageserver)
559 : /// are scheduled for download and added to the tenant once download is completed.
560 0 : #[instrument(skip_all)]
561 : pub async fn init_tenant_mgr(
562 : conf: &'static PageServerConf,
563 : resources: TenantSharedResources,
564 : init_order: InitializationOrder,
565 : cancel: CancellationToken,
566 : ) -> anyhow::Result<TenantManager> {
567 : let mut tenants = BTreeMap::new();
568 :
569 : let ctx = RequestContext::todo_child(TaskKind::Startup, DownloadBehavior::Warn);
570 :
571 : // Initialize dynamic limits that depend on system resources
572 : let system_memory =
573 : sysinfo::System::new_with_specifics(sysinfo::RefreshKind::new().with_memory())
574 : .total_memory();
575 : let max_ephemeral_layer_bytes =
576 : conf.ephemeral_bytes_per_memory_kb as u64 * (system_memory / 1024);
577 : tracing::info!("Initialized ephemeral layer size limit to {max_ephemeral_layer_bytes}, for {system_memory} bytes of memory");
578 : inmemory_layer::GLOBAL_RESOURCES.max_dirty_bytes.store(
579 : max_ephemeral_layer_bytes,
580 : std::sync::atomic::Ordering::Relaxed,
581 : );
582 :
583 : // Scan local filesystem for attached tenants
584 : let tenant_configs = init_load_tenant_configs(conf).await?;
585 :
586 : // Determine which tenants are to be secondary or attached, and in which generation
587 : let tenant_modes = init_load_generations(conf, &tenant_configs, &resources, &cancel).await?;
588 :
589 : tracing::info!(
590 : "Attaching {} tenants at startup, warming up {} at a time",
591 : tenant_configs.len(),
592 : conf.concurrent_tenant_warmup.initial_permits()
593 : );
594 : TENANT.startup_scheduled.inc_by(tenant_configs.len() as u64);
595 :
596 : // Accumulate futures for writing tenant configs, so that we can execute in parallel
597 : let mut config_write_futs = Vec::new();
598 :
599 : // Update the location configs according to the re-attach response and persist them to disk
600 : tracing::info!("Updating {} location configs", tenant_configs.len());
601 : for (tenant_shard_id, location_conf) in tenant_configs {
602 : let tenant_dir_path = conf.tenant_path(&tenant_shard_id);
603 :
604 : let mut location_conf = match location_conf {
605 : Ok(l) => l,
606 : Err(e) => {
607 : warn!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Marking tenant broken, failed to {e:#}");
608 :
609 : tenants.insert(
610 : tenant_shard_id,
611 : TenantSlot::Attached(Tenant::create_broken_tenant(
612 : conf,
613 : tenant_shard_id,
614 : format!("{}", e),
615 : )),
616 : );
617 : continue;
618 : }
619 : };
620 :
621 : // FIXME: if we were attached, and get demoted to secondary on re-attach, we
622 : // don't have a place to get a config.
623 : // (https://github.com/neondatabase/neon/issues/5377)
624 : const DEFAULT_SECONDARY_CONF: SecondaryLocationConfig =
625 : SecondaryLocationConfig { warm: true };
626 :
627 : if let Some(tenant_modes) = &tenant_modes {
628 : // We have a generation map: treat it as the authority for whether
629 : // this tenant is really attached.
630 : match tenant_modes.get(&tenant_shard_id) {
631 : None => {
632 : info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Detaching tenant, control plane omitted it in re-attach response");
633 :
634 : match safe_rename_tenant_dir(&tenant_dir_path).await {
635 : Ok(tmp_path) => {
636 : spawn_background_purge(tmp_path);
637 : }
638 : Err(e) => {
639 : error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
640 : "Failed to move detached tenant directory '{tenant_dir_path}': {e:?}");
641 : }
642 : };
643 :
644 : // We deleted local content: move on to next tenant, don't try and spawn this one.
645 : continue;
646 : }
647 : Some(TenantStartupMode::Secondary) => {
648 : if !matches!(location_conf.mode, LocationMode::Secondary(_)) {
649 : location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF);
650 : }
651 : }
652 : Some(TenantStartupMode::Attached((attach_mode, generation))) => {
653 : let old_gen_higher = match &location_conf.mode {
654 : LocationMode::Attached(AttachedLocationConfig {
655 : generation: old_generation,
656 : attach_mode: _attach_mode,
657 : }) => {
658 : if old_generation > generation {
659 : Some(old_generation)
660 : } else {
661 : None
662 : }
663 : }
664 : _ => None,
665 : };
666 : if let Some(old_generation) = old_gen_higher {
667 : tracing::error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),
668 : "Control plane gave decreasing generation ({generation:?}) in re-attach response for tenant that was attached in generation {:?}, demoting to secondary",
669 : old_generation
670 : );
671 :
672 : // We cannot safely attach this tenant given a bogus generation number, but let's avoid throwing away
673 : // local disk content: demote to secondary rather than detaching.
674 : location_conf.mode = LocationMode::Secondary(DEFAULT_SECONDARY_CONF);
675 : } else {
676 : location_conf.attach_in_generation(*attach_mode, *generation);
677 : }
678 : }
679 : }
680 : } else {
681 : // Legacy mode: no generation information, any tenant present
682 : // on local disk may activate
683 : info!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Starting tenant in legacy mode, no generation",);
684 : };
685 :
686 : // Presence of a generation number implies attachment: attach the tenant
687 : // if it wasn't already, and apply the generation number.
688 0 : config_write_futs.push(async move {
689 0 : let r = Tenant::persist_tenant_config(conf, &tenant_shard_id, &location_conf).await;
690 0 : (tenant_shard_id, location_conf, r)
691 0 : });
692 : }
693 :
694 : // Execute config writes with concurrency, to avoid bottlenecking on local FS write latency
695 : tracing::info!(
696 : "Writing {} location config files...",
697 : config_write_futs.len()
698 : );
699 : let config_write_results = futures::stream::iter(config_write_futs)
700 : .buffer_unordered(16)
701 : .collect::<Vec<_>>()
702 : .await;
703 :
704 : tracing::info!(
705 : "Spawning {} tenant shard locations...",
706 : config_write_results.len()
707 : );
708 : // For those shards that have live configurations, construct `Tenant` or `SecondaryTenant` objects and start them running
709 : for (tenant_shard_id, location_conf, config_write_result) in config_write_results {
710 : // Errors writing configs are fatal
711 : config_write_result?;
712 :
713 : let tenant_dir_path = conf.tenant_path(&tenant_shard_id);
714 : let shard_identity = location_conf.shard;
715 : let slot = match location_conf.mode {
716 : LocationMode::Attached(attached_conf) => {
717 : match tenant_spawn(
718 : conf,
719 : tenant_shard_id,
720 : &tenant_dir_path,
721 : resources.clone(),
722 : AttachedTenantConf::new(location_conf.tenant_conf, attached_conf),
723 : shard_identity,
724 : Some(init_order.clone()),
725 : &TENANTS,
726 : SpawnMode::Lazy,
727 : &ctx,
728 : ) {
729 : Ok(tenant) => TenantSlot::Attached(tenant),
730 : Err(e) => {
731 : error!(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), "Failed to start tenant: {e:#}");
732 : continue;
733 : }
734 : }
735 : }
736 : LocationMode::Secondary(secondary_conf) => {
737 : info!(
738 : tenant_id = %tenant_shard_id.tenant_id,
739 : shard_id = %tenant_shard_id.shard_slug(),
740 : "Starting secondary tenant"
741 : );
742 : TenantSlot::Secondary(SecondaryTenant::new(
743 : tenant_shard_id,
744 : shard_identity,
745 : location_conf.tenant_conf,
746 : &secondary_conf,
747 : ))
748 : }
749 : };
750 :
751 : METRICS.slot_inserted(&slot);
752 : tenants.insert(tenant_shard_id, slot);
753 : }
754 :
755 : info!("Processed {} local tenants at startup", tenants.len());
756 :
757 : let mut tenants_map = TENANTS.write().unwrap();
758 : assert!(matches!(&*tenants_map, &TenantsMap::Initializing));
759 :
760 : *tenants_map = TenantsMap::Open(tenants);
761 :
762 : Ok(TenantManager {
763 : conf,
764 : tenants: &TENANTS,
765 : resources,
766 : cancel: CancellationToken::new(),
767 : })
768 : }
769 :
770 : /// Wrapper for Tenant::spawn that checks invariants before running, and inserts
771 : /// a broken tenant in the map if Tenant::spawn fails.
772 : #[allow(clippy::too_many_arguments)]
773 0 : fn tenant_spawn(
774 0 : conf: &'static PageServerConf,
775 0 : tenant_shard_id: TenantShardId,
776 0 : tenant_path: &Utf8Path,
777 0 : resources: TenantSharedResources,
778 0 : location_conf: AttachedTenantConf,
779 0 : shard_identity: ShardIdentity,
780 0 : init_order: Option<InitializationOrder>,
781 0 : tenants: &'static std::sync::RwLock<TenantsMap>,
782 0 : mode: SpawnMode,
783 0 : ctx: &RequestContext,
784 0 : ) -> anyhow::Result<Arc<Tenant>> {
785 0 : anyhow::ensure!(
786 0 : tenant_path.is_dir(),
787 0 : "Cannot load tenant from path {tenant_path:?}, it either does not exist or not a directory"
788 : );
789 0 : anyhow::ensure!(
790 0 : !crate::is_temporary(tenant_path),
791 0 : "Cannot load tenant from temporary path {tenant_path:?}"
792 : );
793 0 : anyhow::ensure!(
794 0 : !tenant_path.is_empty_dir().with_context(|| {
795 0 : format!("Failed to check whether {tenant_path:?} is an empty dir")
796 0 : })?,
797 0 : "Cannot load tenant from empty directory {tenant_path:?}"
798 : );
799 :
800 0 : let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
801 0 : anyhow::ensure!(
802 0 : !conf.tenant_ignore_mark_file_path(&tenant_shard_id).exists(),
803 0 : "Cannot load tenant, ignore mark found at {tenant_ignore_mark:?}"
804 : );
805 :
806 0 : let tenant = match Tenant::spawn(
807 0 : conf,
808 0 : tenant_shard_id,
809 0 : resources,
810 0 : location_conf,
811 0 : shard_identity,
812 0 : init_order,
813 0 : tenants,
814 0 : mode,
815 0 : ctx,
816 0 : ) {
817 0 : Ok(tenant) => tenant,
818 0 : Err(e) => {
819 0 : error!("Failed to spawn tenant {tenant_shard_id}, reason: {e:#}");
820 0 : Tenant::create_broken_tenant(conf, tenant_shard_id, format!("{e:#}"))
821 : }
822 : };
823 :
824 0 : Ok(tenant)
825 0 : }
826 :
827 2 : async fn shutdown_all_tenants0(tenants: &std::sync::RwLock<TenantsMap>) {
828 2 : let mut join_set = JoinSet::new();
829 0 :
830 0 : #[cfg(all(debug_assertions, not(test)))]
831 0 : {
832 0 : // Check that our metrics properly tracked the size of the tenants map. This is a convenient location to check,
833 0 : // as it happens implicitly at the end of tests etc.
834 0 : let m = tenants.read().unwrap();
835 0 : debug_assert_eq!(METRICS.slots_total(), m.len() as u64);
836 : }
837 :
838 : // Atomically, 1. create the shutdown tasks and 2. prevent creation of new tenants.
839 2 : let (total_in_progress, total_attached) = {
840 2 : let mut m = tenants.write().unwrap();
841 2 : match &mut *m {
842 : TenantsMap::Initializing => {
843 0 : *m = TenantsMap::ShuttingDown(BTreeMap::default());
844 0 : info!("tenants map is empty");
845 0 : return;
846 : }
847 2 : TenantsMap::Open(tenants) => {
848 2 : let mut shutdown_state = BTreeMap::new();
849 2 : let mut total_in_progress = 0;
850 2 : let mut total_attached = 0;
851 :
852 2 : for (tenant_shard_id, v) in std::mem::take(tenants).into_iter() {
853 2 : match v {
854 0 : TenantSlot::Attached(t) => {
855 0 : shutdown_state.insert(tenant_shard_id, TenantSlot::Attached(t.clone()));
856 0 : join_set.spawn(
857 0 : async move {
858 0 : let res = {
859 0 : let (_guard, shutdown_progress) = completion::channel();
860 0 : t.shutdown(shutdown_progress, ShutdownMode::FreezeAndFlush).await
861 : };
862 :
863 0 : if let Err(other_progress) = res {
864 : // join the another shutdown in progress
865 0 : other_progress.wait().await;
866 0 : }
867 :
868 : // we cannot afford per tenant logging here, because if s3 is degraded, we are
869 : // going to log too many lines
870 0 : debug!("tenant successfully stopped");
871 0 : }
872 0 : .instrument(info_span!("shutdown", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())),
873 : );
874 :
875 0 : total_attached += 1;
876 : }
877 0 : TenantSlot::Secondary(state) => {
878 0 : // We don't need to wait for this individually per-tenant: the
879 0 : // downloader task will be waited on eventually, this cancel
880 0 : // is just to encourage it to drop out if it is doing work
881 0 : // for this tenant right now.
882 0 : state.cancel.cancel();
883 0 :
884 0 : shutdown_state.insert(tenant_shard_id, TenantSlot::Secondary(state));
885 0 : }
886 2 : TenantSlot::InProgress(notify) => {
887 2 : // InProgress tenants are not visible in TenantsMap::ShuttingDown: we will
888 2 : // wait for their notifications to fire in this function.
889 2 : join_set.spawn(async move {
890 2 : notify.wait().await;
891 2 : });
892 2 :
893 2 : total_in_progress += 1;
894 2 : }
895 : }
896 : }
897 2 : *m = TenantsMap::ShuttingDown(shutdown_state);
898 2 : (total_in_progress, total_attached)
899 : }
900 : TenantsMap::ShuttingDown(_) => {
901 0 : error!("already shutting down, this function isn't supposed to be called more than once");
902 0 : return;
903 : }
904 : }
905 : };
906 :
907 2 : let started_at = std::time::Instant::now();
908 2 :
909 2 : info!(
910 0 : "Waiting for {} InProgress tenants and {} Attached tenants to shut down",
911 : total_in_progress, total_attached
912 : );
913 :
914 2 : let total = join_set.len();
915 2 : let mut panicked = 0;
916 2 : let mut buffering = true;
917 2 : const BUFFER_FOR: std::time::Duration = std::time::Duration::from_millis(500);
918 2 : let mut buffered = std::pin::pin!(tokio::time::sleep(BUFFER_FOR));
919 :
920 6 : while !join_set.is_empty() {
921 : tokio::select! {
922 : Some(joined) = join_set.join_next() => {
923 : match joined {
924 : Ok(()) => {},
925 : Err(join_error) if join_error.is_cancelled() => {
926 : unreachable!("we are not cancelling any of the tasks");
927 : }
928 : Err(join_error) if join_error.is_panic() => {
929 : // cannot really do anything, as this panic is likely a bug
930 : panicked += 1;
931 : }
932 : Err(join_error) => {
933 : warn!("unknown kind of JoinError: {join_error}");
934 : }
935 : }
936 : if !buffering {
937 : // buffer so that every 500ms since the first update (or starting) we'll log
938 : // how far away we are; this is because we will get SIGKILL'd at 10s, and we
939 : // are not able to log *then*.
940 : buffering = true;
941 : buffered.as_mut().reset(tokio::time::Instant::now() + BUFFER_FOR);
942 : }
943 : },
944 : _ = &mut buffered, if buffering => {
945 : buffering = false;
946 : info!(remaining = join_set.len(), total, elapsed_ms = started_at.elapsed().as_millis(), "waiting for tenants to shutdown");
947 : }
948 : }
949 : }
950 :
951 2 : if panicked > 0 {
952 0 : warn!(
953 : panicked,
954 0 : total, "observed panicks while shutting down tenants"
955 : );
956 2 : }
957 :
958 : // caller will log how long we took
959 2 : }
960 :
961 0 : #[derive(thiserror::Error, Debug)]
962 : pub(crate) enum UpsertLocationError {
963 : #[error("Bad config request: {0}")]
964 : BadRequest(anyhow::Error),
965 :
966 : #[error("Cannot change config in this state: {0}")]
967 : Unavailable(#[from] TenantMapError),
968 :
969 : #[error("Tenant is already being modified")]
970 : InProgress,
971 :
972 : #[error("Failed to flush: {0}")]
973 : Flush(anyhow::Error),
974 :
975 : #[error("Internal error: {0}")]
976 : Other(#[from] anyhow::Error),
977 : }
978 :
979 : impl TenantManager {
980 : /// Convenience function so that anyone with a TenantManager can get at the global configuration, without
981 : /// having to pass it around everywhere as a separate object.
982 0 : pub(crate) fn get_conf(&self) -> &'static PageServerConf {
983 0 : self.conf
984 0 : }
985 :
986 : /// Gets the attached tenant from the in-memory data, erroring if it's absent, in secondary mode, or currently
987 : /// undergoing a state change (i.e. slot is InProgress).
988 : ///
989 : /// The return Tenant is not guaranteed to be active: check its status after obtaing it, or
990 : /// use [`Tenant::wait_to_become_active`] before using it if you will do I/O on it.
991 0 : pub(crate) fn get_attached_tenant_shard(
992 0 : &self,
993 0 : tenant_shard_id: TenantShardId,
994 0 : ) -> Result<Arc<Tenant>, GetTenantError> {
995 0 : let locked = self.tenants.read().unwrap();
996 :
997 0 : let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)?;
998 :
999 0 : match peek_slot {
1000 0 : Some(TenantSlot::Attached(tenant)) => Ok(Arc::clone(tenant)),
1001 0 : Some(TenantSlot::InProgress(_)) => Err(GetTenantError::NotActive(tenant_shard_id)),
1002 : None | Some(TenantSlot::Secondary(_)) => {
1003 0 : Err(GetTenantError::NotFound(tenant_shard_id.tenant_id))
1004 : }
1005 : }
1006 0 : }
1007 :
1008 0 : pub(crate) fn get_secondary_tenant_shard(
1009 0 : &self,
1010 0 : tenant_shard_id: TenantShardId,
1011 0 : ) -> Option<Arc<SecondaryTenant>> {
1012 0 : let locked = self.tenants.read().unwrap();
1013 0 :
1014 0 : let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
1015 0 : .ok()
1016 0 : .flatten();
1017 :
1018 0 : match peek_slot {
1019 0 : Some(TenantSlot::Secondary(s)) => Some(s.clone()),
1020 0 : _ => None,
1021 : }
1022 0 : }
1023 :
1024 : /// Whether the `TenantManager` is responsible for the tenant shard
1025 0 : pub(crate) fn manages_tenant_shard(&self, tenant_shard_id: TenantShardId) -> bool {
1026 0 : let locked = self.tenants.read().unwrap();
1027 0 :
1028 0 : let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
1029 0 : .ok()
1030 0 : .flatten();
1031 0 :
1032 0 : peek_slot.is_some()
1033 0 : }
1034 :
1035 0 : #[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug()))]
1036 : pub(crate) async fn upsert_location(
1037 : &self,
1038 : tenant_shard_id: TenantShardId,
1039 : new_location_config: LocationConf,
1040 : flush: Option<Duration>,
1041 : mut spawn_mode: SpawnMode,
1042 : ctx: &RequestContext,
1043 : ) -> Result<Option<Arc<Tenant>>, UpsertLocationError> {
1044 : debug_assert_current_span_has_tenant_id();
1045 : info!("configuring tenant location to state {new_location_config:?}");
1046 :
1047 : enum FastPathModified {
1048 : Attached(Arc<Tenant>),
1049 : Secondary(Arc<SecondaryTenant>),
1050 : }
1051 :
1052 : // Special case fast-path for updates to existing slots: if our upsert is only updating configuration,
1053 : // then we do not need to set the slot to InProgress, we can just call into the
1054 : // existng tenant.
1055 : let fast_path_taken = {
1056 : let locked = self.tenants.read().unwrap();
1057 : let peek_slot =
1058 : tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Write)?;
1059 : match (&new_location_config.mode, peek_slot) {
1060 : (LocationMode::Attached(attach_conf), Some(TenantSlot::Attached(tenant))) => {
1061 : match attach_conf.generation.cmp(&tenant.generation) {
1062 : Ordering::Equal => {
1063 : // A transition from Attached to Attached in the same generation, we may
1064 : // take our fast path and just provide the updated configuration
1065 : // to the tenant.
1066 : tenant.set_new_location_config(
1067 : AttachedTenantConf::try_from(new_location_config.clone())
1068 : .map_err(UpsertLocationError::BadRequest)?,
1069 : );
1070 :
1071 : Some(FastPathModified::Attached(tenant.clone()))
1072 : }
1073 : Ordering::Less => {
1074 : return Err(UpsertLocationError::BadRequest(anyhow::anyhow!(
1075 : "Generation {:?} is less than existing {:?}",
1076 : attach_conf.generation,
1077 : tenant.generation
1078 : )));
1079 : }
1080 : Ordering::Greater => {
1081 : // Generation advanced, fall through to general case of replacing `Tenant` object
1082 : None
1083 : }
1084 : }
1085 : }
1086 : (
1087 : LocationMode::Secondary(secondary_conf),
1088 : Some(TenantSlot::Secondary(secondary_tenant)),
1089 : ) => {
1090 : secondary_tenant.set_config(secondary_conf);
1091 : secondary_tenant.set_tenant_conf(&new_location_config.tenant_conf);
1092 : Some(FastPathModified::Secondary(secondary_tenant.clone()))
1093 : }
1094 : _ => {
1095 : // Not an Attached->Attached transition, fall through to general case
1096 : None
1097 : }
1098 : }
1099 : };
1100 :
1101 : // Fast-path continued: having dropped out of the self.tenants lock, do the async
1102 : // phase of writing config and/or waiting for flush, before returning.
1103 : match fast_path_taken {
1104 : Some(FastPathModified::Attached(tenant)) => {
1105 : Tenant::persist_tenant_config(self.conf, &tenant_shard_id, &new_location_config)
1106 : .await?;
1107 :
1108 : // Transition to AttachedStale means we may well hold a valid generation
1109 : // still, and have been requested to go stale as part of a migration. If
1110 : // the caller set `flush`, then flush to remote storage.
1111 : if let LocationMode::Attached(AttachedLocationConfig {
1112 : generation: _,
1113 : attach_mode: AttachmentMode::Stale,
1114 : }) = &new_location_config.mode
1115 : {
1116 : if let Some(flush_timeout) = flush {
1117 : match tokio::time::timeout(flush_timeout, tenant.flush_remote()).await {
1118 : Ok(Err(e)) => {
1119 : return Err(UpsertLocationError::Flush(e));
1120 : }
1121 : Ok(Ok(_)) => return Ok(Some(tenant)),
1122 : Err(_) => {
1123 : tracing::warn!(
1124 : timeout_ms = flush_timeout.as_millis(),
1125 : "Timed out waiting for flush to remote storage, proceeding anyway."
1126 : )
1127 : }
1128 : }
1129 : }
1130 : }
1131 :
1132 : return Ok(Some(tenant));
1133 : }
1134 : Some(FastPathModified::Secondary(_secondary_tenant)) => {
1135 : Tenant::persist_tenant_config(self.conf, &tenant_shard_id, &new_location_config)
1136 : .await?;
1137 :
1138 : return Ok(None);
1139 : }
1140 : None => {
1141 : // Proceed with the general case procedure, where we will shutdown & remove any existing
1142 : // slot contents and replace with a fresh one
1143 : }
1144 : };
1145 :
1146 : // General case for upserts to TenantsMap, excluding the case above: we will substitute an
1147 : // InProgress value to the slot while we make whatever changes are required. The state for
1148 : // the tenant is inaccessible to the outside world while we are doing this, but that is sensible:
1149 : // the state is ill-defined while we're in transition. Transitions are async, but fast: we do
1150 : // not do significant I/O, and shutdowns should be prompt via cancellation tokens.
1151 : let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)
1152 0 : .map_err(|e| match e {
1153 : TenantSlotError::AlreadyExists(_, _) | TenantSlotError::NotFound(_) => {
1154 0 : unreachable!("Called with mode Any")
1155 : }
1156 0 : TenantSlotError::InProgress => UpsertLocationError::InProgress,
1157 0 : TenantSlotError::MapState(s) => UpsertLocationError::Unavailable(s),
1158 0 : })?;
1159 :
1160 : match slot_guard.get_old_value() {
1161 : Some(TenantSlot::Attached(tenant)) => {
1162 : // The case where we keep a Tenant alive was covered above in the special case
1163 : // for Attached->Attached transitions in the same generation. By this point,
1164 : // if we see an attached tenant we know it will be discarded and should be
1165 : // shut down.
1166 : let (_guard, progress) = utils::completion::channel();
1167 :
1168 : match tenant.get_attach_mode() {
1169 : AttachmentMode::Single | AttachmentMode::Multi => {
1170 : // Before we leave our state as the presumed holder of the latest generation,
1171 : // flush any outstanding deletions to reduce the risk of leaking objects.
1172 : self.resources.deletion_queue_client.flush_advisory()
1173 : }
1174 : AttachmentMode::Stale => {
1175 : // If we're stale there's not point trying to flush deletions
1176 : }
1177 : };
1178 :
1179 : info!("Shutting down attached tenant");
1180 : match tenant.shutdown(progress, ShutdownMode::Hard).await {
1181 : Ok(()) => {}
1182 : Err(barrier) => {
1183 : info!("Shutdown already in progress, waiting for it to complete");
1184 : barrier.wait().await;
1185 : }
1186 : }
1187 : slot_guard.drop_old_value().expect("We just shut it down");
1188 :
1189 : // Edge case: if we were called with SpawnMode::Create, but a Tenant already existed, then
1190 : // the caller thinks they're creating but the tenant already existed. We must switch to
1191 : // Eager mode so that when starting this Tenant we properly probe remote storage for timelines,
1192 : // rather than assuming it to be empty.
1193 : spawn_mode = SpawnMode::Eager;
1194 : }
1195 : Some(TenantSlot::Secondary(state)) => {
1196 : info!("Shutting down secondary tenant");
1197 : state.shutdown().await;
1198 : }
1199 : Some(TenantSlot::InProgress(_)) => {
1200 : // This should never happen: acquire_slot should error out
1201 : // if the contents of a slot were InProgress.
1202 : return Err(UpsertLocationError::Other(anyhow::anyhow!(
1203 : "Acquired an InProgress slot, this is a bug."
1204 : )));
1205 : }
1206 : None => {
1207 : // Slot was vacant, nothing needs shutting down.
1208 : }
1209 : }
1210 :
1211 : let tenant_path = self.conf.tenant_path(&tenant_shard_id);
1212 : let timelines_path = self.conf.timelines_path(&tenant_shard_id);
1213 :
1214 : // Directory structure is the same for attached and secondary modes:
1215 : // create it if it doesn't exist. Timeline load/creation expects the
1216 : // timelines/ subdir to already exist.
1217 : //
1218 : // Does not need to be fsync'd because local storage is just a cache.
1219 : tokio::fs::create_dir_all(&timelines_path)
1220 : .await
1221 0 : .with_context(|| format!("Creating {timelines_path}"))?;
1222 :
1223 : // Before activating either secondary or attached mode, persist the
1224 : // configuration, so that on restart we will re-attach (or re-start
1225 : // secondary) on the tenant.
1226 : Tenant::persist_tenant_config(self.conf, &tenant_shard_id, &new_location_config).await?;
1227 :
1228 : let new_slot = match &new_location_config.mode {
1229 : LocationMode::Secondary(secondary_config) => {
1230 : let shard_identity = new_location_config.shard;
1231 : TenantSlot::Secondary(SecondaryTenant::new(
1232 : tenant_shard_id,
1233 : shard_identity,
1234 : new_location_config.tenant_conf,
1235 : secondary_config,
1236 : ))
1237 : }
1238 : LocationMode::Attached(_attach_config) => {
1239 : let shard_identity = new_location_config.shard;
1240 :
1241 : // Testing hack: if we are configured with no control plane, then drop the generation
1242 : // from upserts. This enables creating generation-less tenants even though neon_local
1243 : // always uses generations when calling the location conf API.
1244 : let attached_conf = if cfg!(feature = "testing") {
1245 : let mut conf = AttachedTenantConf::try_from(new_location_config)?;
1246 : if self.conf.control_plane_api.is_none() {
1247 : conf.location.generation = Generation::none();
1248 : }
1249 : conf
1250 : } else {
1251 : AttachedTenantConf::try_from(new_location_config)?
1252 : };
1253 :
1254 : let tenant = tenant_spawn(
1255 : self.conf,
1256 : tenant_shard_id,
1257 : &tenant_path,
1258 : self.resources.clone(),
1259 : attached_conf,
1260 : shard_identity,
1261 : None,
1262 : self.tenants,
1263 : spawn_mode,
1264 : ctx,
1265 : )?;
1266 :
1267 : TenantSlot::Attached(tenant)
1268 : }
1269 : };
1270 :
1271 : let attached_tenant = if let TenantSlot::Attached(tenant) = &new_slot {
1272 : Some(tenant.clone())
1273 : } else {
1274 : None
1275 : };
1276 :
1277 : match slot_guard.upsert(new_slot) {
1278 : Err(TenantSlotUpsertError::InternalError(e)) => {
1279 : Err(UpsertLocationError::Other(anyhow::anyhow!(e)))
1280 : }
1281 : Err(TenantSlotUpsertError::MapState(e)) => Err(UpsertLocationError::Unavailable(e)),
1282 : Err(TenantSlotUpsertError::ShuttingDown((new_slot, _completion))) => {
1283 : // If we just called tenant_spawn() on a new tenant, and can't insert it into our map, then
1284 : // we must not leak it: this would violate the invariant that after shutdown_all_tenants, all tenants
1285 : // are shutdown.
1286 : //
1287 : // We must shut it down inline here.
1288 : match new_slot {
1289 : TenantSlot::InProgress(_) => {
1290 : // Unreachable because we never insert an InProgress
1291 : unreachable!()
1292 : }
1293 : TenantSlot::Attached(tenant) => {
1294 : let (_guard, progress) = utils::completion::channel();
1295 : info!("Shutting down just-spawned tenant, because tenant manager is shut down");
1296 : match tenant.shutdown(progress, ShutdownMode::Hard).await {
1297 : Ok(()) => {
1298 : info!("Finished shutting down just-spawned tenant");
1299 : }
1300 : Err(barrier) => {
1301 : info!("Shutdown already in progress, waiting for it to complete");
1302 : barrier.wait().await;
1303 : }
1304 : }
1305 : }
1306 : TenantSlot::Secondary(secondary_tenant) => {
1307 : secondary_tenant.shutdown().await;
1308 : }
1309 : }
1310 :
1311 : Err(UpsertLocationError::Unavailable(
1312 : TenantMapError::ShuttingDown,
1313 : ))
1314 : }
1315 : Ok(()) => Ok(attached_tenant),
1316 : }
1317 : }
1318 :
1319 : /// Resetting a tenant is equivalent to detaching it, then attaching it again with the same
1320 : /// LocationConf that was last used to attach it. Optionally, the local file cache may be
1321 : /// dropped before re-attaching.
1322 : ///
1323 : /// This is not part of a tenant's normal lifecycle: it is used for debug/support, in situations
1324 : /// where an issue is identified that would go away with a restart of the tenant.
1325 : ///
1326 : /// This does not have any special "force" shutdown of a tenant: it relies on the tenant's tasks
1327 : /// to respect the cancellation tokens used in normal shutdown().
1328 0 : #[instrument(skip_all, fields(tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %drop_cache))]
1329 : pub(crate) async fn reset_tenant(
1330 : &self,
1331 : tenant_shard_id: TenantShardId,
1332 : drop_cache: bool,
1333 : ctx: &RequestContext,
1334 : ) -> anyhow::Result<()> {
1335 : let mut slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
1336 : let Some(old_slot) = slot_guard.get_old_value() else {
1337 : anyhow::bail!("Tenant not found when trying to reset");
1338 : };
1339 :
1340 : let Some(tenant) = old_slot.get_attached() else {
1341 : slot_guard.revert();
1342 : anyhow::bail!("Tenant is not in attached state");
1343 : };
1344 :
1345 : let (_guard, progress) = utils::completion::channel();
1346 : match tenant.shutdown(progress, ShutdownMode::Hard).await {
1347 : Ok(()) => {
1348 : slot_guard.drop_old_value()?;
1349 : }
1350 : Err(_barrier) => {
1351 : slot_guard.revert();
1352 : anyhow::bail!("Cannot reset Tenant, already shutting down");
1353 : }
1354 : }
1355 :
1356 : let tenant_path = self.conf.tenant_path(&tenant_shard_id);
1357 : let timelines_path = self.conf.timelines_path(&tenant_shard_id);
1358 : let config = Tenant::load_tenant_config(self.conf, &tenant_shard_id)?;
1359 :
1360 : if drop_cache {
1361 : tracing::info!("Dropping local file cache");
1362 :
1363 : match tokio::fs::read_dir(&timelines_path).await {
1364 : Err(e) => {
1365 : tracing::warn!("Failed to list timelines while dropping cache: {}", e);
1366 : }
1367 : Ok(mut entries) => {
1368 : while let Some(entry) = entries.next_entry().await? {
1369 : tokio::fs::remove_dir_all(entry.path()).await?;
1370 : }
1371 : }
1372 : }
1373 : }
1374 :
1375 : let shard_identity = config.shard;
1376 : let tenant = tenant_spawn(
1377 : self.conf,
1378 : tenant_shard_id,
1379 : &tenant_path,
1380 : self.resources.clone(),
1381 : AttachedTenantConf::try_from(config)?,
1382 : shard_identity,
1383 : None,
1384 : self.tenants,
1385 : SpawnMode::Eager,
1386 : ctx,
1387 : )?;
1388 :
1389 : slot_guard.upsert(TenantSlot::Attached(tenant))?;
1390 :
1391 : Ok(())
1392 : }
1393 :
1394 0 : pub(crate) fn get_attached_active_tenant_shards(&self) -> Vec<Arc<Tenant>> {
1395 0 : let locked = self.tenants.read().unwrap();
1396 0 : match &*locked {
1397 0 : TenantsMap::Initializing => Vec::new(),
1398 0 : TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => map
1399 0 : .values()
1400 0 : .filter_map(|slot| {
1401 0 : slot.get_attached()
1402 0 : .and_then(|t| if t.is_active() { Some(t.clone()) } else { None })
1403 0 : })
1404 0 : .collect(),
1405 : }
1406 0 : }
1407 : // Do some synchronous work for all tenant slots in Secondary state. The provided
1408 : // callback should be small and fast, as it will be called inside the global
1409 : // TenantsMap lock.
1410 0 : pub(crate) fn foreach_secondary_tenants<F>(&self, mut func: F)
1411 0 : where
1412 0 : // TODO: let the callback return a hint to drop out of the loop early
1413 0 : F: FnMut(&TenantShardId, &Arc<SecondaryTenant>),
1414 0 : {
1415 0 : let locked = self.tenants.read().unwrap();
1416 :
1417 0 : let map = match &*locked {
1418 0 : TenantsMap::Initializing | TenantsMap::ShuttingDown(_) => return,
1419 0 : TenantsMap::Open(m) => m,
1420 : };
1421 :
1422 0 : for (tenant_id, slot) in map {
1423 0 : if let TenantSlot::Secondary(state) = slot {
1424 : // Only expose secondary tenants that are not currently shutting down
1425 0 : if !state.cancel.is_cancelled() {
1426 0 : func(tenant_id, state)
1427 0 : }
1428 0 : }
1429 : }
1430 0 : }
1431 :
1432 : /// Total list of all tenant slots: this includes attached, secondary, and InProgress.
1433 0 : pub(crate) fn list(&self) -> Vec<(TenantShardId, TenantSlot)> {
1434 0 : let locked = self.tenants.read().unwrap();
1435 0 : match &*locked {
1436 0 : TenantsMap::Initializing => Vec::new(),
1437 0 : TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => {
1438 0 : map.iter().map(|(k, v)| (*k, v.clone())).collect()
1439 : }
1440 : }
1441 0 : }
1442 :
1443 0 : pub(crate) fn get(&self, tenant_shard_id: TenantShardId) -> Option<TenantSlot> {
1444 0 : let locked = self.tenants.read().unwrap();
1445 0 : match &*locked {
1446 0 : TenantsMap::Initializing => None,
1447 0 : TenantsMap::Open(map) | TenantsMap::ShuttingDown(map) => {
1448 0 : map.get(&tenant_shard_id).cloned()
1449 : }
1450 : }
1451 0 : }
1452 :
1453 0 : pub(crate) async fn delete_tenant(
1454 0 : &self,
1455 0 : tenant_shard_id: TenantShardId,
1456 0 : activation_timeout: Duration,
1457 0 : ) -> Result<(), DeleteTenantError> {
1458 0 : super::span::debug_assert_current_span_has_tenant_id();
1459 : // We acquire a SlotGuard during this function to protect against concurrent
1460 : // changes while the ::prepare phase of DeleteTenantFlow executes, but then
1461 : // have to return the Tenant to the map while the background deletion runs.
1462 : //
1463 : // TODO: refactor deletion to happen outside the lifetime of a Tenant.
1464 : // Currently, deletion requires a reference to the tenants map in order to
1465 : // keep the Tenant in the map until deletion is complete, and then remove
1466 : // it at the end.
1467 : //
1468 : // See https://github.com/neondatabase/neon/issues/5080
1469 :
1470 0 : let slot_guard =
1471 0 : tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustExist)?;
1472 :
1473 : // unwrap is safe because we used MustExist mode when acquiring
1474 0 : let tenant = match slot_guard.get_old_value().as_ref().unwrap() {
1475 0 : TenantSlot::Attached(tenant) => tenant.clone(),
1476 : _ => {
1477 : // Express "not attached" as equivalent to "not found"
1478 0 : return Err(DeleteTenantError::NotAttached);
1479 : }
1480 : };
1481 :
1482 0 : match tenant.current_state() {
1483 : TenantState::Broken { .. } | TenantState::Stopping { .. } => {
1484 : // If deletion is already in progress, return success (the semantics of this
1485 : // function are to rerturn success afterr deletion is spawned in background).
1486 : // Otherwise fall through and let [`DeleteTenantFlow`] handle this state.
1487 0 : if DeleteTenantFlow::is_in_progress(&tenant) {
1488 : // The `delete_progress` lock is held: deletion is already happening
1489 : // in the bacckground
1490 0 : slot_guard.revert();
1491 0 : return Ok(());
1492 0 : }
1493 : }
1494 : _ => {
1495 0 : tenant
1496 0 : .wait_to_become_active(activation_timeout)
1497 0 : .await
1498 0 : .map_err(|e| match e {
1499 : GetActiveTenantError::WillNotBecomeActive(_)
1500 : | GetActiveTenantError::Broken(_) => {
1501 0 : DeleteTenantError::InvalidState(tenant.current_state())
1502 : }
1503 0 : GetActiveTenantError::Cancelled => DeleteTenantError::Cancelled,
1504 0 : GetActiveTenantError::NotFound(_) => DeleteTenantError::NotAttached,
1505 : GetActiveTenantError::WaitForActiveTimeout {
1506 0 : latest_state: _latest_state,
1507 0 : wait_time: _wait_time,
1508 0 : } => DeleteTenantError::InvalidState(tenant.current_state()),
1509 0 : })?;
1510 : }
1511 : }
1512 :
1513 0 : let result = DeleteTenantFlow::run(
1514 0 : self.conf,
1515 0 : self.resources.remote_storage.clone(),
1516 0 : &TENANTS,
1517 0 : tenant,
1518 0 : &self.cancel,
1519 0 : )
1520 0 : .await;
1521 :
1522 : // The Tenant goes back into the map in Stopping state, it will eventually be removed by DeleteTenantFLow
1523 0 : slot_guard.revert();
1524 0 : result
1525 0 : }
1526 :
1527 0 : #[instrument(skip_all, fields(tenant_id=%tenant.get_tenant_shard_id().tenant_id, shard_id=%tenant.get_tenant_shard_id().shard_slug(), new_shard_count=%new_shard_count.literal()))]
1528 : pub(crate) async fn shard_split(
1529 : &self,
1530 : tenant: Arc<Tenant>,
1531 : new_shard_count: ShardCount,
1532 : new_stripe_size: Option<ShardStripeSize>,
1533 : ctx: &RequestContext,
1534 : ) -> anyhow::Result<Vec<TenantShardId>> {
1535 : let tenant_shard_id = *tenant.get_tenant_shard_id();
1536 : let r = self
1537 : .do_shard_split(tenant, new_shard_count, new_stripe_size, ctx)
1538 : .await;
1539 : if r.is_err() {
1540 : // Shard splitting might have left the original shard in a partially shut down state (it
1541 : // stops the shard's remote timeline client). Reset it to ensure we leave things in
1542 : // a working state.
1543 : if self.get(tenant_shard_id).is_some() {
1544 : tracing::warn!("Resetting after shard split failure");
1545 : if let Err(e) = self.reset_tenant(tenant_shard_id, false, ctx).await {
1546 : // Log this error because our return value will still be the original error, not this one. This is
1547 : // a severe error: if this happens, we might be leaving behind a tenant that is not fully functional
1548 : // (e.g. has uploads disabled). We can't do anything else: if reset fails then shutting the tenant down or
1549 : // setting it broken probably won't help either.
1550 : tracing::error!("Failed to reset: {e}");
1551 : }
1552 : }
1553 : }
1554 :
1555 : r
1556 : }
1557 :
1558 0 : pub(crate) async fn do_shard_split(
1559 0 : &self,
1560 0 : tenant: Arc<Tenant>,
1561 0 : new_shard_count: ShardCount,
1562 0 : new_stripe_size: Option<ShardStripeSize>,
1563 0 : ctx: &RequestContext,
1564 0 : ) -> anyhow::Result<Vec<TenantShardId>> {
1565 0 : let tenant_shard_id = *tenant.get_tenant_shard_id();
1566 0 :
1567 0 : // Validate the incoming request
1568 0 : if new_shard_count.count() <= tenant_shard_id.shard_count.count() {
1569 0 : anyhow::bail!("Requested shard count is not an increase");
1570 0 : }
1571 0 : let expansion_factor = new_shard_count.count() / tenant_shard_id.shard_count.count();
1572 0 : if !expansion_factor.is_power_of_two() {
1573 0 : anyhow::bail!("Requested split is not a power of two");
1574 0 : }
1575 :
1576 0 : if let Some(new_stripe_size) = new_stripe_size {
1577 0 : if tenant.get_shard_stripe_size() != new_stripe_size
1578 0 : && tenant_shard_id.shard_count.count() > 1
1579 : {
1580 : // This tenant already has multiple shards, it is illegal to try and change its stripe size
1581 0 : anyhow::bail!(
1582 0 : "Shard stripe size may not be modified once tenant has multiple shards"
1583 0 : );
1584 0 : }
1585 0 : }
1586 :
1587 : // Plan: identify what the new child shards will be
1588 0 : let child_shards = tenant_shard_id.split(new_shard_count);
1589 0 : tracing::info!(
1590 0 : "Shard {} splits into: {}",
1591 0 : tenant_shard_id.to_index(),
1592 0 : child_shards
1593 0 : .iter()
1594 0 : .map(|id| format!("{}", id.to_index()))
1595 0 : .join(",")
1596 : );
1597 :
1598 0 : fail::fail_point!("shard-split-pre-prepare", |_| Err(anyhow::anyhow!(
1599 0 : "failpoint"
1600 0 : )));
1601 :
1602 0 : let parent_shard_identity = tenant.shard_identity;
1603 0 : let parent_tenant_conf = tenant.get_tenant_conf();
1604 0 : let parent_generation = tenant.generation;
1605 :
1606 : // Phase 1: Write out child shards' remote index files, in the parent tenant's current generation
1607 0 : if let Err(e) = tenant.split_prepare(&child_shards).await {
1608 : // If [`Tenant::split_prepare`] fails, we must reload the tenant, because it might
1609 : // have been left in a partially-shut-down state.
1610 0 : tracing::warn!("Failed to prepare for split: {e}, reloading Tenant before returning");
1611 0 : return Err(e);
1612 0 : }
1613 0 :
1614 0 : fail::fail_point!("shard-split-post-prepare", |_| Err(anyhow::anyhow!(
1615 0 : "failpoint"
1616 0 : )));
1617 :
1618 0 : self.resources.deletion_queue_client.flush_advisory();
1619 0 :
1620 0 : // Phase 2: Put the parent shard to InProgress and grab a reference to the parent Tenant
1621 0 : drop(tenant);
1622 0 : let mut parent_slot_guard =
1623 0 : tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
1624 0 : let parent = match parent_slot_guard.get_old_value() {
1625 0 : Some(TenantSlot::Attached(t)) => t,
1626 0 : Some(TenantSlot::Secondary(_)) => anyhow::bail!("Tenant location in secondary mode"),
1627 : Some(TenantSlot::InProgress(_)) => {
1628 : // tenant_map_acquire_slot never returns InProgress, if a slot was InProgress
1629 : // it would return an error.
1630 0 : unreachable!()
1631 : }
1632 : None => {
1633 : // We don't actually need the parent shard to still be attached to do our work, but it's
1634 : // a weird enough situation that the caller probably didn't want us to continue working
1635 : // if they had detached the tenant they requested the split on.
1636 0 : anyhow::bail!("Detached parent shard in the middle of split!")
1637 : }
1638 : };
1639 0 : fail::fail_point!("shard-split-pre-hardlink", |_| Err(anyhow::anyhow!(
1640 0 : "failpoint"
1641 0 : )));
1642 : // Optimization: hardlink layers from the parent into the children, so that they don't have to
1643 : // re-download & duplicate the data referenced in their initial IndexPart
1644 0 : self.shard_split_hardlink(parent, child_shards.clone())
1645 0 : .await?;
1646 0 : fail::fail_point!("shard-split-post-hardlink", |_| Err(anyhow::anyhow!(
1647 0 : "failpoint"
1648 0 : )));
1649 :
1650 : // Take a snapshot of where the parent's WAL ingest had got to: we will wait for
1651 : // child shards to reach this point.
1652 0 : let mut target_lsns = HashMap::new();
1653 0 : for timeline in parent.timelines.lock().unwrap().clone().values() {
1654 0 : target_lsns.insert(timeline.timeline_id, timeline.get_last_record_lsn());
1655 0 : }
1656 :
1657 : // TODO: we should have the parent shard stop its WAL ingest here, it's a waste of resources
1658 : // and could slow down the children trying to catch up.
1659 :
1660 : // Phase 3: Spawn the child shards
1661 0 : for child_shard in &child_shards {
1662 0 : let mut child_shard_identity = parent_shard_identity;
1663 0 : if let Some(new_stripe_size) = new_stripe_size {
1664 0 : child_shard_identity.stripe_size = new_stripe_size;
1665 0 : }
1666 0 : child_shard_identity.count = child_shard.shard_count;
1667 0 : child_shard_identity.number = child_shard.shard_number;
1668 0 :
1669 0 : let child_location_conf = LocationConf {
1670 0 : mode: LocationMode::Attached(AttachedLocationConfig {
1671 0 : generation: parent_generation,
1672 0 : attach_mode: AttachmentMode::Single,
1673 0 : }),
1674 0 : shard: child_shard_identity,
1675 0 : tenant_conf: parent_tenant_conf.clone(),
1676 0 : };
1677 0 :
1678 0 : self.upsert_location(
1679 0 : *child_shard,
1680 0 : child_location_conf,
1681 0 : None,
1682 0 : SpawnMode::Eager,
1683 0 : ctx,
1684 0 : )
1685 0 : .await?;
1686 : }
1687 :
1688 0 : fail::fail_point!("shard-split-post-child-conf", |_| Err(anyhow::anyhow!(
1689 0 : "failpoint"
1690 0 : )));
1691 :
1692 : // Phase 4: wait for child chards WAL ingest to catch up to target LSN
1693 0 : for child_shard_id in &child_shards {
1694 0 : let child_shard_id = *child_shard_id;
1695 0 : let child_shard = {
1696 0 : let locked = TENANTS.read().unwrap();
1697 0 : let peek_slot =
1698 0 : tenant_map_peek_slot(&locked, &child_shard_id, TenantSlotPeekMode::Read)?;
1699 0 : peek_slot.and_then(|s| s.get_attached()).cloned()
1700 : };
1701 0 : if let Some(t) = child_shard {
1702 : // Wait for the child shard to become active: this should be very quick because it only
1703 : // has to download the index_part that we just uploaded when creating it.
1704 0 : if let Err(e) = t.wait_to_become_active(ACTIVE_TENANT_TIMEOUT).await {
1705 : // This is not fatal: we have durably created the child shard. It just makes the
1706 : // split operation less seamless for clients, as we will may detach the parent
1707 : // shard before the child shards are fully ready to serve requests.
1708 0 : tracing::warn!("Failed to wait for shard {child_shard_id} to activate: {e}");
1709 0 : continue;
1710 0 : }
1711 0 :
1712 0 : let timelines = t.timelines.lock().unwrap().clone();
1713 0 : for timeline in timelines.values() {
1714 0 : let Some(target_lsn) = target_lsns.get(&timeline.timeline_id) else {
1715 0 : continue;
1716 : };
1717 :
1718 0 : tracing::info!(
1719 0 : "Waiting for child shard {}/{} to reach target lsn {}...",
1720 0 : child_shard_id,
1721 0 : timeline.timeline_id,
1722 : target_lsn
1723 : );
1724 :
1725 0 : fail::fail_point!("shard-split-lsn-wait", |_| Err(anyhow::anyhow!(
1726 0 : "failpoint"
1727 0 : )));
1728 0 : if let Err(e) = timeline
1729 0 : .wait_lsn(
1730 0 : *target_lsn,
1731 0 : crate::tenant::timeline::WaitLsnWaiter::Tenant,
1732 0 : ctx,
1733 0 : )
1734 0 : .await
1735 : {
1736 : // Failure here might mean shutdown, in any case this part is an optimization
1737 : // and we shouldn't hold up the split operation.
1738 0 : tracing::warn!(
1739 0 : "Failed to wait for timeline {} to reach lsn {target_lsn}: {e}",
1740 0 : timeline.timeline_id
1741 : );
1742 : } else {
1743 0 : tracing::info!(
1744 0 : "Child shard {}/{} reached target lsn {}",
1745 0 : child_shard_id,
1746 0 : timeline.timeline_id,
1747 : target_lsn
1748 : );
1749 : }
1750 : }
1751 0 : }
1752 : }
1753 :
1754 : // Phase 5: Shut down the parent shard, and erase it from disk
1755 0 : let (_guard, progress) = completion::channel();
1756 0 : match parent.shutdown(progress, ShutdownMode::Hard).await {
1757 0 : Ok(()) => {}
1758 0 : Err(other) => {
1759 0 : other.wait().await;
1760 : }
1761 : }
1762 0 : let local_tenant_directory = self.conf.tenant_path(&tenant_shard_id);
1763 0 : let tmp_path = safe_rename_tenant_dir(&local_tenant_directory)
1764 0 : .await
1765 0 : .with_context(|| format!("local tenant directory {local_tenant_directory:?} rename"))?;
1766 0 : spawn_background_purge(tmp_path);
1767 0 :
1768 0 : fail::fail_point!("shard-split-pre-finish", |_| Err(anyhow::anyhow!(
1769 0 : "failpoint"
1770 0 : )));
1771 :
1772 0 : parent_slot_guard.drop_old_value()?;
1773 :
1774 : // Phase 6: Release the InProgress on the parent shard
1775 0 : drop(parent_slot_guard);
1776 0 :
1777 0 : Ok(child_shards)
1778 0 : }
1779 :
1780 : /// Part of [`Self::shard_split`]: hard link parent shard layers into child shards, as an optimization
1781 : /// to avoid the children downloading them again.
1782 : ///
1783 : /// For each resident layer in the parent shard, we will hard link it into all of the child shards.
1784 0 : async fn shard_split_hardlink(
1785 0 : &self,
1786 0 : parent_shard: &Tenant,
1787 0 : child_shards: Vec<TenantShardId>,
1788 0 : ) -> anyhow::Result<()> {
1789 0 : debug_assert_current_span_has_tenant_id();
1790 0 :
1791 0 : let parent_path = self.conf.tenant_path(parent_shard.get_tenant_shard_id());
1792 0 : let (parent_timelines, parent_layers) = {
1793 0 : let mut parent_layers = Vec::new();
1794 0 : let timelines = parent_shard.timelines.lock().unwrap().clone();
1795 0 : let parent_timelines = timelines.keys().cloned().collect::<Vec<_>>();
1796 0 : for timeline in timelines.values() {
1797 0 : let timeline_layers = timeline
1798 0 : .layers
1799 0 : .read()
1800 0 : .await
1801 0 : .likely_resident_layers()
1802 0 : .collect::<Vec<_>>();
1803 :
1804 0 : for layer in timeline_layers {
1805 0 : let relative_path = layer
1806 0 : .local_path()
1807 0 : .strip_prefix(&parent_path)
1808 0 : .context("Removing prefix from parent layer path")?;
1809 0 : parent_layers.push(relative_path.to_owned());
1810 : }
1811 : }
1812 0 : debug_assert!(
1813 0 : !parent_layers.is_empty(),
1814 0 : "shutdown cannot empty the layermap"
1815 : );
1816 0 : (parent_timelines, parent_layers)
1817 0 : };
1818 0 :
1819 0 : let mut child_prefixes = Vec::new();
1820 0 : let mut create_dirs = Vec::new();
1821 :
1822 0 : for child in child_shards {
1823 0 : let child_prefix = self.conf.tenant_path(&child);
1824 0 : create_dirs.push(child_prefix.clone());
1825 0 : create_dirs.extend(
1826 0 : parent_timelines
1827 0 : .iter()
1828 0 : .map(|t| self.conf.timeline_path(&child, t)),
1829 0 : );
1830 0 :
1831 0 : child_prefixes.push(child_prefix);
1832 0 : }
1833 :
1834 : // Since we will do a large number of small filesystem metadata operations, batch them into
1835 : // spawn_blocking calls rather than doing each one as a tokio::fs round-trip.
1836 0 : let jh = tokio::task::spawn_blocking(move || -> anyhow::Result<usize> {
1837 0 : for dir in &create_dirs {
1838 0 : if let Err(e) = std::fs::create_dir_all(dir) {
1839 : // Ignore AlreadyExists errors, drop out on all other errors
1840 0 : match e.kind() {
1841 0 : std::io::ErrorKind::AlreadyExists => {}
1842 : _ => {
1843 0 : return Err(anyhow::anyhow!(e).context(format!("Creating {dir}")));
1844 : }
1845 : }
1846 0 : }
1847 : }
1848 :
1849 0 : for child_prefix in child_prefixes {
1850 0 : for relative_layer in &parent_layers {
1851 0 : let parent_path = parent_path.join(relative_layer);
1852 0 : let child_path = child_prefix.join(relative_layer);
1853 0 : if let Err(e) = std::fs::hard_link(&parent_path, &child_path) {
1854 0 : match e.kind() {
1855 0 : std::io::ErrorKind::AlreadyExists => {}
1856 : std::io::ErrorKind::NotFound => {
1857 0 : tracing::info!(
1858 0 : "Layer {} not found during hard-linking, evicted during split?",
1859 : relative_layer
1860 : );
1861 : }
1862 : _ => {
1863 0 : return Err(anyhow::anyhow!(e).context(format!(
1864 0 : "Hard linking {relative_layer} into {child_prefix}"
1865 0 : )))
1866 : }
1867 : }
1868 0 : }
1869 : }
1870 : }
1871 :
1872 : // Durability is not required for correctness, but if we crashed during split and
1873 : // then came restarted with empty timeline dirs, it would be very inefficient to
1874 : // re-populate from remote storage.
1875 0 : for dir in create_dirs {
1876 0 : if let Err(e) = crashsafe::fsync(&dir) {
1877 : // Something removed a newly created timeline dir out from underneath us? Extremely
1878 : // unexpected, but not worth panic'ing over as this whole function is just an
1879 : // optimization.
1880 0 : tracing::warn!("Failed to fsync directory {dir}: {e}")
1881 0 : }
1882 : }
1883 :
1884 0 : Ok(parent_layers.len())
1885 0 : });
1886 0 :
1887 0 : match jh.await {
1888 0 : Ok(Ok(layer_count)) => {
1889 0 : tracing::info!(count = layer_count, "Hard linked layers into child shards");
1890 : }
1891 0 : Ok(Err(e)) => {
1892 0 : // This is an optimization, so we tolerate failure.
1893 0 : tracing::warn!("Error hard-linking layers, proceeding anyway: {e}")
1894 : }
1895 0 : Err(e) => {
1896 0 : // This is something totally unexpected like a panic, so bail out.
1897 0 : anyhow::bail!("Error joining hard linking task: {e}");
1898 : }
1899 : }
1900 :
1901 0 : Ok(())
1902 0 : }
1903 :
1904 : ///
1905 : /// Shut down all tenants. This runs as part of pageserver shutdown.
1906 : ///
1907 : /// NB: We leave the tenants in the map, so that they remain accessible through
1908 : /// the management API until we shut it down. If we removed the shut-down tenants
1909 : /// from the tenants map, the management API would return 404 for these tenants,
1910 : /// because TenantsMap::get() now returns `None`.
1911 : /// That could be easily misinterpreted by control plane, the consumer of the
1912 : /// management API. For example, it could attach the tenant on a different pageserver.
1913 : /// We would then be in split-brain once this pageserver restarts.
1914 0 : #[instrument(skip_all)]
1915 : pub(crate) async fn shutdown(&self) {
1916 : self.cancel.cancel();
1917 :
1918 : shutdown_all_tenants0(self.tenants).await
1919 : }
1920 :
1921 0 : pub(crate) async fn detach_tenant(
1922 0 : &self,
1923 0 : conf: &'static PageServerConf,
1924 0 : tenant_shard_id: TenantShardId,
1925 0 : detach_ignored: bool,
1926 0 : deletion_queue_client: &DeletionQueueClient,
1927 0 : ) -> Result<(), TenantStateError> {
1928 0 : let tmp_path = self
1929 0 : .detach_tenant0(
1930 0 : conf,
1931 0 : &TENANTS,
1932 0 : tenant_shard_id,
1933 0 : detach_ignored,
1934 0 : deletion_queue_client,
1935 0 : )
1936 0 : .await?;
1937 0 : spawn_background_purge(tmp_path);
1938 0 :
1939 0 : Ok(())
1940 0 : }
1941 :
1942 0 : async fn detach_tenant0(
1943 0 : &self,
1944 0 : conf: &'static PageServerConf,
1945 0 : tenants: &std::sync::RwLock<TenantsMap>,
1946 0 : tenant_shard_id: TenantShardId,
1947 0 : detach_ignored: bool,
1948 0 : deletion_queue_client: &DeletionQueueClient,
1949 0 : ) -> Result<Utf8PathBuf, TenantStateError> {
1950 0 : let tenant_dir_rename_operation = |tenant_id_to_clean: TenantShardId| async move {
1951 0 : let local_tenant_directory = conf.tenant_path(&tenant_id_to_clean);
1952 0 : safe_rename_tenant_dir(&local_tenant_directory)
1953 0 : .await
1954 0 : .with_context(|| {
1955 0 : format!("local tenant directory {local_tenant_directory:?} rename")
1956 0 : })
1957 0 : };
1958 :
1959 0 : let removal_result = remove_tenant_from_memory(
1960 0 : tenants,
1961 0 : tenant_shard_id,
1962 0 : tenant_dir_rename_operation(tenant_shard_id),
1963 0 : )
1964 0 : .await;
1965 :
1966 : // Flush pending deletions, so that they have a good chance of passing validation
1967 : // before this tenant is potentially re-attached elsewhere.
1968 0 : deletion_queue_client.flush_advisory();
1969 0 :
1970 0 : // Ignored tenants are not present in memory and will bail the removal from memory operation.
1971 0 : // Before returning the error, check for ignored tenant removal case — we only need to clean its local files then.
1972 0 : if detach_ignored
1973 0 : && matches!(
1974 0 : removal_result,
1975 : Err(TenantStateError::SlotError(TenantSlotError::NotFound(_)))
1976 : )
1977 : {
1978 0 : let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
1979 0 : if tenant_ignore_mark.exists() {
1980 0 : info!("Detaching an ignored tenant");
1981 0 : let tmp_path = tenant_dir_rename_operation(tenant_shard_id)
1982 0 : .await
1983 0 : .with_context(|| {
1984 0 : format!("Ignored tenant {tenant_shard_id} local directory rename")
1985 0 : })?;
1986 0 : return Ok(tmp_path);
1987 0 : }
1988 0 : }
1989 :
1990 0 : removal_result
1991 0 : }
1992 :
1993 0 : pub(crate) fn list_tenants(
1994 0 : &self,
1995 0 : ) -> Result<Vec<(TenantShardId, TenantState, Generation)>, TenantMapListError> {
1996 0 : let tenants = TENANTS.read().unwrap();
1997 0 : let m = match &*tenants {
1998 0 : TenantsMap::Initializing => return Err(TenantMapListError::Initializing),
1999 0 : TenantsMap::Open(m) | TenantsMap::ShuttingDown(m) => m,
2000 0 : };
2001 0 : Ok(m.iter()
2002 0 : .filter_map(|(id, tenant)| match tenant {
2003 0 : TenantSlot::Attached(tenant) => {
2004 0 : Some((*id, tenant.current_state(), tenant.generation()))
2005 : }
2006 0 : TenantSlot::Secondary(_) => None,
2007 0 : TenantSlot::InProgress(_) => None,
2008 0 : })
2009 0 : .collect())
2010 0 : }
2011 :
2012 : /// Completes an earlier prepared timeline detach ancestor.
2013 0 : pub(crate) async fn complete_detaching_timeline_ancestor(
2014 0 : &self,
2015 0 : tenant_shard_id: TenantShardId,
2016 0 : timeline_id: TimelineId,
2017 0 : prepared: PreparedTimelineDetach,
2018 0 : ctx: &RequestContext,
2019 0 : ) -> Result<Vec<TimelineId>, anyhow::Error> {
2020 : struct RevertOnDropSlot(Option<SlotGuard>);
2021 :
2022 : impl Drop for RevertOnDropSlot {
2023 0 : fn drop(&mut self) {
2024 0 : if let Some(taken) = self.0.take() {
2025 0 : taken.revert();
2026 0 : }
2027 0 : }
2028 : }
2029 :
2030 : impl RevertOnDropSlot {
2031 0 : fn into_inner(mut self) -> SlotGuard {
2032 0 : self.0.take().unwrap()
2033 0 : }
2034 : }
2035 :
2036 : impl std::ops::Deref for RevertOnDropSlot {
2037 : type Target = SlotGuard;
2038 :
2039 0 : fn deref(&self) -> &Self::Target {
2040 0 : self.0.as_ref().unwrap()
2041 0 : }
2042 : }
2043 :
2044 0 : let slot_guard = tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::Any)?;
2045 0 : let slot_guard = RevertOnDropSlot(Some(slot_guard));
2046 :
2047 0 : let tenant = {
2048 0 : let Some(old_slot) = slot_guard.get_old_value() else {
2049 0 : anyhow::bail!(
2050 0 : "Tenant not found when trying to complete detaching timeline ancestor"
2051 0 : );
2052 : };
2053 :
2054 0 : let Some(tenant) = old_slot.get_attached() else {
2055 0 : anyhow::bail!("Tenant is not in attached state");
2056 : };
2057 :
2058 0 : if !tenant.is_active() {
2059 0 : anyhow::bail!("Tenant is not active");
2060 0 : }
2061 0 :
2062 0 : tenant.clone()
2063 : };
2064 :
2065 0 : let timeline = tenant.get_timeline(timeline_id, true)?;
2066 :
2067 0 : let reparented = timeline
2068 0 : .complete_detaching_timeline_ancestor(&tenant, prepared, ctx)
2069 0 : .await?;
2070 :
2071 0 : let mut slot_guard = slot_guard.into_inner();
2072 0 :
2073 0 : let (_guard, progress) = utils::completion::channel();
2074 0 : match tenant.shutdown(progress, ShutdownMode::Hard).await {
2075 : Ok(()) => {
2076 0 : slot_guard.drop_old_value()?;
2077 : }
2078 0 : Err(_barrier) => {
2079 0 : slot_guard.revert();
2080 0 : // this really should not happen, at all, unless shutdown was already going?
2081 0 : anyhow::bail!("Cannot restart Tenant, already shutting down");
2082 : }
2083 : }
2084 :
2085 0 : let tenant_path = self.conf.tenant_path(&tenant_shard_id);
2086 0 : let config = Tenant::load_tenant_config(self.conf, &tenant_shard_id)?;
2087 :
2088 0 : let shard_identity = config.shard;
2089 0 : let tenant = tenant_spawn(
2090 0 : self.conf,
2091 0 : tenant_shard_id,
2092 0 : &tenant_path,
2093 0 : self.resources.clone(),
2094 0 : AttachedTenantConf::try_from(config)?,
2095 0 : shard_identity,
2096 0 : None,
2097 0 : self.tenants,
2098 0 : SpawnMode::Eager,
2099 0 : ctx,
2100 0 : )?;
2101 :
2102 0 : slot_guard.upsert(TenantSlot::Attached(tenant))?;
2103 :
2104 0 : Ok(reparented)
2105 0 : }
2106 : }
2107 :
2108 0 : #[derive(Debug, thiserror::Error)]
2109 : pub(crate) enum GetTenantError {
2110 : /// NotFound is a TenantId rather than TenantShardId, because this error type is used from
2111 : /// getters that use a TenantId and a ShardSelector, not just getters that target a specific shard.
2112 : #[error("Tenant {0} not found")]
2113 : NotFound(TenantId),
2114 :
2115 : #[error("Tenant {0} is not active")]
2116 : NotActive(TenantShardId),
2117 :
2118 : // Initializing or shutting down: cannot authoritatively say whether we have this tenant
2119 : #[error("Tenant map is not available: {0}")]
2120 : MapState(#[from] TenantMapError),
2121 : }
2122 :
2123 0 : #[derive(thiserror::Error, Debug)]
2124 : pub(crate) enum GetActiveTenantError {
2125 : /// We may time out either while TenantSlot is InProgress, or while the Tenant
2126 : /// is in a non-Active state
2127 : #[error(
2128 : "Timed out waiting {wait_time:?} for tenant active state. Latest state: {latest_state:?}"
2129 : )]
2130 : WaitForActiveTimeout {
2131 : latest_state: Option<TenantState>,
2132 : wait_time: Duration,
2133 : },
2134 :
2135 : /// The TenantSlot is absent, or in secondary mode
2136 : #[error(transparent)]
2137 : NotFound(#[from] GetTenantError),
2138 :
2139 : /// Cancellation token fired while we were waiting
2140 : #[error("cancelled")]
2141 : Cancelled,
2142 :
2143 : /// Tenant exists, but is in a state that cannot become active (e.g. Stopping, Broken)
2144 : #[error("will not become active. Current state: {0}")]
2145 : WillNotBecomeActive(TenantState),
2146 :
2147 : /// Broken is logically a subset of WillNotBecomeActive, but a distinct error is useful as
2148 : /// WillNotBecomeActive is a permitted error under some circumstances, whereas broken should
2149 : /// never happen.
2150 : #[error("Tenant is broken: {0}")]
2151 : Broken(String),
2152 : }
2153 :
2154 : /// Get a [`Tenant`] in its active state. If the tenant_id is currently in [`TenantSlot::InProgress`]
2155 : /// state, then wait for up to `timeout`. If the [`Tenant`] is not currently in [`TenantState::Active`],
2156 : /// then wait for up to `timeout` (minus however long we waited for the slot).
2157 0 : pub(crate) async fn get_active_tenant_with_timeout(
2158 0 : tenant_id: TenantId,
2159 0 : shard_selector: ShardSelector,
2160 0 : timeout: Duration,
2161 0 : cancel: &CancellationToken,
2162 0 : ) -> Result<Arc<Tenant>, GetActiveTenantError> {
2163 0 : enum WaitFor {
2164 0 : Barrier(utils::completion::Barrier),
2165 0 : Tenant(Arc<Tenant>),
2166 0 : }
2167 0 :
2168 0 : let wait_start = Instant::now();
2169 0 : let deadline = wait_start + timeout;
2170 :
2171 0 : let (wait_for, tenant_shard_id) = {
2172 0 : let locked = TENANTS.read().unwrap();
2173 :
2174 : // Resolve TenantId to TenantShardId
2175 0 : let tenant_shard_id = locked
2176 0 : .resolve_attached_shard(&tenant_id, shard_selector)
2177 0 : .ok_or(GetActiveTenantError::NotFound(GetTenantError::NotFound(
2178 0 : tenant_id,
2179 0 : )))?;
2180 :
2181 0 : let peek_slot = tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
2182 0 : .map_err(GetTenantError::MapState)?;
2183 0 : match peek_slot {
2184 0 : Some(TenantSlot::Attached(tenant)) => {
2185 0 : match tenant.current_state() {
2186 : TenantState::Active => {
2187 : // Fast path: we don't need to do any async waiting.
2188 0 : return Ok(tenant.clone());
2189 : }
2190 : _ => {
2191 0 : tenant.activate_now();
2192 0 : (WaitFor::Tenant(tenant.clone()), tenant_shard_id)
2193 : }
2194 : }
2195 : }
2196 : Some(TenantSlot::Secondary(_)) => {
2197 0 : return Err(GetActiveTenantError::NotFound(GetTenantError::NotActive(
2198 0 : tenant_shard_id,
2199 0 : )))
2200 : }
2201 0 : Some(TenantSlot::InProgress(barrier)) => {
2202 0 : (WaitFor::Barrier(barrier.clone()), tenant_shard_id)
2203 : }
2204 : None => {
2205 0 : return Err(GetActiveTenantError::NotFound(GetTenantError::NotFound(
2206 0 : tenant_id,
2207 0 : )))
2208 : }
2209 : }
2210 : };
2211 :
2212 0 : let tenant = match wait_for {
2213 0 : WaitFor::Barrier(barrier) => {
2214 0 : tracing::debug!("Waiting for tenant InProgress state to pass...");
2215 0 : timeout_cancellable(
2216 0 : deadline.duration_since(Instant::now()),
2217 0 : cancel,
2218 0 : barrier.wait(),
2219 0 : )
2220 0 : .await
2221 0 : .map_err(|e| match e {
2222 0 : TimeoutCancellableError::Timeout => GetActiveTenantError::WaitForActiveTimeout {
2223 0 : latest_state: None,
2224 0 : wait_time: wait_start.elapsed(),
2225 0 : },
2226 0 : TimeoutCancellableError::Cancelled => GetActiveTenantError::Cancelled,
2227 0 : })?;
2228 : {
2229 0 : let locked = TENANTS.read().unwrap();
2230 0 : let peek_slot =
2231 0 : tenant_map_peek_slot(&locked, &tenant_shard_id, TenantSlotPeekMode::Read)
2232 0 : .map_err(GetTenantError::MapState)?;
2233 0 : match peek_slot {
2234 0 : Some(TenantSlot::Attached(tenant)) => tenant.clone(),
2235 : _ => {
2236 0 : return Err(GetActiveTenantError::NotFound(GetTenantError::NotActive(
2237 0 : tenant_shard_id,
2238 0 : )))
2239 : }
2240 : }
2241 : }
2242 : }
2243 0 : WaitFor::Tenant(tenant) => tenant,
2244 : };
2245 :
2246 0 : tracing::debug!("Waiting for tenant to enter active state...");
2247 0 : tenant
2248 0 : .wait_to_become_active(deadline.duration_since(Instant::now()))
2249 0 : .await?;
2250 0 : Ok(tenant)
2251 0 : }
2252 :
2253 0 : #[derive(Debug, thiserror::Error)]
2254 : pub(crate) enum DeleteTimelineError {
2255 : #[error("Tenant {0}")]
2256 : Tenant(#[from] GetTenantError),
2257 :
2258 : #[error("Timeline {0}")]
2259 : Timeline(#[from] crate::tenant::DeleteTimelineError),
2260 : }
2261 :
2262 0 : #[derive(Debug, thiserror::Error)]
2263 : pub(crate) enum TenantStateError {
2264 : #[error("Tenant {0} is stopping")]
2265 : IsStopping(TenantShardId),
2266 : #[error(transparent)]
2267 : SlotError(#[from] TenantSlotError),
2268 : #[error(transparent)]
2269 : SlotUpsertError(#[from] TenantSlotUpsertError),
2270 : #[error(transparent)]
2271 : Other(#[from] anyhow::Error),
2272 : }
2273 :
2274 0 : pub(crate) async fn load_tenant(
2275 0 : conf: &'static PageServerConf,
2276 0 : tenant_id: TenantId,
2277 0 : generation: Generation,
2278 0 : broker_client: storage_broker::BrokerClientChannel,
2279 0 : remote_storage: Option<GenericRemoteStorage>,
2280 0 : deletion_queue_client: DeletionQueueClient,
2281 0 : ctx: &RequestContext,
2282 0 : ) -> Result<(), TenantMapInsertError> {
2283 0 : // This is a legacy API (replaced by `/location_conf`). It does not support sharding
2284 0 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
2285 :
2286 0 : let slot_guard =
2287 0 : tenant_map_acquire_slot(&tenant_shard_id, TenantSlotAcquireMode::MustNotExist)?;
2288 0 : let tenant_path = conf.tenant_path(&tenant_shard_id);
2289 0 :
2290 0 : let tenant_ignore_mark = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
2291 0 : if tenant_ignore_mark.exists() {
2292 0 : std::fs::remove_file(&tenant_ignore_mark).with_context(|| {
2293 0 : format!(
2294 0 : "Failed to remove tenant ignore mark {tenant_ignore_mark:?} during tenant loading"
2295 0 : )
2296 0 : })?;
2297 0 : }
2298 :
2299 0 : let resources = TenantSharedResources {
2300 0 : broker_client,
2301 0 : remote_storage,
2302 0 : deletion_queue_client,
2303 0 : };
2304 :
2305 0 : let mut location_conf =
2306 0 : Tenant::load_tenant_config(conf, &tenant_shard_id).map_err(TenantMapInsertError::Other)?;
2307 0 : location_conf.attach_in_generation(AttachmentMode::Single, generation);
2308 0 :
2309 0 : Tenant::persist_tenant_config(conf, &tenant_shard_id, &location_conf).await?;
2310 :
2311 0 : let shard_identity = location_conf.shard;
2312 0 : let new_tenant = tenant_spawn(
2313 0 : conf,
2314 0 : tenant_shard_id,
2315 0 : &tenant_path,
2316 0 : resources,
2317 0 : AttachedTenantConf::try_from(location_conf)?,
2318 0 : shard_identity,
2319 0 : None,
2320 0 : &TENANTS,
2321 0 : SpawnMode::Eager,
2322 0 : ctx,
2323 0 : )
2324 0 : .with_context(|| format!("Failed to schedule tenant processing in path {tenant_path:?}"))?;
2325 :
2326 0 : slot_guard.upsert(TenantSlot::Attached(new_tenant))?;
2327 0 : Ok(())
2328 0 : }
2329 :
2330 0 : pub(crate) async fn ignore_tenant(
2331 0 : conf: &'static PageServerConf,
2332 0 : tenant_id: TenantId,
2333 0 : ) -> Result<(), TenantStateError> {
2334 0 : ignore_tenant0(conf, &TENANTS, tenant_id).await
2335 0 : }
2336 :
2337 0 : #[instrument(skip_all, fields(shard_id))]
2338 : async fn ignore_tenant0(
2339 : conf: &'static PageServerConf,
2340 : tenants: &std::sync::RwLock<TenantsMap>,
2341 : tenant_id: TenantId,
2342 : ) -> Result<(), TenantStateError> {
2343 : // This is a legacy API (replaced by `/location_conf`). It does not support sharding
2344 : let tenant_shard_id = TenantShardId::unsharded(tenant_id);
2345 : tracing::Span::current().record(
2346 : "shard_id",
2347 : tracing::field::display(tenant_shard_id.shard_slug()),
2348 : );
2349 :
2350 0 : remove_tenant_from_memory(tenants, tenant_shard_id, async {
2351 0 : let ignore_mark_file = conf.tenant_ignore_mark_file_path(&tenant_shard_id);
2352 0 : fs::File::create(&ignore_mark_file)
2353 0 : .await
2354 0 : .context("Failed to create ignore mark file")
2355 0 : .and_then(|_| {
2356 0 : crashsafe::fsync_file_and_parent(&ignore_mark_file)
2357 0 : .context("Failed to fsync ignore mark file")
2358 0 : })
2359 0 : .with_context(|| format!("Failed to crate ignore mark for tenant {tenant_shard_id}"))?;
2360 0 : Ok(())
2361 0 : })
2362 : .await
2363 : }
2364 :
2365 0 : #[derive(Debug, thiserror::Error)]
2366 : pub(crate) enum TenantMapListError {
2367 : #[error("tenant map is still initiailizing")]
2368 : Initializing,
2369 : }
2370 :
2371 0 : #[derive(Debug, thiserror::Error)]
2372 : pub(crate) enum TenantMapInsertError {
2373 : #[error(transparent)]
2374 : SlotError(#[from] TenantSlotError),
2375 : #[error(transparent)]
2376 : SlotUpsertError(#[from] TenantSlotUpsertError),
2377 : #[error(transparent)]
2378 : Other(#[from] anyhow::Error),
2379 : }
2380 :
2381 : /// Superset of TenantMapError: issues that can occur when acquiring a slot
2382 : /// for a particular tenant ID.
2383 0 : #[derive(Debug, thiserror::Error)]
2384 : pub(crate) enum TenantSlotError {
2385 : /// When acquiring a slot with the expectation that the tenant already exists.
2386 : #[error("Tenant {0} not found")]
2387 : NotFound(TenantShardId),
2388 :
2389 : /// When acquiring a slot with the expectation that the tenant does not already exist.
2390 : #[error("tenant {0} already exists, state: {1:?}")]
2391 : AlreadyExists(TenantShardId, TenantState),
2392 :
2393 : // Tried to read a slot that is currently being mutated by another administrative
2394 : // operation.
2395 : #[error("tenant has a state change in progress, try again later")]
2396 : InProgress,
2397 :
2398 : #[error(transparent)]
2399 : MapState(#[from] TenantMapError),
2400 : }
2401 :
2402 : /// Superset of TenantMapError: issues that can occur when using a SlotGuard
2403 : /// to insert a new value.
2404 0 : #[derive(thiserror::Error)]
2405 : pub(crate) enum TenantSlotUpsertError {
2406 : /// An error where the slot is in an unexpected state, indicating a code bug
2407 : #[error("Internal error updating Tenant")]
2408 : InternalError(Cow<'static, str>),
2409 :
2410 : #[error(transparent)]
2411 : MapState(TenantMapError),
2412 :
2413 : // If we encounter TenantManager shutdown during upsert, we must carry the Completion
2414 : // from the SlotGuard, so that the caller can hold it while they clean up: otherwise
2415 : // TenantManager shutdown might race ahead before we're done cleaning up any Tenant that
2416 : // was protected by the SlotGuard.
2417 : #[error("Shutting down")]
2418 : ShuttingDown((TenantSlot, utils::completion::Completion)),
2419 : }
2420 :
2421 : impl std::fmt::Debug for TenantSlotUpsertError {
2422 0 : fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
2423 0 : match self {
2424 0 : Self::InternalError(reason) => write!(f, "Internal Error {reason}"),
2425 0 : Self::MapState(map_error) => write!(f, "Tenant map state: {map_error:?}"),
2426 0 : Self::ShuttingDown(_completion) => write!(f, "Tenant map shutting down"),
2427 : }
2428 0 : }
2429 : }
2430 :
2431 0 : #[derive(Debug, thiserror::Error)]
2432 : enum TenantSlotDropError {
2433 : /// It is only legal to drop a TenantSlot if its contents are fully shut down
2434 : #[error("Tenant was not shut down")]
2435 : NotShutdown,
2436 : }
2437 :
2438 : /// Errors that can happen any time we are walking the tenant map to try and acquire
2439 : /// the TenantSlot for a particular tenant.
2440 0 : #[derive(Debug, thiserror::Error)]
2441 : pub enum TenantMapError {
2442 : // Tried to read while initializing
2443 : #[error("tenant map is still initializing")]
2444 : StillInitializing,
2445 :
2446 : // Tried to read while shutting down
2447 : #[error("tenant map is shutting down")]
2448 : ShuttingDown,
2449 : }
2450 :
2451 : /// Guards a particular tenant_id's content in the TenantsMap. While this
2452 : /// structure exists, the TenantsMap will contain a [`TenantSlot::InProgress`]
2453 : /// for this tenant, which acts as a marker for any operations targeting
2454 : /// this tenant to retry later, or wait for the InProgress state to end.
2455 : ///
2456 : /// This structure enforces the important invariant that we do not have overlapping
2457 : /// tasks that will try use local storage for a the same tenant ID: we enforce that
2458 : /// the previous contents of a slot have been shut down before the slot can be
2459 : /// left empty or used for something else
2460 : ///
2461 : /// Holders of a SlotGuard should explicitly dispose of it, using either `upsert`
2462 : /// to provide a new value, or `revert` to put the slot back into its initial
2463 : /// state. If the SlotGuard is dropped without calling either of these, then
2464 : /// we will leave the slot empty if our `old_value` is already shut down, else
2465 : /// we will replace the slot with `old_value` (equivalent to doing a revert).
2466 : ///
2467 : /// The `old_value` may be dropped before the SlotGuard is dropped, by calling
2468 : /// `drop_old_value`. It is an error to call this without shutting down
2469 : /// the conents of `old_value`.
2470 : pub struct SlotGuard {
2471 : tenant_shard_id: TenantShardId,
2472 : old_value: Option<TenantSlot>,
2473 : upserted: bool,
2474 :
2475 : /// [`TenantSlot::InProgress`] carries the corresponding Barrier: it will
2476 : /// release any waiters as soon as this SlotGuard is dropped.
2477 : completion: utils::completion::Completion,
2478 : }
2479 :
2480 : impl SlotGuard {
2481 2 : fn new(
2482 2 : tenant_shard_id: TenantShardId,
2483 2 : old_value: Option<TenantSlot>,
2484 2 : completion: utils::completion::Completion,
2485 2 : ) -> Self {
2486 2 : Self {
2487 2 : tenant_shard_id,
2488 2 : old_value,
2489 2 : upserted: false,
2490 2 : completion,
2491 2 : }
2492 2 : }
2493 :
2494 : /// Get any value that was present in the slot before we acquired ownership
2495 : /// of it: in state transitions, this will be the old state.
2496 2 : fn get_old_value(&self) -> &Option<TenantSlot> {
2497 2 : &self.old_value
2498 2 : }
2499 :
2500 : /// Emplace a new value in the slot. This consumes the guard, and after
2501 : /// returning, the slot is no longer protected from concurrent changes.
2502 0 : fn upsert(mut self, new_value: TenantSlot) -> Result<(), TenantSlotUpsertError> {
2503 0 : if !self.old_value_is_shutdown() {
2504 : // This is a bug: callers should never try to drop an old value without
2505 : // shutting it down
2506 0 : return Err(TenantSlotUpsertError::InternalError(
2507 0 : "Old TenantSlot value not shut down".into(),
2508 0 : ));
2509 0 : }
2510 :
2511 0 : let replaced = {
2512 0 : let mut locked = TENANTS.write().unwrap();
2513 0 :
2514 0 : if let TenantSlot::InProgress(_) = new_value {
2515 : // It is never expected to try and upsert InProgress via this path: it should
2516 : // only be written via the tenant_map_acquire_slot path. If we hit this it's a bug.
2517 0 : return Err(TenantSlotUpsertError::InternalError(
2518 0 : "Attempt to upsert an InProgress state".into(),
2519 0 : ));
2520 0 : }
2521 :
2522 0 : let m = match &mut *locked {
2523 : TenantsMap::Initializing => {
2524 0 : return Err(TenantSlotUpsertError::MapState(
2525 0 : TenantMapError::StillInitializing,
2526 0 : ))
2527 : }
2528 : TenantsMap::ShuttingDown(_) => {
2529 0 : return Err(TenantSlotUpsertError::ShuttingDown((
2530 0 : new_value,
2531 0 : self.completion.clone(),
2532 0 : )));
2533 : }
2534 0 : TenantsMap::Open(m) => m,
2535 0 : };
2536 0 :
2537 0 : METRICS.slot_inserted(&new_value);
2538 0 :
2539 0 : let replaced = m.insert(self.tenant_shard_id, new_value);
2540 0 : self.upserted = true;
2541 0 : if let Some(replaced) = replaced.as_ref() {
2542 0 : METRICS.slot_removed(replaced);
2543 0 : }
2544 :
2545 0 : replaced
2546 : };
2547 :
2548 : // Sanity check: on an upsert we should always be replacing an InProgress marker
2549 0 : match replaced {
2550 : Some(TenantSlot::InProgress(_)) => {
2551 : // Expected case: we find our InProgress in the map: nothing should have
2552 : // replaced it because the code that acquires slots will not grant another
2553 : // one for the same TenantId.
2554 0 : Ok(())
2555 : }
2556 : None => {
2557 0 : METRICS.unexpected_errors.inc();
2558 0 : error!(
2559 : tenant_shard_id = %self.tenant_shard_id,
2560 0 : "Missing InProgress marker during tenant upsert, this is a bug."
2561 : );
2562 0 : Err(TenantSlotUpsertError::InternalError(
2563 0 : "Missing InProgress marker during tenant upsert".into(),
2564 0 : ))
2565 : }
2566 0 : Some(slot) => {
2567 0 : METRICS.unexpected_errors.inc();
2568 0 : error!(tenant_shard_id=%self.tenant_shard_id, "Unexpected contents of TenantSlot during upsert, this is a bug. Contents: {:?}", slot);
2569 0 : Err(TenantSlotUpsertError::InternalError(
2570 0 : "Unexpected contents of TenantSlot".into(),
2571 0 : ))
2572 : }
2573 : }
2574 0 : }
2575 :
2576 : /// Replace the InProgress slot with whatever was in the guard when we started
2577 0 : fn revert(mut self) {
2578 0 : if let Some(value) = self.old_value.take() {
2579 0 : match self.upsert(value) {
2580 0 : Err(TenantSlotUpsertError::InternalError(_)) => {
2581 0 : // We already logged the error, nothing else we can do.
2582 0 : }
2583 : Err(
2584 : TenantSlotUpsertError::MapState(_) | TenantSlotUpsertError::ShuttingDown(_),
2585 0 : ) => {
2586 0 : // If the map is shutting down, we need not replace anything
2587 0 : }
2588 0 : Ok(()) => {}
2589 : }
2590 0 : }
2591 0 : }
2592 :
2593 : /// We may never drop our old value until it is cleanly shut down: otherwise we might leave
2594 : /// rogue background tasks that would write to the local tenant directory that this guard
2595 : /// is responsible for protecting
2596 2 : fn old_value_is_shutdown(&self) -> bool {
2597 2 : match self.old_value.as_ref() {
2598 2 : Some(TenantSlot::Attached(tenant)) => tenant.gate.close_complete(),
2599 0 : Some(TenantSlot::Secondary(secondary_tenant)) => secondary_tenant.gate.close_complete(),
2600 : Some(TenantSlot::InProgress(_)) => {
2601 : // A SlotGuard cannot be constructed for a slot that was already InProgress
2602 0 : unreachable!()
2603 : }
2604 0 : None => true,
2605 : }
2606 2 : }
2607 :
2608 : /// The guard holder is done with the old value of the slot: they are obliged to already
2609 : /// shut it down before we reach this point.
2610 2 : fn drop_old_value(&mut self) -> Result<(), TenantSlotDropError> {
2611 2 : if !self.old_value_is_shutdown() {
2612 0 : Err(TenantSlotDropError::NotShutdown)
2613 : } else {
2614 2 : self.old_value.take();
2615 2 : Ok(())
2616 : }
2617 2 : }
2618 : }
2619 :
2620 : impl Drop for SlotGuard {
2621 2 : fn drop(&mut self) {
2622 2 : if self.upserted {
2623 0 : return;
2624 2 : }
2625 2 : // Our old value is already shutdown, or it never existed: it is safe
2626 2 : // for us to fully release the TenantSlot back into an empty state
2627 2 :
2628 2 : let mut locked = TENANTS.write().unwrap();
2629 :
2630 2 : let m = match &mut *locked {
2631 : TenantsMap::Initializing => {
2632 : // There is no map, this should never happen.
2633 2 : return;
2634 : }
2635 : TenantsMap::ShuttingDown(_) => {
2636 : // When we transition to shutdown, InProgress elements are removed
2637 : // from the map, so we do not need to clean up our Inprogress marker.
2638 : // See [`shutdown_all_tenants0`]
2639 0 : return;
2640 : }
2641 0 : TenantsMap::Open(m) => m,
2642 0 : };
2643 0 :
2644 0 : use std::collections::btree_map::Entry;
2645 0 : match m.entry(self.tenant_shard_id) {
2646 0 : Entry::Occupied(mut entry) => {
2647 0 : if !matches!(entry.get(), TenantSlot::InProgress(_)) {
2648 0 : METRICS.unexpected_errors.inc();
2649 0 : error!(tenant_shard_id=%self.tenant_shard_id, "Unexpected contents of TenantSlot during drop, this is a bug. Contents: {:?}", entry.get());
2650 0 : }
2651 :
2652 0 : if self.old_value_is_shutdown() {
2653 0 : METRICS.slot_removed(entry.get());
2654 0 : entry.remove();
2655 0 : } else {
2656 0 : let inserting = self.old_value.take().unwrap();
2657 0 : METRICS.slot_inserted(&inserting);
2658 0 : let replaced = entry.insert(inserting);
2659 0 : METRICS.slot_removed(&replaced);
2660 0 : }
2661 : }
2662 : Entry::Vacant(_) => {
2663 0 : METRICS.unexpected_errors.inc();
2664 0 : error!(
2665 : tenant_shard_id = %self.tenant_shard_id,
2666 0 : "Missing InProgress marker during SlotGuard drop, this is a bug."
2667 : );
2668 : }
2669 : }
2670 2 : }
2671 : }
2672 :
2673 : enum TenantSlotPeekMode {
2674 : /// In Read mode, peek will be permitted to see the slots even if the pageserver is shutting down
2675 : Read,
2676 : /// In Write mode, trying to peek at a slot while the pageserver is shutting down is an error
2677 : Write,
2678 : }
2679 :
2680 0 : fn tenant_map_peek_slot<'a>(
2681 0 : tenants: &'a std::sync::RwLockReadGuard<'a, TenantsMap>,
2682 0 : tenant_shard_id: &TenantShardId,
2683 0 : mode: TenantSlotPeekMode,
2684 0 : ) -> Result<Option<&'a TenantSlot>, TenantMapError> {
2685 0 : match tenants.deref() {
2686 0 : TenantsMap::Initializing => Err(TenantMapError::StillInitializing),
2687 0 : TenantsMap::ShuttingDown(m) => match mode {
2688 : TenantSlotPeekMode::Read => Ok(Some(
2689 : // When reading in ShuttingDown state, we must translate None results
2690 : // into a ShuttingDown error, because absence of a tenant shard ID in the map
2691 : // isn't a reliable indicator of the tenant being gone: it might have been
2692 : // InProgress when shutdown started, and cleaned up from that state such
2693 : // that it's now no longer in the map. Callers will have to wait until
2694 : // we next start up to get a proper answer. This avoids incorrect 404 API responses.
2695 0 : m.get(tenant_shard_id).ok_or(TenantMapError::ShuttingDown)?,
2696 : )),
2697 0 : TenantSlotPeekMode::Write => Err(TenantMapError::ShuttingDown),
2698 : },
2699 0 : TenantsMap::Open(m) => Ok(m.get(tenant_shard_id)),
2700 : }
2701 0 : }
2702 :
2703 : enum TenantSlotAcquireMode {
2704 : /// Acquire the slot irrespective of current state, or whether it already exists
2705 : Any,
2706 : /// Return an error if trying to acquire a slot and it doesn't already exist
2707 : MustExist,
2708 : /// Return an error if trying to acquire a slot and it already exists
2709 : MustNotExist,
2710 : }
2711 :
2712 0 : fn tenant_map_acquire_slot(
2713 0 : tenant_shard_id: &TenantShardId,
2714 0 : mode: TenantSlotAcquireMode,
2715 0 : ) -> Result<SlotGuard, TenantSlotError> {
2716 0 : tenant_map_acquire_slot_impl(tenant_shard_id, &TENANTS, mode)
2717 0 : }
2718 :
2719 2 : fn tenant_map_acquire_slot_impl(
2720 2 : tenant_shard_id: &TenantShardId,
2721 2 : tenants: &std::sync::RwLock<TenantsMap>,
2722 2 : mode: TenantSlotAcquireMode,
2723 2 : ) -> Result<SlotGuard, TenantSlotError> {
2724 2 : use TenantSlotAcquireMode::*;
2725 2 : METRICS.tenant_slot_writes.inc();
2726 2 :
2727 2 : let mut locked = tenants.write().unwrap();
2728 2 : let span = tracing::info_span!("acquire_slot", tenant_id=%tenant_shard_id.tenant_id, shard_id = %tenant_shard_id.shard_slug());
2729 2 : let _guard = span.enter();
2730 :
2731 2 : let m = match &mut *locked {
2732 0 : TenantsMap::Initializing => return Err(TenantMapError::StillInitializing.into()),
2733 0 : TenantsMap::ShuttingDown(_) => return Err(TenantMapError::ShuttingDown.into()),
2734 2 : TenantsMap::Open(m) => m,
2735 2 : };
2736 2 :
2737 2 : use std::collections::btree_map::Entry;
2738 2 :
2739 2 : let entry = m.entry(*tenant_shard_id);
2740 2 :
2741 2 : match entry {
2742 0 : Entry::Vacant(v) => match mode {
2743 : MustExist => {
2744 0 : tracing::debug!("Vacant && MustExist: return NotFound");
2745 0 : Err(TenantSlotError::NotFound(*tenant_shard_id))
2746 : }
2747 : _ => {
2748 0 : let (completion, barrier) = utils::completion::channel();
2749 0 : let inserting = TenantSlot::InProgress(barrier);
2750 0 : METRICS.slot_inserted(&inserting);
2751 0 : v.insert(inserting);
2752 0 : tracing::debug!("Vacant, inserted InProgress");
2753 0 : Ok(SlotGuard::new(*tenant_shard_id, None, completion))
2754 : }
2755 : },
2756 2 : Entry::Occupied(mut o) => {
2757 2 : // Apply mode-driven checks
2758 2 : match (o.get(), mode) {
2759 : (TenantSlot::InProgress(_), _) => {
2760 0 : tracing::debug!("Occupied, failing for InProgress");
2761 0 : Err(TenantSlotError::InProgress)
2762 : }
2763 0 : (slot, MustNotExist) => match slot {
2764 0 : TenantSlot::Attached(tenant) => {
2765 0 : tracing::debug!("Attached && MustNotExist, return AlreadyExists");
2766 0 : Err(TenantSlotError::AlreadyExists(
2767 0 : *tenant_shard_id,
2768 0 : tenant.current_state(),
2769 0 : ))
2770 : }
2771 : _ => {
2772 : // FIXME: the AlreadyExists error assumes that we have a Tenant
2773 : // to get the state from
2774 0 : tracing::debug!("Occupied & MustNotExist, return AlreadyExists");
2775 0 : Err(TenantSlotError::AlreadyExists(
2776 0 : *tenant_shard_id,
2777 0 : TenantState::Broken {
2778 0 : reason: "Present but not attached".to_string(),
2779 0 : backtrace: "".to_string(),
2780 0 : },
2781 0 : ))
2782 : }
2783 : },
2784 : _ => {
2785 : // Happy case: the slot was not in any state that violated our mode
2786 2 : let (completion, barrier) = utils::completion::channel();
2787 2 : let in_progress = TenantSlot::InProgress(barrier);
2788 2 : METRICS.slot_inserted(&in_progress);
2789 2 : let old_value = o.insert(in_progress);
2790 2 : METRICS.slot_removed(&old_value);
2791 2 : tracing::debug!("Occupied, replaced with InProgress");
2792 2 : Ok(SlotGuard::new(
2793 2 : *tenant_shard_id,
2794 2 : Some(old_value),
2795 2 : completion,
2796 2 : ))
2797 : }
2798 : }
2799 : }
2800 : }
2801 2 : }
2802 :
2803 : /// Stops and removes the tenant from memory, if it's not [`TenantState::Stopping`] already, bails otherwise.
2804 : /// Allows to remove other tenant resources manually, via `tenant_cleanup`.
2805 : /// If the cleanup fails, tenant will stay in memory in [`TenantState::Broken`] state, and another removal
2806 : /// operation would be needed to remove it.
2807 2 : async fn remove_tenant_from_memory<V, F>(
2808 2 : tenants: &std::sync::RwLock<TenantsMap>,
2809 2 : tenant_shard_id: TenantShardId,
2810 2 : tenant_cleanup: F,
2811 2 : ) -> Result<V, TenantStateError>
2812 2 : where
2813 2 : F: std::future::Future<Output = anyhow::Result<V>>,
2814 2 : {
2815 2 : let mut slot_guard =
2816 2 : tenant_map_acquire_slot_impl(&tenant_shard_id, tenants, TenantSlotAcquireMode::MustExist)?;
2817 :
2818 : // allow pageserver shutdown to await for our completion
2819 2 : let (_guard, progress) = completion::channel();
2820 :
2821 : // The SlotGuard allows us to manipulate the Tenant object without fear of some
2822 : // concurrent API request doing something else for the same tenant ID.
2823 2 : let attached_tenant = match slot_guard.get_old_value() {
2824 2 : Some(TenantSlot::Attached(tenant)) => {
2825 2 : // whenever we remove a tenant from memory, we don't want to flush and wait for upload
2826 2 : let shutdown_mode = ShutdownMode::Hard;
2827 2 :
2828 2 : // shutdown is sure to transition tenant to stopping, and wait for all tasks to complete, so
2829 2 : // that we can continue safely to cleanup.
2830 2 : match tenant.shutdown(progress, shutdown_mode).await {
2831 2 : Ok(()) => {}
2832 0 : Err(_other) => {
2833 0 : // if pageserver shutdown or other detach/ignore is already ongoing, we don't want to
2834 0 : // wait for it but return an error right away because these are distinct requests.
2835 0 : slot_guard.revert();
2836 0 : return Err(TenantStateError::IsStopping(tenant_shard_id));
2837 : }
2838 : }
2839 2 : Some(tenant)
2840 : }
2841 0 : Some(TenantSlot::Secondary(secondary_state)) => {
2842 0 : tracing::info!("Shutting down in secondary mode");
2843 0 : secondary_state.shutdown().await;
2844 0 : None
2845 : }
2846 : Some(TenantSlot::InProgress(_)) => {
2847 : // Acquiring a slot guarantees its old value was not InProgress
2848 0 : unreachable!();
2849 : }
2850 0 : None => None,
2851 : };
2852 :
2853 2 : match tenant_cleanup
2854 2 : .await
2855 2 : .with_context(|| format!("Failed to run cleanup for tenant {tenant_shard_id}"))
2856 : {
2857 2 : Ok(hook_value) => {
2858 2 : // Success: drop the old TenantSlot::Attached.
2859 2 : slot_guard
2860 2 : .drop_old_value()
2861 2 : .expect("We just called shutdown");
2862 2 :
2863 2 : Ok(hook_value)
2864 : }
2865 0 : Err(e) => {
2866 : // If we had a Tenant, set it to Broken and put it back in the TenantsMap
2867 0 : if let Some(attached_tenant) = attached_tenant {
2868 0 : attached_tenant.set_broken(e.to_string()).await;
2869 0 : }
2870 : // Leave the broken tenant in the map
2871 0 : slot_guard.revert();
2872 0 :
2873 0 : Err(TenantStateError::Other(e))
2874 : }
2875 : }
2876 2 : }
2877 :
2878 : use {
2879 : crate::repository::GcResult, pageserver_api::models::TimelineGcRequest,
2880 : utils::http::error::ApiError,
2881 : };
2882 :
2883 0 : pub(crate) fn immediate_gc(
2884 0 : tenant_shard_id: TenantShardId,
2885 0 : timeline_id: TimelineId,
2886 0 : gc_req: TimelineGcRequest,
2887 0 : cancel: CancellationToken,
2888 0 : ctx: &RequestContext,
2889 0 : ) -> Result<tokio::sync::oneshot::Receiver<Result<GcResult, anyhow::Error>>, ApiError> {
2890 0 : let guard = TENANTS.read().unwrap();
2891 :
2892 0 : let tenant = guard
2893 0 : .get(&tenant_shard_id)
2894 0 : .cloned()
2895 0 : .with_context(|| format!("tenant {tenant_shard_id}"))
2896 0 : .map_err(|e| ApiError::NotFound(e.into()))?;
2897 :
2898 0 : let gc_horizon = gc_req.gc_horizon.unwrap_or_else(|| tenant.get_gc_horizon());
2899 0 : // Use tenant's pitr setting
2900 0 : let pitr = tenant.get_pitr_interval();
2901 0 :
2902 0 : // Run in task_mgr to avoid race with tenant_detach operation
2903 0 : let ctx = ctx.detached_child(TaskKind::GarbageCollector, DownloadBehavior::Download);
2904 0 : let (task_done, wait_task_done) = tokio::sync::oneshot::channel();
2905 0 : let span = info_span!("manual_gc", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id);
2906 :
2907 : // TODO: spawning is redundant now, need to hold the gate
2908 0 : task_mgr::spawn(
2909 0 : &tokio::runtime::Handle::current(),
2910 0 : TaskKind::GarbageCollector,
2911 0 : Some(tenant_shard_id),
2912 0 : Some(timeline_id),
2913 0 : &format!("timeline_gc_handler garbage collection run for tenant {tenant_shard_id} timeline {timeline_id}"),
2914 0 : false,
2915 0 : async move {
2916 : fail::fail_point!("immediate_gc_task_pre");
2917 :
2918 : #[allow(unused_mut)]
2919 0 : let mut result = tenant
2920 0 : .gc_iteration(Some(timeline_id), gc_horizon, pitr, &cancel, &ctx)
2921 0 : .await;
2922 : // FIXME: `gc_iteration` can return an error for multiple reasons; we should handle it
2923 : // better once the types support it.
2924 :
2925 : #[cfg(feature = "testing")]
2926 : {
2927 : // we need to synchronize with drop completion for python tests without polling for
2928 : // log messages
2929 0 : if let Ok(result) = result.as_mut() {
2930 0 : let mut js = tokio::task::JoinSet::new();
2931 0 : for layer in std::mem::take(&mut result.doomed_layers) {
2932 0 : js.spawn(layer.wait_drop());
2933 0 : }
2934 0 : tracing::info!(total = js.len(), "starting to wait for the gc'd layers to be dropped");
2935 0 : while let Some(res) = js.join_next().await {
2936 0 : res.expect("wait_drop should not panic");
2937 0 : }
2938 0 : }
2939 :
2940 0 : let timeline = tenant.get_timeline(timeline_id, false).ok();
2941 0 : let rtc = timeline.as_ref().and_then(|x| x.remote_client.as_ref());
2942 :
2943 0 : if let Some(rtc) = rtc {
2944 : // layer drops schedule actions on remote timeline client to actually do the
2945 : // deletions; don't care about the shutdown error, just exit fast
2946 0 : drop(rtc.wait_completion().await);
2947 0 : }
2948 : }
2949 :
2950 0 : match task_done.send(result) {
2951 0 : Ok(_) => (),
2952 0 : Err(result) => error!("failed to send gc result: {result:?}"),
2953 : }
2954 0 : Ok(())
2955 0 : }
2956 0 : .instrument(span)
2957 0 : );
2958 0 :
2959 0 : // drop the guard until after we've spawned the task so that timeline shutdown will wait for the task
2960 0 : drop(guard);
2961 0 :
2962 0 : Ok(wait_task_done)
2963 0 : }
2964 :
2965 : #[cfg(test)]
2966 : mod tests {
2967 : use std::collections::BTreeMap;
2968 : use std::sync::Arc;
2969 : use tracing::Instrument;
2970 :
2971 : use crate::tenant::mgr::TenantSlot;
2972 :
2973 : use super::{super::harness::TenantHarness, TenantsMap};
2974 :
2975 : #[tokio::test(start_paused = true)]
2976 2 : async fn shutdown_awaits_in_progress_tenant() {
2977 2 : // Test that if an InProgress tenant is in the map during shutdown, the shutdown will gracefully
2978 2 : // wait for it to complete before proceeding.
2979 2 :
2980 2 : let h = TenantHarness::create("shutdown_awaits_in_progress_tenant").unwrap();
2981 8 : let (t, _ctx) = h.load().await;
2982 2 :
2983 2 : // harness loads it to active, which is forced and nothing is running on the tenant
2984 2 :
2985 2 : let id = t.tenant_shard_id();
2986 2 :
2987 2 : // tenant harness configures the logging and we cannot escape it
2988 2 : let span = h.span();
2989 2 : let _e = span.enter();
2990 2 :
2991 2 : let tenants = BTreeMap::from([(id, TenantSlot::Attached(t.clone()))]);
2992 2 : let tenants = Arc::new(std::sync::RwLock::new(TenantsMap::Open(tenants)));
2993 2 :
2994 2 : // Invoke remove_tenant_from_memory with a cleanup hook that blocks until we manually
2995 2 : // permit it to proceed: that will stick the tenant in InProgress
2996 2 :
2997 2 : let (until_cleanup_completed, can_complete_cleanup) = utils::completion::channel();
2998 2 : let (until_cleanup_started, cleanup_started) = utils::completion::channel();
2999 2 : let mut remove_tenant_from_memory_task = {
3000 2 : let jh = tokio::spawn({
3001 2 : let tenants = tenants.clone();
3002 2 : async move {
3003 2 : let cleanup = async move {
3004 2 : drop(until_cleanup_started);
3005 2 : can_complete_cleanup.wait().await;
3006 2 : anyhow::Ok(())
3007 2 : };
3008 2 : super::remove_tenant_from_memory(&tenants, id, cleanup).await
3009 2 : }
3010 2 : .instrument(h.span())
3011 2 : });
3012 2 :
3013 2 : // now the long cleanup should be in place, with the stopping state
3014 2 : cleanup_started.wait().await;
3015 2 : jh
3016 2 : };
3017 2 :
3018 2 : let mut shutdown_task = {
3019 2 : let (until_shutdown_started, shutdown_started) = utils::completion::channel();
3020 2 :
3021 2 : let shutdown_task = tokio::spawn(async move {
3022 2 : drop(until_shutdown_started);
3023 4 : super::shutdown_all_tenants0(&tenants).await;
3024 2 : });
3025 2 :
3026 2 : shutdown_started.wait().await;
3027 2 : shutdown_task
3028 2 : };
3029 2 :
3030 2 : let long_time = std::time::Duration::from_secs(15);
3031 2 : tokio::select! {
3032 2 : _ = &mut shutdown_task => unreachable!("shutdown should block on remove_tenant_from_memory completing"),
3033 2 : _ = &mut remove_tenant_from_memory_task => unreachable!("remove_tenant_from_memory_task should not complete until explicitly unblocked"),
3034 2 : _ = tokio::time::sleep(long_time) => {},
3035 2 : }
3036 2 :
3037 2 : drop(until_cleanup_completed);
3038 2 :
3039 2 : // Now that we allow it to proceed, shutdown should complete immediately
3040 2 : remove_tenant_from_memory_task.await.unwrap().unwrap();
3041 2 : shutdown_task.await.unwrap();
3042 2 : }
3043 : }
|