Line data Source code
1 : use std::sync::Arc;
2 :
3 : use anyhow::Context;
4 : use camino::{Utf8Path, Utf8PathBuf};
5 : use pageserver_api::{models::TenantState, shard::TenantShardId};
6 : use remote_storage::{GenericRemoteStorage, RemotePath};
7 : use tokio::sync::OwnedMutexGuard;
8 : use tokio_util::sync::CancellationToken;
9 : use tracing::{error, instrument, Instrument};
10 :
11 : use utils::{backoff, completion, crashsafe, fs_ext, id::TimelineId};
12 :
13 : use crate::{
14 : config::PageServerConf,
15 : context::RequestContext,
16 : task_mgr::{self, TaskKind},
17 : tenant::mgr::{TenantSlot, TenantsMapRemoveResult},
18 : };
19 :
20 : use super::{
21 : mgr::{GetTenantError, TenantSlotError, TenantSlotUpsertError, TenantsMap},
22 : remote_timeline_client::{FAILED_REMOTE_OP_RETRIES, FAILED_UPLOAD_WARN_THRESHOLD},
23 : span,
24 : timeline::delete::DeleteTimelineFlow,
25 : tree_sort_timelines, DeleteTimelineError, Tenant, TenantPreload,
26 : };
27 :
28 204 : #[derive(Debug, thiserror::Error)]
29 : pub(crate) enum DeleteTenantError {
30 : #[error("GetTenant {0}")]
31 : Get(#[from] GetTenantError),
32 :
33 : #[error("Tenant not attached")]
34 : NotAttached,
35 :
36 : #[error("Invalid state {0}. Expected Active or Broken")]
37 : InvalidState(TenantState),
38 :
39 : #[error("Tenant deletion is already in progress")]
40 : AlreadyInProgress,
41 :
42 : #[error("Tenant map slot error {0}")]
43 : SlotError(#[from] TenantSlotError),
44 :
45 : #[error("Tenant map slot upsert error {0}")]
46 : SlotUpsertError(#[from] TenantSlotUpsertError),
47 :
48 : #[error("Timeline {0}")]
49 : Timeline(#[from] DeleteTimelineError),
50 :
51 : #[error("Cancelled")]
52 : Cancelled,
53 :
54 : #[error(transparent)]
55 : Other(#[from] anyhow::Error),
56 : }
57 :
58 : type DeletionGuard = tokio::sync::OwnedMutexGuard<DeleteTenantFlow>;
59 :
60 182 : fn remote_tenant_delete_mark_path(
61 182 : conf: &PageServerConf,
62 182 : tenant_shard_id: &TenantShardId,
63 182 : ) -> anyhow::Result<RemotePath> {
64 182 : let tenant_remote_path = conf
65 182 : .tenant_path(tenant_shard_id)
66 182 : .strip_prefix(&conf.workdir)
67 182 : .context("Failed to strip workdir prefix")
68 182 : .and_then(RemotePath::new)
69 182 : .context("tenant path")?;
70 182 : Ok(tenant_remote_path.join(Utf8Path::new("timelines/deleted")))
71 182 : }
72 :
73 95 : async fn create_remote_delete_mark(
74 95 : conf: &PageServerConf,
75 95 : remote_storage: &GenericRemoteStorage,
76 95 : tenant_shard_id: &TenantShardId,
77 95 : cancel: &CancellationToken,
78 95 : ) -> Result<(), DeleteTenantError> {
79 95 : let remote_mark_path = remote_tenant_delete_mark_path(conf, tenant_shard_id)?;
80 :
81 95 : let data: &[u8] = &[];
82 95 : backoff::retry(
83 133 : || async {
84 133 : let data = bytes::Bytes::from_static(data);
85 133 : let stream = futures::stream::once(futures::future::ready(Ok(data)));
86 133 : remote_storage
87 133 : .upload(stream, 0, &remote_mark_path, None)
88 196 : .await
89 266 : },
90 95 : |_e| false,
91 95 : FAILED_UPLOAD_WARN_THRESHOLD,
92 95 : FAILED_REMOTE_OP_RETRIES,
93 95 : "mark_upload",
94 95 : cancel,
95 95 : )
96 196 : .await
97 95 : .ok_or_else(|| anyhow::anyhow!("Cancelled"))
98 95 : .and_then(|x| x)
99 95 : .context("mark_upload")?;
100 :
101 95 : Ok(())
102 95 : }
103 :
104 91 : async fn create_local_delete_mark(
105 91 : conf: &PageServerConf,
106 91 : tenant_shard_id: &TenantShardId,
107 91 : ) -> Result<(), DeleteTenantError> {
108 91 : let marker_path = conf.tenant_deleted_mark_file_path(tenant_shard_id);
109 91 :
110 91 : // Note: it's OK to replace an existing file.
111 91 : let _ = std::fs::OpenOptions::new()
112 91 : .write(true)
113 91 : .create(true)
114 91 : .open(&marker_path)
115 91 : .with_context(|| format!("could not create delete marker file {marker_path:?}"))?;
116 :
117 91 : crashsafe::fsync_file_and_parent(&marker_path).context("sync_mark")?;
118 :
119 91 : Ok(())
120 91 : }
121 :
122 108 : async fn schedule_ordered_timeline_deletions(
123 108 : tenant: &Arc<Tenant>,
124 108 : ) -> Result<Vec<(Arc<tokio::sync::Mutex<DeleteTimelineFlow>>, TimelineId)>, DeleteTenantError> {
125 108 : // Tenant is stopping at this point. We know it will be deleted.
126 108 : // No new timelines should be created.
127 108 : // Tree sort the timelines to delete, from the leaves to the root.
128 108 : // NOTE: by calling clone we release the mutex, which creates a possibility for a race: a pending deletion
129 108 : // can complete and remove the timeline from the map between our call to clone
130 108 : // and `DeleteTimelineFlow::run`, so `run` won't find the timeline in the `timelines` map.
131 108 : // timelines.lock is currently synchronous, so we can't hold it across an await point.
132 108 : // So just ignore a NotFound error if we get it from `run`.
133 108 : // Beware: in case it becomes async and we try to hold it here, `run` also locks it, which can create a deadlock.
134 108 : let timelines = tenant.timelines.lock().unwrap().clone();
135 108 : let sorted =
136 163 : tree_sort_timelines(timelines, |t| t.get_ancestor_timeline_id()).context("tree sort")?;
137 :
138 108 : let mut already_running_deletions = vec![];
139 :
140 145 : for (timeline_id, _) in sorted.into_iter().rev() {
141 145 : let span = tracing::info_span!("timeline_delete", %timeline_id);
142 145 : let res = DeleteTimelineFlow::run(tenant, timeline_id, true)
143 145 : .instrument(span)
144 5017 : .await;
145 145 : if let Err(e) = res {
146 22 : match e {
147 : DeleteTimelineError::NotFound => {
148 : // Timeline deletion finished after the call to clone above but before the call
149 : // to `DeleteTimelineFlow::run`, and removed the timeline from the map.
150 1 : continue;
151 : }
152 4 : DeleteTimelineError::AlreadyInProgress(guard) => {
153 4 : already_running_deletions.push((guard, timeline_id));
154 4 : continue;
155 : }
156 17 : e => return Err(DeleteTenantError::Timeline(e)),
157 : }
158 123 : }
159 : }
160 :
161 91 : Ok(already_running_deletions)
162 108 : }
163 :
164 81 : async fn ensure_timelines_dir_empty(timelines_path: &Utf8Path) -> Result<(), DeleteTenantError> {
165 81 : // Assert timelines dir is empty.
166 81 : if !fs_ext::is_directory_empty(timelines_path).await? {
167 : // Display first 10 items in directory
168 0 : let list = fs_ext::list_dir(timelines_path).await.context("list_dir")?;
169 0 : let list = &list.into_iter().take(10).collect::<Vec<_>>();
170 0 : return Err(DeleteTenantError::Other(anyhow::anyhow!(
171 0 : "Timelines directory is not empty after all timelines deletion: {list:?}"
172 0 : )));
173 81 : }
174 81 :
175 81 : Ok(())
176 81 : }
177 :
178 87 : async fn remove_tenant_remote_delete_mark(
179 87 : conf: &PageServerConf,
180 87 : remote_storage: Option<&GenericRemoteStorage>,
181 87 : tenant_shard_id: &TenantShardId,
182 87 : cancel: &CancellationToken,
183 87 : ) -> Result<(), DeleteTenantError> {
184 87 : if let Some(remote_storage) = remote_storage {
185 87 : let path = remote_tenant_delete_mark_path(conf, tenant_shard_id)?;
186 87 : backoff::retry(
187 316 : || async { remote_storage.delete(&path).await },
188 87 : |_e| false,
189 87 : FAILED_UPLOAD_WARN_THRESHOLD,
190 87 : FAILED_REMOTE_OP_RETRIES,
191 87 : "remove_tenant_remote_delete_mark",
192 87 : cancel,
193 87 : )
194 316 : .await
195 87 : .ok_or_else(|| anyhow::anyhow!("Cancelled"))
196 87 : .and_then(|x| x)
197 87 : .context("remove_tenant_remote_delete_mark")?;
198 0 : }
199 87 : Ok(())
200 87 : }
201 :
202 : // Clean up fs traces: tenant config, timelines dir, local delete mark, tenant dir
203 83 : async fn cleanup_remaining_fs_traces(
204 83 : conf: &PageServerConf,
205 83 : tenant_shard_id: &TenantShardId,
206 83 : ) -> Result<(), DeleteTenantError> {
207 391 : let rm = |p: Utf8PathBuf, is_dir: bool| async move {
208 391 : if is_dir {
209 150 : tokio::fs::remove_dir(&p).await
210 : } else {
211 241 : tokio::fs::remove_file(&p).await
212 : }
213 391 : .or_else(fs_ext::ignore_not_found)
214 391 : .with_context(|| format!("failed to delete {p}"))
215 391 : };
216 :
217 83 : rm(conf.tenant_config_path(tenant_shard_id), false).await?;
218 83 : rm(conf.tenant_location_config_path(tenant_shard_id), false).await?;
219 :
220 83 : fail::fail_point!("tenant-delete-before-remove-timelines-dir", |_| {
221 4 : Err(anyhow::anyhow!(
222 4 : "failpoint: tenant-delete-before-remove-timelines-dir"
223 4 : ))?
224 83 : });
225 :
226 79 : rm(conf.timelines_path(tenant_shard_id), true).await?;
227 :
228 79 : fail::fail_point!("tenant-delete-before-remove-deleted-mark", |_| {
229 4 : Err(anyhow::anyhow!(
230 4 : "failpoint: tenant-delete-before-remove-deleted-mark"
231 4 : ))?
232 79 : });
233 :
234 : // Make sure previous deletions are ordered before mark removal.
235 : // Otherwise there is no guarantee that they reach the disk before mark deletion.
236 : // So it's possible for the mark to reach the disk first and for the other deletions
237 : // to be reordered later and thus missed if a crash occurs.
238 : // Note that we don't need to sync after the mark file is removed
239 : // because we can tolerate the case when mark file reappears on startup.
240 75 : let tenant_path = &conf.tenant_path(tenant_shard_id);
241 75 : if tenant_path.exists() {
242 75 : crashsafe::fsync_async(&conf.tenant_path(tenant_shard_id))
243 150 : .await
244 75 : .context("fsync_pre_mark_remove")?;
245 0 : }
246 :
247 75 : rm(conf.tenant_deleted_mark_file_path(tenant_shard_id), false).await?;
248 :
249 75 : fail::fail_point!("tenant-delete-before-remove-tenant-dir", |_| {
250 4 : Err(anyhow::anyhow!(
251 4 : "failpoint: tenant-delete-before-remove-tenant-dir"
252 4 : ))?
253 75 : });
254 :
255 71 : rm(conf.tenant_path(tenant_shard_id), true).await?;
256 :
257 71 : Ok(())
258 83 : }
259 :
260 : /// Orchestrates the shutdown of all tenant tasks, removes the tenant's in-memory structures,
261 : /// and deletes its data from both disk and S3.
262 : /// The sequence of steps:
263 : /// 1. Upload remote deletion mark.
264 : /// 2. Create local mark file.
265 : /// 3. Shut down tasks.
266 : /// 4. Run ordered timeline deletions.
267 : /// 5. Wait for timeline deletion operations that were scheduled before tenant deletion was requested.
268 : /// 6. Remove remote mark.
269 : /// 7. Clean up remaining fs traces: tenant dir, config, timelines dir, local delete mark.
270 : /// It is resumable from any step in case a crash/restart occurs.
271 : /// There are two entrypoints to the process:
272 : /// 1. [`DeleteTenantFlow::run`] is the main one, called by a management API handler.
273 : /// 2. [`DeleteTenantFlow::resume_from_attach`] is called when deletion is resumed because the tenant is found to be deleted during the attach process.
274 : /// Note: the only other place that messes with the timeline delete mark is the `Tenant::spawn_load` function.
275 970 : #[derive(Default)]
276 : pub enum DeleteTenantFlow {
277 : #[default]
278 : NotStarted,
279 : InProgress,
280 : Finished,
281 : }
282 :
283 : impl DeleteTenantFlow {
284 : // These steps are run in the context of the management API request handler.
285 : // Long-running steps continue running in the background.
286 : // NB: If this fails half-way through, and is retried, the retry will go through
287 : // all the same steps again. Make sure the code here is idempotent, and don't
288 : // error out if some of the shutdown tasks have already been completed!
289 : // NOTE: static needed for background part.
290 : // We assume that calling code sets up the span with tenant_id.
291 210 : #[instrument(skip_all)]
292 : pub(crate) async fn run(
293 : conf: &'static PageServerConf,
294 : remote_storage: Option<GenericRemoteStorage>,
295 : tenants: &'static std::sync::RwLock<TenantsMap>,
296 : tenant: Arc<Tenant>,
297 : ) -> Result<(), DeleteTenantError> {
298 : span::debug_assert_current_span_has_tenant_id();
299 :
300 105 : pausable_failpoint!("tenant-delete-before-run");
301 :
302 : let mut guard = Self::prepare(&tenant).await?;
303 :
304 : if let Err(e) = Self::run_inner(&mut guard, conf, remote_storage.as_ref(), &tenant).await {
305 : tenant.set_broken(format!("{e:#}")).await;
306 : return Err(e);
307 : }
308 :
309 : Self::schedule_background(guard, conf, remote_storage, tenants, tenant);
310 :
311 : Ok(())
312 : }
313 :
314 : // Helper function needed so we can match once on the returned error and transition the tenant into Broken state.
315 : // This is needed because tenant.shutdown is not idempotent. If the tenant state is set to Stopping, another call to tenant.shutdown
316 : // will result in an error, but here we need to be able to retry shutdown when tenant deletion is retried.
317 : // So the solution is to set tenant state to broken.
318 99 : async fn run_inner(
319 99 : guard: &mut OwnedMutexGuard<Self>,
320 99 : conf: &'static PageServerConf,
321 99 : remote_storage: Option<&GenericRemoteStorage>,
322 99 : tenant: &Tenant,
323 99 : ) -> Result<(), DeleteTenantError> {
324 99 : guard.mark_in_progress()?;
325 :
326 99 : fail::fail_point!("tenant-delete-before-create-remote-mark", |_| {
327 4 : Err(anyhow::anyhow!(
328 4 : "failpoint: tenant-delete-before-create-remote-mark"
329 4 : ))?
330 99 : });
331 :
332 : // IDEA: implement detach as delete without remote storage. Then they would use the same lock (deletion_progress), so they won't contend.
333 : // Though that sounds scary; maybe use a different mark name?
334 : // Detach currently uses remove_dir_all so in case of a crash we can end up in a weird state.
335 95 : if let Some(remote_storage) = &remote_storage {
336 95 : create_remote_delete_mark(
337 95 : conf,
338 95 : remote_storage,
339 95 : &tenant.tenant_shard_id,
340 95 : // Can't use tenant.cancel, it's already shut down. TODO: wire in an appropriate token
341 95 : &CancellationToken::new(),
342 95 : )
343 196 : .await
344 95 : .context("remote_mark")?
345 0 : }
346 :
347 95 : fail::fail_point!("tenant-delete-before-create-local-mark", |_| {
348 4 : Err(anyhow::anyhow!(
349 4 : "failpoint: tenant-delete-before-create-local-mark"
350 4 : ))?
351 95 : });
352 :
353 91 : create_local_delete_mark(conf, &tenant.tenant_shard_id)
354 0 : .await
355 91 : .context("local delete mark")?;
356 :
357 91 : fail::fail_point!("tenant-delete-before-background", |_| {
358 4 : Err(anyhow::anyhow!(
359 4 : "failpoint: tenant-delete-before-background"
360 4 : ))?
361 91 : });
362 :
363 87 : Ok(())
364 99 : }
365 :
366 99 : fn mark_in_progress(&mut self) -> anyhow::Result<()> {
367 99 : match self {
368 0 : Self::Finished => anyhow::bail!("Bug. Is in finished state"),
369 24 : Self::InProgress { .. } => { /* We're in a retry */ }
370 75 : Self::NotStarted => { /* Fresh start */ }
371 : }
372 :
373 99 : *self = Self::InProgress;
374 99 :
375 99 : Ok(())
376 99 : }
377 :
378 877 : pub(crate) async fn should_resume_deletion(
379 877 : conf: &'static PageServerConf,
380 877 : remote_mark_exists: bool,
381 877 : tenant: &Tenant,
382 877 : ) -> Result<Option<DeletionGuard>, DeleteTenantError> {
383 877 : let acquire = |t: &Tenant| {
384 21 : Some(
385 21 : Arc::clone(&t.delete_progress)
386 21 : .try_lock_owned()
387 21 : .expect("we're the only owner during init"),
388 21 : )
389 21 : };
390 :
391 877 : if remote_mark_exists {
392 15 : return Ok(acquire(tenant));
393 862 : }
394 862 :
395 862 : // Check the local mark first: if it's there, there is no need to go to S3 to check whether the remote one exists.
396 862 : if conf
397 862 : .tenant_deleted_mark_file_path(&tenant.tenant_shard_id)
398 862 : .exists()
399 : {
400 6 : Ok(acquire(tenant))
401 : } else {
402 856 : Ok(None)
403 : }
404 877 : }
405 :
406 21 : pub(crate) async fn resume_from_attach(
407 21 : guard: DeletionGuard,
408 21 : tenant: &Arc<Tenant>,
409 21 : preload: Option<TenantPreload>,
410 21 : tenants: &'static std::sync::RwLock<TenantsMap>,
411 21 : ctx: &RequestContext,
412 21 : ) -> Result<(), DeleteTenantError> {
413 21 : let (_, progress) = completion::channel();
414 21 :
415 21 : tenant
416 21 : .set_stopping(progress, false, true)
417 0 : .await
418 21 : .expect("cant be stopping or broken");
419 21 :
420 21 : tenant
421 21 : .attach(preload, super::SpawnMode::Normal, ctx)
422 60 : .await
423 21 : .context("attach")?;
424 :
425 21 : Self::background(
426 21 : guard,
427 21 : tenant.conf,
428 21 : tenant.remote_storage.clone(),
429 21 : tenants,
430 21 : tenant,
431 21 : )
432 838 : .await
433 21 : }
434 :
435 105 : async fn prepare(
436 105 : tenant: &Arc<Tenant>,
437 105 : ) -> Result<tokio::sync::OwnedMutexGuard<Self>, DeleteTenantError> {
438 105 : // FIXME: unsure about active only. Our init jobs may not be cancellable properly,
439 105 : // so at least for now allow deletions only for active tenants. TODO recheck
440 105 : // Broken and Stopping are needed for retries.
441 105 : if !matches!(
442 105 : tenant.current_state(),
443 : TenantState::Active | TenantState::Broken { .. }
444 : ) {
445 2 : return Err(DeleteTenantError::InvalidState(tenant.current_state()));
446 103 : }
447 :
448 103 : let guard = Arc::clone(&tenant.delete_progress)
449 103 : .try_lock_owned()
450 103 : .map_err(|_| DeleteTenantError::AlreadyInProgress)?;
451 :
452 103 : fail::fail_point!("tenant-delete-before-shutdown", |_| {
453 4 : Err(anyhow::anyhow!("failpoint: tenant-delete-before-shutdown"))?
454 103 : });
455 :
456 : // Make pageserver shutdown not wait for our completion.
457 99 : let (_, progress) = completion::channel();
458 99 :
459 99 : // It would be good to only set stopping here and continue shutdown in the background, but shutdown is not idempotent.
460 99 : // i.e. it is an error to do:
461 99 : // tenant.set_stopping
462 99 : // tenant.shutdown
463 99 : // It's also bad that we're holding tenants.read here.
464 99 : // TODO relax set_stopping to be idempotent?
465 226 : if tenant.shutdown(progress, false).await.is_err() {
466 0 : return Err(DeleteTenantError::Other(anyhow::anyhow!(
467 0 : "tenant shutdown is already in progress"
468 0 : )));
469 99 : }
470 99 :
471 99 : Ok(guard)
472 105 : }
473 :
474 87 : fn schedule_background(
475 87 : guard: OwnedMutexGuard<Self>,
476 87 : conf: &'static PageServerConf,
477 87 : remote_storage: Option<GenericRemoteStorage>,
478 87 : tenants: &'static std::sync::RwLock<TenantsMap>,
479 87 : tenant: Arc<Tenant>,
480 87 : ) {
481 87 : let tenant_shard_id = tenant.tenant_shard_id;
482 87 :
483 87 : task_mgr::spawn(
484 87 : task_mgr::BACKGROUND_RUNTIME.handle(),
485 87 : TaskKind::TimelineDeletionWorker,
486 87 : Some(tenant_shard_id),
487 87 : None,
488 87 : "tenant_delete",
489 : false,
490 87 : async move {
491 37 : if let Err(err) =
492 5269 : Self::background(guard, conf, remote_storage, tenants, &tenant).await
493 : {
494 37 : error!("Error: {err:#}");
495 37 : tenant.set_broken(format!("{err:#}")).await;
496 50 : };
497 87 : Ok(())
498 87 : }
499 87 : .instrument(tracing::info_span!(parent: None, "delete_tenant", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug())),
500 : );
501 87 : }
502 :
503 108 : async fn background(
504 108 : mut guard: OwnedMutexGuard<Self>,
505 108 : conf: &PageServerConf,
506 108 : remote_storage: Option<GenericRemoteStorage>,
507 108 : tenants: &'static std::sync::RwLock<TenantsMap>,
508 108 : tenant: &Arc<Tenant>,
509 108 : ) -> Result<(), DeleteTenantError> {
510 : // Tree sort timelines, schedule delete for them. Mention retries from the console side.
511 : // Note that if deletion fails we don't mark timelines as broken;
512 : // the whole tenant will become broken per the `Self::schedule_background` logic.
513 108 : let already_running_timeline_deletions = schedule_ordered_timeline_deletions(tenant)
514 5017 : .await
515 108 : .context("schedule_ordered_timeline_deletions")?;
516 :
517 91 : fail::fail_point!("tenant-delete-before-polling-ongoing-deletions", |_| {
518 4 : Err(anyhow::anyhow!(
519 4 : "failpoint: tenant-delete-before-polling-ongoing-deletions"
520 4 : ))?
521 91 : });
522 :
523 : // Wait for deletions that were already running at the moment when tenant deletion was requested.
524 : // When we can lock deletion guard it means that corresponding timeline deletion finished.
525 91 : for (guard, timeline_id) in already_running_timeline_deletions {
526 4 : let flow = guard.lock().await;
527 4 : if !flow.is_finished() {
528 0 : return Err(DeleteTenantError::Other(anyhow::anyhow!(
529 0 : "already running timeline deletion failed: {timeline_id}"
530 0 : )));
531 4 : }
532 : }
533 :
534 87 : let timelines_path = conf.timelines_path(&tenant.tenant_shard_id);
535 87 : // May not exist if we fail in cleanup_remaining_fs_traces after removing it
536 87 : if timelines_path.exists() {
537 : // sanity check to guard against layout changes
538 81 : ensure_timelines_dir_empty(&timelines_path)
539 81 : .await
540 81 : .context("timelines dir not empty")?;
541 6 : }
542 :
543 87 : remove_tenant_remote_delete_mark(
544 87 : conf,
545 87 : remote_storage.as_ref(),
546 87 : &tenant.tenant_shard_id,
547 87 : // Can't use tenant.cancel, it's already shut down. TODO: wire in an appropriate token
548 87 : &CancellationToken::new(),
549 87 : )
550 316 : .await?;
551 :
552 87 : pausable_failpoint!("tenant-delete-before-cleanup-remaining-fs-traces-pausable");
553 87 : fail::fail_point!("tenant-delete-before-cleanup-remaining-fs-traces", |_| {
554 4 : Err(anyhow::anyhow!(
555 4 : "failpoint: tenant-delete-before-cleanup-remaining-fs-traces"
556 4 : ))?
557 87 : });
558 :
559 83 : cleanup_remaining_fs_traces(conf, &tenant.tenant_shard_id)
560 535 : .await
561 83 : .context("cleanup_remaining_fs_traces")?;
562 :
563 : {
564 71 : pausable_failpoint!("tenant-delete-before-map-remove");
565 :
566 : // This block is simply removing the TenantSlot for this tenant. It requires a loop because
567 : // we might conflict with a TenantSlot::InProgress marker and need to wait for it.
568 : //
569 : // This complexity will go away when we simplify how deletion works:
570 : // https://github.com/neondatabase/neon/issues/5080
571 : loop {
572 : // Under the TenantMap lock, try to remove the tenant. We usually succeed, but if
573 : // we encounter an InProgress marker, yield the barrier it contains and wait on it.
574 1 : let barrier = {
575 72 : let mut locked = tenants.write().unwrap();
576 72 : let removed = locked.remove(tenant.tenant_shard_id);
577 72 :
578 72 : // FIXME: we should not be modifying this from outside of mgr.rs.
579 72 : // This will go away when we simplify deletion (https://github.com/neondatabase/neon/issues/5080)
580 72 : crate::metrics::TENANT_MANAGER
581 72 : .tenant_slots
582 72 : .set(locked.len() as u64);
583 :
584 71 : match removed {
585 71 : TenantsMapRemoveResult::Occupied(TenantSlot::Attached(tenant)) => {
586 71 : match tenant.current_state() {
587 71 : TenantState::Stopping { .. } | TenantState::Broken { .. } => {
588 71 : // Expected: we put the tenant into stopping state before we start deleting it
589 71 : }
590 0 : state => {
591 : // Unexpected state
592 0 : tracing::warn!(
593 0 : "Tenant in unexpected state {state} after deletion"
594 0 : );
595 : }
596 : }
597 71 : break;
598 : }
599 : TenantsMapRemoveResult::Occupied(TenantSlot::Secondary(_)) => {
600 : // This is unexpected: this secondary tenant should not have been created, and we
601 : // are not in a position to shut it down from here.
602 0 : tracing::warn!("Tenant transitioned to secondary mode while deleting!");
603 0 : break;
604 : }
605 : TenantsMapRemoveResult::Occupied(TenantSlot::InProgress(_)) => {
606 0 : unreachable!("TenantsMap::remove handles InProgress separately, should never return it here");
607 : }
608 : TenantsMapRemoveResult::Vacant => {
609 0 : tracing::warn!(
610 0 : "Tenant removed from TenantsMap before deletion completed"
611 0 : );
612 0 : break;
613 : }
614 1 : TenantsMapRemoveResult::InProgress(barrier) => {
615 1 : // An InProgress entry was found; we must wait on its barrier
616 1 : barrier
617 : }
618 : }
619 : };
620 :
621 1 : tracing::info!(
622 1 : "Waiting for competing operation to complete before deleting state for tenant"
623 1 : );
624 1 : barrier.wait().await;
625 : }
626 : }
627 :
628 71 : *guard = Self::Finished;
629 71 :
630 71 : Ok(())
631 108 : }
632 : }