Line data Source code
1 : use std::{
2 : ops::{Deref, DerefMut},
3 : sync::Arc,
4 : };
5 :
6 : use anyhow::Context;
7 : use pageserver_api::{models::TimelineState, shard::TenantShardId};
8 : use tokio::sync::OwnedMutexGuard;
9 : use tracing::{debug, error, info, instrument, warn, Instrument, Span};
10 : use utils::{crashsafe, fs_ext, id::TimelineId};
11 :
12 : use crate::{
13 : config::PageServerConf,
14 : deletion_queue::DeletionQueueClient,
15 : task_mgr::{self, TaskKind},
16 : tenant::{
17 : debug_assert_current_span_has_tenant_and_timeline_id,
18 : metadata::TimelineMetadata,
19 : remote_timeline_client::{
20 : self, PersistIndexPartWithDeletedFlagError, RemoteTimelineClient,
21 : },
22 : CreateTimelineCause, DeleteTimelineError, Tenant,
23 : },
24 : };
25 :
26 : use super::{Timeline, TimelineResources};
27 :
28 : /// Now that the Timeline is in Stopping state, request all the related tasks to shut down.
29 197 : async fn stop_tasks(timeline: &Timeline) -> Result<(), DeleteTimelineError> {
30 197 : debug_assert_current_span_has_tenant_and_timeline_id();
31 : // Notify any timeline work to drop out of loops/requests
32 0 : tracing::debug!("Cancelling CancellationToken");
33 197 : timeline.cancel.cancel();
34 :
35 : // Stop the walreceiver first.
36 0 : debug!("waiting for wal receiver to shutdown");
37 197 : let maybe_started_walreceiver = { timeline.walreceiver.lock().unwrap().take() };
38 197 : if let Some(walreceiver) = maybe_started_walreceiver {
39 160 : walreceiver.stop().await;
40 37 : }
41 0 : debug!("wal receiver shutdown confirmed");
42 :
43 : // Shut down the layer flush task before the remote client, as one depends on the other
44 197 : task_mgr::shutdown_tasks(
45 197 : Some(TaskKind::LayerFlushTask),
46 197 : Some(timeline.tenant_shard_id),
47 197 : Some(timeline.timeline_id),
48 197 : )
49 26 : .await;
50 :
51 : // Prevent new uploads from starting.
52 197 : if let Some(remote_client) = timeline.remote_client.as_ref() {
53 197 : let res = remote_client.stop();
54 197 : match res {
55 197 : Ok(()) => {}
56 0 : Err(e) => match e {
57 0 : remote_timeline_client::StopError::QueueUninitialized => {
58 0 : // This case shouldn't happen currently because the
59 0 : // load and attach code bails out if _any_ of the timelines fails to fetch its IndexPart.
60 0 : // That is, before we declare the Tenant as Active.
61 0 : // But we only allow calls to delete_timeline on Active tenants.
62 0 : return Err(DeleteTimelineError::Other(anyhow::anyhow!("upload queue is uninitialized, likely the timeline was in Broken state prior to this call because it failed to fetch IndexPart during load or attach, check the logs")));
63 : }
64 : },
65 : }
66 0 : }
67 :
68 : // Stop & wait for the remaining timeline tasks, including upload tasks.
69 : // NB: This and other delete_timeline calls do not run as a task_mgr task,
70 : // so they are not affected by this shutdown_tasks() call.
71 197 : info!("waiting for timeline tasks to shutdown");
72 197 : task_mgr::shutdown_tasks(
73 197 : None,
74 197 : Some(timeline.tenant_shard_id),
75 197 : Some(timeline.timeline_id),
76 197 : )
77 121 : .await;
78 :
79 197 : fail::fail_point!("timeline-delete-before-index-deleted-at", |_| {
80 6 : Err(anyhow::anyhow!(
81 6 : "failpoint: timeline-delete-before-index-deleted-at"
82 6 : ))?
83 197 : });
84 :
85 0 : tracing::debug!("Waiting for gate...");
86 191 : timeline.gate.close().await;
87 0 : tracing::debug!("Shutdown complete");
88 :
89 191 : Ok(())
90 197 : }
91 :
92 : /// Mark timeline as deleted in S3 so we won't pick it up next time
93 : /// during attach or pageserver restart.
94 : /// See comment in persist_index_part_with_deleted_flag.
95 191 : async fn set_deleted_in_remote_index(timeline: &Timeline) -> Result<(), DeleteTimelineError> {
96 191 : if let Some(remote_client) = timeline.remote_client.as_ref() {
97 835 : match remote_client.persist_index_part_with_deleted_flag().await {
98 : // If we (now, or already) marked it successfully as deleted, we can proceed
99 191 : Ok(()) | Err(PersistIndexPartWithDeletedFlagError::AlreadyDeleted(_)) => (),
100 : // Bail out otherwise
101 : //
102 : // AlreadyInProgress shouldn't happen, because the 'delete_lock' prevents
103 : // two tasks from performing the deletion at the same time. The first task
104 : // that starts deletion should run it to completion.
105 0 : Err(e @ PersistIndexPartWithDeletedFlagError::AlreadyInProgress(_))
106 0 : | Err(e @ PersistIndexPartWithDeletedFlagError::Other(_)) => {
107 0 : return Err(DeleteTimelineError::Other(anyhow::anyhow!(e)));
108 : }
109 : }
110 0 : }
111 191 : Ok(())
112 191 : }
113 :
114 : /// Grab the compaction and gc locks, and actually perform the deletion.
115 : ///
116 : /// The locks prevent GC or compaction from running at the same time. The background tasks do not
117 : /// register themselves with the timeline they're operating on, so they might still be running even
118 : /// though we called `shutdown_tasks`.
119 : ///
120 : /// Note that there are still other race conditions between
121 : /// GC, compaction and timeline deletion. See
122 : /// <https://github.com/neondatabase/neon/issues/2671>
123 : ///
124 : /// No timeout here, GC & Compaction should be responsive to the
125 : /// `TimelineState::Stopping` change.
126 : // pub(super) so that it can be referenced from documentation links.
127 201 : pub(super) async fn delete_local_layer_files(
128 201 : conf: &PageServerConf,
129 201 : tenant_shard_id: TenantShardId,
130 201 : timeline: &Timeline,
131 201 : ) -> anyhow::Result<()> {
132 201 : let guards = async { tokio::join!(timeline.gc_lock.lock(), timeline.compaction_lock.lock()) };
133 201 : let guards = crate::timed(
134 201 : guards,
135 201 : "acquire gc and compaction locks",
136 201 : std::time::Duration::from_secs(5),
137 201 : )
138 0 : .await;
139 :
140 : // NB: storage_sync upload tasks that reference these layers have been cancelled
141 : // by the caller.
142 :
143 201 : let local_timeline_directory = conf.timeline_path(&tenant_shard_id, &timeline.timeline_id);
144 201 :
145 201 : fail::fail_point!("timeline-delete-before-rm", |_| {
146 7 : Err(anyhow::anyhow!("failpoint: timeline-delete-before-rm"))?
147 201 : });
148 :
149 : // NB: This need not be atomic because the deleted flag in the IndexPart
150 : // will be observed during tenant/timeline load. The deletion will be resumed there.
151 : //
152 : // For configurations without remote storage, we guarantee crash-safety by persisting a delete mark file.
153 : //
154 : // Note that here we do not bail out on std::io::ErrorKind::NotFound.
155 : // This can happen if we're called a second time, e.g.,
156 : // because of a previous failure/cancellation at/after
157 : // failpoint timeline-delete-after-rm.
158 : //
159 : // ErrorKind::NotFound can also happen if we race with tenant detach, because
160 : // no locks are shared.
161 : //
162 : // For now, log and continue.
163 : // warn! level is technically not appropriate for the
164 : // first case because we should expect retries to happen.
165 : // But the error is so rare that it seems better to get attention if it happens.
166 : //
167 : // Note that removal of the metadata file is skipped; this is not technically needed,
168 : // but it allows reusing the timeline loading code during a resumed deletion
169 : // (we always expect the metadata to be in place when a timeline is being loaded).
170 :
171 : #[cfg(feature = "testing")]
172 194 : let mut counter = 0;
173 194 :
174 194 : // The timeline directory may not exist if we previously failed to delete the mark file and the request was retried.
175 194 : if !local_timeline_directory.exists() {
176 5 : return Ok(());
177 189 : }
178 189 :
179 189 : let metadata_path = conf.metadata_path(&tenant_shard_id, &timeline.timeline_id);
180 :
181 4533 : for entry in walkdir::WalkDir::new(&local_timeline_directory).contents_first(true) {
182 : #[cfg(feature = "testing")]
183 : {
184 4533 : counter += 1;
185 4533 : if counter == 2 {
186 188 : fail::fail_point!("timeline-delete-during-rm", |_| {
187 3 : Err(anyhow::anyhow!("failpoint: timeline-delete-during-rm"))?
188 188 : });
189 4345 : }
190 : }
191 :
192 4530 : let entry = entry?;
193 4530 : if entry.path() == metadata_path {
194 0 : debug!("found metadata, skipping");
195 185 : continue;
196 4345 : }
197 4345 :
198 4345 : if entry.path() == local_timeline_directory {
199 : // Keep the directory because the metadata file is still there
200 0 : debug!("found timeline dir itself, skipping");
201 186 : continue;
202 4159 : }
203 :
204 4159 : let metadata = match entry.metadata() {
205 4159 : Ok(metadata) => metadata,
206 0 : Err(e) => {
207 0 : if crate::is_walkdir_io_not_found(&e) {
208 0 : warn!(
209 0 : timeline_dir=?local_timeline_directory,
210 0 : path=?entry.path().display(),
211 0 : "got not found err while removing timeline dir, proceeding anyway"
212 0 : );
213 0 : continue;
214 0 : }
215 0 : anyhow::bail!(e);
216 : }
217 : };
218 :
219 4159 : if metadata.is_dir() {
220 0 : warn!(path=%entry.path().display(), "unexpected directory under timeline dir");
221 0 : tokio::fs::remove_dir(entry.path()).await
222 : } else {
223 4159 : tokio::fs::remove_file(entry.path()).await
224 : }
225 4159 : .with_context(|| format!("Failed to remove: {}", entry.path().display()))?;
226 : }
227 :
228 186 : info!("finished deleting layer files, releasing locks");
229 186 : drop(guards);
230 186 :
231 186 : fail::fail_point!("timeline-delete-after-rm", |_| {
232 2 : Err(anyhow::anyhow!("failpoint: timeline-delete-after-rm"))?
233 186 : });
234 :
235 184 : Ok(())
236 201 : }
237 :
238 : /// Removes the remote layers and, after them, the index file.
239 189 : async fn delete_remote_layers_and_index(timeline: &Timeline) -> anyhow::Result<()> {
240 189 : if let Some(remote_client) = &timeline.remote_client {
241 1150 : remote_client.delete_all().await.context("delete_all")?
242 0 : };
243 :
244 179 : Ok(())
245 189 : }
246 :
247 : // This function removes the remaining traces of a timeline on disk,
248 : // namely: the metadata file, the timeline directory, and the delete mark.
249 : // Note: io::ErrorKind::NotFound is ignored for the metadata file and the timeline dir.
250 : // The delete mark should be present because removing it is the last step during deletion
251 : // (nothing can fail after its deletion).
252 179 : async fn cleanup_remaining_timeline_fs_traces(
253 179 : conf: &PageServerConf,
254 179 : tenant_shard_id: TenantShardId,
255 179 : timeline_id: TimelineId,
256 179 : ) -> anyhow::Result<()> {
257 179 : // Remove local metadata
258 179 : tokio::fs::remove_file(conf.metadata_path(&tenant_shard_id, &timeline_id))
259 176 : .await
260 179 : .or_else(fs_ext::ignore_not_found)
261 179 : .context("remove metadata")?;
262 :
263 179 : fail::fail_point!("timeline-delete-after-rm-metadata", |_| {
264 2 : Err(anyhow::anyhow!(
265 2 : "failpoint: timeline-delete-after-rm-metadata"
266 2 : ))?
267 179 : });
268 :
269 : // Remove timeline dir
270 177 : tokio::fs::remove_dir(conf.timeline_path(&tenant_shard_id, &timeline_id))
271 176 : .await
272 177 : .or_else(fs_ext::ignore_not_found)
273 177 : .context("timeline dir")?;
274 :
275 177 : fail::fail_point!("timeline-delete-after-rm-dir", |_| {
276 6 : Err(anyhow::anyhow!("failpoint: timeline-delete-after-rm-dir"))?
277 177 : });
278 :
279 : // Make sure the previous deletions are ordered before the mark removal.
280 : // Otherwise there is no guarantee that they reach the disk before the mark deletion.
281 : // It would then be possible for the mark removal to reach the disk first, and for the other deletions
282 : // to be reordered later and thus missed if a crash occurs.
283 : // Note that we don't need to sync after the mark file is removed
284 : // because we can tolerate the mark file reappearing on startup.
285 171 : let timeline_path = conf.timelines_path(&tenant_shard_id);
286 171 : crashsafe::fsync_async(timeline_path)
287 340 : .await
288 171 : .context("fsync_pre_mark_remove")?;
289 :
290 : // Remove delete mark
291 : // TODO: once we are confident that no more exist in the field, remove this
292 : // line. It cleans up a legacy marker file that might in rare cases be present.
293 171 : tokio::fs::remove_file(conf.timeline_delete_mark_file_path(tenant_shard_id, timeline_id))
294 170 : .await
295 171 : .or_else(fs_ext::ignore_not_found)
296 171 : .context("remove delete mark")
297 179 : }
298 :
299 : /// It is important that this gets called while the DeletionGuard is being held.
300 : /// For more context see comments in [`DeleteTimelineFlow::prepare`]
301 171 : async fn remove_timeline_from_tenant(
302 171 : tenant: &Tenant,
303 171 : timeline_id: TimelineId,
304 171 : _: &DeletionGuard, // using it as a witness
305 171 : ) -> anyhow::Result<()> {
306 171 : // Remove the timeline from the map.
307 171 : let mut timelines = tenant.timelines.lock().unwrap();
308 171 : let children_exist = timelines
309 171 : .iter()
310 270 : .any(|(_, entry)| entry.get_ancestor_timeline_id() == Some(timeline_id));
311 171 : // XXX this can happen because `branch_timeline` doesn't check `TimelineState::Stopping`.
312 171 : // We already deleted the layer files, so it's probably best to panic.
313 171 : // (Ideally, the remove_dir_all above would be atomic, so we wouldn't see this timeline after a restart.)
314 171 : if children_exist {
315 0 : panic!("Timeline grew children while we removed layer files");
316 171 : }
317 171 :
318 171 : timelines
319 171 : .remove(&timeline_id)
320 171 : .expect("timeline that we were deleting was concurrently removed from 'timelines' map");
321 171 :
322 171 : drop(timelines);
323 171 :
324 171 : Ok(())
325 171 : }
326 :
327 : /// Orchestrates the shutdown of all timeline tasks, removes the timeline's in-memory structures,
328 : /// and deletes its data from both disk and S3.
329 : /// The sequence of steps:
330 : /// 1. Set deleted_at in remote index part.
331 : /// 2. Create local mark file.
332 : /// 3. Delete local files except metadata (it is simpler this way, to be able to reuse timeline initialization code that expects metadata)
333 : /// 4. Delete remote layers
334 : /// 5. Delete index part
335 : /// 6. Delete meta, timeline directory
336 : /// 7. Delete mark file
337 : /// It is resumable from any step in case a crash/restart occurs.
338 : /// There are three entrypoints to the process:
339 : /// 1. [`DeleteTimelineFlow::run`] is the main one, called by a management API handler.
340 : /// 2. [`DeleteTimelineFlow::resume_deletion`] is called during restarts when local metadata is still present
341 : /// and we possibly need to continue deletion of remote files.
342 : /// 3. [`DeleteTimelineFlow::cleanup_remaining_timeline_fs_traces`] is used when we have deleted the remote
343 : /// index but still have the local metadata, timeline directory, and delete mark.
344 : /// Note that the only other place that touches the timeline delete mark is the logic that scans the timelines directory during tenant load.
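/// A minimal usage sketch, from the point of view of a management API handler (how `tenant` and
/// `timeline_id` are resolved is an illustrative assumption, not something this module defines):
/// ```ignore
/// // Assumed context: `tenant: Arc<Tenant>` and `timeline_id: TimelineId` already resolved.
/// // `inplace = false` schedules the long-running steps on the background runtime
/// // (see `schedule_background`); `inplace = true` awaits them in the calling task.
/// DeleteTimelineFlow::run(&tenant, timeline_id, false).await?;
/// ```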
345 1568 : #[derive(Default)]
346 : pub enum DeleteTimelineFlow {
347 : #[default]
348 : NotStarted,
349 : InProgress,
350 : Finished,
351 : }
352 :
353 : impl DeleteTimelineFlow {
354 : // These steps are run in the context of management api request handler.
355 : // Long running steps are continued to run in the background.
356 : // NB: If this fails half-way through, and is retried, the retry will go through
357 : // all the same steps again. Make sure the code here is idempotent, and don't
358 : // error out if some of the shutdown tasks have already been completed!
359 0 : #[instrument(skip_all, fields(%inplace))]
360 : pub async fn run(
361 : tenant: &Arc<Tenant>,
362 : timeline_id: TimelineId,
363 : inplace: bool,
364 : ) -> Result<(), DeleteTimelineError> {
365 : super::debug_assert_current_span_has_tenant_and_timeline_id();
366 :
367 : let (timeline, mut guard) = Self::prepare(tenant, timeline_id)?;
368 :
369 : guard.mark_in_progress()?;
370 :
371 : stop_tasks(&timeline).await?;
372 :
373 : set_deleted_in_remote_index(&timeline).await?;
374 :
375 2 : fail::fail_point!("timeline-delete-before-schedule", |_| {
376 2 : Err(anyhow::anyhow!(
377 2 : "failpoint: timeline-delete-before-schedule"
378 2 : ))?
379 2 : });
380 :
381 : if inplace {
382 : Self::background(guard, tenant.conf, tenant, &timeline).await?
383 : } else {
384 : Self::schedule_background(guard, tenant.conf, Arc::clone(tenant), timeline);
385 : }
386 :
387 : Ok(())
388 : }
389 :
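/// Transitions the flow into `InProgress`. Calling this again while already `InProgress` is
/// expected (it happens on a retried delete request); calling it on a `Finished` flow is a bug.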
390 209 : fn mark_in_progress(&mut self) -> anyhow::Result<()> {
391 209 : match self {
392 0 : Self::Finished => anyhow::bail!("Bug. Is in finished state"),
393 17 : Self::InProgress { .. } => { /* We're in a retry */ }
394 192 : Self::NotStarted => { /* Fresh start */ }
395 : }
396 :
397 209 : *self = Self::InProgress;
398 209 :
399 209 : Ok(())
400 209 : }
401 :
402 : /// Shortcut to create Timeline in stopping state and spawn deletion task.
403 : /// See corresponding parts of [`crate::tenant::delete::DeleteTenantFlow`]
404 12 : #[instrument(skip_all, fields(%timeline_id))]
405 : pub async fn resume_deletion(
406 : tenant: Arc<Tenant>,
407 : timeline_id: TimelineId,
408 : local_metadata: &TimelineMetadata,
409 : remote_client: Option<RemoteTimelineClient>,
410 : deletion_queue_client: DeletionQueueClient,
411 : ) -> anyhow::Result<()> {
412 : // Note: here we even skip populating layer map. Timeline is essentially uninitialized.
413 : // RemoteTimelineClient is the only functioning part.
414 : let timeline = tenant
415 : .create_timeline_struct(
416 : timeline_id,
417 : local_metadata,
418 : None, // Ancestor is not needed for deletion.
419 : TimelineResources {
420 : remote_client,
421 : deletion_queue_client,
422 : },
423 : // Important. We don't pass the ancestor above because it can be missing.
424 : // Thus we need to skip the validation here.
425 : CreateTimelineCause::Delete,
426 : )
427 : .context("create_timeline_struct")?;
428 :
429 : let mut guard = DeletionGuard(
430 : Arc::clone(&timeline.delete_progress)
431 : .try_lock_owned()
432 : .expect("cannot happen because we're the only owner"),
433 : );
434 :
435 : // We need to do this because when the console retries the delete request we shouldn't answer with 404,
436 : // because 404 means successful deletion.
437 : {
438 : let mut locked = tenant.timelines.lock().unwrap();
439 : locked.insert(timeline_id, Arc::clone(&timeline));
440 : }
441 :
442 : guard.mark_in_progress()?;
443 :
444 : Self::schedule_background(guard, tenant.conf, tenant, timeline);
445 :
446 : Ok(())
447 : }
448 :
449 0 : #[instrument(skip_all, fields(%timeline_id))]
450 : pub async fn cleanup_remaining_timeline_fs_traces(
451 : tenant: &Tenant,
452 : timeline_id: TimelineId,
453 : ) -> anyhow::Result<()> {
454 : let r =
455 : cleanup_remaining_timeline_fs_traces(tenant.conf, tenant.tenant_shard_id, timeline_id)
456 : .await;
457 0 : info!("Done");
458 : r
459 : }
460 :
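/// Looks up the timeline, refuses the deletion if it still has child timelines attached to this
/// pageserver, acquires the `delete_progress` guard via `try_lock`, and moves the timeline into
/// `TimelineState::Stopping`. See the comments below for why the guard is taken while the lock
/// on the timelines map is held.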
461 209 : fn prepare(
462 209 : tenant: &Tenant,
463 209 : timeline_id: TimelineId,
464 209 : ) -> Result<(Arc<Timeline>, DeletionGuard), DeleteTimelineError> {
465 209 : // Note the interaction between the timelines lock and the deletion guard.
466 209 : // Here we attempt to lock the deletion guard while holding the lock on the timelines map.
467 209 : // This is important because, taking `remove_timeline_from_tenant` into account,
468 209 : // we remove the timeline from memory while still holding the deletion guard.
469 209 : // So by the time a timeline deletion is finished, the timeline is no longer present in the timelines map at all,
470 209 : // which makes the following sequence impossible:
471 209 : // T1: get preempted right before the try_lock on `Timeline::delete_progress`
472 209 : // T2: do a full deletion, acquire and drop `Timeline::delete_progress`
473 209 : // T1: acquire deletion lock, do another `DeleteTimelineFlow::run`
474 209 : // For more context see this discussion: `https://github.com/neondatabase/neon/pull/4552#discussion_r1253437346`
475 209 : let timelines = tenant.timelines.lock().unwrap();
476 :
477 209 : let timeline = match timelines.get(&timeline_id) {
478 205 : Some(t) => t,
479 4 : None => return Err(DeleteTimelineError::NotFound),
480 : };
481 :
482 : // Ensure that there are no child timelines **attached to that pageserver**,
483 : // because detach removes files, which will break child branches
484 205 : let children: Vec<TimelineId> = timelines
485 205 : .iter()
486 346 : .filter_map(|(id, entry)| {
487 346 : if entry.get_ancestor_timeline_id() == Some(timeline_id) {
488 1 : Some(*id)
489 : } else {
490 345 : None
491 : }
492 346 : })
493 205 : .collect();
494 205 :
495 205 : if !children.is_empty() {
496 1 : return Err(DeleteTimelineError::HasChildren(children));
497 204 : }
498 204 :
499 204 : // Note that using try_lock here is important to avoid a deadlock.
500 204 : // Here we take the lock on the timelines map and then the deletion guard.
501 204 : // At the end of the operation we're holding the guard and need to lock the timelines map
502 204 : // to remove the timeline from it.
503 204 : // Whenever two locks are taken in different orders, this can result in a deadlock.
504 204 :
505 204 : let delete_progress = Arc::clone(&timeline.delete_progress);
506 204 : let delete_lock_guard = match delete_progress.try_lock_owned() {
507 197 : Ok(guard) => DeletionGuard(guard),
508 : Err(_) => {
509 : // Unfortunately, if the lock attempt fails, the Arc is consumed.
510 7 : return Err(DeleteTimelineError::AlreadyInProgress(Arc::clone(
511 7 : &timeline.delete_progress,
512 7 : )));
513 : }
514 : };
515 :
516 197 : timeline.set_state(TimelineState::Stopping);
517 197 :
518 197 : Ok((Arc::clone(timeline), delete_lock_guard))
519 209 : }
520 :
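/// Spawns a `TimelineDeletionWorker` task on the background runtime that drives
/// [`Self::background`] to completion. If it fails, the error is logged and the timeline is
/// marked broken.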
521 65 : fn schedule_background(
522 65 : guard: DeletionGuard,
523 65 : conf: &'static PageServerConf,
524 65 : tenant: Arc<Tenant>,
525 65 : timeline: Arc<Timeline>,
526 65 : ) {
527 65 : let tenant_shard_id = timeline.tenant_shard_id;
528 65 : let timeline_id = timeline.timeline_id;
529 65 :
530 65 : task_mgr::spawn(
531 65 : task_mgr::BACKGROUND_RUNTIME.handle(),
532 65 : TaskKind::TimelineDeletionWorker,
533 65 : Some(tenant_shard_id),
534 65 : Some(timeline_id),
535 65 : "timeline_delete",
536 : false,
537 65 : async move {
538 2083 : if let Err(err) = Self::background(guard, conf, &tenant, &timeline).await {
539 17 : error!("Error: {err:#}");
540 17 : timeline.set_broken(format!("{err:#}"))
541 48 : };
542 65 : Ok(())
543 65 : }
544 65 : .instrument({
545 65 : let span =
546 65 : tracing::info_span!(parent: None, "delete_timeline", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),timeline_id=%timeline_id);
547 65 : span.follows_from(Span::current());
548 65 : span
549 65 : }),
550 65 : );
551 65 : }
552 :
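/// Runs the actual deletion steps in order: local layer files, remote layers and index part,
/// the remaining on-disk traces (metadata file, timeline directory, delete mark), and finally
/// the timeline's entry in the tenant's timelines map. On success the flow is marked `Finished`.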
553 201 : async fn background(
554 201 : mut guard: DeletionGuard,
555 201 : conf: &PageServerConf,
556 201 : tenant: &Tenant,
557 201 : timeline: &Timeline,
558 201 : ) -> Result<(), DeleteTimelineError> {
559 4148 : delete_local_layer_files(conf, tenant.tenant_shard_id, timeline).await?;
560 :
561 1150 : delete_remote_layers_and_index(timeline).await?;
562 :
563 179 : pausable_failpoint!("in_progress_delete");
564 :
565 179 : cleanup_remaining_timeline_fs_traces(conf, tenant.tenant_shard_id, timeline.timeline_id)
566 862 : .await?;
567 :
568 171 : remove_timeline_from_tenant(tenant, timeline.timeline_id, &guard).await?;
569 :
570 171 : *guard = Self::Finished;
571 171 :
572 171 : Ok(())
573 201 : }
574 :
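/// Returns true once the deletion flow has reached the `Finished` state.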
575 4 : pub(crate) fn is_finished(&self) -> bool {
576 4 : matches!(self, Self::Finished)
577 4 : }
578 : }
579 :
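/// Owned guard over a timeline's `delete_progress` mutex. Holding it means the current task is
/// the only one driving this timeline's deletion; it is also passed around as a witness, see
/// [`remove_timeline_from_tenant`].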
580 : struct DeletionGuard(OwnedMutexGuard<DeleteTimelineFlow>);
581 :
582 : impl Deref for DeletionGuard {
583 : type Target = DeleteTimelineFlow;
584 :
585 0 : fn deref(&self) -> &Self::Target {
586 0 : &self.0
587 0 : }
588 : }
589 :
590 : impl DerefMut for DeletionGuard {
591 380 : fn deref_mut(&mut self) -> &mut Self::Target {
592 380 : &mut self.0
593 380 : }
594 : }