LCOV - code coverage report
Current view: top level - pageserver/src/tenant/timeline - delete.rs (source / functions)
Test: 91bf6c8f32e5e69adde6241313e732fdd6d6e277.info    Test Date: 2025-03-04 12:19:20
Coverage:    Lines: 36.2 % (87 of 240 hit)    Functions: 16.7 % (4 of 24 hit)

            Line data    Source code
       1              : use std::ops::{Deref, DerefMut};
       2              : use std::sync::Arc;
       3              : 
       4              : use anyhow::Context;
       5              : use pageserver_api::models::TimelineState;
       6              : use pageserver_api::shard::TenantShardId;
       7              : use remote_storage::DownloadError;
       8              : use tokio::sync::OwnedMutexGuard;
       9              : use tracing::{Instrument, error, info, info_span, instrument};
      10              : use utils::id::TimelineId;
      11              : use utils::{crashsafe, fs_ext, pausable_failpoint};
      12              : 
      13              : use crate::config::PageServerConf;
      14              : use crate::task_mgr::{self, TaskKind};
      15              : use crate::tenant::metadata::TimelineMetadata;
      16              : use crate::tenant::remote_timeline_client::{
      17              :     PersistIndexPartWithDeletedFlagError, RemoteTimelineClient,
      18              : };
      19              : use crate::tenant::{
      20              :     CreateTimelineCause, DeleteTimelineError, MaybeDeletedIndexPart, Tenant, TenantManifestError,
      21              :     Timeline, TimelineOrOffloaded,
      22              : };
      23              : use crate::virtual_file::MaybeFatalIo;
      24              : 
      25              : /// Mark timeline as deleted in S3 so we won't pick it up next time
      26              : /// during attach or pageserver restart.
      27              : /// See comment in persist_index_part_with_deleted_flag.
      28            0 : async fn set_deleted_in_remote_index(
      29            0 :     remote_client: &Arc<RemoteTimelineClient>,
      30            0 : ) -> Result<(), DeleteTimelineError> {
      31            0 :     let res = remote_client.persist_index_part_with_deleted_flag().await;
      32            0 :     match res {
      33              :         // If we (now, or already) marked it successfully as deleted, we can proceed
      34            0 :         Ok(()) | Err(PersistIndexPartWithDeletedFlagError::AlreadyDeleted(_)) => (),
      35              :         // Bail out otherwise
      36              :         //
      37              :         // AlreadyInProgress shouldn't happen, because the 'delete_lock' prevents
      38              :         // two tasks from performing the deletion at the same time. The first task
      39              :         // that starts deletion should run it to completion.
      40            0 :         Err(e @ PersistIndexPartWithDeletedFlagError::AlreadyInProgress(_))
      41            0 :         | Err(e @ PersistIndexPartWithDeletedFlagError::Other(_)) => {
      42            0 :             return Err(DeleteTimelineError::Other(anyhow::anyhow!(e)));
      43              :         }
      44              :     }
      45            0 :     Ok(())
      46            0 : }
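
The function above deliberately treats an index that is already marked as deleted as success, so a retried deletion can pass through this step again safely. A minimal sketch of that idempotent marking idea, using a hypothetical in-memory IndexPart rather than the pageserver's real remote index:

    // Sketch only: `IndexPart` and `set_deleted` are illustrative stand-ins.
    #[derive(Default)]
    struct IndexPart {
        deleted_at: Option<std::time::SystemTime>,
    }

    fn set_deleted(index: &mut IndexPart) -> Result<(), String> {
        match index.deleted_at {
            // Already marked by an earlier attempt: proceed instead of failing.
            Some(_) => Ok(()),
            None => {
                index.deleted_at = Some(std::time::SystemTime::now());
                Ok(())
            }
        }
    }

    fn main() {
        let mut index = IndexPart::default();
        set_deleted(&mut index).unwrap();
        // A retry after a crash or restart observes the flag and simply continues.
        assert!(set_deleted(&mut index).is_ok());
    }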
      47              : 
      48              : /// Grab the compaction and gc locks, and actually perform the deletion.
      49              : ///
      50              : /// The locks prevent GC or compaction from running at the same time. The background tasks do not
       51              : /// register themselves with the timeline they operate on, so they might still be running even
      52              : /// though we called `shutdown_tasks`.
      53              : ///
      54              : /// Note that there are still other race conditions between
      55              : /// GC, compaction and timeline deletion. See
      56              : /// <https://github.com/neondatabase/neon/issues/2671>
      57              : ///
       58              : /// No timeout here; GC & compaction should be responsive to the
      59              : /// `TimelineState::Stopping` change.
      60              : // pub(super): documentation link
      61            4 : pub(super) async fn delete_local_timeline_directory(
      62            4 :     conf: &PageServerConf,
      63            4 :     tenant_shard_id: TenantShardId,
      64            4 :     timeline: &Timeline,
      65            4 : ) {
      66            4 :     // Always ensure the lock order is compaction -> gc.
      67            4 :     let compaction_lock = timeline.compaction_lock.lock();
      68            4 :     let _compaction_lock = crate::timed(
      69            4 :         compaction_lock,
      70            4 :         "acquires compaction lock",
      71            4 :         std::time::Duration::from_secs(5),
      72            4 :     )
      73            4 :     .await;
      74              : 
      75            4 :     let gc_lock = timeline.gc_lock.lock();
      76            4 :     let _gc_lock = crate::timed(
      77            4 :         gc_lock,
      78            4 :         "acquires gc lock",
      79            4 :         std::time::Duration::from_secs(5),
      80            4 :     )
      81            4 :     .await;
      82              : 
      83              :     // NB: storage_sync upload tasks that reference these layers have been cancelled
      84              :     //     by the caller.
      85              : 
      86            4 :     let local_timeline_directory = conf.timeline_path(&tenant_shard_id, &timeline.timeline_id);
      87            4 : 
      88            4 :     // NB: This need not be atomic because the deleted flag in the IndexPart
      89            4 :     // will be observed during tenant/timeline load. The deletion will be resumed there.
      90            4 :     //
       91            4 :     // ErrorKind::NotFound can happen e.g. if we race with tenant detach, because
      92            4 :     // no locks are shared.
      93            4 :     tokio::fs::remove_dir_all(local_timeline_directory)
      94            4 :         .await
      95            4 :         .or_else(fs_ext::ignore_not_found)
      96            4 :         .fatal_err("removing timeline directory");
      97            4 : 
      98            4 :     // Make sure previous deletions are ordered before mark removal.
      99            4 :     // Otherwise there is no guarantee that they reach the disk before mark deletion.
      100            4 :     // So it's possible for the mark to reach disk first and for other deletions
      101            4 :     // to be reordered later and thus missed if a crash occurs.
      102            4 :     // Note that we don't need to sync after the mark file is removed
      103            4 :     // because we can tolerate the case when the mark file reappears on startup.
     104            4 :     let timeline_path = conf.timelines_path(&tenant_shard_id);
     105            4 :     crashsafe::fsync_async(timeline_path)
     106            4 :         .await
     107            4 :         .fatal_err("fsync after removing timeline directory");
     108            4 : 
     109            4 :     info!("finished deleting layer files, releasing locks");
     110            4 : }
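
The directory removal above is made idempotent by mapping ErrorKind::NotFound to success through fs_ext::ignore_not_found, and the parent directory is fsynced afterwards so the removal is durably ordered. A small self-contained sketch of the NotFound-tolerant part (assuming tokio with the fs feature; the helper here is a local stand-in, not the real fs_ext):

    use std::io;

    // Local stand-in for fs_ext::ignore_not_found: NotFound counts as success.
    fn ignore_not_found(e: io::Error) -> io::Result<()> {
        if e.kind() == io::ErrorKind::NotFound { Ok(()) } else { Err(e) }
    }

    #[tokio::main]
    async fn main() -> io::Result<()> {
        let path = std::env::temp_dir().join("example-timeline-dir");
        tokio::fs::create_dir_all(&path).await?;
        // First removal deletes the directory; a retried deletion hits NotFound
        // and still succeeds, which is exactly what a resumed deletion needs.
        tokio::fs::remove_dir_all(&path).await.or_else(ignore_not_found)?;
        tokio::fs::remove_dir_all(&path).await.or_else(ignore_not_found)?;
        Ok(())
    }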
     111              : 
     112              : /// It is important that this gets called when DeletionGuard is being held.
     113              : /// For more context see comments in [`make_timeline_delete_guard`]
     114            0 : async fn remove_maybe_offloaded_timeline_from_tenant(
     115            0 :     tenant: &Tenant,
     116            0 :     timeline: &TimelineOrOffloaded,
     117            0 :     _: &DeletionGuard, // using it as a witness
     118            0 : ) -> anyhow::Result<()> {
     119            0 :     // Remove the timeline from the map.
     120            0 :     // This observes the locking order between timelines and timelines_offloaded
     121            0 :     let mut timelines = tenant.timelines.lock().unwrap();
     122            0 :     let mut timelines_offloaded = tenant.timelines_offloaded.lock().unwrap();
     123            0 :     let offloaded_children_exist = timelines_offloaded
     124            0 :         .iter()
     125            0 :         .any(|(_, entry)| entry.ancestor_timeline_id == Some(timeline.timeline_id()));
     126            0 :     let children_exist = timelines
     127            0 :         .iter()
     128            0 :         .any(|(_, entry)| entry.get_ancestor_timeline_id() == Some(timeline.timeline_id()));
     129            0 :     // XXX this can happen because of race conditions with branch creation.
     130            0 :     // We already deleted the remote layer files, so it's probably best to panic.
     131            0 :     if children_exist || offloaded_children_exist {
     132            0 :         panic!("Timeline grew children while we removed layer files");
     133            0 :     }
     134            0 : 
     135            0 :     match timeline {
     136            0 :         TimelineOrOffloaded::Timeline(timeline) => {
     137            0 :             timelines.remove(&timeline.timeline_id).expect(
     138            0 :                 "timeline that we were deleting was concurrently removed from 'timelines' map",
     139            0 :             );
     140            0 :             tenant
     141            0 :                 .scheduled_compaction_tasks
     142            0 :                 .lock()
     143            0 :                 .unwrap()
     144            0 :                 .remove(&timeline.timeline_id);
     145            0 :         }
     146            0 :         TimelineOrOffloaded::Offloaded(timeline) => {
     147            0 :             let offloaded_timeline = timelines_offloaded
     148            0 :                 .remove(&timeline.timeline_id)
     149            0 :                 .expect("timeline that we were deleting was concurrently removed from 'timelines_offloaded' map");
     150            0 :             offloaded_timeline.delete_from_ancestor_with_timelines(&timelines);
     151            0 :         }
     152              :     }
     153              : 
     154            0 :     drop(timelines_offloaded);
     155            0 :     drop(timelines);
     156            0 : 
     157            0 :     Ok(())
     158            0 : }
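
The unnamed `&DeletionGuard` parameter above acts purely as a witness: the maps can only be mutated by a caller that holds the deletion lock. A toy sketch of the pattern (none of these types are the pageserver's real ones):

    use std::sync::{Mutex, MutexGuard};

    // Holding this guard is proof that the deletion lock is taken.
    struct DeletionGuard<'a>(#[allow(dead_code)] MutexGuard<'a, ()>);

    // The `_witness` parameter turns "must be called while the lock is held"
    // into a compile-time requirement rather than a convention.
    fn remove_from_map(map: &Mutex<Vec<u64>>, id: u64, _witness: &DeletionGuard<'_>) {
        map.lock().unwrap().retain(|x| *x != id);
    }

    fn main() {
        let delete_lock = Mutex::new(());
        let timelines = Mutex::new(vec![1_u64, 2, 3]);
        let guard = DeletionGuard(delete_lock.lock().unwrap());
        remove_from_map(&timelines, 2, &guard);
        assert_eq!(*timelines.lock().unwrap(), vec![1, 3]);
    }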
     159              : 
     160              : /// Orchestrates timeline shut down of all timeline tasks, removes its in-memory structures,
     161              : /// and deletes its data from both disk and s3.
     162              : /// The sequence of steps:
     163              : /// 1. Set deleted_at in remote index part.
     164              : /// 2. Create local mark file.
     165              : /// 3. Delete local files except metadata (it is simpler this way, to be able to reuse timeline initialization code that expects metadata)
     166              : /// 4. Delete remote layers
     167              : /// 5. Delete index part
     168              : /// 6. Delete meta, timeline directory
     169              : /// 7. Delete mark file
     170              : ///
     171              : /// It is resumable from any step in case a crash/restart occurs.
     172              : /// There are two entrypoints to the process:
      173              : /// 1. [`DeleteTimelineFlow::run`] is the main one, called by a management API handler.
     174              : /// 2. [`DeleteTimelineFlow::resume_deletion`] is called during restarts when local metadata is still present
      175              : ///    and we possibly need to continue deletion of remote files.
     176              : ///
      177              : /// Note that the only other place that touches the timeline delete mark is the logic that scans the timelines directory during tenant load.
     178              : #[derive(Default)]
     179              : pub enum DeleteTimelineFlow {
     180              :     #[default]
     181              :     NotStarted,
     182              :     InProgress,
     183              :     Finished,
     184              : }
     185              : 
     186              : impl DeleteTimelineFlow {
      187              :     // These steps are run in the context of a management API request handler.
      188              :     // Long-running steps continue to run in the background.
     189              :     // NB: If this fails half-way through, and is retried, the retry will go through
     190              :     // all the same steps again. Make sure the code here is idempotent, and don't
     191              :     // error out if some of the shutdown tasks have already been completed!
     192              :     #[instrument(skip_all)]
     193              :     pub async fn run(
     194              :         tenant: &Arc<Tenant>,
     195              :         timeline_id: TimelineId,
     196              :     ) -> Result<(), DeleteTimelineError> {
     197              :         super::debug_assert_current_span_has_tenant_and_timeline_id();
     198              : 
     199              :         let (timeline, mut guard) =
     200              :             make_timeline_delete_guard(tenant, timeline_id, TimelineDeleteGuardKind::Delete)?;
     201              : 
     202              :         guard.mark_in_progress()?;
     203              : 
     204              :         // Now that the Timeline is in Stopping state, request all the related tasks to shut down.
     205              :         if let TimelineOrOffloaded::Timeline(timeline) = &timeline {
     206              :             timeline.shutdown(super::ShutdownMode::Hard).await;
     207              :         }
     208              : 
     209              :         tenant.gc_block.before_delete(&timeline.timeline_id());
     210              : 
     211            0 :         fail::fail_point!("timeline-delete-before-index-deleted-at", |_| {
     212            0 :             Err(anyhow::anyhow!(
     213            0 :                 "failpoint: timeline-delete-before-index-deleted-at"
     214            0 :             ))?
     215            0 :         });
     216              : 
     217              :         let remote_client = match timeline.maybe_remote_client() {
     218              :             Some(remote_client) => remote_client,
     219              :             None => {
     220              :                 let remote_client = tenant
     221              :                     .build_timeline_client(timeline.timeline_id(), tenant.remote_storage.clone());
     222              :                 let result = match remote_client
     223              :                     .download_index_file(&tenant.cancel)
     224              :                     .instrument(info_span!("download_index_file"))
     225              :                     .await
     226              :                 {
     227              :                     Ok(r) => r,
     228              :                     Err(DownloadError::NotFound) => {
     229              :                         // Deletion is already complete
     230              :                         tracing::info!("Timeline already deleted in remote storage");
     231              :                         return Ok(());
     232              :                     }
     233              :                     Err(e) => {
     234              :                         return Err(DeleteTimelineError::Other(anyhow::anyhow!(
     235              :                             "error: {:?}",
     236              :                             e
     237              :                         )));
     238              :                     }
     239              :                 };
     240              :                 let index_part = match result {
     241              :                     MaybeDeletedIndexPart::Deleted(p) => {
     242              :                         tracing::info!("Timeline already set as deleted in remote index");
     243              :                         p
     244              :                     }
     245              :                     MaybeDeletedIndexPart::IndexPart(p) => p,
     246              :                 };
     247              :                 let remote_client = Arc::new(remote_client);
     248              : 
     249              :                 remote_client
     250              :                     .init_upload_queue(&index_part)
     251              :                     .map_err(DeleteTimelineError::Other)?;
     252              :                 remote_client.shutdown().await;
     253              :                 remote_client
     254              :             }
     255              :         };
     256              :         set_deleted_in_remote_index(&remote_client).await?;
     257              : 
     258            0 :         fail::fail_point!("timeline-delete-before-schedule", |_| {
     259            0 :             Err(anyhow::anyhow!(
     260            0 :                 "failpoint: timeline-delete-before-schedule"
     261            0 :             ))?
     262            0 :         });
     263              : 
     264              :         Self::schedule_background(
     265              :             guard,
     266              :             tenant.conf,
     267              :             Arc::clone(tenant),
     268              :             timeline,
     269              :             remote_client,
     270              :         );
     271              : 
     272              :         Ok(())
     273              :     }
     274              : 
     275            0 :     fn mark_in_progress(&mut self) -> anyhow::Result<()> {
     276            0 :         match self {
     277            0 :             Self::Finished => anyhow::bail!("Bug. Is in finished state"),
     278            0 :             Self::InProgress { .. } => { /* We're in a retry */ }
     279            0 :             Self::NotStarted => { /* Fresh start */ }
     280              :         }
     281              : 
     282            0 :         *self = Self::InProgress;
     283            0 : 
     284            0 :         Ok(())
     285            0 :     }
     286              : 
     287              :     /// Shortcut to create Timeline in stopping state and spawn deletion task.
     288              :     #[instrument(skip_all, fields(%timeline_id))]
     289              :     pub(crate) async fn resume_deletion(
     290              :         tenant: Arc<Tenant>,
     291              :         timeline_id: TimelineId,
     292              :         local_metadata: &TimelineMetadata,
     293              :         remote_client: RemoteTimelineClient,
     294              :     ) -> anyhow::Result<()> {
     295              :         // Note: here we even skip populating layer map. Timeline is essentially uninitialized.
     296              :         // RemoteTimelineClient is the only functioning part.
     297              :         let timeline = tenant
     298              :             .create_timeline_struct(
     299              :                 timeline_id,
     300              :                 local_metadata,
     301              :                 None, // Ancestor is not needed for deletion.
     302              :                 None, // Previous heatmap is not needed for deletion
     303              :                 tenant.get_timeline_resources_for(remote_client),
      304              :                 // Important. We don't pass the ancestor above because it can be missing.
     305              :                 // Thus we need to skip the validation here.
     306              :                 CreateTimelineCause::Delete,
     307              :                 crate::tenant::CreateTimelineIdempotency::FailWithConflict, // doesn't matter what we put here
     308              :                 None, // doesn't matter what we put here
     309              :                 None, // doesn't matter what we put here
     310              :             )
     311              :             .context("create_timeline_struct")?;
     312              : 
     313              :         let mut guard = DeletionGuard(
     314              :             Arc::clone(&timeline.delete_progress)
     315              :                 .try_lock_owned()
     316              :                 .expect("cannot happen because we're the only owner"),
     317              :         );
     318              : 
      319              :         // We need to do this because when the console retries the delete request we shouldn't answer with 404
     320              :         // because 404 means successful deletion.
     321              :         {
     322              :             let mut locked = tenant.timelines.lock().unwrap();
     323              :             locked.insert(timeline_id, Arc::clone(&timeline));
     324              :         }
     325              : 
     326              :         guard.mark_in_progress()?;
     327              : 
     328              :         let remote_client = timeline.remote_client.clone();
     329              :         let timeline = TimelineOrOffloaded::Timeline(timeline);
     330              :         Self::schedule_background(guard, tenant.conf, tenant, timeline, remote_client);
     331              : 
     332              :         Ok(())
     333              :     }
     334              : 
     335            0 :     fn schedule_background(
     336            0 :         guard: DeletionGuard,
     337            0 :         conf: &'static PageServerConf,
     338            0 :         tenant: Arc<Tenant>,
     339            0 :         timeline: TimelineOrOffloaded,
     340            0 :         remote_client: Arc<RemoteTimelineClient>,
     341            0 :     ) {
     342            0 :         let tenant_shard_id = timeline.tenant_shard_id();
     343            0 :         let timeline_id = timeline.timeline_id();
     344              : 
     345              :         // Take a tenant gate guard, because timeline deletion needs access to the tenant to update its manifest.
     346            0 :         let Ok(tenant_guard) = tenant.gate.enter() else {
     347              :             // It is safe to simply skip here, because we only schedule background work once the timeline is durably marked for deletion.
     348            0 :             info!("Tenant is shutting down, timeline deletion will be resumed when it next starts");
     349            0 :             return;
     350              :         };
     351              : 
     352              :         task_mgr::spawn(
     353            0 :             task_mgr::BACKGROUND_RUNTIME.handle(),
     354            0 :             TaskKind::TimelineDeletionWorker,
     355            0 :             tenant_shard_id,
     356            0 :             Some(timeline_id),
     357            0 :             "timeline_delete",
     358            0 :             async move {
     359            0 :                 let _guard = tenant_guard;
     360              : 
     361            0 :                 if let Err(err) = Self::background(guard, conf, &tenant, &timeline, remote_client).await {
     362              :                     // Only log as an error if it's not a cancellation.
     363            0 :                     if matches!(err, DeleteTimelineError::Cancelled) {
     364            0 :                         info!("Shutdown during timeline deletion");
      365              :                     } else {
     366            0 :                         error!("Error: {err:#}");
     367              :                     }
     368            0 :                     if let TimelineOrOffloaded::Timeline(timeline) = timeline {
     369            0 :                         timeline.set_broken(format!("{err:#}"))
     370            0 :                     }
     371            0 :                 };
     372            0 :                 Ok(())
     373            0 :             }
     374            0 :             .instrument(tracing::info_span!(parent: None, "delete_timeline", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(),timeline_id=%timeline_id)),
     375              :         );
     376            0 :     }
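
The error handling above separates an expected shutdown from a real failure: a Cancelled error is logged at info level, anything else is logged as an error and the timeline is marked broken. A toy sketch of that classification (hypothetical error type, println! standing in for tracing):

    #[derive(Debug)]
    enum DeleteError {
        Cancelled,
        Other(String),
    }

    fn report(err: &DeleteError) {
        if matches!(err, DeleteError::Cancelled) {
            // Shutdown during deletion is expected; it resumes on the next start.
            println!("INFO: shutdown during timeline deletion");
        } else {
            eprintln!("ERROR: {err:?}");
        }
    }

    fn main() {
        report(&DeleteError::Cancelled);
        report(&DeleteError::Other("remote delete failed".into()));
    }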
     377              : 
     378            0 :     async fn background(
     379            0 :         mut guard: DeletionGuard,
     380            0 :         conf: &PageServerConf,
     381            0 :         tenant: &Tenant,
     382            0 :         timeline: &TimelineOrOffloaded,
     383            0 :         remote_client: Arc<RemoteTimelineClient>,
     384            0 :     ) -> Result<(), DeleteTimelineError> {
     385            0 :         fail::fail_point!("timeline-delete-before-rm", |_| {
     386            0 :             Err(anyhow::anyhow!("failpoint: timeline-delete-before-rm"))?
     387            0 :         });
     388              : 
     389              :         // Offloaded timelines have no local state
     390              :         // TODO: once we persist offloaded information, delete the timeline from there, too
     391            0 :         if let TimelineOrOffloaded::Timeline(timeline) = timeline {
     392            0 :             delete_local_timeline_directory(conf, tenant.tenant_shard_id, timeline).await;
     393            0 :         }
     394              : 
     395            0 :         fail::fail_point!("timeline-delete-after-rm", |_| {
     396            0 :             Err(anyhow::anyhow!("failpoint: timeline-delete-after-rm"))?
     397            0 :         });
     398              : 
     399            0 :         remote_client.delete_all().await?;
     400              : 
     401            0 :         pausable_failpoint!("in_progress_delete");
     402              : 
     403            0 :         remove_maybe_offloaded_timeline_from_tenant(tenant, timeline, &guard).await?;
     404              : 
     405              :         // This is susceptible to race conditions, i.e. we won't continue deletions if there is a crash
      406              :         // between the deletion of the index-part.json and reaching this code.
     407              :         // So indeed, the tenant manifest might refer to an offloaded timeline which has already been deleted.
     408              :         // However, we handle this case in tenant loading code so the next time we attach, the issue is
     409              :         // resolved.
     410            0 :         tenant.store_tenant_manifest().await.map_err(|e| match e {
     411            0 :             TenantManifestError::Cancelled => DeleteTimelineError::Cancelled,
     412            0 :             _ => DeleteTimelineError::Other(e.into()),
     413            0 :         })?;
     414              : 
     415            0 :         *guard = Self::Finished;
     416            0 : 
     417            0 :         Ok(())
     418            0 :     }
     419              : 
     420            0 :     pub(crate) fn is_not_started(&self) -> bool {
     421            0 :         matches!(self, Self::NotStarted)
     422            0 :     }
     423              : }
     424              : 
     425              : #[derive(Copy, Clone, PartialEq, Eq)]
     426              : pub(super) enum TimelineDeleteGuardKind {
     427              :     Offload,
     428              :     Delete,
     429              : }
     430              : 
     431            4 : pub(super) fn make_timeline_delete_guard(
     432            4 :     tenant: &Tenant,
     433            4 :     timeline_id: TimelineId,
     434            4 :     guard_kind: TimelineDeleteGuardKind,
     435            4 : ) -> Result<(TimelineOrOffloaded, DeletionGuard), DeleteTimelineError> {
     436            4 :     // Note the interaction between this guard and deletion guard.
     437            4 :     // Here we attempt to lock deletion guard when we're holding a lock on timelines.
      438            4 :     // This is important because, when you take into account `remove_maybe_offloaded_timeline_from_tenant`,
      439            4 :     // we remove the timeline from memory while we still hold the deletion guard.
      440            4 :     // So here, when timeline deletion is finished, the timeline won't be present in the timelines map at all,
     441            4 :     // which makes the following sequence impossible:
     442            4 :     // T1: get preempted right before the try_lock on `Timeline::delete_progress`
     443            4 :     // T2: do a full deletion, acquire and drop `Timeline::delete_progress`
     444            4 :     // T1: acquire deletion lock, do another `DeleteTimelineFlow::run`
     445            4 :     // For more context see this discussion: `https://github.com/neondatabase/neon/pull/4552#discussion_r1253437346`
     446            4 :     let timelines = tenant.timelines.lock().unwrap();
     447            4 :     let timelines_offloaded = tenant.timelines_offloaded.lock().unwrap();
     448              : 
     449            4 :     let timeline = match timelines.get(&timeline_id) {
     450            4 :         Some(t) => TimelineOrOffloaded::Timeline(Arc::clone(t)),
     451            0 :         None => match timelines_offloaded.get(&timeline_id) {
     452            0 :             Some(t) => TimelineOrOffloaded::Offloaded(Arc::clone(t)),
     453            0 :             None => return Err(DeleteTimelineError::NotFound),
     454              :         },
     455              :     };
     456              : 
     457              :     // Ensure that there are no child timelines, because we are about to remove files,
     458              :     // which will break child branches
     459            4 :     let mut children = Vec::new();
     460            4 :     if guard_kind == TimelineDeleteGuardKind::Delete {
     461            0 :         children.extend(timelines_offloaded.iter().filter_map(|(id, entry)| {
     462            0 :             (entry.ancestor_timeline_id == Some(timeline_id)).then_some(*id)
     463            0 :         }));
     464            4 :     }
     465            8 :     children.extend(timelines.iter().filter_map(|(id, entry)| {
     466            8 :         (entry.get_ancestor_timeline_id() == Some(timeline_id)).then_some(*id)
     467            8 :     }));
     468            4 : 
     469            4 :     if !children.is_empty() {
     470            0 :         return Err(DeleteTimelineError::HasChildren(children));
     471            4 :     }
     472            4 : 
     473            4 :     // Note that using try_lock here is important to avoid a deadlock.
      474            4 :     // Here we take the lock on the timelines map and then the deletion guard.
     475            4 :     // At the end of the operation we're holding the guard and need to lock timelines map
     476            4 :     // to remove the timeline from it.
      477            4 :     // Whenever two locks are taken in different orders, this can result in a deadlock.
     478            4 : 
     479            4 :     let delete_progress = Arc::clone(timeline.delete_progress());
     480            4 :     let delete_lock_guard = match delete_progress.try_lock_owned() {
     481            4 :         Ok(guard) => DeletionGuard(guard),
     482              :         Err(_) => {
      483              :             // Unfortunately, if the lock fails, the Arc is consumed.
     484            0 :             return Err(DeleteTimelineError::AlreadyInProgress(Arc::clone(
     485            0 :                 timeline.delete_progress(),
     486            0 :             )));
     487              :         }
     488              :     };
     489              : 
     490            4 :     if guard_kind == TimelineDeleteGuardKind::Delete {
     491            0 :         if let TimelineOrOffloaded::Timeline(timeline) = &timeline {
     492            0 :             timeline.set_state(TimelineState::Stopping);
     493            0 :         }
     494            4 :     }
     495              : 
     496            4 :     Ok((timeline, delete_lock_guard))
     497            4 : }
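
The non-blocking try_lock above is what turns a concurrent deletion attempt into an AlreadyInProgress error instead of a deadlock or a second full deletion. A minimal sketch of the same acquisition on a tokio mutex (DeleteState is a toy stand-in for DeleteTimelineFlow):

    use std::sync::Arc;
    use tokio::sync::{Mutex, OwnedMutexGuard};

    #[derive(Default)]
    enum DeleteState {
        #[default]
        NotStarted,
        InProgress,
        Finished,
    }

    fn try_begin(progress: &Arc<Mutex<DeleteState>>) -> Result<OwnedMutexGuard<DeleteState>, &'static str> {
        // try_lock_owned consumes the Arc clone; on failure we report "in progress"
        // instead of waiting and risking a lock-order deadlock.
        Arc::clone(progress)
            .try_lock_owned()
            .map_err(|_| "deletion already in progress")
    }

    #[tokio::main]
    async fn main() {
        let progress = Arc::new(Mutex::new(DeleteState::default()));
        let guard = try_begin(&progress).expect("first caller wins");
        // A second caller fails fast rather than blocking on the mutex.
        assert!(try_begin(&progress).is_err());
        drop(guard);
    }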
     498              : 
     499              : pub(super) struct DeletionGuard(OwnedMutexGuard<DeleteTimelineFlow>);
     500              : 
     501              : impl Deref for DeletionGuard {
     502              :     type Target = DeleteTimelineFlow;
     503              : 
     504            0 :     fn deref(&self) -> &Self::Target {
     505            0 :         &self.0
     506            0 :     }
     507              : }
     508              : 
     509              : impl DerefMut for DeletionGuard {
     510            0 :     fn deref_mut(&mut self) -> &mut Self::Target {
     511            0 :         &mut self.0
     512            0 :     }
     513              : }
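
The Deref/DerefMut impls are what let the deletion task treat the guard as the state itself, e.g. `*guard = Self::Finished` in `background` above. A toy sketch of that newtype pattern:

    use std::ops::{Deref, DerefMut};

    #[derive(Debug, PartialEq)]
    enum Flow {
        NotStarted,
        Finished,
    }

    struct Guard(Flow);

    impl Deref for Guard {
        type Target = Flow;
        fn deref(&self) -> &Flow {
            &self.0
        }
    }

    impl DerefMut for Guard {
        fn deref_mut(&mut self) -> &mut Flow {
            &mut self.0
        }
    }

    fn main() {
        let mut guard = Guard(Flow::NotStarted);
        // Writing through the guard updates the underlying state.
        *guard = Flow::Finished;
        assert_eq!(*guard, Flow::Finished);
    }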
        

Generated by: LCOV version 2.1-beta