LCOV - code coverage report
Current view: top level - pageserver/src/tenant - remote_timeline_client.rs (source / functions)
Test: 691a4c28fe7169edd60b367c52d448a0a6605f1f.info    Test Date: 2024-05-10 13:18:37
Coverage: Lines: 73.4 % (1179 of 1607)    Functions: 60.1 % (86 of 143)

            Line data    Source code
       1              : //! This module manages synchronizing local FS with remote storage.
       2              : //!
       3              : //! # Overview
       4              : //!
       5              : //! * [`RemoteTimelineClient`] provides functions related to upload/download of a particular timeline.
       6              : //!   It contains a queue of pending uploads, and manages the queue, performing uploads in parallel
       7              : //!   when it's safe to do so.
       8              : //!
       9              : //! * A stand-alone function, [`list_remote_timelines`], to get the list of timelines of a tenant.
      10              : //!
      11              : //! These functions use the low-level remote storage client, [`remote_storage::RemoteStorage`].
      12              : //!
      13              : //! # APIs & How To Use Them
      14              : //!
      15              : //! There is a [RemoteTimelineClient] for each [Timeline][`crate::tenant::Timeline`] in the system,
      16              : //! unless the pageserver is configured without remote storage.
      17              : //!
      18              : //! We allocate the client instance in [Timeline][`crate::tenant::Timeline`], i.e.,
      19              : //! either in [`crate::tenant::mgr`] during startup or when creating a new
      20              : //! timeline.
      21              : //! However, the client does not become ready for use until we've initialized its upload queue:
      22              : //!
      23              : //! - For timelines that already have some state on the remote storage, we use
      24              : //!   [`RemoteTimelineClient::init_upload_queue`].
      25              : //! - For newly created timelines, we use
      26              : //!   [`RemoteTimelineClient::init_upload_queue_for_empty_remote`].
      27              : //!
      28              : //! The former takes the remote's [`IndexPart`] as an argument, possibly retrieved
      29              : //! using [`list_remote_timelines`]. We'll elaborate on [`IndexPart`] in the next section.
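//!
//! For illustration, a minimal sketch of the init step (hypothetical `client`,
//! `remote_index_part`, and `local_metadata` variables; error handling elided):
//!
//! ```ignore
//! match remote_index_part {
//!     // the timeline already has state in remote storage
//!     Some(index_part) => client.init_upload_queue(&index_part)?,
//!     // freshly created timeline, nothing uploaded yet
//!     None => client.init_upload_queue_for_empty_remote(&local_metadata)?,
//! }
//! ```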
      30              : //!
      31              : //! Whenever we've created/updated/deleted a file in a timeline directory, we schedule
      32              : //! the corresponding remote operation with the timeline's [`RemoteTimelineClient`]:
      33              : //!
      34              : //! - [`RemoteTimelineClient::schedule_layer_file_upload`] when we've created a new layer file.
      35              : //! - [`RemoteTimelineClient::schedule_index_upload_for_metadata_update`] when we've updated the timeline metadata file.
      36              : //! - [`RemoteTimelineClient::schedule_index_upload_for_file_changes`] to upload an updated index file after we've scheduled file uploads.
      37              : //! - [`RemoteTimelineClient::schedule_layer_file_deletion`] when we've deleted one or more layer files.
      38              : //!
      39              : //! Internally, these functions create [`UploadOp`]s and put them in a queue.
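//!
//! For example, a layer-flush path might schedule its operations like this
//! (sketch only; `new_layer` is a hypothetical `ResidentLayer`):
//!
//! ```ignore
//! client.schedule_layer_file_upload(new_layer)?;
//! client.schedule_index_upload_for_file_changes()?;
//! ```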
      40              : //!
      41              : //! There are also APIs for downloading files.
      42              : //! These are not part of the aforementioned queuing and will not be discussed
      43              : //! further here, except in the section covering tenant attach.
      44              : //!
      45              : //! # Remote Storage Structure & [`IndexPart`] Index File
      46              : //!
      47              : //! The "directory structure" in the remote storage mirrors the local directory structure, with paths
      48              : //! like `tenants/<tenant_id>/timelines/<timeline_id>/<layer filename>`.
      49              : //! Yet instead of keeping the `metadata` file remotely, we wrap it with more
      50              : //! data in an "index file" aka [`IndexPart`], containing the list of **all** remote
      51              : //! files for a given timeline.
      52              : //! If a file is not referenced from [`IndexPart`], it's not part of the remote storage state.
      53              : //!
      54              : //! Having the `IndexPart` also avoids expensive and slow `S3 list` commands.
      55              : //!
      56              : //! # Consistency
      57              : //!
      58              : //! To have a consistent remote structure, it's important that uploads and
      59              : //! deletions are performed in the right order. For example, the index file
      60              : //! contains a list of layer files, so it must not be uploaded until all the
      61              : //! layer files that are in its list have been successfully uploaded.
      62              : //!
      63              : //! The contract between the client and its user is that the user is responsible for
      64              : //! scheduling operations in an order that keeps the remote consistent as
      65              : //! described above.
      66              : //! From the user's perspective, the operations are executed sequentially.
      67              : //! Internally, the client knows which operations can be performed in parallel,
      68              : //! and which operations act like a "barrier" that require preceding operations
      69              : //! to finish. The calling code just needs to call the schedule-functions in the
      70              : //! correct order, and the client will parallelize the operations in a way that
      71              : //! is safe.
      72              : //!
      73              : //! The caller should be careful with deletion, though. They should not delete
      74              : //! local files that have been scheduled for upload but not yet finished uploading.
      75              : //! Otherwise the upload will fail. To wait for an upload to finish, use
      76              : //! the 'wait_completion' function (more on that later).
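//!
//! A sketch of that rule (hypothetical `layer` and `local_path` variables):
//!
//! ```ignore
//! client.schedule_layer_file_upload(layer)?;
//! client.wait_completion().await?; // barrier: the upload is now durable
//! std::fs::remove_file(local_path)?; // only now is local deletion safe
//! ```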
      77              : //!
      78              : //! All of this relies on the following invariants:
      79              : //!
      80              : //! - Read-after-write consistency in the remote storage.
      81              : //! - Layer files are immutable.
      82              : //!
      83              : //! NB: Pageserver assumes that it has exclusive write access to the tenant in remote
      84              : //! storage. Different tenants can be attached to different pageservers, but if the
      85              : //! same tenant is attached to two pageservers at the same time, they will overwrite
      86              : //! each other's index file updates, and confusion will ensue. There's no interlock or
      87              : //! mechanism to detect that in the pageserver; we rely on the control plane to ensure
      88              : //! that it doesn't happen.
      89              : //!
      90              : //! ## Implementation Note
      91              : //!
      92              : //! The *actual* remote state lags behind the *desired* remote state while
      93              : //! there are in-flight operations.
      94              : //! We keep track of the desired remote state in
      95              : //! [`UploadQueueInitialized::latest_files`] and [`UploadQueueInitialized::latest_metadata`].
      96              : //! It is initialized based on the [`IndexPart`] that was passed during init
      97              : //! and updated with every `schedule_*` function call.
      98              : //! All this is necessary to compute the future [`IndexPart`]s
      99              : //! when scheduling an operation while other operations that also affect the
     100              : //! remote [`IndexPart`] are in flight.
     101              : //!
     102              : //! # Retries & Error Handling
     103              : //!
     104              : //! The client retries operations indefinitely, using exponential back-off.
     105              : //! There is no way to force a retry, i.e., interrupt the back-off.
     106              : //! This could be built easily.
     107              : //!
     108              : //! # Cancellation
     109              : //!
     110              : //! The operations execute as plain [`task_mgr`] tasks, scoped to
     111              : //! the client's tenant and timeline.
     112              : //! Dropping the client will drop queued operations, but not operations that are already executing.
     113              : //! These will complete unless the `task_mgr` tasks are cancelled using `task_mgr`
     114              : //! APIs, e.g., during pageserver shutdown, timeline delete, or tenant detach.
     115              : //!
     116              : //! # Completion
     117              : //!
     118              : //! Once an operation has completed, we update
     119              : //! [`UploadQueueInitialized::projected_remote_consistent_lsn`] immediately,
     120              : //! and submit a request through the DeletionQueue to update
     121              : //! [`UploadQueueInitialized::visible_remote_consistent_lsn`] after it has
     122              : //! validated that our generation is not stale. It is this visible value
     123              : //! that is advertised to safekeepers as a signal that they can
     124              : //! delete the WAL up to that LSN.
     125              : //!
     126              : //! The [`RemoteTimelineClient::wait_completion`] method can be used to wait
     127              : //! for all pending operations to complete. It does not prevent more
     128              : //! operations from getting scheduled.
     129              : //!
     130              : //! # Crash Consistency
     131              : //!
     132              : //! We do not persist the upload queue state.
     133              : //! If we drop the client, or crash, all unfinished operations are lost.
     134              : //!
     135              : //! To recover, the following steps need to be taken:
     136              : //! - Retrieve the current remote [`IndexPart`]. This gives us a
     137              : //!   consistent remote state, assuming the user scheduled the operations in
     138              : //!   the correct order.
     139              : //! - Initialize the upload queue with that [`IndexPart`].
     140              : //! - Reschedule all lost operations by comparing the local filesystem state
     141              : //!   and remote state as per [`IndexPart`]. This is done in
     142              : //!   [`Tenant::timeline_init_and_sync`].
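//!
//! A sketch of those steps (heavily simplified; `storage`, `tenant_shard_id`,
//! `timeline_id`, `generation`, and `cancel` assumed in scope):
//!
//! ```ignore
//! let (index_part, _gen) =
//!     download_index_part(&storage, &tenant_shard_id, &timeline_id, generation, &cancel).await?;
//! client.init_upload_queue(&index_part)?;
//! // compare local filesystem state against index_part, re-schedule lost uploads
//! ```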
     143              : //!
     144              : //! Note that if we crash during a file deletion, between the index update that
     145              : //! removes the file from the list of files and the deletion of the remote file,
     146              : //! the file is leaked in the remote storage. Similarly, if a new file is created
     147              : //! and uploaded, but the pageserver dies permanently before updating the
     148              : //! remote index file, the new file is leaked in remote storage. We accept and
     149              : //! tolerate that for now.
     150              : //! Note further that we cannot easily fix this by scheduling deletes for every
     151              : //! file that is present only on the remote, because we cannot distinguish the
     152              : //! following two cases:
     153              : //! - (1) We had the file locally, deleted it locally, scheduled a remote delete,
     154              : //!   but crashed before it finished remotely.
     155              : //! - (2) We never had the file locally because we haven't on-demand downloaded
     156              : //!   it yet.
     157              : //!
     158              : //! # Downloads
     159              : //!
     160              : //! In addition to the upload queue, [`RemoteTimelineClient`] has functions for
     161              : //! downloading files from the remote storage. Downloads are performed immediately
     162              : //! against the `RemoteStorage`, independently of the upload queue.
     163              : //!
     164              : //! When we attach a tenant, we perform the following steps:
     165              : //! - create a `Tenant` object in `TenantState::Attaching` state
     166              : //! - list the timelines that are present in remote storage, and for each:
     167              : //!   - download their remote [`IndexPart`]s
     168              : //!   - create `Timeline` struct and a `RemoteTimelineClient`
     169              : //!   - initialize the client's upload queue with its `IndexPart`
     170              : //!   - schedule uploads for layers that are only present locally.
     171              : //! - After the above is done for each timeline, open the tenant for business by
     172              : //!   transitioning it from `TenantState::Attaching` to `TenantState::Active` state.
     173              : //!   This starts the timelines' WAL-receivers and the tenant's GC & Compaction loops.
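//!
//! The per-timeline part of attach then looks roughly like this (hypothetical
//! sketch; the `Deleted` case would resume the interrupted deletion instead):
//!
//! ```ignore
//! let index_part = match client.download_index_file(&cancel).await? {
//!     MaybeDeletedIndexPart::IndexPart(p) => p,
//!     MaybeDeletedIndexPart::Deleted(p) => p, // resume deletion instead
//! };
//! client.init_upload_queue(&index_part)?;
//! // schedule uploads for layers that are only present locally
//! ```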
     174              : //!
     175              : //! # Operating Without Remote Storage
     176              : //!
     177              : //! If no remote storage configuration is provided, the [`RemoteTimelineClient`] is
     178              : //! not created and the uploads are skipped.
     179              : //!
     180              : //! [`Tenant::timeline_init_and_sync`]: super::Tenant::timeline_init_and_sync
     181              : //! [`Timeline::load_layer_map`]: super::Timeline::load_layer_map
     182              : 
     183              : pub(crate) mod download;
     184              : pub mod index;
     185              : pub(crate) mod upload;
     186              : 
     187              : use anyhow::Context;
     188              : use camino::Utf8Path;
     189              : use chrono::{NaiveDateTime, Utc};
     190              : 
     191              : pub(crate) use download::download_initdb_tar_zst;
     192              : use pageserver_api::shard::{ShardIndex, TenantShardId};
     193              : use scopeguard::ScopeGuard;
     194              : use tokio_util::sync::CancellationToken;
     195              : pub(crate) use upload::upload_initdb_dir;
     196              : use utils::backoff::{
     197              :     self, exponential_backoff, DEFAULT_BASE_BACKOFF_SECONDS, DEFAULT_MAX_BACKOFF_SECONDS,
     198              : };
     199              : 
     200              : use std::collections::{HashMap, VecDeque};
     201              : use std::sync::atomic::{AtomicU32, Ordering};
     202              : use std::sync::{Arc, Mutex};
     203              : use std::time::Duration;
     204              : 
     205              : use remote_storage::{
     206              :     DownloadError, GenericRemoteStorage, ListingMode, RemotePath, TimeoutOrCancel,
     207              : };
     208              : use std::ops::DerefMut;
     209              : use tracing::{debug, error, info, instrument, warn};
     210              : use tracing::{info_span, Instrument};
     211              : use utils::lsn::Lsn;
     212              : 
     213              : use crate::context::RequestContext;
     214              : use crate::deletion_queue::{DeletionQueueClient, DeletionQueueError};
     215              : use crate::metrics::{
     216              :     MeasureRemoteOp, RemoteOpFileKind, RemoteOpKind, RemoteTimelineClientMetrics,
     217              :     RemoteTimelineClientMetricsCallTrackSize, REMOTE_ONDEMAND_DOWNLOADED_BYTES,
     218              :     REMOTE_ONDEMAND_DOWNLOADED_LAYERS,
     219              : };
     220              : use crate::task_mgr::shutdown_token;
     221              : use crate::tenant::debug_assert_current_span_has_tenant_and_timeline_id;
     222              : use crate::tenant::remote_timeline_client::download::download_retry;
     223              : use crate::tenant::storage_layer::AsLayerDesc;
     224              : use crate::tenant::upload_queue::{Delete, UploadQueueStoppedDeletable};
     225              : use crate::tenant::TIMELINES_SEGMENT_NAME;
     226              : use crate::{
     227              :     config::PageServerConf,
     228              :     task_mgr,
     229              :     task_mgr::TaskKind,
     230              :     task_mgr::BACKGROUND_RUNTIME,
     231              :     tenant::metadata::TimelineMetadata,
     232              :     tenant::upload_queue::{
     233              :         UploadOp, UploadQueue, UploadQueueInitialized, UploadQueueStopped, UploadTask,
     234              :     },
     235              :     TENANT_HEATMAP_BASENAME,
     236              : };
     237              : 
     238              : use utils::id::{TenantId, TimelineId};
     239              : 
     240              : use self::index::IndexPart;
     241              : 
     242              : use super::metadata::MetadataUpdate;
     243              : use super::storage_layer::{Layer, LayerName, ResidentLayer};
     244              : use super::upload_queue::SetDeletedFlagProgress;
     245              : use super::Generation;
     246              : 
     247              : pub(crate) use download::{
     248              :     download_index_part, is_temp_download_file, list_remote_tenant_shards, list_remote_timelines,
     249              : };
     250              : pub(crate) use index::LayerFileMetadata;
     251              : 
     252              : // Occasional network issues and such can cause remote operations to fail, and
     253              : // that's expected. If a download fails, we log it at info-level, and retry.
     254              : // But after FAILED_DOWNLOAD_WARN_THRESHOLD retries, we start to log it at WARN
     255              : // level instead, as repeated failures can mean a more serious problem. If it
     256              : // fails more than FAILED_REMOTE_OP_RETRIES times, we give up.
     257              : pub(crate) const FAILED_DOWNLOAD_WARN_THRESHOLD: u32 = 3;
     258              : pub(crate) const FAILED_REMOTE_OP_RETRIES: u32 = 10;
     259              : 
     260              : // Similarly log failed uploads and deletions at WARN level, after this many
     261              : // retries. Uploads and deletions are retried forever, though.
     262              : pub(crate) const FAILED_UPLOAD_WARN_THRESHOLD: u32 = 3;
     263              : 
     264              : pub(crate) const INITDB_PATH: &str = "initdb.tar.zst";
     265              : 
     266              : pub(crate) const INITDB_PRESERVED_PATH: &str = "initdb-preserved.tar.zst";
     267              : 
     268              : /// Default buffer size when interfacing with [`tokio::fs::File`].
     269              : pub(crate) const BUFFER_SIZE: usize = 32 * 1024;
     270              : 
     271              : /// Non-essential flushes of the deletion queue are subject to this timeout, after
     272              : /// which we warn and skip them.
     273              : const DELETION_QUEUE_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
     274              : 
     275              : pub enum MaybeDeletedIndexPart {
     276              :     IndexPart(IndexPart),
     277              :     Deleted(IndexPart),
     278              : }
     279              : 
     280            0 : #[derive(Debug, thiserror::Error)]
     281              : pub enum PersistIndexPartWithDeletedFlagError {
     282              :     #[error("another task is already setting the deleted_flag, started at {0:?}")]
     283              :     AlreadyInProgress(NaiveDateTime),
     284              :     #[error("the deleted_flag was already set, value is {0:?}")]
     285              :     AlreadyDeleted(NaiveDateTime),
     286              :     #[error(transparent)]
     287              :     Other(#[from] anyhow::Error),
     288              : }
     289              : 
     290              : /// A client for accessing a timeline's data in remote storage.
     291              : ///
     292              : /// This takes care of managing the number of connections, and balancing them
     293              : /// across tenants. This also handles retries of failed uploads.
     294              : ///
     295              : /// Upload and delete requests are ordered so that before a deletion is
     296              : /// performed, we wait for all preceding uploads to finish. This ensures
     297              : /// that if you perform a compaction operation that reshuffles data in layer
     298              : /// files, we don't have a transient state where the old files have already been
     299              : /// deleted, but new files have not yet been uploaded.
     300              : ///
     301              : /// Similarly, this enforces an order between index-file uploads and layer
     302              : /// uploads. Before an index-file upload is performed, all preceding layer
     303              : /// uploads must be finished.
     304              : ///
     305              : /// This also maintains a list of remote files, and automatically includes that
     306              : /// in the index part file, whenever timeline metadata is uploaded.
     307              : ///
     308              : /// Downloads are not queued, they are performed immediately.
     309              : pub struct RemoteTimelineClient {
     310              :     conf: &'static PageServerConf,
     311              : 
     312              :     runtime: tokio::runtime::Handle,
     313              : 
     314              :     tenant_shard_id: TenantShardId,
     315              :     timeline_id: TimelineId,
     316              :     generation: Generation,
     317              : 
     318              :     upload_queue: Mutex<UploadQueue>,
     319              : 
     320              :     metrics: Arc<RemoteTimelineClientMetrics>,
     321              : 
     322              :     storage_impl: GenericRemoteStorage,
     323              : 
     324              :     deletion_queue_client: DeletionQueueClient,
     325              : 
     326              :     cancel: CancellationToken,
     327              : }
     328              : 
     329              : impl RemoteTimelineClient {
     330              :     ///
     331              :     /// Create a remote storage client for a given timeline.
     332              :     ///
     333              :     /// Note: the caller must initialize the upload queue before any uploads can be scheduled,
     334              :     /// by calling init_upload_queue.
     335              :     ///
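    ///
    /// A hypothetical construction sketch (all variables assumed in scope):
    /// ```ignore
    /// let client = Arc::new(RemoteTimelineClient::new(
    ///     remote_storage, deletion_queue_client, conf,
    ///     tenant_shard_id, timeline_id, generation,
    /// ));
    /// client.init_upload_queue_for_empty_remote(&local_metadata)?;
    /// ```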
     336          334 :     pub fn new(
     337          334 :         remote_storage: GenericRemoteStorage,
     338          334 :         deletion_queue_client: DeletionQueueClient,
     339          334 :         conf: &'static PageServerConf,
     340          334 :         tenant_shard_id: TenantShardId,
     341          334 :         timeline_id: TimelineId,
     342          334 :         generation: Generation,
     343          334 :     ) -> RemoteTimelineClient {
     344          334 :         RemoteTimelineClient {
     345          334 :             conf,
     346          334 :             runtime: if cfg!(test) {
     347              :                 // remote_timeline_client.rs tests rely on current-thread runtime
     348          334 :                 tokio::runtime::Handle::current()
     349              :             } else {
     350            0 :                 BACKGROUND_RUNTIME.handle().clone()
     351              :             },
     352          334 :             tenant_shard_id,
     353          334 :             timeline_id,
     354          334 :             generation,
     355          334 :             storage_impl: remote_storage,
     356          334 :             deletion_queue_client,
     357          334 :             upload_queue: Mutex::new(UploadQueue::Uninitialized),
     358          334 :             metrics: Arc::new(RemoteTimelineClientMetrics::new(
     359          334 :                 &tenant_shard_id,
     360          334 :                 &timeline_id,
     361          334 :             )),
     362          334 :             cancel: CancellationToken::new(),
     363          334 :         }
     364          334 :     }
     365              : 
     366              :     /// Initialize the upload queue for a remote storage that already received
     367              :     /// an index file upload, i.e., it's not empty.
     368              :     /// The given `index_part` must be the one on the remote.
     369            6 :     pub fn init_upload_queue(&self, index_part: &IndexPart) -> anyhow::Result<()> {
     370            6 :         let mut upload_queue = self.upload_queue.lock().unwrap();
     371            6 :         upload_queue.initialize_with_current_remote_index_part(index_part)?;
     372            6 :         self.update_remote_physical_size_gauge(Some(index_part));
     373            6 :         info!(
     374            0 :             "initialized upload queue from remote index with {} layer files",
     375            0 :             index_part.layer_metadata.len()
     376              :         );
     377            6 :         Ok(())
     378            6 :     }
     379              : 
     380              :     /// Initialize the upload queue for the case where the remote storage is empty,
     381              :     /// i.e., it doesn't have an `IndexPart`.
     382          328 :     pub fn init_upload_queue_for_empty_remote(
     383          328 :         &self,
     384          328 :         local_metadata: &TimelineMetadata,
     385          328 :     ) -> anyhow::Result<()> {
     386          328 :         let mut upload_queue = self.upload_queue.lock().unwrap();
     387          328 :         upload_queue.initialize_empty_remote(local_metadata)?;
     388          328 :         self.update_remote_physical_size_gauge(None);
     389          328 :         info!("initialized upload queue as empty");
     390          328 :         Ok(())
     391          328 :     }
     392              : 
     393              :     /// Initialize the queue in the stopped state. Used in the startup path
     394              :     /// to continue a deletion operation interrupted by a pageserver crash or restart.
     395            0 :     pub fn init_upload_queue_stopped_to_continue_deletion(
     396            0 :         &self,
     397            0 :         index_part: &IndexPart,
     398            0 :     ) -> anyhow::Result<()> {
     399              :         // FIXME: consider newtype for DeletedIndexPart.
     400            0 :         let deleted_at = index_part.deleted_at.ok_or(anyhow::anyhow!(
     401            0 :             "bug: it is responsibility of the caller to provide index part from MaybeDeletedIndexPart::Deleted"
     402            0 :         ))?;
     403              : 
     404            0 :         let mut upload_queue = self.upload_queue.lock().unwrap();
     405            0 :         upload_queue.initialize_with_current_remote_index_part(index_part)?;
     406            0 :         self.update_remote_physical_size_gauge(Some(index_part));
     407            0 :         self.stop_impl(&mut upload_queue);
     408            0 : 
     409            0 :         upload_queue
     410            0 :             .stopped_mut()
     411            0 :             .expect("stopped above")
     412            0 :             .deleted_at = SetDeletedFlagProgress::Successful(deleted_at);
     413            0 : 
     414            0 :         Ok(())
     415            0 :     }
     416              : 
     417            0 :     pub fn remote_consistent_lsn_projected(&self) -> Option<Lsn> {
     418            0 :         match &mut *self.upload_queue.lock().unwrap() {
     419            0 :             UploadQueue::Uninitialized => None,
     420            0 :             UploadQueue::Initialized(q) => q.get_last_remote_consistent_lsn_projected(),
     421            0 :             UploadQueue::Stopped(UploadQueueStopped::Uninitialized) => None,
     422            0 :             UploadQueue::Stopped(UploadQueueStopped::Deletable(q)) => q
     423            0 :                 .upload_queue_for_deletion
     424            0 :                 .get_last_remote_consistent_lsn_projected(),
     425              :         }
     426            0 :     }
     427              : 
     428            0 :     pub fn remote_consistent_lsn_visible(&self) -> Option<Lsn> {
     429            0 :         match &mut *self.upload_queue.lock().unwrap() {
     430            0 :             UploadQueue::Uninitialized => None,
     431            0 :             UploadQueue::Initialized(q) => Some(q.get_last_remote_consistent_lsn_visible()),
     432            0 :             UploadQueue::Stopped(UploadQueueStopped::Uninitialized) => None,
     433            0 :             UploadQueue::Stopped(UploadQueueStopped::Deletable(q)) => Some(
     434            0 :                 q.upload_queue_for_deletion
     435            0 :                     .get_last_remote_consistent_lsn_visible(),
     436            0 :             ),
     437              :         }
     438            0 :     }
     439              : 
     440         1478 :     fn update_remote_physical_size_gauge(&self, current_remote_index_part: Option<&IndexPart>) {
     441         1478 :         let size: u64 = if let Some(current_remote_index_part) = current_remote_index_part {
     442         1150 :             current_remote_index_part
     443         1150 :                 .layer_metadata
     444         1150 :                 .values()
     445         1150 :                 // If we don't have the file size for the layer, don't account for it in the metric.
     446        13167 :                 .map(|ilmd| ilmd.file_size)
     447         1150 :                 .sum()
     448              :         } else {
     449          328 :             0
     450              :         };
     451         1478 :         self.metrics.remote_physical_size_set(size);
     452         1478 :     }
     453              : 
     454            2 :     pub fn get_remote_physical_size(&self) -> u64 {
     455            2 :         self.metrics.remote_physical_size_get()
     456            2 :     }
     457              : 
     458              :     //
     459              :     // Download operations.
     460              :     //
     461              :     // These don't use the per-timeline queue. They do, however, use the global
     462              :     // semaphore in S3Bucket to limit the total number of concurrent operations.
     463              :     //
     464              : 
     465              :     /// Download index file
     466           20 :     pub async fn download_index_file(
     467           20 :         &self,
     468           20 :         cancel: &CancellationToken,
     469           20 :     ) -> Result<MaybeDeletedIndexPart, DownloadError> {
     470           20 :         let _unfinished_gauge_guard = self.metrics.call_begin(
     471           20 :             &RemoteOpFileKind::Index,
     472           20 :             &RemoteOpKind::Download,
     473           20 :             crate::metrics::RemoteTimelineClientMetricsCallTrackSize::DontTrackSize {
     474           20 :                 reason: "no need for a downloads gauge",
     475           20 :             },
     476           20 :         );
     477              : 
     478           20 :         let (index_part, _index_generation) = download::download_index_part(
     479           20 :             &self.storage_impl,
     480           20 :             &self.tenant_shard_id,
     481           20 :             &self.timeline_id,
     482           20 :             self.generation,
     483           20 :             cancel,
     484           20 :         )
     485           20 :         .measure_remote_op(
     486           20 :             RemoteOpFileKind::Index,
     487           20 :             RemoteOpKind::Download,
     488           20 :             Arc::clone(&self.metrics),
     489           20 :         )
     490           90 :         .await?;
     491              : 
     492           20 :         if index_part.deleted_at.is_some() {
     493            0 :             Ok(MaybeDeletedIndexPart::Deleted(index_part))
     494              :         } else {
     495           20 :             Ok(MaybeDeletedIndexPart::IndexPart(index_part))
     496              :         }
     497           20 :     }
     498              : 
     499              :     /// Download a (layer) file from `path`, into local filesystem.
     500              :     ///
     501              :     /// 'layer_metadata' is the metadata from the remote index file.
     502              :     ///
     503              :     /// On success, returns the size of the downloaded file.
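    ///
    /// A hypothetical call; `layer_metadata` comes from the remote [`IndexPart`]:
    /// ```ignore
    /// let bytes_downloaded = client
    ///     .download_layer_file(&layer_file_name, &layer_metadata, &cancel, &ctx)
    ///     .await?;
    /// ```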
     504            6 :     pub async fn download_layer_file(
     505            6 :         &self,
     506            6 :         layer_file_name: &LayerName,
     507            6 :         layer_metadata: &LayerFileMetadata,
     508            6 :         cancel: &CancellationToken,
     509            6 :         ctx: &RequestContext,
     510            6 :     ) -> anyhow::Result<u64> {
     511            6 :         let downloaded_size = {
     512            6 :             let _unfinished_gauge_guard = self.metrics.call_begin(
     513            6 :                 &RemoteOpFileKind::Layer,
     514            6 :                 &RemoteOpKind::Download,
     515            6 :                 crate::metrics::RemoteTimelineClientMetricsCallTrackSize::DontTrackSize {
     516            6 :                     reason: "no need for a downloads gauge",
     517            6 :                 },
     518            6 :             );
     519            6 :             download::download_layer_file(
     520            6 :                 self.conf,
     521            6 :                 &self.storage_impl,
     522            6 :                 self.tenant_shard_id,
     523            6 :                 self.timeline_id,
     524            6 :                 layer_file_name,
     525            6 :                 layer_metadata,
     526            6 :                 cancel,
     527            6 :                 ctx,
     528            6 :             )
     529            6 :             .measure_remote_op(
     530            6 :                 RemoteOpFileKind::Layer,
     531            6 :                 RemoteOpKind::Download,
     532            6 :                 Arc::clone(&self.metrics),
     533            6 :             )
     534           88 :             .await?
     535              :         };
     536              : 
     537            6 :         REMOTE_ONDEMAND_DOWNLOADED_LAYERS.inc();
     538            6 :         REMOTE_ONDEMAND_DOWNLOADED_BYTES.inc_by(downloaded_size);
     539            6 : 
     540            6 :         Ok(downloaded_size)
     541            6 :     }
     542              : 
     543              :     //
     544              :     // Upload operations.
     545              :     //
     546              : 
     547              :     /// Launch an index-file upload operation in the background, with
     548              :     /// fully updated metadata.
     549              :     ///
     550              :     /// This should only be used to upload initial metadata to remote storage.
     551              :     ///
     552              :     /// The upload will be added to the queue immediately, but it
     553              :     /// won't be performed until all previously scheduled layer file
     554              :     /// upload operations have completed successfully.  This is to
     555              :     /// ensure that when the index file claims that layers X, Y and Z
     556              :     /// exist in remote storage, they really do. To wait for the upload
     557              :     /// to complete, use `wait_completion`.
     558              :     ///
     559              :     /// If there were any changes to the list of files, i.e. if any
     560              :     /// layer file uploads were scheduled, since the last index file
     561              :     /// upload, those will be included too.
     562          220 :     pub fn schedule_index_upload_for_full_metadata_update(
     563          220 :         self: &Arc<Self>,
     564          220 :         metadata: &TimelineMetadata,
     565          220 :     ) -> anyhow::Result<()> {
     566          220 :         let mut guard = self.upload_queue.lock().unwrap();
     567          220 :         let upload_queue = guard.initialized_mut()?;
     568              : 
     569              :         // As documented in the struct definition, it's ok for latest_metadata to be
     570              :         // ahead of what's _actually_ on the remote during index upload.
     571          220 :         upload_queue.latest_metadata = metadata.clone();
     572          220 : 
     573          220 :         self.schedule_index_upload(upload_queue);
     574          220 : 
     575          220 :         Ok(())
     576          220 :     }
     577              : 
     578              :     /// Launch an index-file upload operation in the background, with only parts of the metadata
     579              :     /// updated.
     580              :     ///
     581              :     /// This is the regular way of updating metadata on layer flushes or GC.
     582              :     ///
     583              :     /// Using this lighter update mechanism allows for reparenting and detaching without changes to
     584              :     /// `index_part.json`, while being clearer about which values update regularly.
     585          928 :     pub(crate) fn schedule_index_upload_for_metadata_update(
     586          928 :         self: &Arc<Self>,
     587          928 :         update: &MetadataUpdate,
     588          928 :     ) -> anyhow::Result<()> {
     589          928 :         let mut guard = self.upload_queue.lock().unwrap();
     590          928 :         let upload_queue = guard.initialized_mut()?;
     591              : 
     592          928 :         upload_queue.latest_metadata.apply(update);
     593          928 : 
     594          928 :         self.schedule_index_upload(upload_queue);
     595          928 : 
     596          928 :         Ok(())
     597          928 :     }
     598              : 
     599              :     ///
     600              :     /// Launch an index-file upload operation in the background, if necessary.
     601              :     ///
     602              :     /// Use this function to schedule the update of the index file after
     603              :     /// scheduling file uploads or deletions. If no file uploads or deletions
     604              :     /// have been scheduled since the last index file upload, this does
     605              :     /// nothing.
     606              :     ///
     607              :     /// Like schedule_index_upload_for_metadata_update(), this merely adds
     608              :     /// the upload to the upload queue and returns quickly.
     609          336 :     pub fn schedule_index_upload_for_file_changes(self: &Arc<Self>) -> anyhow::Result<()> {
     610          336 :         let mut guard = self.upload_queue.lock().unwrap();
     611          336 :         let upload_queue = guard.initialized_mut()?;
     612              : 
     613          336 :         if upload_queue.latest_files_changes_since_metadata_upload_scheduled > 0 {
     614            0 :             self.schedule_index_upload(upload_queue);
     615          336 :         }
     616              : 
     617          336 :         Ok(())
     618          336 :     }
     619              : 
     620              :     /// Launch an index-file upload operation in the background (internal function)
     621         1176 :     fn schedule_index_upload(self: &Arc<Self>, upload_queue: &mut UploadQueueInitialized) {
     622         1176 :         let disk_consistent_lsn = upload_queue.latest_metadata.disk_consistent_lsn();
     623         1176 : 
     624         1176 :         info!(
     625            0 :             "scheduling metadata upload up to consistent LSN {disk_consistent_lsn} with {} files ({} changed)",
     626            0 :             upload_queue.latest_files.len(),
     627              :             upload_queue.latest_files_changes_since_metadata_upload_scheduled,
     628              :         );
     629              : 
     630         1176 :         let index_part = IndexPart::from(&*upload_queue);
     631         1176 :         let op = UploadOp::UploadMetadata(index_part, disk_consistent_lsn);
     632         1176 :         self.metric_begin(&op);
     633         1176 :         upload_queue.queued_operations.push_back(op);
     634         1176 :         upload_queue.latest_files_changes_since_metadata_upload_scheduled = 0;
     635         1176 : 
     636         1176 :         // Launch the task immediately, if possible
     637         1176 :         self.launch_queued_tasks(upload_queue);
     638         1176 :     }
     639              : 
     640            0 :     pub(crate) async fn schedule_reparenting_and_wait(
     641            0 :         self: &Arc<Self>,
     642            0 :         new_parent: &TimelineId,
     643            0 :     ) -> anyhow::Result<()> {
     644              :         // FIXME: because of how Timeline::schedule_uploads works when called from layer flushing
     645              :         // and reads the in-memory part, we cannot do the detaching like this
     646            0 :         let receiver = {
     647            0 :             let mut guard = self.upload_queue.lock().unwrap();
     648            0 :             let upload_queue = guard.initialized_mut()?;
     649              : 
     650            0 :             upload_queue.latest_metadata.reparent(new_parent);
     651            0 : 
     652            0 :             self.schedule_index_upload(upload_queue);
     653            0 : 
     654            0 :             self.schedule_barrier0(upload_queue)
     655            0 :         };
     656            0 : 
     657            0 :         Self::wait_completion0(receiver).await
     658            0 :     }
     659              : 
     660              :     /// Schedules uploading a new version of `index_part.json` with the given layers added,
     661              :     /// detaching from the ancestor, and waits for it to complete.
     662              :     ///
     663              :     /// This is used with `Timeline::detach_ancestor` functionality.
     664            0 :     pub(crate) async fn schedule_adding_existing_layers_to_index_detach_and_wait(
     665            0 :         self: &Arc<Self>,
     666            0 :         layers: &[Layer],
     667            0 :         adopted: (TimelineId, Lsn),
     668            0 :     ) -> anyhow::Result<()> {
     669            0 :         let barrier = {
     670            0 :             let mut guard = self.upload_queue.lock().unwrap();
     671            0 :             let upload_queue = guard.initialized_mut()?;
     672              : 
     673            0 :             upload_queue
     674            0 :                 .latest_metadata
     675            0 :                 .detach_from_ancestor(&adopted.0, &adopted.1);
     676              : 
     677            0 :             for layer in layers {
     678            0 :                 upload_queue
     679            0 :                     .latest_files
     680            0 :                     .insert(layer.layer_desc().layer_name(), layer.metadata());
     681            0 :             }
     682              : 
     683            0 :             self.schedule_index_upload(upload_queue);
     684            0 : 
     685            0 :             let barrier = self.schedule_barrier0(upload_queue);
     686            0 :             self.launch_queued_tasks(upload_queue);
     687            0 :             barrier
     688            0 :         };
     689            0 : 
     690            0 :         Self::wait_completion0(barrier).await
     691            0 :     }
     692              : 
     693              :     /// Launch an upload operation in the background; the file is added to be included in the next
     694              :     /// `index_part.json` upload.
     695          950 :     pub(crate) fn schedule_layer_file_upload(
     696          950 :         self: &Arc<Self>,
     697          950 :         layer: ResidentLayer,
     698          950 :     ) -> anyhow::Result<()> {
     699          950 :         let mut guard = self.upload_queue.lock().unwrap();
     700          950 :         let upload_queue = guard.initialized_mut()?;
     701              : 
     702          950 :         self.schedule_layer_file_upload0(upload_queue, layer);
     703          950 :         self.launch_queued_tasks(upload_queue);
     704          950 :         Ok(())
     705          950 :     }
     706              : 
     707         1174 :     fn schedule_layer_file_upload0(
     708         1174 :         self: &Arc<Self>,
     709         1174 :         upload_queue: &mut UploadQueueInitialized,
     710         1174 :         layer: ResidentLayer,
     711         1174 :     ) {
     712         1174 :         let metadata = layer.metadata();
     713         1174 : 
     714         1174 :         upload_queue
     715         1174 :             .latest_files
     716         1174 :             .insert(layer.layer_desc().layer_name(), metadata.clone());
     717         1174 :         upload_queue.latest_files_changes_since_metadata_upload_scheduled += 1;
     718         1174 : 
     719         1174 :         info!(
     720              :             gen=?metadata.generation,
     721              :             shard=?metadata.shard,
     722            0 :             "scheduled layer file upload {layer}",
     723              :         );
     724              : 
     725         1174 :         let op = UploadOp::UploadLayer(layer, metadata);
     726         1174 :         self.metric_begin(&op);
     727         1174 :         upload_queue.queued_operations.push_back(op);
     728         1174 :     }
     729              : 
     730              :     /// Launch a delete operation in the background.
     731              :     ///
     732              :     /// The operation does not modify local filesystem state.
     733              :     ///
     734              :     /// Note: This schedules an index file upload before the deletions.  The
     735              :     /// deletions won't actually be performed until all previously scheduled
     736              :     /// upload operations, and the index file upload, have completed
     737              :     /// successfully.
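    ///
    /// Hypothetical usage, assuming `names` is a `Vec<LayerName>`:
    /// ```ignore
    /// client.schedule_layer_file_deletion(&names)?;
    /// client.wait_completion().await?; // optional: wait for the deletion to land
    /// ```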
     738            8 :     pub fn schedule_layer_file_deletion(
     739            8 :         self: &Arc<Self>,
     740            8 :         names: &[LayerName],
     741            8 :     ) -> anyhow::Result<()> {
     742            8 :         let mut guard = self.upload_queue.lock().unwrap();
     743            8 :         let upload_queue = guard.initialized_mut()?;
     744              : 
     745            8 :         let with_metadata =
     746            8 :             self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names.iter().cloned());
     747            8 : 
     748            8 :         self.schedule_deletion_of_unlinked0(upload_queue, with_metadata);
     749            8 : 
     750            8 :         // Launch the tasks immediately, if possible
     751            8 :         self.launch_queued_tasks(upload_queue);
     752            8 :         Ok(())
     753            8 :     }
     754              : 
     755              :     /// Unlinks the layer files from `index_part.json` but does not yet schedule deletion for the
     756              :     /// layer files, leaving them dangling.
     757              :     ///
     758              :     /// The files will be leaked in remote storage unless [`Self::schedule_deletion_of_unlinked`]
     759              :     /// is invoked on them.
     760            2 :     pub(crate) fn schedule_gc_update(self: &Arc<Self>, gc_layers: &[Layer]) -> anyhow::Result<()> {
     761            2 :         let mut guard = self.upload_queue.lock().unwrap();
     762            2 :         let upload_queue = guard.initialized_mut()?;
     763              : 
     764              :         // Just forget the return value; after uploading the next index_part.json, we can consider
     765              :         // the layer files as "dangling". This is fine; in the worst case we create work for the
     766              :         // scrubber.
     767              : 
     768            2 :         let names = gc_layers.iter().map(|x| x.layer_desc().layer_name());
     769            2 : 
     770            2 :         self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names);
     771            2 : 
     772            2 :         self.launch_queued_tasks(upload_queue);
     773            2 : 
     774            2 :         Ok(())
     775            2 :     }
     776              : 
     777              :     /// Update the remote index file, removing the to-be-deleted files from the index,
     778              :     /// allowing scheduling of actual deletions later.
     779           34 :     fn schedule_unlinking_of_layers_from_index_part0<I>(
     780           34 :         self: &Arc<Self>,
     781           34 :         upload_queue: &mut UploadQueueInitialized,
     782           34 :         names: I,
     783           34 :     ) -> Vec<(LayerName, LayerFileMetadata)>
     784           34 :     where
     785           34 :         I: IntoIterator<Item = LayerName>,
     786           34 :     {
     787           34 :         // Decorate our list of names with each name's metadata, dropping
     788           34 :         // names that are unexpectedly missing from our metadata.  This metadata
     789           34 :         // is later used when physically deleting layers, to construct key paths.
     790           34 :         let with_metadata: Vec<_> = names
     791           34 :             .into_iter()
     792          266 :             .filter_map(|name| {
     793          266 :                 let meta = upload_queue.latest_files.remove(&name);
     794              : 
     795          266 :                 if let Some(meta) = meta {
     796          266 :                     upload_queue.latest_files_changes_since_metadata_upload_scheduled += 1;
     797          266 :                     Some((name, meta))
     798              :                 } else {
     799              :                     // This can only happen if we forgot to schedule the file upload
     800              :                     // before scheduling the delete. Log it because it is a rare/strange
     801              :                     // situation, and in case something is misbehaving, we'd like to know which
     802              :                     // layers experienced this.
     803            0 :                     info!("Deleting layer {name} not found in latest_files list, never uploaded?");
     804            0 :                     None
     805              :                 }
     806          266 :             })
     807           34 :             .collect();
     808              : 
     809              :         #[cfg(feature = "testing")]
     810          300 :         for (name, metadata) in &with_metadata {
     811          266 :             let gen = metadata.generation;
     812          266 :             if let Some(unexpected) = upload_queue.dangling_files.insert(name.to_owned(), gen) {
     813            0 :                 if unexpected == gen {
     814            0 :                     tracing::error!("{name} was unlinked twice with same generation");
     815              :                 } else {
     816            0 :                     tracing::error!("{name} was unlinked twice with different generations {gen:?} and {unexpected:?}");
     817              :                 }
     818          266 :             }
     819              :         }
     820              : 
     821              :         // after unlinking files from the upload_queue.latest_files we must always schedule an
     822              :         // index_part update, because that needs to be uploaded before we can actually delete the
     823              :         // files.
     824           34 :         if upload_queue.latest_files_changes_since_metadata_upload_scheduled > 0 {
     825           28 :             self.schedule_index_upload(upload_queue);
     826           28 :         }
     827              : 
     828           34 :         with_metadata
     829           34 :     }
     830              : 
     831              :     /// Schedules deletion for layer files which have previously been unlinked from the
     832              :     /// `index_part.json` with [`Self::schedule_gc_update`] or [`Self::schedule_compaction_update`].
     833          270 :     pub(crate) fn schedule_deletion_of_unlinked(
     834          270 :         self: &Arc<Self>,
     835          270 :         layers: Vec<(LayerName, LayerFileMetadata)>,
     836          270 :     ) -> anyhow::Result<()> {
     837          270 :         let mut guard = self.upload_queue.lock().unwrap();
     838          270 :         let upload_queue = guard.initialized_mut()?;
     839              : 
     840          270 :         self.schedule_deletion_of_unlinked0(upload_queue, layers);
     841          270 :         self.launch_queued_tasks(upload_queue);
     842          270 :         Ok(())
     843          270 :     }
     844              : 
     845          278 :     fn schedule_deletion_of_unlinked0(
     846          278 :         self: &Arc<Self>,
     847          278 :         upload_queue: &mut UploadQueueInitialized,
     848          278 :         mut with_metadata: Vec<(LayerName, LayerFileMetadata)>,
     849          278 :     ) {
     850          278 :         // Filter out any layers which were not created by this tenant shard.  These are
     851          278 :         // layers that originate from some ancestor shard after a split, and may still
     852          278 :         // be referenced by other shards. We are free to delete them locally and remove
     853          278 :         // them from our index (and would have already done so when we reach this point
     854          278 :         // in the code), but we may not delete them remotely.
     855          278 :         with_metadata.retain(|(name, meta)| {
     856          272 :             let retain = meta.shard.shard_number == self.tenant_shard_id.shard_number
     857          272 :                 && meta.shard.shard_count == self.tenant_shard_id.shard_count;
     858          272 :             if !retain {
     859            0 :                 tracing::debug!(
     860            0 :                     "Skipping deletion of ancestor-shard layer {name}, from shard {}",
     861              :                     meta.shard
     862              :                 );
     863          272 :             }
     864          272 :             retain
     865          278 :         });
     866              : 
     867          550 :         for (name, meta) in &with_metadata {
     868          272 :             info!(
     869            0 :                 "scheduling deletion of layer {}{} (shard {})",
     870            0 :                 name,
     871            0 :                 meta.generation.get_suffix(),
     872              :                 meta.shard
     873              :             );
     874              :         }
     875              : 
     876              :         #[cfg(feature = "testing")]
     877          550 :         for (name, meta) in &with_metadata {
     878          272 :             let gen = meta.generation;
     879          272 :             match upload_queue.dangling_files.remove(name) {
     880          266 :                 Some(same) if same == gen => { /* expected */ }
     881            0 :                 Some(other) => {
     882            0 :                     tracing::error!("{name} was unlinked with {other:?} but deleted with {gen:?}");
     883              :                 }
     884              :                 None => {
     885            6 :                     tracing::error!("{name} was unlinked but was not dangling");
     886              :                 }
     887              :             }
     888              :         }
     889              : 
     890              :         // schedule the actual deletions
     891          278 :         if with_metadata.is_empty() {
     892              :             // avoid scheduling the op & bumping the metric
     893            6 :             return;
     894          272 :         }
     895          272 :         let op = UploadOp::Delete(Delete {
     896          272 :             layers: with_metadata,
     897          272 :         });
     898          272 :         self.metric_begin(&op);
     899          272 :         upload_queue.queued_operations.push_back(op);
     900          278 :     }
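                       :
                       :     // The ownership check used in the `retain` above, as a standalone sketch:
                       :     // a layer may only be deleted remotely if its metadata records the same
                       :     // shard identity as this client's tenant shard.
                       :     //
                       :     //     fn owned_by_this_shard(meta: &LayerFileMetadata, me: &TenantShardId) -> bool {
                       :     //         meta.shard.shard_number == me.shard_number
                       :     //             && meta.shard.shard_count == me.shard_count
                       :     //     }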
     901              : 
     902              :     /// Schedules a compaction update to the remote `index_part.json`.
     903              :     ///
      904              :     /// `compacted_from` represents the names of the L0 layers that have been compacted into the `compacted_to` L1 layers.
     905           24 :     pub(crate) fn schedule_compaction_update(
     906           24 :         self: &Arc<Self>,
     907           24 :         compacted_from: &[Layer],
     908           24 :         compacted_to: &[ResidentLayer],
     909           24 :     ) -> anyhow::Result<()> {
     910           24 :         let mut guard = self.upload_queue.lock().unwrap();
     911           24 :         let upload_queue = guard.initialized_mut()?;
     912              : 
     913          248 :         for layer in compacted_to {
     914          224 :             self.schedule_layer_file_upload0(upload_queue, layer.clone());
     915          224 :         }
     916              : 
     917          262 :         let names = compacted_from.iter().map(|x| x.layer_desc().layer_name());
     918           24 : 
     919           24 :         self.schedule_unlinking_of_layers_from_index_part0(upload_queue, names);
     920           24 :         self.launch_queued_tasks(upload_queue);
     921           24 : 
     922           24 :         Ok(())
     923           24 :     }
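                       :
                       :     // Illustrative sketch (hypothetical caller): after compacting L0 layers
                       :     // into L1 layers, both sides are handed over in a single call, so the new
                       :     // layers' uploads and the old layers' unlinking are scheduled under one
                       :     // acquisition of the upload queue lock:
                       :     //
                       :     //     let compacted_from: Vec<Layer> = /* inputs of the compaction */;
                       :     //     let compacted_to: Vec<ResidentLayer> = /* outputs of the compaction */;
                       :     //     client.schedule_compaction_update(&compacted_from, &compacted_to)?;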
     924              : 
     925              :     /// Wait for all previously scheduled uploads/deletions to complete
     926          110 :     pub(crate) async fn wait_completion(self: &Arc<Self>) -> anyhow::Result<()> {
     927          110 :         let receiver = {
     928          110 :             let mut guard = self.upload_queue.lock().unwrap();
     929          110 :             let upload_queue = guard.initialized_mut()?;
     930          110 :             self.schedule_barrier0(upload_queue)
     931          110 :         };
     932          110 : 
     933          110 :         Self::wait_completion0(receiver).await
     934          110 :     }
     935              : 
     936          110 :     async fn wait_completion0(
     937          110 :         mut receiver: tokio::sync::watch::Receiver<()>,
     938          110 :     ) -> anyhow::Result<()> {
     939          110 :         if receiver.changed().await.is_err() {
     940            0 :             anyhow::bail!("wait_completion aborted because upload queue was stopped");
     941          110 :         }
     942          110 : 
     943          110 :         Ok(())
     944          110 :     }
     945              : 
     946            6 :     pub(crate) fn schedule_barrier(self: &Arc<Self>) -> anyhow::Result<()> {
     947            6 :         let mut guard = self.upload_queue.lock().unwrap();
     948            6 :         let upload_queue = guard.initialized_mut()?;
     949            6 :         self.schedule_barrier0(upload_queue);
     950            6 :         Ok(())
     951            6 :     }
     952              : 
     953          116 :     fn schedule_barrier0(
     954          116 :         self: &Arc<Self>,
     955          116 :         upload_queue: &mut UploadQueueInitialized,
     956          116 :     ) -> tokio::sync::watch::Receiver<()> {
     957          116 :         let (sender, receiver) = tokio::sync::watch::channel(());
     958          116 :         let barrier_op = UploadOp::Barrier(sender);
     959          116 : 
     960          116 :         upload_queue.queued_operations.push_back(barrier_op);
     961          116 :         // Don't count this kind of operation!
     962          116 : 
     963          116 :         // Launch the task immediately, if possible
     964          116 :         self.launch_queued_tasks(upload_queue);
     965          116 : 
     966          116 :         receiver
     967          116 :     }
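                       :
                       :     // The barrier mechanism in miniature (standalone sketch of the same idea):
                       :     // a `tokio::sync::watch` channel is enqueued; when the queue reaches it,
                       :     // `send_replace(())` fires and every waiter's `changed()` resolves. If the
                       :     // sender is dropped instead (queue stopped), `changed()` returns an error.
                       :     //
                       :     //     let (sender, mut receiver) = tokio::sync::watch::channel(());
                       :     //     tokio::spawn(async move {
                       :     //         // ... all operations scheduled before the barrier complete here ...
                       :     //         sender.send_replace(());
                       :     //     });
                       :     //     receiver.changed().await?; // resolves once the barrier is processed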
     968              : 
     969              :     /// Wait for all previously scheduled operations to complete, and then stop.
     970              :     ///
     971              :     /// Not cancellation safe
     972            6 :     pub(crate) async fn shutdown(self: &Arc<Self>) {
      973            6 :         // On cancellation the queue is left in an awkward state: it refuses new operations,
      974            6 :         // but a proper stop has not yet been performed. On cancel, the original task or some
      975            6 :         // later task must call `stop` or `shutdown`.
     976            6 :         let sg = scopeguard::guard((), |_| {
     977            0 :             tracing::error!("RemoteTimelineClient::shutdown was cancelled; this should not happen, do not make this into an allowed_error")
     978            6 :         });
     979              : 
     980            6 :         let fut = {
     981            6 :             let mut guard = self.upload_queue.lock().unwrap();
     982            6 :             let upload_queue = match &mut *guard {
     983            0 :                 UploadQueue::Stopped(_) => return,
     984              :                 UploadQueue::Uninitialized => {
     985              :                     // transition into Stopped state
     986            0 :                     self.stop_impl(&mut guard);
     987            0 :                     return;
     988              :                 }
     989            6 :                 UploadQueue::Initialized(ref mut init) => init,
     990            6 :             };
     991            6 : 
     992            6 :             // if the queue is already stuck due to a shutdown operation which was cancelled, then
     993            6 :             // just don't add more of these as they would never complete.
     994            6 :             //
     995            6 :             // TODO: if launch_queued_tasks were to be refactored to accept a &mut UploadQueue
     996            6 :             // in every place we would not have to jump through this hoop, and this method could be
     997            6 :             // made cancellable.
     998            6 :             if !upload_queue.shutting_down {
     999            6 :                 upload_queue.shutting_down = true;
    1000            6 :                 upload_queue.queued_operations.push_back(UploadOp::Shutdown);
     1001            6 :                 // this operation is not counted, similar to Barrier
    1002            6 : 
    1003            6 :                 self.launch_queued_tasks(upload_queue);
    1004            6 :             }
    1005              : 
    1006            6 :             upload_queue.shutdown_ready.clone().acquire_owned()
    1007              :         };
    1008              : 
    1009            6 :         let res = fut.await;
    1010              : 
    1011            6 :         scopeguard::ScopeGuard::into_inner(sg);
    1012            6 : 
    1013            6 :         match res {
    1014            0 :             Ok(_permit) => unreachable!("shutdown_ready should not have been added permits"),
    1015            6 :             Err(_closed) => {
    1016            6 :                 // expected
    1017            6 :             }
    1018            6 :         }
    1019            6 : 
    1020            6 :         self.stop();
    1021            6 :     }
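                       :
                       :     // The cancellation tripwire used above, as a standalone sketch: a
                       :     // `scopeguard::guard` runs its closure on drop unless disarmed with
                       :     // `ScopeGuard::into_inner`, so cancellation (which drops the future, and
                       :     // the guard with it) is reported loudly while the success path stays quiet.
                       :     // `do_async_work` is a hypothetical stand-in:
                       :     //
                       :     //     let sg = scopeguard::guard((), |_| {
                       :     //         tracing::error!("future was dropped before completing");
                       :     //     });
                       :     //     do_async_work().await;                  // cancellation here fires the guard
                       :     //     scopeguard::ScopeGuard::into_inner(sg); // success: disarm, closure never runs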
    1022              : 
    1023              :     /// Set the deleted_at field in the remote index file.
    1024              :     ///
    1025              :     /// This fails if the upload queue has not been `stop()`ed.
    1026              :     ///
    1027              :     /// The caller is responsible for calling `stop()` AND for waiting
    1028              :     /// for any ongoing upload tasks to finish after `stop()` has succeeded.
    1029              :     /// Check method [`RemoteTimelineClient::stop`] for details.
    1030            0 :     #[instrument(skip_all)]
    1031              :     pub(crate) async fn persist_index_part_with_deleted_flag(
    1032              :         self: &Arc<Self>,
    1033              :     ) -> Result<(), PersistIndexPartWithDeletedFlagError> {
    1034              :         let index_part_with_deleted_at = {
    1035              :             let mut locked = self.upload_queue.lock().unwrap();
    1036              : 
     1037              :             // We must be in the stopped state: otherwise an in-progress index part
     1038              :             // upload could overwrite the file, clobbering the deleted_at flag that
     1039              :             // we are going to set below.
    1040              :             let stopped = locked.stopped_mut()?;
    1041              : 
    1042              :             match stopped.deleted_at {
    1043              :                 SetDeletedFlagProgress::NotRunning => (), // proceed
    1044              :                 SetDeletedFlagProgress::InProgress(at) => {
    1045              :                     return Err(PersistIndexPartWithDeletedFlagError::AlreadyInProgress(at));
    1046              :                 }
    1047              :                 SetDeletedFlagProgress::Successful(at) => {
    1048              :                     return Err(PersistIndexPartWithDeletedFlagError::AlreadyDeleted(at));
    1049              :                 }
    1050              :             };
    1051              :             let deleted_at = Utc::now().naive_utc();
    1052              :             stopped.deleted_at = SetDeletedFlagProgress::InProgress(deleted_at);
    1053              : 
    1054              :             let mut index_part = IndexPart::from(&stopped.upload_queue_for_deletion);
    1055              :             index_part.deleted_at = Some(deleted_at);
    1056              :             index_part
    1057              :         };
    1058              : 
    1059            0 :         let undo_deleted_at = scopeguard::guard(Arc::clone(self), |self_clone| {
    1060            0 :             let mut locked = self_clone.upload_queue.lock().unwrap();
    1061            0 :             let stopped = locked
    1062            0 :                 .stopped_mut()
    1063            0 :                 .expect("there's no way out of Stopping, and we checked it's Stopping above");
    1064            0 :             stopped.deleted_at = SetDeletedFlagProgress::NotRunning;
    1065            0 :         });
    1066              : 
    1067              :         pausable_failpoint!("persist_deleted_index_part");
    1068              : 
    1069              :         backoff::retry(
    1070            0 :             || {
    1071            0 :                 upload::upload_index_part(
    1072            0 :                     &self.storage_impl,
    1073            0 :                     &self.tenant_shard_id,
    1074            0 :                     &self.timeline_id,
    1075            0 :                     self.generation,
    1076            0 :                     &index_part_with_deleted_at,
    1077            0 :                     &self.cancel,
    1078            0 :                 )
    1079            0 :             },
    1080            0 :             |_e| false,
    1081              :             1,
    1082              :             // have just a couple of attempts
     1083              :             // when executed as part of timeline deletion this happens in the context of an API call
    1084              :             // when executed as part of tenant deletion this happens in the background
    1085              :             2,
    1086              :             "persist_index_part_with_deleted_flag",
    1087              :             &self.cancel,
    1088              :         )
    1089              :         .await
    1090            0 :         .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
    1091            0 :         .and_then(|x| x)?;
    1092              : 
    1093              :         // all good, disarm the guard and mark as success
    1094              :         ScopeGuard::into_inner(undo_deleted_at);
    1095              :         {
    1096              :             let mut locked = self.upload_queue.lock().unwrap();
    1097              : 
    1098              :             let stopped = locked
    1099              :                 .stopped_mut()
    1100              :                 .expect("there's no way out of Stopping, and we checked it's Stopping above");
    1101              :             stopped.deleted_at = SetDeletedFlagProgress::Successful(
    1102              :                 index_part_with_deleted_at
    1103              :                     .deleted_at
    1104              :                     .expect("we set it above"),
    1105              :             );
    1106              :         }
    1107              : 
    1108              :         Ok(())
    1109              :     }
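                       :
                       :     // Illustrative call order during timeline deletion (hypothetical caller;
                       :     // see the doc comment above for the stop/wait prerequisites):
                       :     //
                       :     //     client.stop();
                       :     //     task_mgr::shutdown_tasks(
                       :     //         Some(TaskKind::RemoteUploadTask),
                       :     //         Some(tenant_shard_id),
                       :     //         Some(timeline_id),
                       :     //     )
                       :     //     .await;
                       :     //     client.persist_index_part_with_deleted_flag().await?;
                       :     //     client.delete_all().await?;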
    1110              : 
    1111            0 :     pub(crate) async fn preserve_initdb_archive(
    1112            0 :         self: &Arc<Self>,
    1113            0 :         tenant_id: &TenantId,
    1114            0 :         timeline_id: &TimelineId,
    1115            0 :         cancel: &CancellationToken,
    1116            0 :     ) -> anyhow::Result<()> {
    1117            0 :         backoff::retry(
    1118            0 :             || async {
    1119            0 :                 upload::preserve_initdb_archive(&self.storage_impl, tenant_id, timeline_id, cancel)
    1120            0 :                     .await
    1121            0 :             },
    1122            0 :             TimeoutOrCancel::caused_by_cancel,
    1123            0 :             FAILED_DOWNLOAD_WARN_THRESHOLD,
    1124            0 :             FAILED_REMOTE_OP_RETRIES,
    1125            0 :             "preserve_initdb_tar_zst",
    1126            0 :             &cancel.clone(),
    1127            0 :         )
    1128            0 :         .await
    1129            0 :         .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
    1130            0 :         .and_then(|x| x)
    1131            0 :         .context("backing up initdb archive")?;
    1132            0 :         Ok(())
    1133            0 :     }
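                       :
                       :     // Note on the `.ok_or_else(..).and_then(|x| x)` idiom used here and in the
                       :     // other retry call sites: `backoff::retry(..).await` yields an
                       :     // `Option<Result<T>>`, where `None` means the retries were cancelled. The
                       :     // two combinators flatten that into a single `Result<T>`:
                       :     //
                       :     //     retry_output                                                   // Option<Result<T>>
                       :     //         .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel)) // None => cancel error
                       :     //         .and_then(|x| x)                                            // Some(r) => r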
    1134              : 
    1135              :     /// Uploads the given layer **without** adding it to be part of a future `index_part.json` upload.
    1136              :     ///
    1137              :     /// This is not normally needed.
    1138            0 :     pub(crate) async fn upload_layer_file(
    1139            0 :         self: &Arc<Self>,
    1140            0 :         uploaded: &ResidentLayer,
    1141            0 :         cancel: &CancellationToken,
    1142            0 :     ) -> anyhow::Result<()> {
    1143            0 :         let remote_path = remote_layer_path(
    1144            0 :             &self.tenant_shard_id.tenant_id,
    1145            0 :             &self.timeline_id,
    1146            0 :             self.tenant_shard_id.to_index(),
    1147            0 :             &uploaded.layer_desc().layer_name(),
    1148            0 :             uploaded.metadata().generation,
    1149            0 :         );
    1150            0 : 
    1151            0 :         backoff::retry(
    1152            0 :             || async {
    1153            0 :                 upload::upload_timeline_layer(
    1154            0 :                     &self.storage_impl,
    1155            0 :                     uploaded.local_path(),
    1156            0 :                     &remote_path,
    1157            0 :                     uploaded.metadata().file_size(),
    1158            0 :                     cancel,
    1159            0 :                 )
    1160            0 :                 .await
    1161            0 :             },
    1162            0 :             TimeoutOrCancel::caused_by_cancel,
    1163            0 :             FAILED_UPLOAD_WARN_THRESHOLD,
    1164            0 :             FAILED_REMOTE_OP_RETRIES,
    1165            0 :             "upload a layer without adding it to latest files",
    1166            0 :             cancel,
    1167            0 :         )
    1168            0 :         .await
    1169            0 :         .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
    1170            0 :         .and_then(|x| x)
    1171            0 :         .context("upload a layer without adding it to latest files")
    1172            0 :     }
    1173              : 
     1174              :     /// Copies the existing remote layer `adopted` to the remote path of `adopted_as`. The layer is
    1175              :     /// not added to be part of a future `index_part.json` upload.
    1176            0 :     pub(crate) async fn copy_timeline_layer(
    1177            0 :         self: &Arc<Self>,
    1178            0 :         adopted: &Layer,
    1179            0 :         adopted_as: &Layer,
    1180            0 :         cancel: &CancellationToken,
    1181            0 :     ) -> anyhow::Result<()> {
    1182            0 :         let source_remote_path = remote_layer_path(
    1183            0 :             &self.tenant_shard_id.tenant_id,
    1184            0 :             &adopted
    1185            0 :                 .get_timeline_id()
    1186            0 :                 .expect("Source timeline should be alive"),
    1187            0 :             self.tenant_shard_id.to_index(),
    1188            0 :             &adopted.layer_desc().layer_name(),
    1189            0 :             adopted.metadata().generation,
    1190            0 :         );
    1191            0 : 
    1192            0 :         let target_remote_path = remote_layer_path(
    1193            0 :             &self.tenant_shard_id.tenant_id,
    1194            0 :             &self.timeline_id,
    1195            0 :             self.tenant_shard_id.to_index(),
    1196            0 :             &adopted_as.layer_desc().layer_name(),
    1197            0 :             adopted_as.metadata().generation,
    1198            0 :         );
    1199            0 : 
    1200            0 :         backoff::retry(
    1201            0 :             || async {
    1202            0 :                 upload::copy_timeline_layer(
    1203            0 :                     &self.storage_impl,
    1204            0 :                     &source_remote_path,
    1205            0 :                     &target_remote_path,
    1206            0 :                     cancel,
    1207            0 :                 )
    1208            0 :                 .await
    1209            0 :             },
    1210            0 :             TimeoutOrCancel::caused_by_cancel,
    1211            0 :             FAILED_UPLOAD_WARN_THRESHOLD,
    1212            0 :             FAILED_REMOTE_OP_RETRIES,
    1213            0 :             "copy timeline layer",
    1214            0 :             cancel,
    1215            0 :         )
    1216            0 :         .await
    1217            0 :         .ok_or_else(|| anyhow::Error::new(TimeoutOrCancel::Cancel))
    1218            0 :         .and_then(|x| x)
    1219            0 :         .context("remote copy timeline layer")
    1220            0 :     }
    1221              : 
    1222            0 :     async fn flush_deletion_queue(&self) -> Result<(), DeletionQueueError> {
    1223            0 :         match tokio::time::timeout(
    1224            0 :             DELETION_QUEUE_FLUSH_TIMEOUT,
    1225            0 :             self.deletion_queue_client.flush_immediate(),
    1226            0 :         )
    1227            0 :         .await
    1228              :         {
    1229            0 :             Ok(result) => result,
    1230            0 :             Err(_timeout) => {
    1231            0 :                 // Flushing remote deletions is not mandatory: we flush here to make the system easier to test, and
    1232            0 :                 // to ensure that _usually_ objects are really gone after a DELETE is acked.  However, in case of deletion
    1233            0 :                 // queue issues (https://github.com/neondatabase/neon/issues/6440), we don't want to wait indefinitely here.
    1234            0 :                 tracing::warn!(
    1235            0 :                     "Timed out waiting for deletion queue flush, acking deletion anyway"
    1236              :                 );
    1237            0 :                 Ok(())
    1238              :             }
    1239              :         }
    1240            0 :     }
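                       :
                       :     // The bounded wait above in isolation: `tokio::time::timeout` turns "flush
                       :     // or wait forever" into "flush or give up at a deadline", and the timeout
                       :     // arm deliberately maps to `Ok(())` because the flush is best-effort:
                       :     //
                       :     //     match tokio::time::timeout(DELETION_QUEUE_FLUSH_TIMEOUT, flush_fut).await {
                       :     //         Ok(res) => res,          // flush finished (possibly with its own error)
                       :     //         Err(_elapsed) => Ok(()), // deadline hit: warn and carry on
                       :     //     }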
    1241              : 
     1242              :     /// Prerequisites: the UploadQueue should be in the stopped state, and deleted_at should be successfully set.
     1243              :     /// The function deletes layer files one by one, then lists the prefix to see if we leaked anything,
     1244              :     /// deletes any leaked files, and finally deletes the index file.
    1245            0 :     pub(crate) async fn delete_all(self: &Arc<Self>) -> anyhow::Result<()> {
    1246            0 :         debug_assert_current_span_has_tenant_and_timeline_id();
    1247              : 
    1248            0 :         let layers: Vec<RemotePath> = {
    1249            0 :             let mut locked = self.upload_queue.lock().unwrap();
    1250            0 :             let stopped = locked.stopped_mut()?;
    1251              : 
    1252            0 :             if !matches!(stopped.deleted_at, SetDeletedFlagProgress::Successful(_)) {
    1253            0 :                 anyhow::bail!("deleted_at is not set")
    1254            0 :             }
    1255            0 : 
    1256            0 :             debug_assert!(stopped.upload_queue_for_deletion.no_pending_work());
    1257              : 
    1258            0 :             stopped
    1259            0 :                 .upload_queue_for_deletion
    1260            0 :                 .latest_files
    1261            0 :                 .drain()
    1262            0 :                 .map(|(file_name, meta)| {
    1263            0 :                     remote_layer_path(
    1264            0 :                         &self.tenant_shard_id.tenant_id,
    1265            0 :                         &self.timeline_id,
    1266            0 :                         meta.shard,
    1267            0 :                         &file_name,
    1268            0 :                         meta.generation,
    1269            0 :                     )
    1270            0 :                 })
    1271            0 :                 .collect()
    1272            0 :         };
    1273            0 : 
    1274            0 :         let layer_deletion_count = layers.len();
    1275            0 :         self.deletion_queue_client.push_immediate(layers).await?;
    1276              : 
    1277              :         // Delete the initdb.tar.zst, which is not always present, but deletion attempts of
     1278              :         // nonexistent objects are not considered errors.
    1279            0 :         let initdb_path =
    1280            0 :             remote_initdb_archive_path(&self.tenant_shard_id.tenant_id, &self.timeline_id);
    1281            0 :         self.deletion_queue_client
    1282            0 :             .push_immediate(vec![initdb_path])
    1283            0 :             .await?;
    1284              : 
     1285              :         // Do not delete the index part yet; it is needed for a possible retry. If we removed it
     1286              :         // first and the retry arrived at a different pageserver, there would be no trace of it in remote storage.
    1287            0 :         let timeline_storage_path = remote_timeline_path(&self.tenant_shard_id, &self.timeline_id);
    1288            0 : 
    1289            0 :         // Execute all pending deletions, so that when we proceed to do a listing below, we aren't
    1290            0 :         // taking the burden of listing all the layers that we already know we should delete.
    1291            0 :         self.flush_deletion_queue().await?;
    1292              : 
    1293            0 :         let cancel = shutdown_token();
    1294              : 
    1295            0 :         let remaining = download_retry(
    1296            0 :             || async {
    1297            0 :                 self.storage_impl
    1298            0 :                     .list(
    1299            0 :                         Some(&timeline_storage_path),
    1300            0 :                         ListingMode::NoDelimiter,
    1301            0 :                         None,
    1302            0 :                         &cancel,
    1303            0 :                     )
    1304            0 :                     .await
    1305            0 :             },
    1306            0 :             "list remaining files",
    1307            0 :             &cancel,
    1308            0 :         )
    1309            0 :         .await
     1310            0 :         .context("list remaining files")?
    1311              :         .keys;
    1312              : 
    1313              :         // We will delete the current index_part object last, since it acts as a deletion
    1314              :         // marker via its deleted_at attribute
    1315            0 :         let latest_index = remaining
    1316            0 :             .iter()
    1317            0 :             .filter(|p| {
    1318            0 :                 p.object_name()
    1319            0 :                     .map(|n| n.starts_with(IndexPart::FILE_NAME))
    1320            0 :                     .unwrap_or(false)
    1321            0 :             })
    1322            0 :             .filter_map(|path| parse_remote_index_path(path.clone()).map(|gen| (path, gen)))
    1323            0 :             .max_by_key(|i| i.1)
    1324            0 :             .map(|i| i.0.clone())
    1325            0 :             .unwrap_or(
    1326            0 :                 // No generation-suffixed indices, assume we are dealing with
    1327            0 :                 // a legacy index.
    1328            0 :                 remote_index_path(&self.tenant_shard_id, &self.timeline_id, Generation::none()),
    1329            0 :             );
    1330            0 : 
    1331            0 :         let remaining_layers: Vec<RemotePath> = remaining
    1332            0 :             .into_iter()
    1333            0 :             .filter(|p| {
    1334            0 :                 if p == &latest_index {
    1335            0 :                     return false;
    1336            0 :                 }
    1337            0 :                 if p.object_name() == Some(INITDB_PRESERVED_PATH) {
    1338            0 :                     return false;
    1339            0 :                 }
    1340            0 :                 true
    1341            0 :             })
    1342            0 :             .inspect(|path| {
    1343            0 :                 if let Some(name) = path.object_name() {
    1344            0 :                     info!(%name, "deleting a file not referenced from index_part.json");
    1345              :                 } else {
    1346            0 :                     warn!(%path, "deleting a nameless or non-utf8 object not referenced from index_part.json");
    1347              :                 }
    1348            0 :             })
    1349            0 :             .collect();
    1350            0 : 
    1351            0 :         let not_referenced_count = remaining_layers.len();
    1352            0 :         if !remaining_layers.is_empty() {
    1353            0 :             self.deletion_queue_client
    1354            0 :                 .push_immediate(remaining_layers)
    1355            0 :                 .await?;
    1356            0 :         }
    1357              : 
    1358            0 :         fail::fail_point!("timeline-delete-before-index-delete", |_| {
    1359            0 :             Err(anyhow::anyhow!(
    1360            0 :                 "failpoint: timeline-delete-before-index-delete"
    1361            0 :             ))?
    1362            0 :         });
    1363              : 
    1364            0 :         debug!("enqueuing index part deletion");
    1365            0 :         self.deletion_queue_client
    1366            0 :             .push_immediate([latest_index].to_vec())
    1367            0 :             .await?;
    1368              : 
     1369              :         // Timeline deletion is rare and we have probably emitted a reasonable number of objects: wait
    1370              :         // for a flush to a persistent deletion list so that we may be sure deletion will occur.
    1371            0 :         self.flush_deletion_queue().await?;
    1372              : 
    1373            0 :         fail::fail_point!("timeline-delete-after-index-delete", |_| {
    1374            0 :             Err(anyhow::anyhow!(
    1375            0 :                 "failpoint: timeline-delete-after-index-delete"
    1376            0 :             ))?
    1377            0 :         });
    1378              : 
    1379            0 :         info!(prefix=%timeline_storage_path, referenced=layer_deletion_count, not_referenced=%not_referenced_count, "done deleting in timeline prefix, including index_part.json");
    1380              : 
    1381            0 :         Ok(())
    1382            0 :     }
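                       :
                       :     // Selecting the current index among generation-suffixed candidates, as a
                       :     // standalone sketch of the logic above: keep objects named like
                       :     // `IndexPart::FILE_NAME`, parse the generation suffix, take the maximum,
                       :     // and fall back to the legacy un-suffixed path when none parses:
                       :     //
                       :     //     let latest = remaining
                       :     //         .iter()
                       :     //         .filter_map(|p| parse_remote_index_path(p.clone()).map(|gen| (p, gen)))
                       :     //         .max_by_key(|(_, gen)| *gen)
                       :     //         .map(|(p, _)| p.clone())
                       :     //         .unwrap_or_else(|| {
                       :     //             remote_index_path(&self.tenant_shard_id, &self.timeline_id, Generation::none())
                       :     //         });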
    1383              : 
    1384              :     ///
     1385              :     /// Pick the next tasks from the queue, and start as many of them as possible without violating
    1386              :     /// the ordering constraints.
    1387              :     ///
    1388              :     /// The caller needs to already hold the `upload_queue` lock.
    1389         4825 :     fn launch_queued_tasks(self: &Arc<Self>, upload_queue: &mut UploadQueueInitialized) {
    1390         7438 :         while let Some(next_op) = upload_queue.queued_operations.front() {
    1391              :             // Can we run this task now?
    1392         4115 :             let can_run_now = match next_op {
    1393              :                 UploadOp::UploadLayer(..) => {
    1394              :                     // Can always be scheduled.
    1395         1173 :                     true
    1396              :                 }
    1397              :                 UploadOp::UploadMetadata(_, _) => {
    1398              :                     // These can only be performed after all the preceding operations
    1399              :                     // have finished.
    1400         2527 :                     upload_queue.inprogress_tasks.is_empty()
    1401              :                 }
    1402              :                 UploadOp::Delete(_) => {
    1403              :                     // Wait for preceding uploads to finish. Concurrent deletions are OK, though.
    1404          192 :                     upload_queue.num_inprogress_deletions == upload_queue.inprogress_tasks.len()
    1405              :                 }
    1406              : 
    1407              :                 UploadOp::Barrier(_) | UploadOp::Shutdown => {
    1408          223 :                     upload_queue.inprogress_tasks.is_empty()
    1409              :                 }
    1410              :             };
    1411              : 
    1412              :             // If we cannot launch this task, don't look any further.
    1413              :             //
     1414              :             // In some cases, we could let some non-frontmost tasks "jump the queue" and launch
    1415              :             // them now, but we don't try to do that currently.  For example, if the frontmost task
    1416              :             // is an index-file upload that cannot proceed until preceding uploads have finished, we
    1417              :             // could still start layer uploads that were scheduled later.
    1418         4115 :             if !can_run_now {
    1419         1496 :                 break;
    1420         2619 :             }
    1421         2619 : 
    1422         2619 :             if let UploadOp::Shutdown = next_op {
    1423              :                 // leave the op in the queue but do not start more tasks; it will be dropped when
    1424              :                 // the stop is called.
    1425            6 :                 upload_queue.shutdown_ready.close();
    1426            6 :                 break;
    1427         2613 :             }
    1428         2613 : 
    1429         2613 :             // We can launch this task. Remove it from the queue first.
    1430         2613 :             let next_op = upload_queue.queued_operations.pop_front().unwrap();
    1431         2613 : 
    1432         2613 :             debug!("starting op: {}", next_op);
    1433              : 
    1434              :             // Update the counters
    1435         2613 :             match next_op {
    1436         1173 :                 UploadOp::UploadLayer(_, _) => {
    1437         1173 :                     upload_queue.num_inprogress_layer_uploads += 1;
    1438         1173 :                 }
    1439         1154 :                 UploadOp::UploadMetadata(_, _) => {
    1440         1154 :                     upload_queue.num_inprogress_metadata_uploads += 1;
    1441         1154 :                 }
    1442          170 :                 UploadOp::Delete(_) => {
    1443          170 :                     upload_queue.num_inprogress_deletions += 1;
    1444          170 :                 }
    1445          116 :                 UploadOp::Barrier(sender) => {
    1446          116 :                     sender.send_replace(());
    1447          116 :                     continue;
    1448              :                 }
    1449            0 :                 UploadOp::Shutdown => unreachable!("shutdown is intentionally never popped off"),
    1450              :             };
    1451              : 
    1452              :             // Assign unique ID to this task
    1453         2497 :             upload_queue.task_counter += 1;
    1454         2497 :             let upload_task_id = upload_queue.task_counter;
    1455         2497 : 
    1456         2497 :             // Add it to the in-progress map
    1457         2497 :             let task = Arc::new(UploadTask {
    1458         2497 :                 task_id: upload_task_id,
    1459         2497 :                 op: next_op,
    1460         2497 :                 retries: AtomicU32::new(0),
    1461         2497 :             });
    1462         2497 :             upload_queue
    1463         2497 :                 .inprogress_tasks
    1464         2497 :                 .insert(task.task_id, Arc::clone(&task));
    1465         2497 : 
    1466         2497 :             // Spawn task to perform the task
    1467         2497 :             let self_rc = Arc::clone(self);
    1468         2497 :             let tenant_shard_id = self.tenant_shard_id;
    1469         2497 :             let timeline_id = self.timeline_id;
    1470         2497 :             task_mgr::spawn(
    1471         2497 :                 &self.runtime,
    1472         2497 :                 TaskKind::RemoteUploadTask,
    1473         2497 :                 Some(self.tenant_shard_id),
    1474         2497 :                 Some(self.timeline_id),
    1475         2497 :                 "remote upload",
    1476              :                 false,
    1477         2393 :                 async move {
    1478        35492 :                     self_rc.perform_upload_task(task).await;
    1479         2273 :                     Ok(())
    1480         2273 :                 }
    1481         2497 :                 .instrument(info_span!(parent: None, "remote_upload", tenant_id=%tenant_shard_id.tenant_id, shard_id=%tenant_shard_id.shard_slug(), %timeline_id, %upload_task_id)),
    1482              :             );
    1483              : 
    1484              :             // Loop back to process next task
    1485              :         }
    1486         4825 :     }
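                       :
                       :     // The scheduling constraints above, condensed into a predicate (sketch
                       :     // restating the `match` in `launch_queued_tasks`):
                       :     //
                       :     //     fn can_run_now(q: &UploadQueueInitialized, op: &UploadOp) -> bool {
                       :     //         match op {
                       :     //             // layer uploads may always run, in parallel
                       :     //             UploadOp::UploadLayer(..) => true,
                       :     //             // the index must reflect everything scheduled before it
                       :     //             UploadOp::UploadMetadata(..) => q.inprogress_tasks.is_empty(),
                       :     //             // deletions may overlap only with other deletions
                       :     //             UploadOp::Delete(_) => q.num_inprogress_deletions == q.inprogress_tasks.len(),
                       :     //             UploadOp::Barrier(_) | UploadOp::Shutdown => q.inprogress_tasks.is_empty(),
                       :     //         }
                       :     //     }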
    1487              : 
    1488              :     ///
    1489              :     /// Perform an upload task.
    1490              :     ///
    1491              :     /// The task is in the `inprogress_tasks` list. This function will try to
    1492              :     /// execute it, retrying forever. On successful completion, the task is
     1493              :     /// removed from the `inprogress_tasks` list, and any subsequent task(s) in the
     1494              :     /// queue that were waiting on its completion are launched.
    1495              :     ///
    1496              :     /// The task can be shut down, however. That leads to stopping the whole
    1497              :     /// queue.
    1498              :     ///
    1499         2393 :     async fn perform_upload_task(self: &Arc<Self>, task: Arc<UploadTask>) {
    1500         2393 :         let cancel = shutdown_token();
    1501              :         // Loop to retry until it completes.
    1502         2393 :         loop {
    1503         2393 :             // If we're requested to shut down, close up shop and exit.
    1504         2393 :             //
     1505         2393 :             // Note: We only check for shutdown requests between retries, so
     1506         2393 :             // if a shutdown request arrives while we're busy uploading, in the
     1507         2393 :             // upload::upload_*() call below, we will not exit until it has
    1508         2393 :             // finished. We probably could cancel the upload by simply dropping
    1509         2393 :             // the Future, but we're not 100% sure if the remote storage library
    1510         2393 :             // is cancellation safe, so we don't dare to do that. Hopefully, the
    1511         2393 :             // upload finishes or times out soon enough.
    1512         2393 :             if cancel.is_cancelled() {
    1513            0 :                 info!("upload task cancelled by shutdown request");
    1514            0 :                 self.stop();
    1515            0 :                 return;
    1516         2393 :             }
    1517              : 
    1518         2393 :             let upload_result: anyhow::Result<()> = match &task.op {
    1519         1072 :                 UploadOp::UploadLayer(ref layer, ref layer_metadata) => {
    1520         1072 :                     let local_path = layer.local_path();
    1521         1072 : 
    1522         1072 :                     // We should only be uploading layers created by this `Tenant`'s lifetime, so
    1523         1072 :                     // the metadata in the upload should always match our current generation.
    1524         1072 :                     assert_eq!(layer_metadata.generation, self.generation);
    1525              : 
    1526         1072 :                     let remote_path = remote_layer_path(
    1527         1072 :                         &self.tenant_shard_id.tenant_id,
    1528         1072 :                         &self.timeline_id,
    1529         1072 :                         layer_metadata.shard,
    1530         1072 :                         &layer.layer_desc().layer_name(),
    1531         1072 :                         layer_metadata.generation,
    1532         1072 :                     );
    1533         1072 : 
    1534         1072 :                     upload::upload_timeline_layer(
    1535         1072 :                         &self.storage_impl,
    1536         1072 :                         local_path,
    1537         1072 :                         &remote_path,
    1538         1072 :                         layer_metadata.file_size(),
    1539         1072 :                         &self.cancel,
    1540         1072 :                     )
    1541         1072 :                     .measure_remote_op(
    1542         1072 :                         RemoteOpFileKind::Layer,
    1543         1072 :                         RemoteOpKind::Upload,
    1544         1072 :                         Arc::clone(&self.metrics),
    1545         1072 :                     )
    1546        30597 :                     .await
    1547              :                 }
    1548         1151 :                 UploadOp::UploadMetadata(ref index_part, _lsn) => {
    1549         1151 :                     let mention_having_future_layers = if cfg!(feature = "testing") {
    1550         1151 :                         index_part
    1551         1151 :                             .layer_metadata
    1552         1151 :                             .keys()
    1553        13200 :                             .any(|x| x.is_in_future(*_lsn))
    1554              :                     } else {
    1555            0 :                         false
    1556              :                     };
    1557              : 
    1558         1151 :                     let res = upload::upload_index_part(
    1559         1151 :                         &self.storage_impl,
    1560         1151 :                         &self.tenant_shard_id,
    1561         1151 :                         &self.timeline_id,
    1562         1151 :                         self.generation,
    1563         1151 :                         index_part,
    1564         1151 :                         &self.cancel,
    1565         1151 :                     )
    1566         1151 :                     .measure_remote_op(
    1567         1151 :                         RemoteOpFileKind::Index,
    1568         1151 :                         RemoteOpKind::Upload,
    1569         1151 :                         Arc::clone(&self.metrics),
    1570         1151 :                     )
    1571         4725 :                     .await;
    1572         1144 :                     if res.is_ok() {
    1573         1144 :                         self.update_remote_physical_size_gauge(Some(index_part));
    1574         1144 :                         if mention_having_future_layers {
    1575              :                             // find rationale near crate::tenant::timeline::init::cleanup_future_layer
    1576            4 :                             tracing::info!(disk_consistent_lsn=%_lsn, "uploaded an index_part.json with future layers -- this is ok! if shutdown now, expect future layer cleanup");
    1577         1140 :                         }
    1578            0 :                     }
    1579         1144 :                     res
    1580              :                 }
    1581          170 :                 UploadOp::Delete(delete) => {
    1582              :                     pausable_failpoint!("before-delete-layer-pausable");
    1583          170 :                     self.deletion_queue_client
    1584          170 :                         .push_layers(
    1585          170 :                             self.tenant_shard_id,
    1586          170 :                             self.timeline_id,
    1587          170 :                             self.generation,
    1588          170 :                             delete.layers.clone(),
    1589          170 :                         )
    1590            0 :                         .await
    1591          170 :                         .map_err(|e| anyhow::anyhow!(e))
    1592              :                 }
    1593            0 :                 unexpected @ UploadOp::Barrier(_) | unexpected @ UploadOp::Shutdown => {
    1594              :                     // unreachable. Barrier operations are handled synchronously in
    1595              :                     // launch_queued_tasks
    1596            0 :                     warn!("unexpected {unexpected:?} operation in perform_upload_task");
    1597            0 :                     break;
    1598              :                 }
    1599              :             };
    1600              : 
    1601            0 :             match upload_result {
    1602              :                 Ok(()) => {
    1603         2273 :                     break;
    1604              :                 }
    1605            0 :                 Err(e) if TimeoutOrCancel::caused_by_cancel(&e) => {
    1606            0 :                     // loop around to do the proper stopping
    1607            0 :                     continue;
    1608              :                 }
    1609            0 :                 Err(e) => {
    1610            0 :                     let retries = task.retries.fetch_add(1, Ordering::SeqCst);
    1611            0 : 
    1612            0 :                     // Uploads can fail due to rate limits (IAM, S3), spurious network problems,
    1613            0 :                     // or other external reasons. Such issues are relatively regular, so log them
    1614            0 :                     // at info level at first, and only WARN if the operation fails repeatedly.
    1615            0 :                     //
    1616            0 :                     // (See similar logic for downloads in `download::download_retry`)
    1617            0 :                     if retries < FAILED_UPLOAD_WARN_THRESHOLD {
    1618            0 :                         info!(
    1619            0 :                             "failed to perform remote task {}, will retry (attempt {}): {:#}",
    1620            0 :                             task.op, retries, e
    1621              :                         );
    1622              :                     } else {
    1623            0 :                         warn!(
    1624            0 :                             "failed to perform remote task {}, will retry (attempt {}): {:?}",
    1625            0 :                             task.op, retries, e
    1626              :                         );
    1627              :                     }
    1628              : 
    1629              :                     // sleep until it's time to retry, or we're cancelled
    1630            0 :                     exponential_backoff(
    1631            0 :                         retries,
    1632            0 :                         DEFAULT_BASE_BACKOFF_SECONDS,
    1633            0 :                         DEFAULT_MAX_BACKOFF_SECONDS,
    1634            0 :                         &cancel,
    1635            0 :                     )
    1636            0 :                     .await;
    1637              :                 }
    1638              :             }
    1639              :         }
    1640              : 
    1641         2273 :         let retries = task.retries.load(Ordering::SeqCst);
    1642         2273 :         if retries > 0 {
    1643            0 :             info!(
    1644            0 :                 "remote task {} completed successfully after {} retries",
    1645            0 :                 task.op, retries
    1646              :             );
    1647              :         } else {
    1648         2273 :             debug!("remote task {} completed successfully", task.op);
    1649              :         }
    1650              : 
    1651              :         // The task has completed successfully. Remove it from the in-progress list.
    1652         2273 :         let lsn_update = {
    1653         2273 :             let mut upload_queue_guard = self.upload_queue.lock().unwrap();
    1654         2273 :             let upload_queue = match upload_queue_guard.deref_mut() {
    1655            0 :                 UploadQueue::Uninitialized => panic!("callers are responsible for ensuring this is only called on an initialized queue"),
    1656            0 :                 UploadQueue::Stopped(_stopped) => {
    1657            0 :                     None
    1658              :                 },
    1659         2273 :                 UploadQueue::Initialized(qi) => { Some(qi) }
    1660              :             };
    1661              : 
    1662         2273 :             let upload_queue = match upload_queue {
    1663         2273 :                 Some(upload_queue) => upload_queue,
    1664              :                 None => {
    1665            0 :                     info!("another concurrent task already stopped the queue");
    1666            0 :                     return;
    1667              :                 }
    1668              :             };
    1669              : 
    1670         2273 :             upload_queue.inprogress_tasks.remove(&task.task_id);
    1671              : 
    1672         2273 :             let lsn_update = match task.op {
    1673              :                 UploadOp::UploadLayer(_, _) => {
    1674          959 :                     upload_queue.num_inprogress_layer_uploads -= 1;
    1675          959 :                     None
    1676              :                 }
    1677         1144 :                 UploadOp::UploadMetadata(_, lsn) => {
    1678         1144 :                     upload_queue.num_inprogress_metadata_uploads -= 1;
    1679         1144 :                     // XXX monotonicity check?
    1680         1144 : 
    1681         1144 :                     upload_queue.projected_remote_consistent_lsn = Some(lsn);
    1682         1144 :                     if self.generation.is_none() {
    1683              :                         // Legacy mode: skip validating generation
    1684            0 :                         upload_queue.visible_remote_consistent_lsn.store(lsn);
    1685            0 :                         None
    1686              :                     } else {
    1687         1144 :                         Some((lsn, upload_queue.visible_remote_consistent_lsn.clone()))
    1688              :                     }
    1689              :                 }
    1690              :                 UploadOp::Delete(_) => {
    1691          170 :                     upload_queue.num_inprogress_deletions -= 1;
    1692          170 :                     None
    1693              :                 }
    1694            0 :                 UploadOp::Barrier(..) | UploadOp::Shutdown => unreachable!(),
    1695              :             };
    1696              : 
    1697              :             // Launch any queued tasks that were unblocked by this one.
    1698         2273 :             self.launch_queued_tasks(upload_queue);
    1699         2273 :             lsn_update
    1700              :         };
    1701              : 
    1702         2273 :         if let Some((lsn, slot)) = lsn_update {
    1703              :             // Updates to the remote_consistent_lsn we advertise to pageservers
    1704              :             // are all routed through the DeletionQueue, to enforce important
    1705              :             // data safety guarantees (see docs/rfcs/025-generation-numbers.md)
    1706         1144 :             self.deletion_queue_client
    1707         1144 :                 .update_remote_consistent_lsn(
    1708         1144 :                     self.tenant_shard_id,
    1709         1144 :                     self.timeline_id,
    1710         1144 :                     self.generation,
    1711         1144 :                     lsn,
    1712         1144 :                     slot,
    1713         1144 :                 )
    1714            0 :                 .await;
    1715         1129 :         }
    1716              : 
    1717         2273 :         self.metric_end(&task.op);
    1718         2273 :     }
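                       :
                       :     // The retry policy of `perform_upload_task` in miniature (sketch; constants
                       :     // and helpers as used above, `attempt` standing in for the actual upload):
                       :     //
                       :     //     let mut retries = 0u32;
                       :     //     loop {
                       :     //         match attempt().await {
                       :     //             Ok(()) => break,
                       :     //             // cancellation loops around so the shutdown check runs
                       :     //             Err(e) if TimeoutOrCancel::caused_by_cancel(&e) => continue,
                       :     //             Err(e) => {
                       :     //                 if retries < FAILED_UPLOAD_WARN_THRESHOLD {
                       :     //                     info!("will retry (attempt {retries}): {e:#}");
                       :     //                 } else {
                       :     //                     warn!("will retry (attempt {retries}): {e:?}");
                       :     //                 }
                       :     //                 exponential_backoff(
                       :     //                     retries,
                       :     //                     DEFAULT_BASE_BACKOFF_SECONDS,
                       :     //                     DEFAULT_MAX_BACKOFF_SECONDS,
                       :     //                     &cancel,
                       :     //                 )
                       :     //                 .await;
                       :     //                 retries += 1;
                       :     //             }
                       :     //         }
                       :     //     }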
    1719              : 
    1720         4901 :     fn metric_impl(
    1721         4901 :         &self,
    1722         4901 :         op: &UploadOp,
    1723         4901 :     ) -> Option<(
    1724         4901 :         RemoteOpFileKind,
    1725         4901 :         RemoteOpKind,
    1726         4901 :         RemoteTimelineClientMetricsCallTrackSize,
    1727         4901 :     )> {
    1728              :         use RemoteTimelineClientMetricsCallTrackSize::DontTrackSize;
    1729         4901 :         let res = match op {
    1730         2133 :             UploadOp::UploadLayer(_, m) => (
    1731         2133 :                 RemoteOpFileKind::Layer,
    1732         2133 :                 RemoteOpKind::Upload,
    1733         2133 :                 RemoteTimelineClientMetricsCallTrackSize::Bytes(m.file_size()),
    1734         2133 :             ),
    1735         2320 :             UploadOp::UploadMetadata(_, _) => (
    1736         2320 :                 RemoteOpFileKind::Index,
    1737         2320 :                 RemoteOpKind::Upload,
    1738         2320 :                 DontTrackSize {
    1739         2320 :                     reason: "metadata uploads are tiny",
    1740         2320 :                 },
    1741         2320 :             ),
    1742          442 :             UploadOp::Delete(_delete) => (
    1743          442 :                 RemoteOpFileKind::Layer,
    1744          442 :                 RemoteOpKind::Delete,
    1745          442 :                 DontTrackSize {
    1746          442 :                     reason: "should we track deletes? positive or negative sign?",
    1747          442 :                 },
    1748          442 :             ),
    1749              :             UploadOp::Barrier(..) | UploadOp::Shutdown => {
    1750              :                 // we do not account these
    1751            6 :                 return None;
    1752              :             }
    1753              :         };
    1754         4895 :         Some(res)
    1755         4901 :     }
    1756              : 
    1757         2622 :     fn metric_begin(&self, op: &UploadOp) {
    1758         2622 :         let (file_kind, op_kind, track_bytes) = match self.metric_impl(op) {
    1759         2622 :             Some(x) => x,
    1760            0 :             None => return,
    1761              :         };
    1762         2622 :         let guard = self.metrics.call_begin(&file_kind, &op_kind, track_bytes);
    1763         2622 :         guard.will_decrement_manually(); // in metric_end(), see right below
    1764         2622 :     }
    1765              : 
    1766         2279 :     fn metric_end(&self, op: &UploadOp) {
    1767         2279 :         let (file_kind, op_kind, track_bytes) = match self.metric_impl(op) {
    1768         2273 :             Some(x) => x,
    1769            6 :             None => return,
    1770              :         };
    1771         2273 :         self.metrics.call_end(&file_kind, &op_kind, track_bytes);
    1772         2279 :     }
    1773              : 
    1774              :     /// Close the upload queue for new operations and cancel queued operations.
    1775              :     ///
    1776              :     /// Use [`RemoteTimelineClient::shutdown`] for graceful stop.
    1777              :     ///
    1778              :     /// In-progress operations will still be running after this function returns.
    1779              :     /// Use `task_mgr::shutdown_tasks(Some(TaskKind::RemoteUploadTask), Some(self.tenant_shard_id), Some(timeline_id))`
    1780              :     /// to wait for them to complete after calling this function.
    1781           14 :     pub(crate) fn stop(&self) {
    1782           14 :         // Whichever *task* for this RemoteTimelineClient grabs the mutex first will transition the queue
    1783           14 :         // into stopped state, thereby dropping all of the queued *ops* which haven't become *tasks* yet.
    1784           14 :         // The other *tasks* will come here and observe an already shut down queue and hence simply wrap up their business.
    1785           14 :         let mut guard = self.upload_queue.lock().unwrap();
    1786           14 :         self.stop_impl(&mut guard);
    1787           14 :     }
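
// Illustrative sketch (assuming the crate-internal items referenced in this
// file): the hard-stop sequence from the doc comment above. stop() only closes
// the queue and drops not-yet-started ops; in-flight upload tasks must be
// awaited separately.
async fn hard_stop(
    client: &RemoteTimelineClient,
    tenant_shard_id: TenantShardId,
    timeline_id: TimelineId,
) {
    // Close the queue for new operations; queued-but-unstarted ops are dropped.
    client.stop();
    // In-progress operations keep running; wait for them to complete.
    task_mgr::shutdown_tasks(
        Some(TaskKind::RemoteUploadTask),
        Some(tenant_shard_id),
        Some(timeline_id),
    )
    .await;
}
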
    1788              : 
    1789           14 :     fn stop_impl(&self, guard: &mut std::sync::MutexGuard<UploadQueue>) {
    1790           14 :         match &mut **guard {
    1791              :             UploadQueue::Uninitialized => {
    1792            0 :                 info!("UploadQueue is in state Uninitialized, nothing to do");
    1793            0 :                 **guard = UploadQueue::Stopped(UploadQueueStopped::Uninitialized);
    1794              :             }
    1795              :             UploadQueue::Stopped(_) => {
    1796              :                 // nothing to do
    1797            6 :                 info!("another concurrent task already shut down the queue");
    1798              :             }
    1799            8 :             UploadQueue::Initialized(initialized) => {
    1800            8 :                 info!("shutting down upload queue");
    1801              : 
    1802              :                 // Replace the queue with the Stopped state, taking ownership of the old
    1803              :                 // Initialized queue. We will do some checks on it, and then drop it.
    1804            8 :                 let qi = {
    1805              :                     // Here we preserve a working version of the upload queue for possible use during deletions.
    1806              :                     // In-place replacement of Initialized with Stopped could be done with the help of https://github.com/Sgeo/take_mut,
    1807              :                     // but for this use case it doesn't really make sense to bring in unsafe code for a single call site.
    1808              :                     // Deletion is not really perf-sensitive, so there shouldn't be any problem with cloning a fraction of it.
    1809            8 :                     let upload_queue_for_deletion = UploadQueueInitialized {
    1810            8 :                         task_counter: 0,
    1811            8 :                         latest_files: initialized.latest_files.clone(),
    1812            8 :                         latest_files_changes_since_metadata_upload_scheduled: 0,
    1813            8 :                         latest_metadata: initialized.latest_metadata.clone(),
    1814            8 :                         projected_remote_consistent_lsn: None,
    1815            8 :                         visible_remote_consistent_lsn: initialized
    1816            8 :                             .visible_remote_consistent_lsn
    1817            8 :                             .clone(),
    1818            8 :                         num_inprogress_layer_uploads: 0,
    1819            8 :                         num_inprogress_metadata_uploads: 0,
    1820            8 :                         num_inprogress_deletions: 0,
    1821            8 :                         inprogress_tasks: HashMap::default(),
    1822            8 :                         queued_operations: VecDeque::default(),
    1823            8 :                         #[cfg(feature = "testing")]
    1824            8 :                         dangling_files: HashMap::default(),
    1825            8 :                         shutting_down: false,
    1826            8 :                         shutdown_ready: Arc::new(tokio::sync::Semaphore::new(0)),
    1827            8 :                     };
    1828            8 : 
    1829            8 :                     let upload_queue = std::mem::replace(
    1830            8 :                         &mut **guard,
    1831            8 :                         UploadQueue::Stopped(UploadQueueStopped::Deletable(
    1832            8 :                             UploadQueueStoppedDeletable {
    1833            8 :                                 upload_queue_for_deletion,
    1834            8 :                                 deleted_at: SetDeletedFlagProgress::NotRunning,
    1835            8 :                             },
    1836            8 :                         )),
    1837            8 :                     );
    1838            8 :                     if let UploadQueue::Initialized(qi) = upload_queue {
    1839            8 :                         qi
    1840              :                     } else {
    1841            0 :                         unreachable!("we checked in the match above that it is Initialized");
    1842              :                     }
    1843              :                 };
    1844              : 
    1845              :                 // consistency check
    1846            8 :                 assert_eq!(
    1847            8 :                     qi.num_inprogress_layer_uploads
    1848            8 :                         + qi.num_inprogress_metadata_uploads
    1849            8 :                         + qi.num_inprogress_deletions,
    1850            8 :                     qi.inprogress_tasks.len()
    1851            8 :                 );
    1852              : 
    1853              :                 // We don't need to do anything here for in-progress tasks. They will finish
    1854              :                 // on their own, decrement the unfinished-task counter themselves, and observe
    1855              :                 // that the queue is Stopped.
    1856            8 :                 drop(qi.inprogress_tasks);
    1857              : 
    1858              :                 // Tear down queued ops
    1859            8 :                 for op in qi.queued_operations.into_iter() {
    1860            6 :                     self.metric_end(&op);
    1861            6 :                     // Dropping UploadOp::Barrier() here will make wait_completion() return with an Err()
    1862            6 :                     // which is exactly what we want to happen.
    1863            6 :                     drop(op);
    1864            6 :                 }
    1865              :             }
    1866              :         }
    1867           14 :     }
    1868              : }
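
// Illustrative sketch (not part of the covered source): the std::mem::replace
// pattern stop_impl() uses to take the Initialized queue out from behind a
// &mut without unsafe code: swap the successor state in, and receive the old
// state by value. The State enum is a hypothetical stand-in for UploadQueue.
enum State {
    Initialized(Vec<u32>), // stands in for the owned UploadQueueInitialized
    Stopped,
}

fn transition_to_stopped(state: &mut State) -> Option<Vec<u32>> {
    match std::mem::replace(state, State::Stopped) {
        // We now own the old contents and are free to inspect, drain, or drop them.
        State::Initialized(queued) => Some(queued),
        // Already stopped: nothing to take.
        State::Stopped => None,
    }
}
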
    1869              : 
    1870            0 : pub fn remote_tenant_path(tenant_shard_id: &TenantShardId) -> RemotePath {
    1871            0 :     let path = format!("tenants/{tenant_shard_id}");
    1872            0 :     RemotePath::from_string(&path).expect("Failed to construct path")
    1873            0 : }
    1874              : 
    1875          148 : pub fn remote_timelines_path(tenant_shard_id: &TenantShardId) -> RemotePath {
    1876          148 :     let path = format!("tenants/{tenant_shard_id}/{TIMELINES_SEGMENT_NAME}");
    1877          148 :     RemotePath::from_string(&path).expect("Failed to construct path")
    1878          148 : }
    1879              : 
    1880            0 : fn remote_timelines_path_unsharded(tenant_id: &TenantId) -> RemotePath {
    1881            0 :     let path = format!("tenants/{tenant_id}/{TIMELINES_SEGMENT_NAME}");
    1882            0 :     RemotePath::from_string(&path).expect("Failed to construct path")
    1883            0 : }
    1884              : 
    1885           30 : pub fn remote_timeline_path(
    1886           30 :     tenant_shard_id: &TenantShardId,
    1887           30 :     timeline_id: &TimelineId,
    1888           30 : ) -> RemotePath {
    1889           30 :     remote_timelines_path(tenant_shard_id).join(Utf8Path::new(&timeline_id.to_string()))
    1890           30 : }
    1891              : 
    1892              : /// Note that the shard component of a remote layer path is _not_ always the same
    1893              : /// as in the TenantShardId of the caller: tenants may reference layers from a different
    1894              : /// ShardIndex.  Use the ShardIndex from the layer's metadata.
    1895         1090 : pub fn remote_layer_path(
    1896         1090 :     tenant_id: &TenantId,
    1897         1090 :     timeline_id: &TimelineId,
    1898         1090 :     shard: ShardIndex,
    1899         1090 :     layer_file_name: &LayerName,
    1900         1090 :     generation: Generation,
    1901         1090 : ) -> RemotePath {
    1902         1090 :     // Generation-aware key format
    1903         1090 :     let path = format!(
    1904         1090 :         "tenants/{tenant_id}{0}/{TIMELINES_SEGMENT_NAME}/{timeline_id}/{1}{2}",
    1905         1090 :         shard.get_suffix(),
    1906         1090 :         layer_file_name,
    1907         1090 :         generation.get_suffix()
    1908         1090 :     );
    1909         1090 : 
    1910         1090 :     RemotePath::from_string(&path).expect("Failed to construct path")
    1911         1090 : }
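
// Illustrative sketch of the caveat above: when building a layer's remote
// path, take the shard and generation from the layer's own metadata rather
// than from the calling tenant shard. The shard()/generation() accessors are
// assumptions for illustration; use whatever LayerFileMetadata actually exposes.
fn layer_path_from_metadata(
    tenant_id: &TenantId,
    timeline_id: &TimelineId,
    layer_file_name: &LayerName,
    metadata: &LayerFileMetadata,
) -> RemotePath {
    remote_layer_path(
        tenant_id,
        timeline_id,
        metadata.shard(),      // not the caller's own ShardIndex
        layer_file_name,
        metadata.generation(), // not the caller's current generation
    )
}
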
    1912              : 
    1913            4 : pub fn remote_initdb_archive_path(tenant_id: &TenantId, timeline_id: &TimelineId) -> RemotePath {
    1914            4 :     RemotePath::from_string(&format!(
    1915            4 :         "tenants/{tenant_id}/{TIMELINES_SEGMENT_NAME}/{timeline_id}/{INITDB_PATH}"
    1916            4 :     ))
    1917            4 :     .expect("Failed to construct path")
    1918            4 : }
    1919              : 
    1920            2 : pub fn remote_initdb_preserved_archive_path(
    1921            2 :     tenant_id: &TenantId,
    1922            2 :     timeline_id: &TimelineId,
    1923            2 : ) -> RemotePath {
    1924            2 :     RemotePath::from_string(&format!(
    1925            2 :         "tenants/{tenant_id}/{TIMELINES_SEGMENT_NAME}/{timeline_id}/{INITDB_PRESERVED_PATH}"
    1926            2 :     ))
    1927            2 :     .expect("Failed to construct path")
    1928            2 : }
    1929              : 
    1930         1202 : pub fn remote_index_path(
    1931         1202 :     tenant_shard_id: &TenantShardId,
    1932         1202 :     timeline_id: &TimelineId,
    1933         1202 :     generation: Generation,
    1934         1202 : ) -> RemotePath {
    1935         1202 :     RemotePath::from_string(&format!(
    1936         1202 :         "tenants/{tenant_shard_id}/{TIMELINES_SEGMENT_NAME}/{timeline_id}/{0}{1}",
    1937         1202 :         IndexPart::FILE_NAME,
    1938         1202 :         generation.get_suffix()
    1939         1202 :     ))
    1940         1202 :     .expect("Failed to construct path")
    1941         1202 : }
    1942              : 
    1943            0 : pub(crate) fn remote_heatmap_path(tenant_shard_id: &TenantShardId) -> RemotePath {
    1944            0 :     RemotePath::from_string(&format!(
    1945            0 :         "tenants/{tenant_shard_id}/{TENANT_HEATMAP_BASENAME}"
    1946            0 :     ))
    1947            0 :     .expect("Failed to construct path")
    1948            0 : }
    1949              : 
    1950              : /// Given the key of an index, parse out the generation part of the name
    1951           18 : pub fn parse_remote_index_path(path: RemotePath) -> Option<Generation> {
    1952           18 :     let file_name = match path.get_path().file_name() {
    1953           18 :         Some(f) => f,
    1954              :         None => {
    1955              :             // Unexpected: we should be seeing index_part.json paths only
    1956            0 :             tracing::warn!("Malformed index key {}", path);
    1957            0 :             return None;
    1958              :         }
    1959              :     };
    1960              : 
    1961           18 :     match file_name.split_once('-') {
    1962           12 :         Some((_, gen_suffix)) => Generation::parse_suffix(gen_suffix),
    1963            6 :         None => None,
    1964              :     }
    1965           18 : }
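
// Illustrative round-trip sketch: remote_index_path() appends a generation
// suffix to the index_part.json key, and parse_remote_index_path() recovers it
// by splitting the file name on '-'. A suffix-less (pre-generation) key parses
// to None, as exercised above. Assumes Generation implements PartialEq/Debug.
fn index_key_round_trip(tenant_shard_id: &TenantShardId, timeline_id: &TimelineId) {
    let generation = Generation::new(5);
    let path = remote_index_path(tenant_shard_id, timeline_id, generation);
    // We would expect to recover the same generation from the key.
    assert_eq!(parse_remote_index_path(path), Some(generation));
}
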
    1966              : 
    1967              : #[cfg(test)]
    1968              : mod tests {
    1969              :     use super::*;
    1970              :     use crate::{
    1971              :         context::RequestContext,
    1972              :         tenant::{
    1973              :             harness::{TenantHarness, TIMELINE_ID},
    1974              :             storage_layer::layer::local_layer_path,
    1975              :             Tenant, Timeline,
    1976              :         },
    1977              :         DEFAULT_PG_VERSION,
    1978              :     };
    1979              : 
    1980              :     use std::collections::HashSet;
    1981              : 
    1982            8 :     pub(super) fn dummy_contents(name: &str) -> Vec<u8> {
    1983            8 :         format!("contents for {name}").into()
    1984            8 :     }
    1985              : 
    1986            2 :     pub(super) fn dummy_metadata(disk_consistent_lsn: Lsn) -> TimelineMetadata {
    1987            2 :         let metadata = TimelineMetadata::new(
    1988            2 :             disk_consistent_lsn,
    1989            2 :             None,
    1990            2 :             None,
    1991            2 :             Lsn(0),
    1992            2 :             Lsn(0),
    1993            2 :             Lsn(0),
    1994            2 :             // Any version will do
    1995            2 :             // but it should be consistent with the one in the tests
    1996            2 :             crate::DEFAULT_PG_VERSION,
    1997            2 :         );
    1998            2 : 
    1999            2 :         // go through serialize + deserialize to fix the header, including checksum
    2000            2 :         TimelineMetadata::from_bytes(&metadata.to_bytes().unwrap()).unwrap()
    2001            2 :     }
    2002              : 
    2003            2 :     fn assert_file_list(a: &HashSet<LayerName>, b: &[&str]) {
    2004            6 :         let mut avec: Vec<String> = a.iter().map(|x| x.to_string()).collect();
    2005            2 :         avec.sort();
    2006            2 : 
    2007            2 :         let mut bvec = b.to_vec();
    2008            2 :         bvec.sort_unstable();
    2009            2 : 
    2010            2 :         assert_eq!(avec, bvec);
    2011            2 :     }
    2012              : 
    2013            4 :     fn assert_remote_files(expected: &[&str], remote_path: &Utf8Path, generation: Generation) {
    2014            4 :         let mut expected: Vec<String> = expected
    2015            4 :             .iter()
    2016           16 :             .map(|x| format!("{}{}", x, generation.get_suffix()))
    2017            4 :             .collect();
    2018            4 :         expected.sort();
    2019            4 : 
    2020            4 :         let mut found: Vec<String> = Vec::new();
    2021           16 :         for entry in std::fs::read_dir(remote_path).unwrap().flatten() {
    2022           16 :             let entry_name = entry.file_name();
    2023           16 :             let fname = entry_name.to_str().unwrap();
    2024           16 :             found.push(String::from(fname));
    2025           16 :         }
    2026            4 :         found.sort();
    2027            4 : 
    2028            4 :         assert_eq!(found, expected);
    2029            4 :     }
    2030              : 
    2031              :     struct TestSetup {
    2032              :         harness: TenantHarness,
    2033              :         tenant: Arc<Tenant>,
    2034              :         timeline: Arc<Timeline>,
    2035              :         tenant_ctx: RequestContext,
    2036              :     }
    2037              : 
    2038              :     impl TestSetup {
    2039            8 :         async fn new(test_name: &str) -> anyhow::Result<Self> {
    2040            8 :             let test_name = Box::leak(Box::new(format!("remote_timeline_client__{test_name}")));
    2041            8 :             let harness = TenantHarness::create(test_name)?;
    2042           32 :             let (tenant, ctx) = harness.load().await;
    2043              : 
    2044            8 :             let timeline = tenant
    2045            8 :                 .create_test_timeline(TIMELINE_ID, Lsn(8), DEFAULT_PG_VERSION, &ctx)
    2046           22 :                 .await?;
    2047              : 
    2048            8 :             Ok(Self {
    2049            8 :                 harness,
    2050            8 :                 tenant,
    2051            8 :                 timeline,
    2052            8 :                 tenant_ctx: ctx,
    2053            8 :             })
    2054            8 :         }
    2055              : 
    2056              :         /// Construct a RemoteTimelineClient in an arbitrary generation
    2057           10 :         fn build_client(&self, generation: Generation) -> Arc<RemoteTimelineClient> {
    2058           10 :             Arc::new(RemoteTimelineClient {
    2059           10 :                 conf: self.harness.conf,
    2060           10 :                 runtime: tokio::runtime::Handle::current(),
    2061           10 :                 tenant_shard_id: self.harness.tenant_shard_id,
    2062           10 :                 timeline_id: TIMELINE_ID,
    2063           10 :                 generation,
    2064           10 :                 storage_impl: self.harness.remote_storage.clone(),
    2065           10 :                 deletion_queue_client: self.harness.deletion_queue.new_client(),
    2066           10 :                 upload_queue: Mutex::new(UploadQueue::Uninitialized),
    2067           10 :                 metrics: Arc::new(RemoteTimelineClientMetrics::new(
    2068           10 :                     &self.harness.tenant_shard_id,
    2069           10 :                     &TIMELINE_ID,
    2070           10 :                 )),
    2071           10 :                 cancel: CancellationToken::new(),
    2072           10 :             })
    2073           10 :         }
    2074              : 
    2075              :         /// A tracing::Span that satisfies remote_timeline_client methods that assert tenant_id
    2076              :         /// and timeline_id are present.
    2077            6 :         fn span(&self) -> tracing::Span {
    2078            6 :             tracing::info_span!(
    2079              :                 "test",
    2080              :                 tenant_id = %self.harness.tenant_shard_id.tenant_id,
    2081            0 :                 shard_id = %self.harness.tenant_shard_id.shard_slug(),
    2082              :                 timeline_id = %TIMELINE_ID
    2083              :             )
    2084            6 :         }
    2085              :     }
    2086              : 
    2087              :     // Test scheduling
    2088              :     #[tokio::test]
    2089            2 :     async fn upload_scheduling() {
    2090            2 :         // Test outline:
    2091            2 :         //
    2092            2 :         // Schedule upload of a bunch of layers. Check that they are started immediately, not queued
    2093            2 :         // Schedule upload of index. Check that it is queued
    2094            2 :         // Let the layer file uploads finish. Check that the index-upload is now started
    2095            2 :         // Let the index-upload finish.
    2096            2 :         //
    2097            2 :         // Download back the index.json. Check that the list of files is correct
    2098            2 :         //
    2099            2 :         // Schedule upload. Schedule deletion. Check that the deletion is queued
    2100            2 :         // Let the upload finish. Check that the deletion is now started
    2101            2 :         // Schedule another deletion. Check that it's launched immediately.
    2102            2 :         // Schedule index upload. Check that it's queued
    2103            2 : 
    2104           13 :         let test_setup = TestSetup::new("upload_scheduling").await.unwrap();
    2105            2 :         let span = test_setup.span();
    2106            2 :         let _guard = span.enter();
    2107            2 : 
    2108            2 :         let TestSetup {
    2109            2 :             harness,
    2110            2 :             tenant: _tenant,
    2111            2 :             timeline,
    2112            2 :             tenant_ctx: _tenant_ctx,
    2113            2 :         } = test_setup;
    2114            2 : 
    2115            2 :         let client = timeline.remote_client.as_ref().unwrap();
    2116            2 : 
    2117            2 :         // Download back the index.json, and check that the list of files is correct
    2118            2 :         let initial_index_part = match client
    2119            2 :             .download_index_file(&CancellationToken::new())
    2120            8 :             .await
    2121            2 :             .unwrap()
    2122            2 :         {
    2123            2 :             MaybeDeletedIndexPart::IndexPart(index_part) => index_part,
    2124            2 :             MaybeDeletedIndexPart::Deleted(_) => panic!("unexpectedly got deleted index part"),
    2125            2 :         };
    2126            2 :         let initial_layers = initial_index_part
    2127            2 :             .layer_metadata
    2128            2 :             .keys()
    2129            2 :             .map(|f| f.to_owned())
    2130            2 :             .collect::<HashSet<LayerName>>();
    2131            2 :         let initial_layer = {
    2132            2 :             assert!(initial_layers.len() == 1);
    2133            2 :             initial_layers.into_iter().next().unwrap()
    2134            2 :         };
    2135            2 : 
    2136            2 :         let timeline_path = harness.timeline_path(&TIMELINE_ID);
    2137            2 : 
    2138            2 :         println!("workdir: {}", harness.conf.workdir);
    2139            2 : 
    2140            2 :         let remote_timeline_dir = harness
    2141            2 :             .remote_fs_dir
    2142            2 :             .join(timeline_path.strip_prefix(&harness.conf.workdir).unwrap());
    2143            2 :         println!("remote_timeline_dir: {remote_timeline_dir}");
    2144            2 : 
    2145            2 :         let generation = harness.generation;
    2146            2 :         let shard = harness.shard;
    2147            2 : 
    2148            2 :         // Create a few dummy files and schedule uploads for them
    2149            2 : 
    2150            2 :         let layers = [
    2151            2 :             ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap(), dummy_contents("foo")),
    2152            2 :             ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D9-00000000016B5A52".parse().unwrap(), dummy_contents("bar")),
    2153            2 :             ("000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59DA-00000000016B5A53".parse().unwrap(), dummy_contents("baz"))
    2154            2 :         ]
    2155            2 :         .into_iter()
    2156            6 :         .map(|(name, contents): (LayerName, Vec<u8>)| {
    2157            6 : 
    2158            6 :             let local_path = local_layer_path(
    2159            6 :                 harness.conf,
    2160            6 :                 &timeline.tenant_shard_id,
    2161            6 :                 &timeline.timeline_id,
    2162            6 :                 &name,
    2163            6 :                 &generation,
    2164            6 :             );
    2165            6 :             std::fs::write(&local_path, &contents).unwrap();
    2166            6 : 
    2167            6 :             Layer::for_resident(
    2168            6 :                 harness.conf,
    2169            6 :                 &timeline,
    2170            6 :                 local_path,
    2171            6 :                 name,
    2172            6 :                 LayerFileMetadata::new(contents.len() as u64, generation, shard),
    2173            6 :             )
    2174            6 :         }).collect::<Vec<_>>();
    2175            2 : 
    2176            2 :         client
    2177            2 :             .schedule_layer_file_upload(layers[0].clone())
    2178            2 :             .unwrap();
    2179            2 :         client
    2180            2 :             .schedule_layer_file_upload(layers[1].clone())
    2181            2 :             .unwrap();
    2182            2 : 
    2183            2 :         // Check that they are started immediately, not queued
    2184            2 :         //
    2185            2 :         // this works because we are running within block_on, so any futures are now queued up until
    2186            2 :         // our next await point.
    2187            2 :         {
    2188            2 :             let mut guard = client.upload_queue.lock().unwrap();
    2189            2 :             let upload_queue = guard.initialized_mut().unwrap();
    2190            2 :             assert!(upload_queue.queued_operations.is_empty());
    2191            2 :             assert!(upload_queue.inprogress_tasks.len() == 2);
    2192            2 :             assert!(upload_queue.num_inprogress_layer_uploads == 2);
    2193            2 : 
    2194            2 :             // also check that `latest_files_changes_since_metadata_upload_scheduled` was updated
    2195            2 :             assert!(upload_queue.latest_files_changes_since_metadata_upload_scheduled == 2);
    2196            2 :         }
    2197            2 : 
    2198            2 :         // Schedule upload of index. Check that it is queued
    2199            2 :         let metadata = dummy_metadata(Lsn(0x20));
    2200            2 :         client
    2201            2 :             .schedule_index_upload_for_full_metadata_update(&metadata)
    2202            2 :             .unwrap();
    2203            2 :         {
    2204            2 :             let mut guard = client.upload_queue.lock().unwrap();
    2205            2 :             let upload_queue = guard.initialized_mut().unwrap();
    2206            2 :             assert!(upload_queue.queued_operations.len() == 1);
    2207            2 :             assert!(upload_queue.latest_files_changes_since_metadata_upload_scheduled == 0);
    2208            2 :         }
    2209            2 : 
    2210            2 :         // Wait for the uploads to finish
    2211            2 :         client.wait_completion().await.unwrap();
    2212            2 :         {
    2213            2 :             let mut guard = client.upload_queue.lock().unwrap();
    2214            2 :             let upload_queue = guard.initialized_mut().unwrap();
    2215            2 : 
    2216            2 :             assert!(upload_queue.queued_operations.is_empty());
    2217            2 :             assert!(upload_queue.inprogress_tasks.is_empty());
    2218            2 :         }
    2219            2 : 
    2220            2 :         // Download back the index.json, and check that the list of files is correct
    2221            2 :         let index_part = match client
    2222            2 :             .download_index_file(&CancellationToken::new())
    2223            6 :             .await
    2224            2 :             .unwrap()
    2225            2 :         {
    2226            2 :             MaybeDeletedIndexPart::IndexPart(index_part) => index_part,
    2227            2 :             MaybeDeletedIndexPart::Deleted(_) => panic!("unexpectedly got deleted index part"),
    2228            2 :         };
    2229            2 : 
    2230            2 :         assert_file_list(
    2231            2 :             &index_part
    2232            2 :                 .layer_metadata
    2233            2 :                 .keys()
    2234            6 :                 .map(|f| f.to_owned())
    2235            2 :                 .collect(),
    2236            2 :             &[
    2237            2 :                 &initial_layer.to_string(),
    2238            2 :                 &layers[0].layer_desc().layer_name().to_string(),
    2239            2 :                 &layers[1].layer_desc().layer_name().to_string(),
    2240            2 :             ],
    2241            2 :         );
    2242            2 :         assert_eq!(index_part.metadata, metadata);
    2243            2 : 
    2244            2 :         // Schedule upload and then a deletion. Check that the deletion is queued
    2245            2 :         client
    2246            2 :             .schedule_layer_file_upload(layers[2].clone())
    2247            2 :             .unwrap();
    2248            2 : 
    2249            2 :         // this is no longer consistent with how deletion works with Layer::drop, but in this test
    2250            2 :         // we keep using schedule_layer_file_deletion because we don't have a way to wait for the
    2251            2 :         // spawn_blocking started by the drop.
    2252            2 :         client
    2253            2 :             .schedule_layer_file_deletion(&[layers[0].layer_desc().layer_name()])
    2254            2 :             .unwrap();
    2255            2 :         {
    2256            2 :             let mut guard = client.upload_queue.lock().unwrap();
    2257            2 :             let upload_queue = guard.initialized_mut().unwrap();
    2258            2 : 
    2259            2 :             // Deletion schedules upload of the index file, and the file deletion itself
    2260            2 :             assert_eq!(upload_queue.queued_operations.len(), 2);
    2261            2 :             assert_eq!(upload_queue.inprogress_tasks.len(), 1);
    2262            2 :             assert_eq!(upload_queue.num_inprogress_layer_uploads, 1);
    2263            2 :             assert_eq!(upload_queue.num_inprogress_deletions, 0);
    2264            2 :             assert_eq!(
    2265            2 :                 upload_queue.latest_files_changes_since_metadata_upload_scheduled,
    2266            2 :                 0
    2267            2 :             );
    2268            2 :         }
    2269            2 :         assert_remote_files(
    2270            2 :             &[
    2271            2 :                 &initial_layer.to_string(),
    2272            2 :                 &layers[0].layer_desc().layer_name().to_string(),
    2273            2 :                 &layers[1].layer_desc().layer_name().to_string(),
    2274            2 :                 "index_part.json",
    2275            2 :             ],
    2276            2 :             &remote_timeline_dir,
    2277            2 :             generation,
    2278            2 :         );
    2279            2 : 
    2280            2 :         // Finish them
    2281            2 :         client.wait_completion().await.unwrap();
    2282            2 :         harness.deletion_queue.pump().await;
    2283            2 : 
    2284            2 :         assert_remote_files(
    2285            2 :             &[
    2286            2 :                 &initial_layer.to_string(),
    2287            2 :                 &layers[1].layer_desc().layer_name().to_string(),
    2288            2 :                 &layers[2].layer_desc().layer_name().to_string(),
    2289            2 :                 "index_part.json",
    2290            2 :             ],
    2291            2 :             &remote_timeline_dir,
    2292            2 :             generation,
    2293            2 :         );
    2294            2 :     }
    2295              : 
    2296              :     #[tokio::test]
    2297            2 :     async fn bytes_unfinished_gauge_for_layer_file_uploads() {
    2298            2 :         // Setup
    2299            2 : 
    2300            2 :         let TestSetup {
    2301            2 :             harness,
    2302            2 :             tenant: _tenant,
    2303            2 :             timeline,
    2304            2 :             ..
    2305           13 :         } = TestSetup::new("metrics").await.unwrap();
    2306            2 :         let client = timeline.remote_client.as_ref().unwrap();
    2307            2 : 
    2308            2 :         let layer_file_name_1: LayerName = "000000000000000000000000000000000000-FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF__00000000016B59D8-00000000016B5A51".parse().unwrap();
    2309            2 :         let local_path = local_layer_path(
    2310            2 :             harness.conf,
    2311            2 :             &timeline.tenant_shard_id,
    2312            2 :             &timeline.timeline_id,
    2313            2 :             &layer_file_name_1,
    2314            2 :             &harness.generation,
    2315            2 :         );
    2316            2 :         let content_1 = dummy_contents("foo");
    2317            2 :         std::fs::write(&local_path, &content_1).unwrap();
    2318            2 : 
    2319            2 :         let layer_file_1 = Layer::for_resident(
    2320            2 :             harness.conf,
    2321            2 :             &timeline,
    2322            2 :             local_path,
    2323            2 :             layer_file_name_1.clone(),
    2324            2 :             LayerFileMetadata::new(content_1.len() as u64, harness.generation, harness.shard),
    2325            2 :         );
    2326            2 : 
    2327            2 :         #[derive(Debug, PartialEq, Clone, Copy)]
    2328            2 :         struct BytesStartedFinished {
    2329            2 :             started: Option<usize>,
    2330            2 :             finished: Option<usize>,
    2331            2 :         }
    2332            2 :         impl std::ops::Add for BytesStartedFinished {
    2333            2 :             type Output = Self;
    2334            4 :             fn add(self, rhs: Self) -> Self::Output {
    2335            4 :                 Self {
    2336            4 :                     started: self.started.map(|v| v + rhs.started.unwrap_or(0)),
    2337            4 :                     finished: self.finished.map(|v| v + rhs.finished.unwrap_or(0)),
    2338            4 :                 }
    2339            4 :             }
    2340            2 :         }
    2341            6 :         let get_bytes_started_stopped = || {
    2342            6 :             let started = client
    2343            6 :                 .metrics
    2344            6 :                 .get_bytes_started_counter_value(&RemoteOpFileKind::Layer, &RemoteOpKind::Upload)
    2345            6 :                 .map(|v| v.try_into().unwrap());
    2346            6 :             let stopped = client
    2347            6 :                 .metrics
    2348            6 :                 .get_bytes_finished_counter_value(&RemoteOpFileKind::Layer, &RemoteOpKind::Upload)
    2349            6 :                 .map(|v| v.try_into().unwrap());
    2350            6 :             BytesStartedFinished {
    2351            6 :                 started,
    2352            6 :                 finished: stopped,
    2353            6 :             }
    2354            6 :         };
    2355            2 : 
    2356            2 :         // Test
    2357            2 :         tracing::info!("now doing actual test");
    2358            2 : 
    2359            2 :         let actual_a = get_bytes_started_stopped();
    2360            2 : 
    2361            2 :         client
    2362            2 :             .schedule_layer_file_upload(layer_file_1.clone())
    2363            2 :             .unwrap();
    2364            2 : 
    2365            2 :         let actual_b = get_bytes_started_stopped();
    2366            2 : 
    2367            2 :         client.wait_completion().await.unwrap();
    2368            2 : 
    2369            2 :         let actual_c = get_bytes_started_stopped();
    2370            2 : 
    2371            2 :         // Validate
    2372            2 : 
    2373            2 :         let expected_b = actual_a
    2374            2 :             + BytesStartedFinished {
    2375            2 :                 started: Some(content_1.len()),
    2376            2 :                 // assert that the _finished metric is created eagerly so that subtractions work on the first sample
    2377            2 :                 finished: Some(0),
    2378            2 :             };
    2379            2 :         assert_eq!(actual_b, expected_b);
    2380            2 : 
    2381            2 :         let expected_c = actual_a
    2382            2 :             + BytesStartedFinished {
    2383            2 :                 started: Some(content_1.len()),
    2384            2 :                 finished: Some(content_1.len()),
    2385            2 :             };
    2386            2 :         assert_eq!(actual_c, expected_c);
    2387            2 :     }
    2388              : 
    2389           12 :     async fn inject_index_part(test_state: &TestSetup, generation: Generation) -> IndexPart {
    2390           12 :         // An empty IndexPart, just sufficient to ensure deserialization will succeed
    2391           12 :         let example_index_part = IndexPart::example();
    2392           12 : 
    2393           12 :         let index_part_bytes = serde_json::to_vec(&example_index_part).unwrap();
    2394           12 : 
    2395           12 :         let index_path = test_state.harness.remote_fs_dir.join(
    2396           12 :             remote_index_path(
    2397           12 :                 &test_state.harness.tenant_shard_id,
    2398           12 :                 &TIMELINE_ID,
    2399           12 :                 generation,
    2400           12 :             )
    2401           12 :             .get_path(),
    2402           12 :         );
    2403           12 : 
    2404           12 :         std::fs::create_dir_all(index_path.parent().unwrap())
    2405           12 :             .expect("creating test dir should work");
    2406           12 : 
    2407           12 :         eprintln!("Writing {index_path}");
    2408           12 :         std::fs::write(&index_path, index_part_bytes).unwrap();
    2409           12 :         example_index_part
    2410           12 :     }
    2411              : 
    2412              :     /// Assert that when a RemoteTimelineClient in generation `get_generation` fetches its
    2413              :     /// index, the IndexPart returned is equal to `expected`
    2414           10 :     async fn assert_got_index_part(
    2415           10 :         test_state: &TestSetup,
    2416           10 :         get_generation: Generation,
    2417           10 :         expected: &IndexPart,
    2418           10 :     ) {
    2419           10 :         let client = test_state.build_client(get_generation);
    2420              : 
    2421           10 :         let download_r = client
    2422           10 :             .download_index_file(&CancellationToken::new())
    2423           53 :             .await
    2424           10 :             .expect("download should always succeed");
    2425           10 :         assert!(matches!(download_r, MaybeDeletedIndexPart::IndexPart(_)));
    2426           10 :         match download_r {
    2427           10 :             MaybeDeletedIndexPart::IndexPart(index_part) => {
    2428           10 :                 assert_eq!(&index_part, expected);
    2429              :             }
    2430            0 :             MaybeDeletedIndexPart::Deleted(_index_part) => panic!("Test doesn't set deleted_at"),
    2431              :         }
    2432           10 :     }
    2433              : 
    2434              :     #[tokio::test]
    2435            2 :     async fn index_part_download_simple() -> anyhow::Result<()> {
    2436           14 :         let test_state = TestSetup::new("index_part_download_simple").await.unwrap();
    2437            2 :         let span = test_state.span();
    2438            2 :         let _guard = span.enter();
    2439            2 : 
    2440            2 :         // Simple case: we are in generation N, load the index from generation N - 1
    2441            2 :         let generation_n = 5;
    2442            2 :         let injected = inject_index_part(&test_state, Generation::new(generation_n - 1)).await;
    2443            2 : 
    2444           10 :         assert_got_index_part(&test_state, Generation::new(generation_n), &injected).await;
    2445            2 : 
    2446            2 :         Ok(())
    2447            2 :     }
    2448              : 
    2449              :     #[tokio::test]
    2450            2 :     async fn index_part_download_ordering() -> anyhow::Result<()> {
    2451            2 :         let test_state = TestSetup::new("index_part_download_ordering")
    2452           14 :             .await
    2453            2 :             .unwrap();
    2454            2 : 
    2455            2 :         let span = test_state.span();
    2456            2 :         let _guard = span.enter();
    2457            2 : 
    2458            2 :         // A generation-less IndexPart exists in the bucket; we should find it
    2459            2 :         let generation_n = 5;
    2460            2 :         let injected_none = inject_index_part(&test_state, Generation::none()).await;
    2461           14 :         assert_got_index_part(&test_state, Generation::new(generation_n), &injected_none).await;
    2462            2 : 
    2463            2 :         // If a more recent-than-none generation exists, we should prefer to load that
    2464            2 :         let injected_1 = inject_index_part(&test_state, Generation::new(1)).await;
    2465           14 :         assert_got_index_part(&test_state, Generation::new(generation_n), &injected_1).await;
    2466            2 : 
    2467            2 :         // If a more-recent-than-me generation exists, we should ignore it.
    2468            2 :         let _injected_10 = inject_index_part(&test_state, Generation::new(10)).await;
    2469            7 :         assert_got_index_part(&test_state, Generation::new(generation_n), &injected_1).await;
    2470            2 : 
    2471            2 :         // If a directly previous generation exists, _and_ an index exists in my own
    2472            2 :         // generation, I should prefer my own generation.
    2473            2 :         let _injected_prev =
    2474            2 :             inject_index_part(&test_state, Generation::new(generation_n - 1)).await;
    2475            2 :         let injected_current = inject_index_part(&test_state, Generation::new(generation_n)).await;
    2476            2 :         assert_got_index_part(
    2477            2 :             &test_state,
    2478            2 :             Generation::new(generation_n),
    2479            2 :             &injected_current,
    2480            2 :         )
    2481            8 :         .await;
    2482            2 : 
    2483            2 :         Ok(())
    2484            2 :     }
    2485              : }
        

Generated by: LCOV version 2.1-beta