LCOV - code coverage report
Current view: top level - libs/remote_storage/src - lib.rs (source / functions)
Test:      553e39c2773e5840c720c90d86e56f89a4330d43.info
Test Date: 2025-06-13 20:01:21

             Coverage    Total    Hit
Lines:         79.2 %      427    338
Functions:     57.3 %      131     75

            Line data    Source code
       1              : //! A set of generic storage abstractions for the page server to use when backing up and restoring its state from the external storage.
       2              : //! No other modules from this tree are supposed to be used directly by the external code.
       3              : //!
       4              : //! The [`RemoteStorage`] trait is a CRUD-like generic abstraction for adapting external storages, with a few implementations:
       5              : //!   * [`local_fs`] uses the local file system as an external storage
       6              : //!   * [`s3_bucket`] uses an AWS S3 bucket as an external storage
       7              : //!   * [`azure_blob`] uses Azure Blob storage as an external storage
       8              : //!
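                      : //! A minimal usage sketch (`config` is a hypothetical [`RemoteStorageConfig`]
                      : //! value; the other names are all from this crate):
                      : //!
                      : //! ```ignore
                      : //! let storage = GenericRemoteStorage::from_config(&config).await?;
                      : //! let path = RemotePath::from_string("tenants/1234/data.bin")?;
                      : //! let cancel = CancellationToken::new();
                      : //! let download = storage.download(&path, &DownloadOpts::default(), &cancel).await?;
                      : //! ```
                      : //!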
       9              : #![deny(unsafe_code)]
      10              : #![deny(clippy::undocumented_unsafe_blocks)]
      11              : 
      12              : mod azure_blob;
      13              : mod config;
      14              : mod error;
      15              : mod local_fs;
      16              : mod metrics;
      17              : mod s3_bucket;
      18              : mod simulate_failures;
      19              : mod support;
      20              : 
      21              : use std::collections::HashMap;
      22              : use std::fmt::Debug;
      23              : use std::num::NonZeroU32;
      24              : use std::ops::Bound;
      25              : use std::pin::{Pin, pin};
      26              : use std::sync::Arc;
      27              : use std::time::SystemTime;
      28              : 
      29              : use anyhow::Context;
      30              : /// Azure SDK's ETag type is a simple String wrapper: we re-export it instead of duplicating it here.
      31              : pub use azure_core::Etag;
      32              : use bytes::Bytes;
      33              : use camino::{Utf8Path, Utf8PathBuf};
      34              : pub use error::{DownloadError, TimeTravelError, TimeoutOrCancel};
      35              : use futures::StreamExt;
      36              : use futures::stream::Stream;
      37              : use itertools::Itertools as _;
      38              : use s3_bucket::RequestKind;
      39              : use serde::{Deserialize, Serialize};
      40              : use tokio::sync::Semaphore;
      41              : use tokio_util::sync::CancellationToken;
      42              : use tracing::info;
      43              : 
      44              : pub use self::azure_blob::AzureBlobStorage;
      45              : pub use self::local_fs::LocalFs;
      46              : pub use self::s3_bucket::S3Bucket;
      47              : pub use self::simulate_failures::UnreliableWrapper;
      48              : pub use crate::config::{AzureConfig, RemoteStorageConfig, RemoteStorageKind, S3Config};
      49              : 
      50              : /// Default concurrency limit for S3 operations
      51              : ///
      52              : /// Currently, sync happens with AWS S3, which has two limits on requests per second:
      53              : /// ~200 RPS for IAM services
      54              : /// <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html>
      55              : /// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
      56              : /// <https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>
      57              : pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
      58              : /// Set this limit analogously to the S3 limit
      59              : ///
      60              : /// There, a limit of at most 20k concurrent connections was noted.
      61              : /// <https://learn.microsoft.com/en-us/answers/questions/1301863/is-there-any-limitation-to-concurrent-connections>
      62              : pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
      63              : /// Set this limit analogously to the S3 limit.
      64              : ///
      65              : /// The local filesystem backend doesn't enforce a concurrency limit itself, but this also bounds
      66              : /// the upload queue concurrency. Some tests create thousands of uploads, which slows down the
      67              : /// quadratic scheduling of the upload queue, and there is no point spawning so many Tokio tasks.
      68              : pub const DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT: usize = 100;
      69              : /// No limits on the client side, which currently means 1000 for AWS S3.
      70              : /// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax>
      71              : pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;
      72              : 
      73              : /// As defined in S3 docs
      74              : ///
      75              : /// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html>
      76              : pub const MAX_KEYS_PER_DELETE_S3: usize = 1000;
      77              : 
      78              : /// As defined in Azure docs
      79              : ///
      80              : /// <https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch>
      81              : pub const MAX_KEYS_PER_DELETE_AZURE: usize = 256;
      82              : 
      83              : const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';
      84              : 
      85              : /// Path on the remote storage, relative to some inner prefix.
      86              : /// The prefix is an implementation detail that allows representing local paths
      87              : /// as remote ones, stripping the local storage prefix away.
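                      : ///
                      : /// For example (illustrative; all names are from this module):
                      : ///
                      : /// ```ignore
                      : /// let path = RemotePath::new(Utf8Path::new("tenants/1234/timelines"))?;
                      : /// assert_eq!(path.object_name(), Some("timelines"));
                      : /// assert_eq!(path.with_base(Utf8Path::new("/base")), Utf8PathBuf::from("/base/tenants/1234/timelines"));
                      : /// ```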
      88              : #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
      89              : pub struct RemotePath(Utf8PathBuf);
      90              : 
      91              : impl Serialize for RemotePath {
      92            0 :     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
      93            0 :     where
      94            0 :         S: serde::Serializer,
      95            0 :     {
      96            0 :         serializer.collect_str(self)
      97            0 :     }
      98              : }
      99              : 
     100              : impl<'de> Deserialize<'de> for RemotePath {
     101            0 :     fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
     102            0 :     where
     103            0 :         D: serde::Deserializer<'de>,
     104            0 :     {
     105            0 :         let str = String::deserialize(deserializer)?;
     106            0 :         Ok(Self(Utf8PathBuf::from(&str)))
     107            0 :     }
     108              : }
     109              : 
     110              : impl std::fmt::Display for RemotePath {
     111          536 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     112          536 :         std::fmt::Display::fmt(&self.0, f)
     113          536 :     }
     114              : }
     115              : 
     116              : impl RemotePath {
     117         3284 :     pub fn new(relative_path: &Utf8Path) -> anyhow::Result<Self> {
     118         3284 :         anyhow::ensure!(
     119         3284 :             relative_path.is_relative(),
     120            4 :             "Path {relative_path:?} is not relative"
     121              :         );
     122         3280 :         Ok(Self(relative_path.to_path_buf()))
     123         3284 :     }
     124              : 
     125         2782 :     pub fn from_string(relative_path: &str) -> anyhow::Result<Self> {
     126         2782 :         Self::new(Utf8Path::new(relative_path))
     127         2782 :     }
     128              : 
     129         3063 :     pub fn with_base(&self, base_path: &Utf8Path) -> Utf8PathBuf {
     130         3063 :         base_path.join(&self.0)
     131         3063 :     }
     132              : 
     133           15 :     pub fn object_name(&self) -> Option<&str> {
     134           15 :         self.0.file_name()
     135           15 :     }
     136              : 
     137           84 :     pub fn join(&self, path: impl AsRef<Utf8Path>) -> Self {
     138           84 :         Self(self.0.join(path))
     139           84 :     }
     140              : 
     141         1018 :     pub fn get_path(&self) -> &Utf8PathBuf {
     142         1018 :         &self.0
     143         1018 :     }
     144              : 
     145           50 :     pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> {
     146           50 :         self.0.strip_prefix(&p.0)
     147           50 :     }
     148              : 
     149          124 :     pub fn add_trailing_slash(&self) -> Self {
     150          124 :         // Unwrap safety inputs are guararnteed to be valid UTF-8
     151          124 :         Self(format!("{}/", self.0).try_into().unwrap())
     152          124 :     }
     153              : }
     154              : 
     155              : /// We don't need callers to be able to pass arbitrary delimiters: just control
     156              : /// whether listings will use a '/' separator or not.
     157              : ///
     158              : /// The WithDelimiter mode will populate `prefixes` and `keys` in the result.  The
     159              : /// NoDelimiter mode will only populate `keys`.
     160              : #[derive(Copy, Clone)]
     161              : pub enum ListingMode {
     162              :     WithDelimiter,
     163              :     NoDelimiter,
     164              : }
     165              : 
     166              : #[derive(PartialEq, Eq, Debug, Clone)]
     167              : pub struct ListingObject {
     168              :     pub key: RemotePath,
     169              :     pub last_modified: SystemTime,
     170              :     pub size: u64,
     171              : }
     172              : 
     173              : #[derive(Default)]
     174              : pub struct Listing {
     175              :     pub prefixes: Vec<RemotePath>,
     176              :     pub keys: Vec<ListingObject>,
     177              : }
     178              : 
     179              : #[derive(Default)]
     180              : pub struct VersionListing {
     181              :     pub versions: Vec<Version>,
     182              : }
     183              : 
     184              : pub struct Version {
     185              :     pub key: RemotePath,
     186              :     pub last_modified: SystemTime,
     187              :     pub kind: VersionKind,
     188              : }
     189              : 
     190              : impl Version {
     191           36 :     pub fn version_id(&self) -> Option<&VersionId> {
     192           36 :         match &self.kind {
     193           28 :             VersionKind::Version(id) => Some(id),
     194            8 :             VersionKind::DeletionMarker => None,
     195              :         }
     196           36 :     }
     197              : }
     198              : 
     199              : #[derive(Debug)]
     200              : pub enum VersionKind {
     201              :     DeletionMarker,
     202              :     Version(VersionId),
     203              : }
     204              : 
     205              : /// Options for downloads. The default value is a plain GET.
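                      : ///
                      : /// A minimal sketch requesting the first KiB of an object (types from this
                      : /// module only):
                      : ///
                      : /// ```ignore
                      : /// let opts = DownloadOpts {
                      : ///     byte_start: Bound::Included(0),
                      : ///     byte_end: Bound::Excluded(1024),
                      : ///     ..Default::default()
                      : /// };
                      : /// assert_eq!(opts.byte_range_header().as_deref(), Some("bytes=0-1023"));
                      : /// ```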
     206              : pub struct DownloadOpts {
     207              :     /// If given, returns [`DownloadError::Unmodified`] if the object still has
     208              :     /// the same ETag (using If-None-Match).
     209              :     pub etag: Option<Etag>,
     210              :     /// The start of the byte range to download, or unbounded.
     211              :     pub byte_start: Bound<u64>,
     212              :     /// The end of the byte range to download, or unbounded. Must be after the
     213              :     /// start bound.
     214              :     pub byte_end: Bound<u64>,
     215              :     /// Optionally request a specific version of a key
     216              :     pub version_id: Option<VersionId>,
     217              :     /// Indicates whether we're downloading something small or large: this indirectly controls
     218              :     /// timeouts, since for something like an index/manifest/heatmap we should time out faster
     219              :     /// than for layer files.
     220              :     pub kind: DownloadKind,
     221              : }
     222              : 
     223              : pub enum DownloadKind {
     224              :     Large,
     225              :     Small,
     226              : }
     227              : 
     228              : #[derive(Debug, Clone)]
     229              : pub struct VersionId(pub String);
     230              : 
     231              : impl Default for DownloadOpts {
     232          541 :     fn default() -> Self {
     233          541 :         Self {
     234          541 :             etag: Default::default(),
     235          541 :             byte_start: Bound::Unbounded,
     236          541 :             byte_end: Bound::Unbounded,
     237          541 :             version_id: None,
     238          541 :             kind: DownloadKind::Large,
     239          541 :         }
     240          541 :     }
     241              : }
     242              : 
     243              : impl DownloadOpts {
     244              :     /// Returns the byte range with inclusive start and exclusive end, or None
     245              :     /// if unbounded.
     246          214 :     pub fn byte_range(&self) -> Option<(u64, Option<u64>)> {
     247          214 :         if self.byte_start == Bound::Unbounded && self.byte_end == Bound::Unbounded {
     248          118 :             return None;
     249           96 :         }
     250           96 :         let start = match self.byte_start {
     251           18 :             Bound::Excluded(i) => i + 1,
     252           60 :             Bound::Included(i) => i,
     253           18 :             Bound::Unbounded => 0,
     254              :         };
     255           96 :         let end = match self.byte_end {
     256           48 :             Bound::Excluded(i) => Some(i),
     257           27 :             Bound::Included(i) => Some(i + 1),
     258           21 :             Bound::Unbounded => None,
     259              :         };
     260           96 :         if let Some(end) = end {
     261           75 :             assert!(start < end, "range end {end} at or before start {start}");
     262           21 :         }
     263           87 :         Some((start, end))
     264          205 :     }
     265              : 
     266              :     /// Returns the byte range as an RFC 2616 Range header value with inclusive
     267              :     /// bounds, or None if unbounded.
     268           66 :     pub fn byte_range_header(&self) -> Option<String> {
     269           66 :         self.byte_range()
     270           66 :             .map(|(start, end)| (start, end.map(|end| end - 1))) // make end inclusive
     271           66 :             .map(|(start, end)| match end {
     272           30 :                 Some(end) => format!("bytes={start}-{end}"),
     273           10 :                 None => format!("bytes={start}-"),
     274           66 :             })
     275           66 :     }
     276              : }
     277              : 
     278              : /// Storage (potentially remote) API to manage its state.
     279              : /// This storage tries to be unaware of any layered repository context,
     280              : /// providing basic CRUD operations for storage files.
     281              : #[allow(async_fn_in_trait)]
     282              : pub trait RemoteStorage: Send + Sync + 'static {
     283              :     /// List objects in remote storage, with semantics matching AWS S3's [`ListObjectsV2`].
     284              :     ///
     285              :     /// The stream is guaranteed to return at least one element, even in the case of errors
     286              :     /// (in which case it is an `Err()`) or when there is nothing to list (an empty `Listing`).
     287              :     ///
     288              :     /// The stream does not end after returning an error, as long as [`is_permanent`] returns false for it.
     289              :     /// The `next` call can be retried, and a future retry may succeed.
     290              :     ///
     291              :     /// Note that the prefix is relative to any `prefix_in_bucket` configured for the client, not
     292              :     /// from the absolute root of the bucket.
     293              :     ///
     294              :     /// `mode` configures whether to use a delimiter.  Without a delimiter, all keys
     295              :     /// within the prefix are listed in the `keys` of the result.  With a delimiter, any "directories" at the top level of
     296              :     /// the prefix are returned in the `prefixes` of the result, and keys in the top level of the prefix are
     297              :     /// returned in `keys`.
     298              :     ///
     299              :     /// `max_keys` controls the maximum number of keys that will be returned.  If this is None, this function
     300              :     /// will iteratively call ListObjectsV2 until it runs out of keys.  Note that this is not safe to use on
     301              :     /// unlimited-size buckets, as the full list of objects is allocated into a monolithic data structure.
     302              :     ///
     303              :     /// [`ListObjectsV2`]: <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html>
     304              :     /// [`is_permanent`]: DownloadError::is_permanent
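                      :     ///
                      :     /// A consumption sketch (`storage`, `prefix` and `cancel` are hypothetical
                      :     /// bindings; everything else is from this module):
                      :     ///
                      :     /// ```ignore
                      :     /// let mut stream = pin!(storage.list_streaming(Some(&prefix), ListingMode::NoDelimiter, None, &cancel));
                      :     /// while let Some(result) = stream.next().await {
                      :     ///     for object in result?.keys {
                      :     ///         println!("{} ({} bytes)", object.key, object.size);
                      :     ///     }
                      :     /// }
                      :     /// ```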
     305              :     fn list_streaming(
     306              :         &self,
     307              :         prefix: Option<&RemotePath>,
     308              :         mode: ListingMode,
     309              :         max_keys: Option<NonZeroU32>,
     310              :         cancel: &CancellationToken,
     311              :     ) -> impl Stream<Item = Result<Listing, DownloadError>> + Send;
     312              : 
     313           69 :     async fn list(
     314           69 :         &self,
     315           69 :         prefix: Option<&RemotePath>,
     316           69 :         mode: ListingMode,
     317           69 :         max_keys: Option<NonZeroU32>,
     318           69 :         cancel: &CancellationToken,
     319           69 :     ) -> Result<Listing, DownloadError> {
     320           69 :         let mut stream = pin!(self.list_streaming(prefix, mode, max_keys, cancel));
     321           69 :         let mut combined = stream.next().await.expect("At least one item required")?;
     322          117 :         while let Some(list) = stream.next().await {
     323           48 :             let list = list?;
     324           48 :             combined.keys.extend(list.keys.into_iter());
     325           48 :             combined.prefixes.extend_from_slice(&list.prefixes);
     326              :         }
     327           69 :         Ok(combined)
     328           69 :     }
     329              : 
     330              :     async fn list_versions(
     331              :         &self,
     332              :         prefix: Option<&RemotePath>,
     333              :         mode: ListingMode,
     334              :         max_keys: Option<NonZeroU32>,
     335              :         cancel: &CancellationToken,
     336              :     ) -> Result<VersionListing, DownloadError>;
     337              : 
     338              :     /// Obtain metadata information about an object.
     339              :     async fn head_object(
     340              :         &self,
     341              :         key: &RemotePath,
     342              :         cancel: &CancellationToken,
     343              :     ) -> Result<ListingObject, DownloadError>;
     344              : 
     345              :     /// Streams the local file contents into the remote storage entry.
     346              :     ///
     347              :     /// If the operation fails because of timeout or cancellation, the root cause of the error will be
     348              :     /// set to `TimeoutOrCancel`.
     349              :     async fn upload(
     350              :         &self,
     351              :         from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
     352              :         // The S3 PUT request requires the content length to be specified,
     353              :         // otherwise it starts to fail as the concurrent connection count increases.
     354              :         data_size_bytes: usize,
     355              :         to: &RemotePath,
     356              :         metadata: Option<StorageMetadata>,
     357              :         cancel: &CancellationToken,
     358              :     ) -> anyhow::Result<()>;
     359              : 
     360              :     /// Streams the remote storage entry contents.
     361              :     ///
     362              :     /// The returned download stream will obey initial timeout and cancellation signal by erroring
     363              :     /// on whichever happens first. Only one of the reasons will fail the stream, which is usually
     364              :     /// enough for `tokio::io::copy_buf` usage. If needed the error can be filtered out.
     365              :     ///
     366              :     /// Returns the metadata, if any was stored with the file previously.
     367              :     async fn download(
     368              :         &self,
     369              :         from: &RemotePath,
     370              :         opts: &DownloadOpts,
     371              :         cancel: &CancellationToken,
     372              :     ) -> Result<Download, DownloadError>;
     373              : 
     374              :     /// Delete a single path from remote storage.
     375              :     ///
     376              :     /// If the operation fails because of timeout or cancellation, the root cause of the error will be
     377              :     /// set to `TimeoutOrCancel`. In such situation it is unknown if the deletion went through.
     378              :     async fn delete(&self, path: &RemotePath, cancel: &CancellationToken) -> anyhow::Result<()>;
     379              : 
     380              :     /// Delete multiple paths from remote storage.
     381              :     ///
     382              :     /// If the operation fails because of timeout or cancellation, the root cause of the error will be
     383              :     /// set to `TimeoutOrCancel`. In such situation it is unknown which deletions, if any, went
     384              :     /// through.
     385              :     async fn delete_objects(
     386              :         &self,
     387              :         paths: &[RemotePath],
     388              :         cancel: &CancellationToken,
     389              :     ) -> anyhow::Result<()>;
     390              : 
     391              :     /// Returns the maximum number of keys that a call to [`Self::delete_objects`] can delete without chunking
     392              :     ///
     393              :     /// The value returned is only an optimization hint; one can pass a larger number of objects to
     394              :     /// `delete_objects` as well.
     395              :     ///
     396              :     /// The value is guaranteed to be >= 1.
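                      :     ///
                      :     /// A caller that wants to stay within the hint can chunk manually (a sketch
                      :     /// with hypothetical `storage`, `paths` and `cancel` bindings):
                      :     ///
                      :     /// ```ignore
                      :     /// for chunk in paths.chunks(storage.max_keys_per_delete()) {
                      :     ///     storage.delete_objects(chunk, &cancel).await?;
                      :     /// }
                      :     /// ```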
     397              :     fn max_keys_per_delete(&self) -> usize;
     398              : 
     399              :     /// Deletes all objects matching the given prefix.
     400              :     ///
     401              :     /// NB: this uses NoDelimiter and will match partial prefixes. For example, the prefix /a/b will
     402              :     /// delete /a/b, /a/b/*, /a/bc, /a/bc/*, etc.
     403              :     ///
     404              :     /// If the operation fails because of timeout or cancellation, the root cause of the error will
     405              :     /// be set to `TimeoutOrCancel`. In such situation it is unknown which deletions, if any, went
     406              :     /// through.
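                      :     ///
                      :     /// For example (illustrative, with hypothetical `storage` and `cancel` bindings),
                      :     /// this removes `tenants/1234` and everything under it, as well as any sibling
                      :     /// keys that merely start with `tenants/1234`:
                      :     ///
                      :     /// ```ignore
                      :     /// storage.delete_prefix(&RemotePath::from_string("tenants/1234")?, &cancel).await?;
                      :     /// ```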
     407           24 :     async fn delete_prefix(
     408           24 :         &self,
     409           24 :         prefix: &RemotePath,
     410           24 :         cancel: &CancellationToken,
     411           24 :     ) -> anyhow::Result<()> {
     412           24 :         let mut stream =
     413           24 :             pin!(self.list_streaming(Some(prefix), ListingMode::NoDelimiter, None, cancel));
     414           51 :         while let Some(result) = stream.next().await {
     415           27 :             let keys = match result {
     416           27 :                 Ok(listing) if listing.keys.is_empty() => continue,
     417           73 :                 Ok(listing) => listing.keys.into_iter().map(|o| o.key).collect_vec(),
     418            0 :                 Err(DownloadError::Cancelled) => return Err(TimeoutOrCancel::Cancel.into()),
     419            0 :                 Err(DownloadError::Timeout) => return Err(TimeoutOrCancel::Timeout.into()),
     420            0 :                 Err(err) => return Err(err.into()),
     421              :             };
     422           18 :             tracing::info!("Deleting {} keys from remote storage", keys.len());
     423           18 :             self.delete_objects(&keys, cancel).await?;
     424              :         }
     425           24 :         Ok(())
     426            0 :     }
     427              : 
     428              :     /// Copy a remote object inside a bucket from one path to another.
     429              :     async fn copy(
     430              :         &self,
     431              :         from: &RemotePath,
     432              :         to: &RemotePath,
     433              :         cancel: &CancellationToken,
     434              :     ) -> anyhow::Result<()>;
     435              : 
     436              :     /// Resets the content of everything with the given prefix to its state at the given timestamp.
     437              :     async fn time_travel_recover(
     438              :         &self,
     439              :         prefix: Option<&RemotePath>,
     440              :         timestamp: SystemTime,
     441              :         done_if_after: SystemTime,
     442              :         cancel: &CancellationToken,
     443              :         complexity_limit: Option<NonZeroU32>,
     444              :     ) -> Result<(), TimeTravelError>;
     445              : }
     446              : 
     447              : /// Data part of an ongoing [`Download`].
     448              : ///
     449              : /// `DownloadStream` is sensitive to the timeout and cancellation used with the original
     450              : /// [`RemoteStorage::download`] request. The type yields `std::io::Result<Bytes>` to be compatible
     451              : /// with `tokio::io::copy_buf`.
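                      : ///
                      : /// A consumption sketch (assuming a `download` result; `StreamReader` comes from
                      : /// `tokio_util`, which is not otherwise used in this module):
                      : ///
                      : /// ```ignore
                      : /// let mut reader = tokio_util::io::StreamReader::new(download.download_stream);
                      : /// let mut file = tokio::fs::File::create("layer.tmp").await?;
                      : /// tokio::io::copy_buf(&mut reader, &mut file).await?;
                      : /// ```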
     452              : // This has 'static because safekeepers do not use cancellation tokens (yet)
     453              : pub type DownloadStream =
     454              :     Pin<Box<dyn Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static>>;
     455              : 
     456              : pub struct Download {
     457              :     pub download_stream: DownloadStream,
     458              :     /// The last time the file was modified (`last-modified` HTTP header)
     459              :     pub last_modified: SystemTime,
     460              :     /// A way to identify this specific version of the resource (`etag` HTTP header)
     461              :     pub etag: Etag,
     462              :     /// Extra key-value data, associated with the current remote file.
     463              :     pub metadata: Option<StorageMetadata>,
     464              : }
     465              : 
     466              : impl Debug for Download {
     467            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     468            0 :         f.debug_struct("Download")
     469            0 :             .field("metadata", &self.metadata)
     470            0 :             .finish()
     471            0 :     }
     472              : }
     473              : 
     474              : /// Every storage currently supported.
     475              : /// Serves as a simple way to pass around the [`RemoteStorage`] without dealing with generics.
     476              : // Require Clone for `Other` due to https://github.com/rust-lang/rust/issues/26925
     477              : #[derive(Clone)]
     478              : pub enum GenericRemoteStorage<Other: Clone = Arc<UnreliableWrapper>> {
     479              :     LocalFs(LocalFs),
     480              :     AwsS3(Arc<S3Bucket>),
     481              :     AzureBlob(Arc<AzureBlobStorage>),
     482              :     Unreliable(Other),
     483              : }
     484              : 
     485              : impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
     486              :     // See [`RemoteStorage::list`].
     487          305 :     pub async fn list(
     488          305 :         &self,
     489          305 :         prefix: Option<&RemotePath>,
     490          305 :         mode: ListingMode,
     491          305 :         max_keys: Option<NonZeroU32>,
     492          305 :         cancel: &CancellationToken,
     493          305 :     ) -> Result<Listing, DownloadError> {
     494          305 :         match self {
     495          236 :             Self::LocalFs(s) => s.list(prefix, mode, max_keys, cancel).await,
     496           50 :             Self::AwsS3(s) => s.list(prefix, mode, max_keys, cancel).await,
     497           19 :             Self::AzureBlob(s) => s.list(prefix, mode, max_keys, cancel).await,
     498            0 :             Self::Unreliable(s) => s.list(prefix, mode, max_keys, cancel).await,
     499              :         }
     500            0 :     }
     501              : 
     502              :     // See [`RemoteStorage::list_streaming`].
     503            3 :     pub fn list_streaming<'a>(
     504            3 :         &'a self,
     505            3 :         prefix: Option<&'a RemotePath>,
     506            3 :         mode: ListingMode,
     507            3 :         max_keys: Option<NonZeroU32>,
     508            3 :         cancel: &'a CancellationToken,
     509            3 :     ) -> impl Stream<Item = Result<Listing, DownloadError>> + 'a + Send {
     510            3 :         match self {
     511            0 :             Self::LocalFs(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel))
     512            0 :                 as Pin<Box<dyn Stream<Item = Result<Listing, DownloadError>> + Send>>,
     513            2 :             Self::AwsS3(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
     514            1 :             Self::AzureBlob(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
     515            0 :             Self::Unreliable(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
     516              :         }
     517            0 :     }
     518              : 
     519              :     // See [`RemoteStorage::list_versions`].
     520            0 :     pub async fn list_versions<'a>(
     521            0 :         &'a self,
     522            0 :         prefix: Option<&'a RemotePath>,
     523            0 :         mode: ListingMode,
     524            0 :         max_keys: Option<NonZeroU32>,
     525            0 :         cancel: &'a CancellationToken,
     526            0 :     ) -> Result<VersionListing, DownloadError> {
     527            0 :         match self {
     528            0 :             Self::LocalFs(s) => s.list_versions(prefix, mode, max_keys, cancel).await,
     529            0 :             Self::AwsS3(s) => s.list_versions(prefix, mode, max_keys, cancel).await,
     530            0 :             Self::AzureBlob(s) => s.list_versions(prefix, mode, max_keys, cancel).await,
     531            0 :             Self::Unreliable(s) => s.list_versions(prefix, mode, max_keys, cancel).await,
     532              :         }
     533            0 :     }
     534              : 
     535              :     // See [`RemoteStorage::head_object`].
     536            9 :     pub async fn head_object(
     537            9 :         &self,
     538            9 :         key: &RemotePath,
     539            9 :         cancel: &CancellationToken,
     540            9 :     ) -> Result<ListingObject, DownloadError> {
     541            9 :         match self {
     542            0 :             Self::LocalFs(s) => s.head_object(key, cancel).await,
     543            6 :             Self::AwsS3(s) => s.head_object(key, cancel).await,
     544            3 :             Self::AzureBlob(s) => s.head_object(key, cancel).await,
     545            0 :             Self::Unreliable(s) => s.head_object(key, cancel).await,
     546              :         }
     547            0 :     }
     548              : 
     549              :     /// See [`RemoteStorage::upload`]
     550         2165 :     pub async fn upload(
     551         2165 :         &self,
     552         2165 :         from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
     553         2165 :         data_size_bytes: usize,
     554         2165 :         to: &RemotePath,
     555         2165 :         metadata: Option<StorageMetadata>,
     556         2165 :         cancel: &CancellationToken,
     557         2165 :     ) -> anyhow::Result<()> {
     558         2165 :         match self {
     559         1849 :             Self::LocalFs(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
     560          199 :             Self::AwsS3(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
     561           93 :             Self::AzureBlob(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
     562           24 :             Self::Unreliable(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
     563              :         }
     564            0 :     }
     565              : 
     566              :     /// See [`RemoteStorage::download`]
     567          473 :     pub async fn download(
     568          473 :         &self,
     569          473 :         from: &RemotePath,
     570          473 :         opts: &DownloadOpts,
     571          473 :         cancel: &CancellationToken,
     572          473 :     ) -> Result<Download, DownloadError> {
     573          473 :         match self {
     574          429 :             Self::LocalFs(s) => s.download(from, opts, cancel).await,
     575           33 :             Self::AwsS3(s) => s.download(from, opts, cancel).await,
     576           11 :             Self::AzureBlob(s) => s.download(from, opts, cancel).await,
     577            0 :             Self::Unreliable(s) => s.download(from, opts, cancel).await,
     578              :         }
     579            0 :     }
     580              : 
     581              :     /// See [`RemoteStorage::delete`]
     582          477 :     pub async fn delete(
     583          477 :         &self,
     584          477 :         path: &RemotePath,
     585          477 :         cancel: &CancellationToken,
     586          477 :     ) -> anyhow::Result<()> {
     587          477 :         match self {
     588          217 :             Self::LocalFs(s) => s.delete(path, cancel).await,
     589          174 :             Self::AwsS3(s) => s.delete(path, cancel).await,
     590           86 :             Self::AzureBlob(s) => s.delete(path, cancel).await,
     591            0 :             Self::Unreliable(s) => s.delete(path, cancel).await,
     592              :         }
     593            0 :     }
     594              : 
     595              :     /// See [`RemoteStorage::delete_objects`]
     596           18 :     pub async fn delete_objects(
     597           18 :         &self,
     598           18 :         paths: &[RemotePath],
     599           18 :         cancel: &CancellationToken,
     600           18 :     ) -> anyhow::Result<()> {
     601           18 :         match self {
     602            3 :             Self::LocalFs(s) => s.delete_objects(paths, cancel).await,
     603           12 :             Self::AwsS3(s) => s.delete_objects(paths, cancel).await,
     604            3 :             Self::AzureBlob(s) => s.delete_objects(paths, cancel).await,
     605            0 :             Self::Unreliable(s) => s.delete_objects(paths, cancel).await,
     606              :         }
     607            0 :     }
     608              : 
     609              :     /// [`RemoteStorage::max_keys_per_delete`]
     610            4 :     pub fn max_keys_per_delete(&self) -> usize {
     611            4 :         match self {
     612            4 :             Self::LocalFs(s) => s.max_keys_per_delete(),
     613            0 :             Self::AwsS3(s) => s.max_keys_per_delete(),
     614            0 :             Self::AzureBlob(s) => s.max_keys_per_delete(),
     615            0 :             Self::Unreliable(s) => s.max_keys_per_delete(),
     616              :         }
     617            0 :     }
     618              : 
     619              :     /// See [`RemoteStorage::delete_prefix`]
     620           24 :     pub async fn delete_prefix(
     621           24 :         &self,
     622           24 :         prefix: &RemotePath,
     623           24 :         cancel: &CancellationToken,
     624           24 :     ) -> anyhow::Result<()> {
     625           24 :         match self {
     626            6 :             Self::LocalFs(s) => s.delete_prefix(prefix, cancel).await,
     627           12 :             Self::AwsS3(s) => s.delete_prefix(prefix, cancel).await,
     628            6 :             Self::AzureBlob(s) => s.delete_prefix(prefix, cancel).await,
     629            0 :             Self::Unreliable(s) => s.delete_prefix(prefix, cancel).await,
     630              :         }
     631            0 :     }
     632              : 
     633              :     /// See [`RemoteStorage::copy`]
     634            3 :     pub async fn copy_object(
     635            3 :         &self,
     636            3 :         from: &RemotePath,
     637            3 :         to: &RemotePath,
     638            3 :         cancel: &CancellationToken,
     639            3 :     ) -> anyhow::Result<()> {
     640            3 :         match self {
     641            0 :             Self::LocalFs(s) => s.copy(from, to, cancel).await,
     642            2 :             Self::AwsS3(s) => s.copy(from, to, cancel).await,
     643            1 :             Self::AzureBlob(s) => s.copy(from, to, cancel).await,
     644            0 :             Self::Unreliable(s) => s.copy(from, to, cancel).await,
     645              :         }
     646            0 :     }
     647              : 
     648              :     /// See [`RemoteStorage::time_travel_recover`].
     649            6 :     pub async fn time_travel_recover(
     650            6 :         &self,
     651            6 :         prefix: Option<&RemotePath>,
     652            6 :         timestamp: SystemTime,
     653            6 :         done_if_after: SystemTime,
     654            6 :         cancel: &CancellationToken,
     655            6 :         complexity_limit: Option<NonZeroU32>,
     656            6 :     ) -> Result<(), TimeTravelError> {
     657            6 :         match self {
     658            0 :             Self::LocalFs(s) => {
     659            0 :                 s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit)
     660            0 :                     .await
     661              :             }
     662            6 :             Self::AwsS3(s) => {
     663            6 :                 s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit)
     664            6 :                     .await
     665              :             }
     666            0 :             Self::AzureBlob(s) => {
     667            0 :                 s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit)
     668            0 :                     .await
     669              :             }
     670            0 :             Self::Unreliable(s) => {
     671            0 :                 s.time_travel_recover(prefix, timestamp, done_if_after, cancel, complexity_limit)
     672            0 :                     .await
     673              :             }
     674              :         }
     675            0 :     }
     676              : }
     677              : 
     678              : impl GenericRemoteStorage {
     679          162 :     pub async fn from_config(storage_config: &RemoteStorageConfig) -> anyhow::Result<Self> {
     680          162 :         let timeout = storage_config.timeout;
     681          162 : 
     682          162 :         // If someone overrides timeout to be small without adjusting small_timeout, then adjust it automatically
     683          162 :         let small_timeout = std::cmp::min(storage_config.small_timeout, timeout);
     684          162 : 
     685          162 :         Ok(match &storage_config.storage {
     686          126 :             RemoteStorageKind::LocalFs { local_path: path } => {
     687          126 :                 info!("Using fs root '{path}' as a remote storage");
     688          126 :                 Self::LocalFs(LocalFs::new(path.clone(), timeout)?)
     689              :             }
     690           26 :             RemoteStorageKind::AwsS3(s3_config) => {
     691           26 :                 // The profile and access key id are only printed here for debugging purposes,
     692           26 :                 // their values don't indicate which auth method is eventually chosen.
     693           26 :                 let profile = std::env::var("AWS_PROFILE").unwrap_or_else(|_| "<none>".into());
     694           26 :                 let access_key_id =
     695           26 :                     std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "<none>".into());
     696           26 :                 info!(
     697            0 :                     "Using s3 bucket '{}' in region '{}' as a remote storage, prefix in bucket: '{:?}', bucket endpoint: '{:?}', profile: {profile}, access_key_id: {access_key_id}",
     698              :                     s3_config.bucket_name,
     699              :                     s3_config.bucket_region,
     700              :                     s3_config.prefix_in_bucket,
     701              :                     s3_config.endpoint
     702              :                 );
     703           26 :                 Self::AwsS3(Arc::new(S3Bucket::new(s3_config, timeout).await?))
     704              :             }
     705           10 :             RemoteStorageKind::AzureContainer(azure_config) => {
     706           10 :                 let storage_account = azure_config
     707           10 :                     .storage_account
     708           10 :                     .as_deref()
     709           10 :                     .unwrap_or("<AZURE_STORAGE_ACCOUNT>");
     710           10 :                 info!(
     711            0 :                     "Using azure container '{}' in account '{storage_account}' in region '{}' as a remote storage, prefix in container: '{:?}'",
     712              :                     azure_config.container_name,
     713              :                     azure_config.container_region,
     714              :                     azure_config.prefix_in_container
     715              :                 );
     716           10 :                 Self::AzureBlob(Arc::new(AzureBlobStorage::new(
     717           10 :                     azure_config,
     718           10 :                     timeout,
     719           10 :                     small_timeout,
     720           10 :                 )?))
     721              :             }
     722              :         })
     723          162 :     }
     724              : 
     725            2 :     pub fn unreliable_wrapper(s: Self, fail_first: u64) -> Self {
     726            2 :         Self::Unreliable(Arc::new(UnreliableWrapper::new(s, fail_first)))
     727            2 :     }
     728              : 
     729              :     /// See [`RemoteStorage::upload`], which this method calls with `None` as metadata.
     730          888 :     pub async fn upload_storage_object(
     731          888 :         &self,
     732          888 :         from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
     733          888 :         from_size_bytes: usize,
     734          888 :         to: &RemotePath,
     735          888 :         cancel: &CancellationToken,
     736          888 :     ) -> anyhow::Result<()> {
     737          888 :         self.upload(from, from_size_bytes, to, None, cancel)
     738          888 :             .await
     739          884 :             .with_context(|| {
     740            0 :                 format!("Failed to upload data of length {from_size_bytes} to storage path {to:?}")
     741          884 :             })
     742          884 :     }
     743              : 
     744              :     /// The name of the bucket/container/etc.
     745            0 :     pub fn bucket_name(&self) -> Option<&str> {
     746            0 :         match self {
     747            0 :             Self::LocalFs(_s) => None,
     748            0 :             Self::AwsS3(s) => Some(s.bucket_name()),
     749            0 :             Self::AzureBlob(s) => Some(s.container_name()),
     750            0 :             Self::Unreliable(_s) => None,
     751              :         }
     752            0 :     }
     753              : }
     754              : 
     755              : /// Extra set of key-value pairs that contain arbitrary metadata about the storage entry.
     756              : /// Immutable, cannot be changed once the file is created.
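                      : ///
                      : /// For example (illustrative):
                      : ///
                      : /// ```ignore
                      : /// let metadata = StorageMetadata::from([("content-type", "application/octet-stream")]);
                      : /// ```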
     757              : #[derive(Debug, Clone, PartialEq, Eq)]
     758              : pub struct StorageMetadata(HashMap<String, String>);
     759              : 
     760              : impl<const N: usize> From<[(&str, &str); N]> for StorageMetadata {
     761            0 :     fn from(arr: [(&str, &str); N]) -> Self {
     762            0 :         let map: HashMap<String, String> = arr
     763            0 :             .iter()
     764            0 :             .map(|(k, v)| (k.to_string(), v.to_string()))
     765            0 :             .collect();
     766            0 :         Self(map)
     767            0 :     }
     768              : }
     769              : 
     770              : struct ConcurrencyLimiter {
     771              :     // Every request to S3 can be throttled or cancelled, if a certain number of requests per second is exceeded.
     772              :     // The same goes for IAM, which is queried before every S3 request, if enabled. IAM has an even lower RPS threshold.
     773              :     // This helps to ensure we don't exceed those thresholds.
     774              :     write: Arc<Semaphore>,
     775              :     read: Arc<Semaphore>,
     776              : }
     777              : 
     778              : impl ConcurrencyLimiter {
     779          731 :     fn for_kind(&self, kind: RequestKind) -> &Arc<Semaphore> {
     780          731 :         match kind {
     781           44 :             RequestKind::Get => &self.read,
     782          292 :             RequestKind::Put => &self.write,
     783           64 :             RequestKind::List => &self.read,
     784          287 :             RequestKind::Delete => &self.write,
     785            3 :             RequestKind::Copy => &self.write,
     786            6 :             RequestKind::TimeTravel => &self.write,
     787            9 :             RequestKind::Head => &self.read,
     788           26 :             RequestKind::ListVersions => &self.read,
     789              :         }
     790          731 :     }
     791              : 
     792          698 :     async fn acquire(
     793          698 :         &self,
     794          698 :         kind: RequestKind,
     795          698 :     ) -> Result<tokio::sync::SemaphorePermit<'_>, tokio::sync::AcquireError> {
     796          698 :         self.for_kind(kind).acquire().await
     797          698 :     }
     798              : 
     799           33 :     async fn acquire_owned(
     800           33 :         &self,
     801           33 :         kind: RequestKind,
     802           33 :     ) -> Result<tokio::sync::OwnedSemaphorePermit, tokio::sync::AcquireError> {
     803           33 :         Arc::clone(self.for_kind(kind)).acquire_owned().await
     804           33 :     }
     805              : 
     806           51 :     fn new(limit: usize) -> ConcurrencyLimiter {
     807           51 :         Self {
     808           51 :             read: Arc::new(Semaphore::new(limit)),
     809           51 :             write: Arc::new(Semaphore::new(limit)),
     810           51 :         }
     811           51 :     }
     812              : }
     813              : 
     814              : #[cfg(test)]
     815              : mod tests {
     816              :     use super::*;
     817              : 
     818              :     /// DownloadOpts::byte_range() should generate (inclusive, exclusive) ranges
     819              :     /// with optional end bound, or None when unbounded.
     820              :     #[test]
     821            3 :     fn download_opts_byte_range() {
     822            3 :         // Consider using test_case or a similar table-driven test framework.
     823            3 :         let cases = [
     824            3 :             // (byte_start, byte_end, expected)
     825            3 :             (Bound::Unbounded, Bound::Unbounded, None),
     826            3 :             (Bound::Unbounded, Bound::Included(7), Some((0, Some(8)))),
     827            3 :             (Bound::Unbounded, Bound::Excluded(7), Some((0, Some(7)))),
     828            3 :             (Bound::Included(3), Bound::Unbounded, Some((3, None))),
     829            3 :             (Bound::Included(3), Bound::Included(7), Some((3, Some(8)))),
     830            3 :             (Bound::Included(3), Bound::Excluded(7), Some((3, Some(7)))),
     831            3 :             (Bound::Excluded(3), Bound::Unbounded, Some((4, None))),
     832            3 :             (Bound::Excluded(3), Bound::Included(7), Some((4, Some(8)))),
     833            3 :             (Bound::Excluded(3), Bound::Excluded(7), Some((4, Some(7)))),
     834            3 :             // 1-sized ranges are fine, 0 aren't and will panic (separate test).
     835            3 :             (Bound::Included(3), Bound::Included(3), Some((3, Some(4)))),
     836            3 :             (Bound::Included(3), Bound::Excluded(4), Some((3, Some(4)))),
     837            3 :         ];
     838              : 
     839           36 :         for (byte_start, byte_end, expect) in cases {
     840           33 :             let opts = DownloadOpts {
     841           33 :                 byte_start,
     842           33 :                 byte_end,
     843           33 :                 ..Default::default()
     844           33 :             };
     845           33 :             let result = opts.byte_range();
     846           33 :             assert_eq!(
     847              :                 result, expect,
     848            0 :                 "byte_start={byte_start:?} byte_end={byte_end:?}"
     849              :             );
     850              : 
     851              :             // Check generated HTTP header, which uses an inclusive range.
     852           33 :             let expect_header = expect.map(|(start, end)| match end {
     853           24 :                 Some(end) => format!("bytes={start}-{}", end - 1), // inclusive end
     854            6 :                 None => format!("bytes={start}-"),
     855           33 :             });
     856           33 :             assert_eq!(
     857           33 :                 opts.byte_range_header(),
     858              :                 expect_header,
     859            0 :                 "byte_start={byte_start:?} byte_end={byte_end:?}"
     860              :             );
     861              :         }
     862            3 :     }
     863              : 
     864              :     /// DownloadOpts::byte_range() zero-sized byte range should panic.
     865              :     #[test]
     866              :     #[should_panic]
     867            3 :     fn download_opts_byte_range_zero() {
     868            3 :         DownloadOpts {
     869            3 :             byte_start: Bound::Included(3),
     870            3 :             byte_end: Bound::Excluded(3),
     871            3 :             ..Default::default()
     872            3 :         }
     873            3 :         .byte_range();
     874            3 :     }
     875              : 
     876              :     /// DownloadOpts::byte_range() negative byte range should panic.
     877              :     #[test]
     878              :     #[should_panic]
     879            3 :     fn download_opts_byte_range_negative() {
     880            3 :         DownloadOpts {
     881            3 :             byte_start: Bound::Included(3),
     882            3 :             byte_end: Bound::Included(2),
     883            3 :             ..Default::default()
     884            3 :         }
     885            3 :         .byte_range();
     886            3 :     }
     887              : 
     888              :     #[test]
     889            3 :     fn test_object_name() {
     890            3 :         let k = RemotePath::new(Utf8Path::new("a/b/c")).unwrap();
     891            3 :         assert_eq!(k.object_name(), Some("c"));
     892              : 
     893            3 :         let k = RemotePath::new(Utf8Path::new("a/b/c/")).unwrap();
     894            3 :         assert_eq!(k.object_name(), Some("c"));
     895              : 
     896            3 :         let k = RemotePath::new(Utf8Path::new("a/")).unwrap();
     897            3 :         assert_eq!(k.object_name(), Some("a"));
     898              : 
     899              :         // XXX is it impossible to have an empty key?
     900            3 :         let k = RemotePath::new(Utf8Path::new("")).unwrap();
     901            3 :         assert_eq!(k.object_name(), None);
     902            3 :     }
     903              : 
     904              :     #[test]
     905            3 :     fn remote_path_cannot_be_created_from_absolute_ones() {
     906            3 :         let err = RemotePath::new(Utf8Path::new("/")).expect_err("Should fail on absolute paths");
     907            3 :         assert_eq!(err.to_string(), "Path \"/\" is not relative");
     908            3 :     }
     909              : }
        

Generated by: LCOV version 2.1-beta