//! A set of generic storage abstractions for the page server to use when backing up and restoring its state from external storage.
//! No other modules from this tree are supposed to be used directly by external code.
//!
//! The [`RemoteStorage`] trait is a CRUD-like generic abstraction for adapting external storages, with a few implementations:
//! * [`local_fs`] uses the local file system as external storage
//! * [`s3_bucket`] uses an AWS S3 bucket as external storage
//! * [`azure_blob`] uses Azure Blob storage as external storage
//!
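//! A minimal usage sketch (config construction elided; the crate name, key path, and error
//! handling here are illustrative, not prescriptive):
//!
//! ```ignore
//! use remote_storage::{DownloadOpts, GenericRemoteStorage, RemotePath, RemoteStorageConfig};
//! use tokio_util::sync::CancellationToken;
//!
//! async fn fetch(config: &RemoteStorageConfig) -> anyhow::Result<()> {
//!     let storage = GenericRemoteStorage::from_config(config).await?;
//!     let path = RemotePath::from_string("tenants/some-tenant/objects/key")?;
//!     let cancel = CancellationToken::new();
//!     let download = storage.download(&path, &DownloadOpts::default(), &cancel).await?;
//!     // `download.download_stream` yields `std::io::Result<Bytes>` chunks,
//!     // compatible with `tokio::io::copy_buf`.
//!     Ok(())
//! }
//! ```
//!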
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]

mod azure_blob;
mod config;
mod error;
mod local_fs;
mod metrics;
mod s3_bucket;
mod simulate_failures;
mod support;

use std::collections::HashMap;
use std::fmt::Debug;
use std::num::NonZeroU32;
use std::ops::Bound;
use std::pin::{Pin, pin};
use std::sync::Arc;
use std::time::SystemTime;

use anyhow::Context;
/// The Azure SDK's ETag type is a simple String wrapper: we reuse it internally instead of redefining it here.
pub use azure_core::Etag;
use bytes::Bytes;
use camino::{Utf8Path, Utf8PathBuf};
pub use error::{DownloadError, TimeTravelError, TimeoutOrCancel};
use futures::StreamExt;
use futures::stream::Stream;
use itertools::Itertools as _;
use s3_bucket::RequestKind;
use serde::{Deserialize, Serialize};
use tokio::sync::Semaphore;
use tokio_util::sync::CancellationToken;
use tracing::info;

pub use self::azure_blob::AzureBlobStorage;
pub use self::local_fs::LocalFs;
pub use self::s3_bucket::S3Bucket;
pub use self::simulate_failures::UnreliableWrapper;
pub use crate::config::{AzureConfig, RemoteStorageConfig, RemoteStorageKind, S3Config};

/// Default concurrency limit for S3 operations
///
/// Currently, sync happens with AWS S3, which has two limits on requests per second:
/// ~200 RPS for IAM services
/// <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html>
/// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
/// <https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>
pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
/// Set this limit analogously to the S3 limit.
///
/// Here, a limit of at most 20k concurrent connections was noted.
/// <https://learn.microsoft.com/en-us/answers/questions/1301863/is-there-any-limitation-to-concurrent-connections>
pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
/// Set this limit analogously to the S3 limit.
///
/// The local filesystem backend doesn't enforce a concurrency limit itself, but this also bounds
/// the upload queue concurrency. Some tests create thousands of uploads, which slows down the
/// quadratic scheduling of the upload queue, and there is no point spawning so many Tokio tasks.
pub const DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT: usize = 100;
/// No limit on the client side, which currently means 1000 for AWS S3.
/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax>
pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;

/// As defined in the S3 docs
///
/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html>
pub const MAX_KEYS_PER_DELETE_S3: usize = 1000;

/// As defined in the Azure docs
///
/// <https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch>
pub const MAX_KEYS_PER_DELETE_AZURE: usize = 256;

const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';

/// Path on the remote storage, relative to some inner prefix.
/// The prefix is an implementation detail that allows representing local paths
/// as remote ones, with the local storage prefix stripped away.
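///
/// A short usage sketch (the paths are illustrative):
/// ```ignore
/// let path = RemotePath::new(Utf8Path::new("tenant/timeline/layer"))?;
/// // Re-anchor the relative path under a local base directory:
/// assert_eq!(path.with_base(Utf8Path::new("/srv/data")), Utf8PathBuf::from("/srv/data/tenant/timeline/layer"));
/// // Absolute paths are rejected:
/// assert!(RemotePath::new(Utf8Path::new("/absolute")).is_err());
/// ```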
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RemotePath(Utf8PathBuf);

impl Serialize for RemotePath {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.collect_str(self)
    }
}

impl<'de> Deserialize<'de> for RemotePath {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let str = String::deserialize(deserializer)?;
        Ok(Self(Utf8PathBuf::from(&str)))
    }
}

impl std::fmt::Display for RemotePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}

impl RemotePath {
    pub fn new(relative_path: &Utf8Path) -> anyhow::Result<Self> {
        anyhow::ensure!(
            relative_path.is_relative(),
            "Path {relative_path:?} is not relative"
        );
        Ok(Self(relative_path.to_path_buf()))
    }

    pub fn from_string(relative_path: &str) -> anyhow::Result<Self> {
        Self::new(Utf8Path::new(relative_path))
    }

    pub fn with_base(&self, base_path: &Utf8Path) -> Utf8PathBuf {
        base_path.join(&self.0)
    }

    pub fn object_name(&self) -> Option<&str> {
        self.0.file_name()
    }

    pub fn join(&self, path: impl AsRef<Utf8Path>) -> Self {
        Self(self.0.join(path))
    }

    pub fn get_path(&self) -> &Utf8PathBuf {
        &self.0
    }

    pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> {
        self.0.strip_prefix(&p.0)
    }

    pub fn add_trailing_slash(&self) -> Self {
        // Unwrap safety: inputs are guaranteed to be valid UTF-8
        Self(format!("{}/", self.0).try_into().unwrap())
    }
}

/// We don't need callers to be able to pass arbitrary delimiters: just control
/// whether listings will use a '/' separator or not.
///
/// The WithDelimiter mode will populate `prefixes` and `keys` in the result. The
/// NoDelimiter mode will only populate `keys`.
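///
/// For example, given objects `a/b`, `a/c/1`, and `a/c/2`, a WithDelimiter listing under
/// prefix `a/` yields `a/c` in `prefixes` and `a/b` in `keys`, while a NoDelimiter listing
/// yields all three objects in `keys`.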
#[derive(Copy, Clone)]
pub enum ListingMode {
    WithDelimiter,
    NoDelimiter,
}

#[derive(PartialEq, Eq, Debug, Clone)]
pub struct ListingObject {
    pub key: RemotePath,
    pub last_modified: SystemTime,
    pub size: u64,
}

#[derive(Default)]
pub struct Listing {
    pub prefixes: Vec<RemotePath>,
    pub keys: Vec<ListingObject>,
}

/// Options for downloads. The default value is a plain GET.
pub struct DownloadOpts {
    /// If given, returns [`DownloadError::Unmodified`] if the object still has
    /// the same ETag (using If-None-Match).
    pub etag: Option<Etag>,
    /// The start of the byte range to download, or unbounded.
    pub byte_start: Bound<u64>,
    /// The end of the byte range to download, or unbounded. Must be after the
    /// start bound.
    pub byte_end: Bound<u64>,
    /// Indicates whether we're downloading something small or large. This indirectly controls
    /// timeouts: for something like an index/manifest/heatmap, we should time out faster than
    /// for layer files.
    pub kind: DownloadKind,
}

pub enum DownloadKind {
    Large,
    Small,
}

impl Default for DownloadOpts {
    fn default() -> Self {
        Self {
            etag: Default::default(),
            byte_start: Bound::Unbounded,
            byte_end: Bound::Unbounded,
            kind: DownloadKind::Large,
        }
    }
}

impl DownloadOpts {
    /// Returns the byte range with inclusive start and exclusive end, or None
    /// if unbounded.
    pub fn byte_range(&self) -> Option<(u64, Option<u64>)> {
        if self.byte_start == Bound::Unbounded && self.byte_end == Bound::Unbounded {
            return None;
        }
        let start = match self.byte_start {
            Bound::Excluded(i) => i + 1,
            Bound::Included(i) => i,
            Bound::Unbounded => 0,
        };
        let end = match self.byte_end {
            Bound::Excluded(i) => Some(i),
            Bound::Included(i) => Some(i + 1),
            Bound::Unbounded => None,
        };
        if let Some(end) = end {
            assert!(start < end, "range end {end} at or before start {start}");
        }
        Some((start, end))
    }

    /// Returns the byte range as an RFC 2616 Range header value with inclusive
    /// bounds, or None if unbounded.
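    ///
    /// For example, `byte_start: Bound::Included(3)` and `byte_end: Bound::Excluded(7)`
    /// produce `"bytes=3-6"`.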
    pub fn byte_range_header(&self) -> Option<String> {
        self.byte_range()
            .map(|(start, end)| (start, end.map(|end| end - 1))) // make end inclusive
            .map(|(start, end)| match end {
                Some(end) => format!("bytes={start}-{end}"),
                None => format!("bytes={start}-"),
            })
    }
}

/// Storage (potentially remote) API for the page server to manage its state.
/// This storage tries to be unaware of any layered repository context,
/// providing basic CRUD operations for storage files.
#[allow(async_fn_in_trait)]
pub trait RemoteStorage: Send + Sync + 'static {
    /// List objects in remote storage, with semantics matching AWS S3's [`ListObjectsV2`].
    ///
    /// The stream is guaranteed to yield at least one element: an `Err()` in the error case,
    /// or at minimum an empty `Listing` otherwise.
    ///
    /// The stream does not end when it returns an error, as long as [`is_permanent`] returns
    /// false for that error: `next` can be retried and may succeed on a later attempt.
    ///
    /// Note that the prefix is relative to any `prefix_in_bucket` configured for the client, not
    /// to the absolute root of the bucket.
    ///
    /// `mode` configures whether to use a delimiter. Without a delimiter, all keys
    /// within the prefix are listed in the `keys` of the result. With a delimiter, any "directories" at the top level of
    /// the prefix are returned in the `prefixes` of the result, and keys in the top level of the prefix are
    /// returned in `keys`.
    ///
    /// `max_keys` controls the maximum number of keys that will be returned. If this is None, this function
    /// will iteratively call ListObjects until it runs out of keys. Note that this is not safe to use on
    /// unlimited-size buckets, as the full list of objects is allocated into a monolithic data structure.
    ///
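    /// A minimal consumption sketch with retries on transient errors (`process` is a
    /// hypothetical callback; the retry policy is illustrative, not prescriptive):
    ///
    /// ```ignore
    /// let mut stream = std::pin::pin!(storage.list_streaming(None, ListingMode::NoDelimiter, None, &cancel));
    /// while let Some(result) = stream.next().await {
    ///     match result {
    ///         Ok(listing) => process(listing.keys),
    ///         // Transient error: polling `next` again retries the failed page.
    ///         Err(err) if !err.is_permanent() => continue,
    ///         Err(err) => return Err(err),
    ///     }
    /// }
    /// ```
    ///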
    /// [`ListObjectsV2`]: <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html>
    /// [`is_permanent`]: DownloadError::is_permanent
    fn list_streaming(
        &self,
        prefix: Option<&RemotePath>,
        mode: ListingMode,
        max_keys: Option<NonZeroU32>,
        cancel: &CancellationToken,
    ) -> impl Stream<Item = Result<Listing, DownloadError>> + Send;

    async fn list(
        &self,
        prefix: Option<&RemotePath>,
        mode: ListingMode,
        max_keys: Option<NonZeroU32>,
        cancel: &CancellationToken,
    ) -> Result<Listing, DownloadError> {
        let mut stream = pin!(self.list_streaming(prefix, mode, max_keys, cancel));
        let mut combined = stream.next().await.expect("At least one item required")?;
        while let Some(list) = stream.next().await {
            let list = list?;
            combined.keys.extend(list.keys.into_iter());
            combined.prefixes.extend_from_slice(&list.prefixes);
        }
        Ok(combined)
    }

    /// Obtain metadata information about an object.
    async fn head_object(
        &self,
        key: &RemotePath,
        cancel: &CancellationToken,
    ) -> Result<ListingObject, DownloadError>;

    /// Streams the local file contents into the remote storage entry.
    ///
    /// If the operation fails because of timeout or cancellation, the root cause of the error will be
    /// set to `TimeoutOrCancel`.
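    ///
    /// A minimal single-chunk upload sketch (the body, path, and error handling are
    /// illustrative):
    /// ```ignore
    /// let body = bytes::Bytes::from_static(b"hello");
    /// let len = body.len();
    /// let source = futures::stream::once(futures::future::ready(std::io::Result::Ok(body)));
    /// storage.upload(source, len, &path, None, &cancel).await?;
    /// ```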
    async fn upload(
        &self,
        from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
        // S3 PUT requests require the content length to be specified;
        // otherwise they start to fail as the concurrent connection count increases.
        data_size_bytes: usize,
        to: &RemotePath,
        metadata: Option<StorageMetadata>,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()>;

    /// Streams the remote storage entry contents.
    ///
    /// The returned download stream will obey the initial timeout and cancellation signal by erroring
    /// on whichever happens first. Only one of the reasons will fail the stream, which is usually
    /// enough for `tokio::io::copy_buf` usage. If needed, the error can be filtered out.
    ///
    /// Returns the metadata, if any was stored with the file previously.
    async fn download(
        &self,
        from: &RemotePath,
        opts: &DownloadOpts,
        cancel: &CancellationToken,
    ) -> Result<Download, DownloadError>;

    /// Delete a single path from remote storage.
    ///
    /// If the operation fails because of timeout or cancellation, the root cause of the error will be
    /// set to `TimeoutOrCancel`. In that situation it is unknown whether the deletion went through.
    async fn delete(&self, path: &RemotePath, cancel: &CancellationToken) -> anyhow::Result<()>;

    /// Delete multiple paths from remote storage.
    ///
    /// If the operation fails because of timeout or cancellation, the root cause of the error will be
    /// set to `TimeoutOrCancel`. In that situation it is unknown which deletions, if any, went
    /// through.
    async fn delete_objects(
        &self,
        paths: &[RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()>;

    /// Returns the maximum number of keys that a call to [`Self::delete_objects`] can delete without chunking.
    ///
    /// The returned value is only an optimization hint: one can pass a larger number of objects to
    /// `delete_objects` as well.
    ///
    /// The value is guaranteed to be >= 1.
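    ///
    /// A chunked-deletion sketch (chunking is an optional optimization, since
    /// `delete_objects` also accepts larger batches):
    /// ```ignore
    /// for chunk in paths.chunks(storage.max_keys_per_delete()) {
    ///     storage.delete_objects(chunk, &cancel).await?;
    /// }
    /// ```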
    fn max_keys_per_delete(&self) -> usize;

    /// Deletes all objects matching the given prefix.
    ///
    /// NB: this uses NoDelimiter and will match partial prefixes. For example, the prefix /a/b will
    /// delete /a/b, /a/b/*, /a/bc, /a/bc/*, etc.
    ///
    /// If the operation fails because of timeout or cancellation, the root cause of the error will
    /// be set to `TimeoutOrCancel`. In that situation it is unknown which deletions, if any, went
    /// through.
    async fn delete_prefix(
        &self,
        prefix: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        let mut stream =
            pin!(self.list_streaming(Some(prefix), ListingMode::NoDelimiter, None, cancel));
        while let Some(result) = stream.next().await {
            let keys = match result {
                Ok(listing) if listing.keys.is_empty() => continue,
                Ok(listing) => listing.keys.into_iter().map(|o| o.key).collect_vec(),
                Err(DownloadError::Cancelled) => return Err(TimeoutOrCancel::Cancel.into()),
                Err(DownloadError::Timeout) => return Err(TimeoutOrCancel::Timeout.into()),
                Err(err) => return Err(err.into()),
            };
            tracing::info!("Deleting {} keys from remote storage", keys.len());
            self.delete_objects(&keys, cancel).await?;
        }
        Ok(())
    }

    /// Copy a remote object inside a bucket from one path to another.
    async fn copy(
        &self,
        from: &RemotePath,
        to: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()>;

    /// Resets the content of everything with the given prefix to the given state.
    async fn time_travel_recover(
        &self,
        prefix: Option<&RemotePath>,
        timestamp: SystemTime,
        done_if_after: SystemTime,
        cancel: &CancellationToken,
    ) -> Result<(), TimeTravelError>;
}

/// Data part of an ongoing [`Download`].
///
/// `DownloadStream` is sensitive to the timeout and cancellation used with the original
/// [`RemoteStorage::download`] request. The type yields `std::io::Result<Bytes>` to be compatible
/// with `tokio::io::copy_buf`.
// This has 'static because safekeepers do not use cancellation tokens (yet)
pub type DownloadStream =
    Pin<Box<dyn Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static>>;

pub struct Download {
    pub download_stream: DownloadStream,
    /// The last time the file was modified (`last-modified` HTTP header)
    pub last_modified: SystemTime,
    /// A way to identify this specific version of the resource (`etag` HTTP header)
    pub etag: Etag,
    /// Extra key-value data, associated with the current remote file.
    pub metadata: Option<StorageMetadata>,
}

impl Debug for Download {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Download")
            .field("metadata", &self.metadata)
            .finish()
    }
}

/// Every currently supported storage kind.
/// Serves as a simple way to pass around the [`RemoteStorage`] without dealing with generics.
// Require Clone for `Other` due to https://github.com/rust-lang/rust/issues/26925
#[derive(Clone)]
pub enum GenericRemoteStorage<Other: Clone = Arc<UnreliableWrapper>> {
    LocalFs(LocalFs),
    AwsS3(Arc<S3Bucket>),
    AzureBlob(Arc<AzureBlobStorage>),
    Unreliable(Other),
}

impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
    // See [`RemoteStorage::list`].
    pub async fn list(
        &self,
        prefix: Option<&RemotePath>,
        mode: ListingMode,
        max_keys: Option<NonZeroU32>,
        cancel: &CancellationToken,
    ) -> Result<Listing, DownloadError> {
        match self {
            Self::LocalFs(s) => s.list(prefix, mode, max_keys, cancel).await,
            Self::AwsS3(s) => s.list(prefix, mode, max_keys, cancel).await,
            Self::AzureBlob(s) => s.list(prefix, mode, max_keys, cancel).await,
            Self::Unreliable(s) => s.list(prefix, mode, max_keys, cancel).await,
        }
    }

    // See [`RemoteStorage::list_streaming`].
    pub fn list_streaming<'a>(
        &'a self,
        prefix: Option<&'a RemotePath>,
        mode: ListingMode,
        max_keys: Option<NonZeroU32>,
        cancel: &'a CancellationToken,
    ) -> impl Stream<Item = Result<Listing, DownloadError>> + 'a + Send {
        match self {
            Self::LocalFs(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel))
                as Pin<Box<dyn Stream<Item = Result<Listing, DownloadError>> + Send>>,
            Self::AwsS3(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
            Self::AzureBlob(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
            Self::Unreliable(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
        }
    }

    // See [`RemoteStorage::head_object`].
    pub async fn head_object(
        &self,
        key: &RemotePath,
        cancel: &CancellationToken,
    ) -> Result<ListingObject, DownloadError> {
        match self {
            Self::LocalFs(s) => s.head_object(key, cancel).await,
            Self::AwsS3(s) => s.head_object(key, cancel).await,
            Self::AzureBlob(s) => s.head_object(key, cancel).await,
            Self::Unreliable(s) => s.head_object(key, cancel).await,
        }
    }

    /// See [`RemoteStorage::upload`]
    pub async fn upload(
        &self,
        from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
        data_size_bytes: usize,
        to: &RemotePath,
        metadata: Option<StorageMetadata>,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
            Self::AwsS3(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
            Self::AzureBlob(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
            Self::Unreliable(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
        }
    }

    /// See [`RemoteStorage::download`]
    pub async fn download(
        &self,
        from: &RemotePath,
        opts: &DownloadOpts,
        cancel: &CancellationToken,
    ) -> Result<Download, DownloadError> {
        match self {
            Self::LocalFs(s) => s.download(from, opts, cancel).await,
            Self::AwsS3(s) => s.download(from, opts, cancel).await,
            Self::AzureBlob(s) => s.download(from, opts, cancel).await,
            Self::Unreliable(s) => s.download(from, opts, cancel).await,
        }
    }

    /// See [`RemoteStorage::delete`]
    pub async fn delete(
        &self,
        path: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.delete(path, cancel).await,
            Self::AwsS3(s) => s.delete(path, cancel).await,
            Self::AzureBlob(s) => s.delete(path, cancel).await,
            Self::Unreliable(s) => s.delete(path, cancel).await,
        }
    }

    /// See [`RemoteStorage::delete_objects`]
    pub async fn delete_objects(
        &self,
        paths: &[RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.delete_objects(paths, cancel).await,
            Self::AwsS3(s) => s.delete_objects(paths, cancel).await,
            Self::AzureBlob(s) => s.delete_objects(paths, cancel).await,
            Self::Unreliable(s) => s.delete_objects(paths, cancel).await,
        }
    }

    /// See [`RemoteStorage::max_keys_per_delete`]
    pub fn max_keys_per_delete(&self) -> usize {
        match self {
            Self::LocalFs(s) => s.max_keys_per_delete(),
            Self::AwsS3(s) => s.max_keys_per_delete(),
            Self::AzureBlob(s) => s.max_keys_per_delete(),
            Self::Unreliable(s) => s.max_keys_per_delete(),
        }
    }

    /// See [`RemoteStorage::delete_prefix`]
    pub async fn delete_prefix(
        &self,
        prefix: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.delete_prefix(prefix, cancel).await,
            Self::AwsS3(s) => s.delete_prefix(prefix, cancel).await,
            Self::AzureBlob(s) => s.delete_prefix(prefix, cancel).await,
            Self::Unreliable(s) => s.delete_prefix(prefix, cancel).await,
        }
    }

    /// See [`RemoteStorage::copy`]
    pub async fn copy_object(
        &self,
        from: &RemotePath,
        to: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.copy(from, to, cancel).await,
            Self::AwsS3(s) => s.copy(from, to, cancel).await,
            Self::AzureBlob(s) => s.copy(from, to, cancel).await,
            Self::Unreliable(s) => s.copy(from, to, cancel).await,
        }
    }

    /// See [`RemoteStorage::time_travel_recover`].
    pub async fn time_travel_recover(
        &self,
        prefix: Option<&RemotePath>,
        timestamp: SystemTime,
        done_if_after: SystemTime,
        cancel: &CancellationToken,
    ) -> Result<(), TimeTravelError> {
        match self {
            Self::LocalFs(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
            Self::AwsS3(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
            Self::AzureBlob(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
            Self::Unreliable(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
        }
    }
}

impl GenericRemoteStorage {
    pub async fn from_config(storage_config: &RemoteStorageConfig) -> anyhow::Result<Self> {
        let timeout = storage_config.timeout;

        // If someone overrides timeout to be small without adjusting small_timeout, adjust it automatically.
        let small_timeout = std::cmp::min(storage_config.small_timeout, timeout);

        Ok(match &storage_config.storage {
            RemoteStorageKind::LocalFs { local_path: path } => {
                info!("Using fs root '{path}' as a remote storage");
                Self::LocalFs(LocalFs::new(path.clone(), timeout)?)
            }
            RemoteStorageKind::AwsS3(s3_config) => {
                // The profile and access key id are only printed here for debugging purposes;
                // their values don't indicate which auth option is eventually chosen.
                let profile = std::env::var("AWS_PROFILE").unwrap_or_else(|_| "<none>".into());
                let access_key_id =
                    std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "<none>".into());
                info!(
                    "Using s3 bucket '{}' in region '{}' as a remote storage, prefix in bucket: '{:?}', bucket endpoint: '{:?}', profile: {profile}, access_key_id: {access_key_id}",
                    s3_config.bucket_name,
                    s3_config.bucket_region,
                    s3_config.prefix_in_bucket,
                    s3_config.endpoint
                );
                Self::AwsS3(Arc::new(S3Bucket::new(s3_config, timeout).await?))
            }
            RemoteStorageKind::AzureContainer(azure_config) => {
                let storage_account = azure_config
                    .storage_account
                    .as_deref()
                    .unwrap_or("<AZURE_STORAGE_ACCOUNT>");
                info!(
                    "Using azure container '{}' in account '{storage_account}' in region '{}' as a remote storage, prefix in container: '{:?}'",
                    azure_config.container_name,
                    azure_config.container_region,
                    azure_config.prefix_in_container
                );
                Self::AzureBlob(Arc::new(AzureBlobStorage::new(
                    azure_config,
                    timeout,
                    small_timeout,
                )?))
            }
        })
    }

    pub fn unreliable_wrapper(s: Self, fail_first: u64) -> Self {
        Self::Unreliable(Arc::new(UnreliableWrapper::new(s, fail_first)))
    }

    /// See [`RemoteStorage::upload`], which this method calls with `None` as metadata.
    pub async fn upload_storage_object(
        &self,
        from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
        from_size_bytes: usize,
        to: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        self.upload(from, from_size_bytes, to, None, cancel)
            .await
            .with_context(|| {
                format!("Failed to upload data of length {from_size_bytes} to storage path {to:?}")
            })
    }

    /// The name of the bucket/container/etc.
    pub fn bucket_name(&self) -> Option<&str> {
        match self {
            Self::LocalFs(_s) => None,
            Self::AwsS3(s) => Some(s.bucket_name()),
            Self::AzureBlob(s) => Some(s.container_name()),
            Self::Unreliable(_s) => None,
        }
    }
}

/// Extra set of key-value pairs that contain arbitrary metadata about the storage entry.
/// Immutable, cannot be changed once the file is created.
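///
/// For example (the keys are illustrative, not a defined schema):
/// `StorageMetadata::from([("kind", "index"), ("version", "2")])`.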
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct StorageMetadata(HashMap<String, String>);

impl<const N: usize> From<[(&str, &str); N]> for StorageMetadata {
    fn from(arr: [(&str, &str); N]) -> Self {
        let map: HashMap<String, String> = arr
            .iter()
            .map(|(k, v)| (k.to_string(), v.to_string()))
            .collect();
        Self(map)
    }
}

struct ConcurrencyLimiter {
    // Every request to S3 can be throttled or cancelled if a certain number of requests per second is exceeded.
    // The same goes for IAM, which is queried before every S3 request, if enabled. IAM has an even lower RPS threshold.
    // This helps to ensure we don't exceed the thresholds.
    write: Arc<Semaphore>,
    read: Arc<Semaphore>,
}

impl ConcurrencyLimiter {
    fn for_kind(&self, kind: RequestKind) -> &Arc<Semaphore> {
        match kind {
            RequestKind::Get => &self.read,
            RequestKind::Put => &self.write,
            RequestKind::List => &self.read,
            RequestKind::Delete => &self.write,
            RequestKind::Copy => &self.write,
            RequestKind::TimeTravel => &self.write,
            RequestKind::Head => &self.read,
        }
    }

    async fn acquire(
        &self,
        kind: RequestKind,
    ) -> Result<tokio::sync::SemaphorePermit<'_>, tokio::sync::AcquireError> {
        self.for_kind(kind).acquire().await
    }

    async fn acquire_owned(
        &self,
        kind: RequestKind,
    ) -> Result<tokio::sync::OwnedSemaphorePermit, tokio::sync::AcquireError> {
        Arc::clone(self.for_kind(kind)).acquire_owned().await
    }

    fn new(limit: usize) -> ConcurrencyLimiter {
        Self {
            read: Arc::new(Semaphore::new(limit)),
            write: Arc::new(Semaphore::new(limit)),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    /// DownloadOpts::byte_range() should generate (inclusive, exclusive) ranges
    /// with an optional end bound, or None when unbounded.
    #[test]
    fn download_opts_byte_range() {
        // Consider using test_case or a similar table-driven test framework.
        let cases = [
            // (byte_start, byte_end, expected)
            (Bound::Unbounded, Bound::Unbounded, None),
            (Bound::Unbounded, Bound::Included(7), Some((0, Some(8)))),
            (Bound::Unbounded, Bound::Excluded(7), Some((0, Some(7)))),
            (Bound::Included(3), Bound::Unbounded, Some((3, None))),
            (Bound::Included(3), Bound::Included(7), Some((3, Some(8)))),
            (Bound::Included(3), Bound::Excluded(7), Some((3, Some(7)))),
            (Bound::Excluded(3), Bound::Unbounded, Some((4, None))),
            (Bound::Excluded(3), Bound::Included(7), Some((4, Some(8)))),
            (Bound::Excluded(3), Bound::Excluded(7), Some((4, Some(7)))),
            // 1-sized ranges are fine, 0-sized ones aren't and will panic (separate test).
            (Bound::Included(3), Bound::Included(3), Some((3, Some(4)))),
            (Bound::Included(3), Bound::Excluded(4), Some((3, Some(4)))),
        ];

        for (byte_start, byte_end, expect) in cases {
            let opts = DownloadOpts {
                byte_start,
                byte_end,
                ..Default::default()
            };
            let result = opts.byte_range();
            assert_eq!(
                result, expect,
                "byte_start={byte_start:?} byte_end={byte_end:?}"
            );

            // Check the generated HTTP header, which uses an inclusive range.
            let expect_header = expect.map(|(start, end)| match end {
                Some(end) => format!("bytes={start}-{}", end - 1), // inclusive end
                None => format!("bytes={start}-"),
            });
            assert_eq!(
                opts.byte_range_header(),
                expect_header,
                "byte_start={byte_start:?} byte_end={byte_end:?}"
            );
        }
    }

    /// DownloadOpts::byte_range() should panic on a zero-sized byte range.
    #[test]
    #[should_panic]
    fn download_opts_byte_range_zero() {
        DownloadOpts {
            byte_start: Bound::Included(3),
            byte_end: Bound::Excluded(3),
            ..Default::default()
        }
        .byte_range();
    }

    /// DownloadOpts::byte_range() should panic on a negative byte range.
    #[test]
    #[should_panic]
    fn download_opts_byte_range_negative() {
        DownloadOpts {
            byte_start: Bound::Included(3),
            byte_end: Bound::Included(2),
            ..Default::default()
        }
        .byte_range();
    }

    #[test]
    fn test_object_name() {
        let k = RemotePath::new(Utf8Path::new("a/b/c")).unwrap();
        assert_eq!(k.object_name(), Some("c"));

        let k = RemotePath::new(Utf8Path::new("a/b/c/")).unwrap();
        assert_eq!(k.object_name(), Some("c"));

        let k = RemotePath::new(Utf8Path::new("a/")).unwrap();
        assert_eq!(k.object_name(), Some("a"));

        // XXX is it impossible to have an empty key?
        let k = RemotePath::new(Utf8Path::new("")).unwrap();
        assert_eq!(k.object_name(), None);
    }

    #[test]
    fn remote_path_cannot_be_created_from_absolute_ones() {
        let err = RemotePath::new(Utf8Path::new("/")).expect_err("Should fail on absolute paths");
        assert_eq!(err.to_string(), "Path \"/\" is not relative");
    }
}