Line data Source code
1 : //! A set of generic storage abstractions for the page server to use when backing up and restoring its state from the external storage.
2 : //! No other modules from this tree are supposed to be used directly by external code.
3 : //!
4 : //! The [`RemoteStorage`] trait is a CRUD-like generic abstraction for adapting external storages, with a few implementations:
5 : //! * [`local_fs`] uses the local file system as an external storage
6 : //! * [`s3_bucket`] uses an AWS S3 bucket as an external storage
7 : //! * [`azure_blob`] uses Azure Blob storage as an external storage
8 : //!
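//! A minimal sketch of the intended entry point, assuming a parsed
//! [`RemoteStorageConfig`] is already available (e.g. deserialized from the
//! pageserver config); the `list_everything` helper is illustrative only:
//!
//! ```ignore
//! async fn list_everything(config: &RemoteStorageConfig) -> anyhow::Result<()> {
//!     let storage = GenericRemoteStorage::from_config(config).await?;
//!     let cancel = tokio_util::sync::CancellationToken::new();
//!     let listing = storage
//!         .list(None, ListingMode::NoDelimiter, None, &cancel)
//!         .await?;
//!     tracing::info!("remote storage holds {} keys", listing.keys.len());
//!     Ok(())
//! }
//! ```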
9 : #![deny(unsafe_code)]
10 : #![deny(clippy::undocumented_unsafe_blocks)]
11 :
12 : mod azure_blob;
13 : mod config;
14 : mod error;
15 : mod local_fs;
16 : mod metrics;
17 : mod s3_bucket;
18 : mod simulate_failures;
19 : mod support;
20 :
21 : use std::{
22 : collections::HashMap,
23 : fmt::Debug,
24 : num::NonZeroU32,
25 : ops::Bound,
26 : pin::{pin, Pin},
27 : sync::Arc,
28 : time::SystemTime,
29 : };
30 :
31 : use anyhow::Context;
32 : use camino::{Utf8Path, Utf8PathBuf};
33 :
34 : use bytes::Bytes;
35 : use futures::{stream::Stream, StreamExt};
36 : use itertools::Itertools as _;
37 : use serde::{Deserialize, Serialize};
38 : use tokio::sync::Semaphore;
39 : use tokio_util::sync::CancellationToken;
40 : use tracing::info;
41 :
42 : pub use self::{
43 : azure_blob::AzureBlobStorage, local_fs::LocalFs, s3_bucket::S3Bucket,
44 : simulate_failures::UnreliableWrapper,
45 : };
46 : use s3_bucket::RequestKind;
47 :
48 : pub use crate::config::{AzureConfig, RemoteStorageConfig, RemoteStorageKind, S3Config};
49 :
50 : /// Azure SDK's ETag type is a simple String wrapper: we reuse it internally instead of defining our own.
51 : pub use azure_core::Etag;
52 :
53 : pub use error::{DownloadError, TimeTravelError, TimeoutOrCancel};
54 :
55 : /// Default concurrency limit for S3 operations
56 : ///
57 : /// Currently, sync happens with AWS S3, which has two limits on requests per second:
58 : /// ~200 RPS for IAM services
59 : /// <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html>
60 : /// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
61 : /// <https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>
62 : pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
63 : /// Set this limit analogously to the S3 limit
64 : ///
65 : /// A limit of at most 20k concurrent connections has been noted here:
66 : /// <https://learn.microsoft.com/en-us/answers/questions/1301863/is-there-any-limitation-to-concurrent-connections>
67 : pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
68 : /// No limits on the client side, which currently means 1000 for AWS S3.
69 : /// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax>
70 : pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;
71 :
72 : /// As defined in S3 docs
73 : ///
74 : /// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjects.html>
75 : pub const MAX_KEYS_PER_DELETE_S3: usize = 1000;
76 :
77 : /// As defined in Azure docs
78 : ///
79 : /// <https://learn.microsoft.com/en-us/rest/api/storageservices/blob-batch>
80 : pub const MAX_KEYS_PER_DELETE_AZURE: usize = 256;
81 :
82 : const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';
83 :
84 : /// Path on the remote storage, relative to some inner prefix.
85 : /// The prefix is an implementation detail that allows representing local paths
86 : /// as remote ones, stripping the local storage prefix away.
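///
/// A small sketch of the local-to-remote mapping described above:
///
/// ```ignore
/// let remote = RemotePath::new(Utf8Path::new("tenants/123/index"))?;
/// // Re-anchor the relative remote path under a local base directory.
/// assert_eq!(
///     remote.with_base(Utf8Path::new("/var/lib/pageserver")),
///     Utf8PathBuf::from("/var/lib/pageserver/tenants/123/index"),
/// );
/// assert_eq!(remote.object_name(), Some("index"));
/// ```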
87 : #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
88 : pub struct RemotePath(Utf8PathBuf);
89 :
90 : impl Serialize for RemotePath {
91 0 : fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
92 0 : where
93 0 : S: serde::Serializer,
94 0 : {
95 0 : serializer.collect_str(self)
96 0 : }
97 : }
98 :
99 : impl<'de> Deserialize<'de> for RemotePath {
100 0 : fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
101 0 : where
102 0 : D: serde::Deserializer<'de>,
103 0 : {
104 0 : let str = String::deserialize(deserializer)?;
105 0 : Ok(Self(Utf8PathBuf::from(&str)))
106 0 : }
107 : }
108 :
109 : impl std::fmt::Display for RemotePath {
110 525 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
111 525 : std::fmt::Display::fmt(&self.0, f)
112 525 : }
113 : }
114 :
115 : impl RemotePath {
116 4857 : pub fn new(relative_path: &Utf8Path) -> anyhow::Result<Self> {
117 4857 : anyhow::ensure!(
118 4857 : relative_path.is_relative(),
119 4 : "Path {relative_path:?} is not relative"
120 : );
121 4853 : Ok(Self(relative_path.to_path_buf()))
122 4857 : }
123 :
124 4405 : pub fn from_string(relative_path: &str) -> anyhow::Result<Self> {
125 4405 : Self::new(Utf8Path::new(relative_path))
126 4405 : }
127 :
128 4448 : pub fn with_base(&self, base_path: &Utf8Path) -> Utf8PathBuf {
129 4448 : base_path.join(&self.0)
130 4448 : }
131 :
132 18 : pub fn object_name(&self) -> Option<&str> {
133 18 : self.0.file_name()
134 18 : }
135 :
136 102 : pub fn join(&self, path: impl AsRef<Utf8Path>) -> Self {
137 102 : Self(self.0.join(path))
138 102 : }
139 :
140 1021 : pub fn get_path(&self) -> &Utf8PathBuf {
141 1021 : &self.0
142 1021 : }
143 :
144 67 : pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> {
145 67 : self.0.strip_prefix(&p.0)
146 67 : }
147 :
148 202 : pub fn add_trailing_slash(&self) -> Self {
149 202 : // Unwrap safety: inputs are guaranteed to be valid UTF-8
150 202 : Self(format!("{}/", self.0).try_into().unwrap())
151 202 : }
152 : }
153 :
154 : /// We don't need callers to be able to pass arbitrary delimiters: just control
155 : /// whether listings will use a '/' separator or not.
156 : ///
157 : /// The WithDelimiter mode will populate `prefixes` and `keys` in the result. The
158 : /// NoDelimiter mode will only populate `keys`.
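///
/// For example, with objects `a/b/c` and `a/d` and a listing of prefix `a/`,
/// `WithDelimiter` yields the prefix `a/b` and the key `a/d`, while
/// `NoDelimiter` yields the keys `a/b/c` and `a/d`.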
159 : #[derive(Copy, Clone)]
160 : pub enum ListingMode {
161 : WithDelimiter,
162 : NoDelimiter,
163 : }
164 :
165 : #[derive(PartialEq, Eq, Debug, Clone)]
166 : pub struct ListingObject {
167 : pub key: RemotePath,
168 : pub last_modified: SystemTime,
169 : pub size: u64,
170 : }
171 :
172 : #[derive(Default)]
173 : pub struct Listing {
174 : pub prefixes: Vec<RemotePath>,
175 : pub keys: Vec<ListingObject>,
176 : }
177 :
178 : /// Options for downloads. The default value is a plain GET.
179 : pub struct DownloadOpts {
180 : /// If given, returns [`DownloadError::Unmodified`] if the object still has
181 : /// the same ETag (using If-None-Match).
182 : pub etag: Option<Etag>,
183 : /// The start of the byte range to download, or unbounded.
184 : pub byte_start: Bound<u64>,
185 : /// The end of the byte range to download, or unbounded. Must be after the
186 : /// start bound.
187 : pub byte_end: Bound<u64>,
188 : /// Indicate whether we're downloading something small or large: this indirectly controls
189 : /// timeouts: for something like an index/manifest/heatmap, we should time out faster than
190 : /// for layer files
191 : pub kind: DownloadKind,
192 : }
193 :
194 : pub enum DownloadKind {
195 : Large,
196 : Small,
197 : }
198 :
199 : impl Default for DownloadOpts {
200 742 : fn default() -> Self {
201 742 : Self {
202 742 : etag: Default::default(),
203 742 : byte_start: Bound::Unbounded,
204 742 : byte_end: Bound::Unbounded,
205 742 : kind: DownloadKind::Large,
206 742 : }
207 742 : }
208 : }
209 :
210 : impl DownloadOpts {
211 : /// Returns the byte range with inclusive start and exclusive end, or None
212 : /// if unbounded.
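///
/// For example (mirroring the unit tests below), `Included(3)..Excluded(7)`
/// yields `Some((3, Some(7)))`, while `Excluded(3)..Included(7)` yields
/// `Some((4, Some(8)))`.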
213 179 : pub fn byte_range(&self) -> Option<(u64, Option<u64>)> {
214 179 : if self.byte_start == Bound::Unbounded && self.byte_end == Bound::Unbounded {
215 83 : return None;
216 96 : }
217 96 : let start = match self.byte_start {
218 18 : Bound::Excluded(i) => i + 1,
219 60 : Bound::Included(i) => i,
220 18 : Bound::Unbounded => 0,
221 : };
222 96 : let end = match self.byte_end {
223 48 : Bound::Excluded(i) => Some(i),
224 27 : Bound::Included(i) => Some(i + 1),
225 21 : Bound::Unbounded => None,
226 : };
227 96 : if let Some(end) = end {
228 75 : assert!(start < end, "range end {end} at or before start {start}");
229 21 : }
230 87 : Some((start, end))
231 170 : }
232 :
233 : /// Returns the byte range as an RFC 2616 Range header value with inclusive
234 : /// bounds, or None if unbounded.
235 65 : pub fn byte_range_header(&self) -> Option<String> {
236 65 : self.byte_range()
237 65 : .map(|(start, end)| (start, end.map(|end| end - 1))) // make end inclusive
238 65 : .map(|(start, end)| match end {
239 30 : Some(end) => format!("bytes={start}-{end}"),
240 10 : None => format!("bytes={start}-"),
241 65 : })
242 65 : }
243 : }
244 :
245 : /// Storage (potentially remote) API that the page server uses to manage its state.
246 : /// This storage tries to be unaware of any layered repository context,
247 : /// providing basic CRUD operations for storage files.
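///
/// A sketch of writing code that is generic over any backend; `object_exists`
/// is a hypothetical helper, and the `NotFound` arm assumes such a variant
/// exists on [`DownloadError`]:
///
/// ```ignore
/// async fn object_exists<S: RemoteStorage>(
///     storage: &S,
///     key: &RemotePath,
///     cancel: &CancellationToken,
/// ) -> Result<bool, DownloadError> {
///     match storage.head_object(key, cancel).await {
///         Ok(_) => Ok(true),
///         Err(DownloadError::NotFound) => Ok(false),
///         Err(err) => Err(err),
///     }
/// }
/// ```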
248 : #[allow(async_fn_in_trait)]
249 : pub trait RemoteStorage: Send + Sync + 'static {
250 : /// List objects in remote storage, with semantics matching AWS S3's [`ListObjectsV2`].
251 : ///
252 : /// The stream is guaranteed to return at least one element, even in the case of errors
253 :     /// (in that case the element is an `Err()`) or an empty `Listing`.
254 : ///
255 :     /// The stream does not end when it returns an error, as long as [`is_permanent`] returns false for the error.
256 :     /// The `next` call can be retried and may succeed on a later attempt.
257 : ///
258 : /// Note that the prefix is relative to any `prefix_in_bucket` configured for the client, not
259 : /// from the absolute root of the bucket.
260 : ///
261 : /// `mode` configures whether to use a delimiter. Without a delimiter, all keys
262 : /// within the prefix are listed in the `keys` of the result. With a delimiter, any "directories" at the top level of
263 : /// the prefix are returned in the `prefixes` of the result, and keys in the top level of the prefix are
264 :     /// returned in `keys`.
265 : ///
266 : /// `max_keys` controls the maximum number of keys that will be returned. If this is None, this function
267 :     /// will iteratively call ListObjects until it runs out of keys. Note that this is not safe to use on
268 :     /// buckets of unlimited size, as the full list of objects is allocated into a monolithic data structure.
269 : ///
270 : /// [`ListObjectsV2`]: <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html>
271 : /// [`is_permanent`]: DownloadError::is_permanent
272 : fn list_streaming(
273 : &self,
274 : prefix: Option<&RemotePath>,
275 : mode: ListingMode,
276 : max_keys: Option<NonZeroU32>,
277 : cancel: &CancellationToken,
278 : ) -> impl Stream<Item = Result<Listing, DownloadError>> + Send;
279 :
280 69 : async fn list(
281 69 : &self,
282 69 : prefix: Option<&RemotePath>,
283 69 : mode: ListingMode,
284 69 : max_keys: Option<NonZeroU32>,
285 69 : cancel: &CancellationToken,
286 69 : ) -> Result<Listing, DownloadError> {
287 69 : let mut stream = pin!(self.list_streaming(prefix, mode, max_keys, cancel));
288 69 : let mut combined = stream.next().await.expect("At least one item required")?;
289 117 : while let Some(list) = stream.next().await {
290 48 : let list = list?;
291 48 : combined.keys.extend(list.keys.into_iter());
292 48 : combined.prefixes.extend_from_slice(&list.prefixes);
293 : }
294 69 : Ok(combined)
295 69 : }
296 :
297 : /// Obtain metadata information about an object.
298 : async fn head_object(
299 : &self,
300 : key: &RemotePath,
301 : cancel: &CancellationToken,
302 : ) -> Result<ListingObject, DownloadError>;
303 :
304 :     /// Streams the local file contents into the remote storage entry.
305 : ///
306 : /// If the operation fails because of timeout or cancellation, the root cause of the error will be
307 : /// set to `TimeoutOrCancel`.
308 : async fn upload(
309 : &self,
310 : from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
311 : // S3 PUT request requires the content length to be specified,
312 : // otherwise it starts to fail with the concurrent connection count increasing.
313 : data_size_bytes: usize,
314 : to: &RemotePath,
315 : metadata: Option<StorageMetadata>,
316 : cancel: &CancellationToken,
317 : ) -> anyhow::Result<()>;
318 :
319 : /// Streams the remote storage entry contents.
320 : ///
321 : /// The returned download stream will obey initial timeout and cancellation signal by erroring
322 : /// on whichever happens first. Only one of the reasons will fail the stream, which is usually
323 : /// enough for `tokio::io::copy_buf` usage. If needed the error can be filtered out.
324 : ///
325 : /// Returns the metadata, if any was stored with the file previously.
326 : async fn download(
327 : &self,
328 : from: &RemotePath,
329 : opts: &DownloadOpts,
330 : cancel: &CancellationToken,
331 : ) -> Result<Download, DownloadError>;
332 :
333 : /// Delete a single path from remote storage.
334 : ///
335 : /// If the operation fails because of timeout or cancellation, the root cause of the error will be
336 :     /// set to `TimeoutOrCancel`. In such a situation it is unknown whether the deletion went through.
337 : async fn delete(&self, path: &RemotePath, cancel: &CancellationToken) -> anyhow::Result<()>;
338 :
339 :     /// Delete multiple paths from remote storage.
340 : ///
341 : /// If the operation fails because of timeout or cancellation, the root cause of the error will be
342 :     /// set to `TimeoutOrCancel`. In such a situation it is unknown which deletions, if any, went
343 : /// through.
344 : async fn delete_objects(
345 : &self,
346 : paths: &[RemotePath],
347 : cancel: &CancellationToken,
348 : ) -> anyhow::Result<()>;
349 :
350 : /// Returns the maximum number of keys that a call to [`Self::delete_objects`] can delete without chunking
351 : ///
352 :     /// The returned value is only an optimization hint; one can pass a larger number of objects to
353 : /// `delete_objects` as well.
354 : ///
355 : /// The value is guaranteed to be >= 1.
356 : fn max_keys_per_delete(&self) -> usize;
357 :
358 : /// Deletes all objects matching the given prefix.
359 : ///
360 : /// NB: this uses NoDelimiter and will match partial prefixes. For example, the prefix /a/b will
361 : /// delete /a/b, /a/b/*, /a/bc, /a/bc/*, etc.
362 : ///
363 : /// If the operation fails because of timeout or cancellation, the root cause of the error will
364 :     /// be set to `TimeoutOrCancel`. In such a situation it is unknown which deletions, if any, went
365 : /// through.
366 18 : async fn delete_prefix(
367 18 : &self,
368 18 : prefix: &RemotePath,
369 18 : cancel: &CancellationToken,
370 18 : ) -> anyhow::Result<()> {
371 18 : let mut stream =
372 18 : pin!(self.list_streaming(Some(prefix), ListingMode::NoDelimiter, None, cancel));
373 39 : while let Some(result) = stream.next().await {
374 21 : let keys = match result {
375 21 : Ok(listing) if listing.keys.is_empty() => continue,
376 63 : Ok(listing) => listing.keys.into_iter().map(|o| o.key).collect_vec(),
377 0 : Err(DownloadError::Cancelled) => return Err(TimeoutOrCancel::Cancel.into()),
378 0 : Err(DownloadError::Timeout) => return Err(TimeoutOrCancel::Timeout.into()),
379 0 : Err(err) => return Err(err.into()),
380 : };
381 12 : tracing::info!("Deleting {} keys from remote storage", keys.len());
382 12 : self.delete_objects(&keys, cancel).await?;
383 : }
384 18 : Ok(())
385 18 : }
386 :
387 : /// Copy a remote object inside a bucket from one path to another.
388 : async fn copy(
389 : &self,
390 : from: &RemotePath,
391 : to: &RemotePath,
392 : cancel: &CancellationToken,
393 : ) -> anyhow::Result<()>;
394 :
395 : /// Resets the content of everything with the given prefix to the given state
396 : async fn time_travel_recover(
397 : &self,
398 : prefix: Option<&RemotePath>,
399 : timestamp: SystemTime,
400 : done_if_after: SystemTime,
401 : cancel: &CancellationToken,
402 : ) -> Result<(), TimeTravelError>;
403 : }
404 :
405 : /// Data part of an ongoing [`Download`].
406 : ///
407 : /// `DownloadStream` is sensitive to the timeout and cancellation used with the original
408 : /// [`RemoteStorage::download`] request. The type yields `std::io::Result<Bytes>` to be compatible
409 : /// with `tokio::io::copy_buf`.
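///
/// A sketch of draining a download with `tokio::io::copy_buf`;
/// `tokio_util::io::StreamReader` adapts a stream of `io::Result<Bytes>` into
/// an `AsyncBufRead`:
///
/// ```ignore
/// async fn drain(download: Download) -> std::io::Result<u64> {
///     let mut reader = tokio_util::io::StreamReader::new(download.download_stream);
///     let mut sink = tokio::io::sink();
///     tokio::io::copy_buf(&mut reader, &mut sink).await
/// }
/// ```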
410 : // This has 'static because safekeepers do not use cancellation tokens (yet)
411 : pub type DownloadStream =
412 : Pin<Box<dyn Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static>>;
413 :
414 : pub struct Download {
415 : pub download_stream: DownloadStream,
416 : /// The last time the file was modified (`last-modified` HTTP header)
417 : pub last_modified: SystemTime,
418 : /// A way to identify this specific version of the resource (`etag` HTTP header)
419 : pub etag: Etag,
420 : /// Extra key-value data, associated with the current remote file.
421 : pub metadata: Option<StorageMetadata>,
422 : }
423 :
424 : impl Debug for Download {
425 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
426 0 : f.debug_struct("Download")
427 0 : .field("metadata", &self.metadata)
428 0 : .finish()
429 0 : }
430 : }
431 :
432 : /// Every currently supported storage.
433 : /// Serves as a simple way to pass around the [`RemoteStorage`] without dealing with generics.
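///
/// A round-trip sketch using the [`LocalFs`] backend (hypothetical paths,
/// surrounding async fn and error handling elided):
///
/// ```ignore
/// let storage = GenericRemoteStorage::LocalFs(LocalFs::new(
///     Utf8PathBuf::from("/tmp/remote_storage"),
///     std::time::Duration::from_secs(120),
/// )?);
/// let cancel = CancellationToken::new();
/// let path = RemotePath::from_string("tenant/data")?;
/// let body = Bytes::from_static(b"hello");
/// let stream = futures::stream::once(futures::future::ready(
///     Ok::<_, std::io::Error>(body.clone()),
/// ));
/// storage.upload(stream, body.len(), &path, None, &cancel).await?;
/// let download = storage
///     .download(&path, &DownloadOpts::default(), &cancel)
///     .await?;
/// ```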
434 : // Require Clone for `Other` due to https://github.com/rust-lang/rust/issues/26925
435 : #[derive(Clone)]
436 : pub enum GenericRemoteStorage<Other: Clone = Arc<UnreliableWrapper>> {
437 : LocalFs(LocalFs),
438 : AwsS3(Arc<S3Bucket>),
439 : AzureBlob(Arc<AzureBlobStorage>),
440 : Unreliable(Other),
441 : }
442 :
443 : impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
444 : // See [`RemoteStorage::list`].
445 467 : pub async fn list(
446 467 : &self,
447 467 : prefix: Option<&RemotePath>,
448 467 : mode: ListingMode,
449 467 : max_keys: Option<NonZeroU32>,
450 467 : cancel: &CancellationToken,
451 467 : ) -> Result<Listing, DownloadError> {
452 467 : match self {
453 398 : Self::LocalFs(s) => s.list(prefix, mode, max_keys, cancel).await,
454 50 : Self::AwsS3(s) => s.list(prefix, mode, max_keys, cancel).await,
455 19 : Self::AzureBlob(s) => s.list(prefix, mode, max_keys, cancel).await,
456 0 : Self::Unreliable(s) => s.list(prefix, mode, max_keys, cancel).await,
457 : }
458 467 : }
459 :
460 : // See [`RemoteStorage::list_streaming`].
461 3 : pub fn list_streaming<'a>(
462 3 : &'a self,
463 3 : prefix: Option<&'a RemotePath>,
464 3 : mode: ListingMode,
465 3 : max_keys: Option<NonZeroU32>,
466 3 : cancel: &'a CancellationToken,
467 3 : ) -> impl Stream<Item = Result<Listing, DownloadError>> + 'a + Send {
468 3 : match self {
469 0 : Self::LocalFs(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel))
470 0 : as Pin<Box<dyn Stream<Item = Result<Listing, DownloadError>> + Send>>,
471 2 : Self::AwsS3(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
472 1 : Self::AzureBlob(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
473 0 : Self::Unreliable(s) => Box::pin(s.list_streaming(prefix, mode, max_keys, cancel)),
474 : }
475 3 : }
476 :
477 : // See [`RemoteStorage::head_object`].
478 9 : pub async fn head_object(
479 9 : &self,
480 9 : key: &RemotePath,
481 9 : cancel: &CancellationToken,
482 9 : ) -> Result<ListingObject, DownloadError> {
483 9 : match self {
484 0 : Self::LocalFs(s) => s.head_object(key, cancel).await,
485 6 : Self::AwsS3(s) => s.head_object(key, cancel).await,
486 3 : Self::AzureBlob(s) => s.head_object(key, cancel).await,
487 0 : Self::Unreliable(s) => s.head_object(key, cancel).await,
488 : }
489 9 : }
490 :
491 : /// See [`RemoteStorage::upload`]
492 3159 : pub async fn upload(
493 3159 : &self,
494 3159 : from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
495 3159 : data_size_bytes: usize,
496 3159 : to: &RemotePath,
497 3159 : metadata: Option<StorageMetadata>,
498 3159 : cancel: &CancellationToken,
499 3159 : ) -> anyhow::Result<()> {
500 3159 : match self {
501 2843 : Self::LocalFs(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
502 199 : Self::AwsS3(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
503 93 : Self::AzureBlob(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
504 24 : Self::Unreliable(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
505 : }
506 3062 : }
507 :
508 : /// See [`RemoteStorage::download`]
509 673 : pub async fn download(
510 673 : &self,
511 673 : from: &RemotePath,
512 673 : opts: &DownloadOpts,
513 673 : cancel: &CancellationToken,
514 673 : ) -> Result<Download, DownloadError> {
515 673 : match self {
516 630 : Self::LocalFs(s) => s.download(from, opts, cancel).await,
517 32 : Self::AwsS3(s) => s.download(from, opts, cancel).await,
518 11 : Self::AzureBlob(s) => s.download(from, opts, cancel).await,
519 0 : Self::Unreliable(s) => s.download(from, opts, cancel).await,
520 : }
521 673 : }
522 :
523 : /// See [`RemoteStorage::delete`]
524 511 : pub async fn delete(
525 511 : &self,
526 511 : path: &RemotePath,
527 511 : cancel: &CancellationToken,
528 511 : ) -> anyhow::Result<()> {
529 511 : match self {
530 251 : Self::LocalFs(s) => s.delete(path, cancel).await,
531 174 : Self::AwsS3(s) => s.delete(path, cancel).await,
532 86 : Self::AzureBlob(s) => s.delete(path, cancel).await,
533 0 : Self::Unreliable(s) => s.delete(path, cancel).await,
534 : }
535 511 : }
536 :
537 : /// See [`RemoteStorage::delete_objects`]
538 21 : pub async fn delete_objects(
539 21 : &self,
540 21 : paths: &[RemotePath],
541 21 : cancel: &CancellationToken,
542 21 : ) -> anyhow::Result<()> {
543 21 : match self {
544 6 : Self::LocalFs(s) => s.delete_objects(paths, cancel).await,
545 12 : Self::AwsS3(s) => s.delete_objects(paths, cancel).await,
546 3 : Self::AzureBlob(s) => s.delete_objects(paths, cancel).await,
547 0 : Self::Unreliable(s) => s.delete_objects(paths, cancel).await,
548 : }
549 21 : }
550 :
551 : /// [`RemoteStorage::max_keys_per_delete`]
552 8 : pub fn max_keys_per_delete(&self) -> usize {
553 8 : match self {
554 8 : Self::LocalFs(s) => s.max_keys_per_delete(),
555 0 : Self::AwsS3(s) => s.max_keys_per_delete(),
556 0 : Self::AzureBlob(s) => s.max_keys_per_delete(),
557 0 : Self::Unreliable(s) => s.max_keys_per_delete(),
558 : }
559 8 : }
560 :
561 : /// See [`RemoteStorage::delete_prefix`]
562 18 : pub async fn delete_prefix(
563 18 : &self,
564 18 : prefix: &RemotePath,
565 18 : cancel: &CancellationToken,
566 18 : ) -> anyhow::Result<()> {
567 18 : match self {
568 0 : Self::LocalFs(s) => s.delete_prefix(prefix, cancel).await,
569 12 : Self::AwsS3(s) => s.delete_prefix(prefix, cancel).await,
570 6 : Self::AzureBlob(s) => s.delete_prefix(prefix, cancel).await,
571 0 : Self::Unreliable(s) => s.delete_prefix(prefix, cancel).await,
572 : }
573 18 : }
574 :
575 : /// See [`RemoteStorage::copy`]
576 3 : pub async fn copy_object(
577 3 : &self,
578 3 : from: &RemotePath,
579 3 : to: &RemotePath,
580 3 : cancel: &CancellationToken,
581 3 : ) -> anyhow::Result<()> {
582 3 : match self {
583 0 : Self::LocalFs(s) => s.copy(from, to, cancel).await,
584 2 : Self::AwsS3(s) => s.copy(from, to, cancel).await,
585 1 : Self::AzureBlob(s) => s.copy(from, to, cancel).await,
586 0 : Self::Unreliable(s) => s.copy(from, to, cancel).await,
587 : }
588 3 : }
589 :
590 : /// See [`RemoteStorage::time_travel_recover`].
591 6 : pub async fn time_travel_recover(
592 6 : &self,
593 6 : prefix: Option<&RemotePath>,
594 6 : timestamp: SystemTime,
595 6 : done_if_after: SystemTime,
596 6 : cancel: &CancellationToken,
597 6 : ) -> Result<(), TimeTravelError> {
598 6 : match self {
599 0 : Self::LocalFs(s) => {
600 0 : s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
601 0 : .await
602 : }
603 6 : Self::AwsS3(s) => {
604 6 : s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
605 6 : .await
606 : }
607 0 : Self::AzureBlob(s) => {
608 0 : s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
609 0 : .await
610 : }
611 0 : Self::Unreliable(s) => {
612 0 : s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
613 0 : .await
614 : }
615 : }
616 6 : }
617 : }
618 :
619 : impl GenericRemoteStorage {
620 243 : pub async fn from_config(storage_config: &RemoteStorageConfig) -> anyhow::Result<Self> {
621 243 : let timeout = storage_config.timeout;
622 243 :
623 243 :         // If someone overrides timeout to be small without adjusting small_timeout, then adjust it automatically
624 243 : let small_timeout = std::cmp::min(storage_config.small_timeout, timeout);
625 243 :
626 243 : Ok(match &storage_config.storage {
627 207 : RemoteStorageKind::LocalFs { local_path: path } => {
628 207 : info!("Using fs root '{path}' as a remote storage");
629 207 : Self::LocalFs(LocalFs::new(path.clone(), timeout)?)
630 : }
631 26 : RemoteStorageKind::AwsS3(s3_config) => {
632 26 : // The profile and access key id are only printed here for debugging purposes,
633 26 : // their values don't indicate the eventually taken choice for auth.
634 26 : let profile = std::env::var("AWS_PROFILE").unwrap_or_else(|_| "<none>".into());
635 26 : let access_key_id =
636 26 : std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "<none>".into());
637 26 : info!("Using s3 bucket '{}' in region '{}' as a remote storage, prefix in bucket: '{:?}', bucket endpoint: '{:?}', profile: {profile}, access_key_id: {access_key_id}",
638 : s3_config.bucket_name, s3_config.bucket_region, s3_config.prefix_in_bucket, s3_config.endpoint);
639 26 : Self::AwsS3(Arc::new(S3Bucket::new(s3_config, timeout).await?))
640 : }
641 10 : RemoteStorageKind::AzureContainer(azure_config) => {
642 10 : let storage_account = azure_config
643 10 : .storage_account
644 10 : .as_deref()
645 10 : .unwrap_or("<AZURE_STORAGE_ACCOUNT>");
646 10 : info!("Using azure container '{}' in account '{storage_account}' in region '{}' as a remote storage, prefix in container: '{:?}'",
647 : azure_config.container_name, azure_config.container_region, azure_config.prefix_in_container);
648 10 : Self::AzureBlob(Arc::new(AzureBlobStorage::new(
649 10 : azure_config,
650 10 : timeout,
651 10 : small_timeout,
652 10 : )?))
653 : }
654 : })
655 243 : }
656 :
657 2 : pub fn unreliable_wrapper(s: Self, fail_first: u64) -> Self {
658 2 : Self::Unreliable(Arc::new(UnreliableWrapper::new(s, fail_first)))
659 2 : }
660 :
661 : /// See [`RemoteStorage::upload`], which this method calls with `None` as metadata.
662 1462 : pub async fn upload_storage_object(
663 1462 : &self,
664 1462 : from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
665 1462 : from_size_bytes: usize,
666 1462 : to: &RemotePath,
667 1462 : cancel: &CancellationToken,
668 1462 : ) -> anyhow::Result<()> {
669 1462 : self.upload(from, from_size_bytes, to, None, cancel)
670 1462 : .await
671 1458 : .with_context(|| {
672 0 : format!("Failed to upload data of length {from_size_bytes} to storage path {to:?}")
673 1458 : })
674 1458 : }
675 :
676 : /// The name of the bucket/container/etc.
677 0 : pub fn bucket_name(&self) -> Option<&str> {
678 0 : match self {
679 0 : Self::LocalFs(_s) => None,
680 0 : Self::AwsS3(s) => Some(s.bucket_name()),
681 0 : Self::AzureBlob(s) => Some(s.container_name()),
682 0 : Self::Unreliable(_s) => None,
683 : }
684 0 : }
685 : }
686 :
687 : /// Extra set of key-value pairs that contain arbitrary metadata about the storage entry.
688 : /// Immutable, cannot be changed once the file is created.
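///
/// A usage sketch via the array `From` impl below (keys are illustrative):
///
/// ```ignore
/// let metadata = StorageMetadata::from([("kind", "index"), ("compression", "zstd")]);
/// // Attached at upload time: `storage.upload(stream, len, &path, Some(metadata), &cancel)`.
/// ```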
689 : #[derive(Debug, Clone, PartialEq, Eq)]
690 : pub struct StorageMetadata(HashMap<String, String>);
691 :
692 : impl<const N: usize> From<[(&str, &str); N]> for StorageMetadata {
693 0 : fn from(arr: [(&str, &str); N]) -> Self {
694 0 : let map: HashMap<String, String> = arr
695 0 : .iter()
696 0 : .map(|(k, v)| (k.to_string(), v.to_string()))
697 0 : .collect();
698 0 : Self(map)
699 0 : }
700 : }
701 :
702 : struct ConcurrencyLimiter {
703 :     // Every request to S3 can be throttled or cancelled if a certain number of requests per second is exceeded.
704 :     // The same goes for IAM, which is queried before every S3 request if enabled; IAM has an even lower RPS threshold.
705 :     // This helps ensure we don't exceed the thresholds.
706 : write: Arc<Semaphore>,
707 : read: Arc<Semaphore>,
708 : }
709 :
710 : impl ConcurrencyLimiter {
711 730 : fn for_kind(&self, kind: RequestKind) -> &Arc<Semaphore> {
712 730 : match kind {
713 43 : RequestKind::Get => &self.read,
714 292 : RequestKind::Put => &self.write,
715 90 : RequestKind::List => &self.read,
716 287 : RequestKind::Delete => &self.write,
717 3 : RequestKind::Copy => &self.write,
718 6 : RequestKind::TimeTravel => &self.write,
719 9 : RequestKind::Head => &self.read,
720 : }
721 730 : }
722 :
723 698 : async fn acquire(
724 698 : &self,
725 698 : kind: RequestKind,
726 698 : ) -> Result<tokio::sync::SemaphorePermit<'_>, tokio::sync::AcquireError> {
727 698 : self.for_kind(kind).acquire().await
728 698 : }
729 :
730 32 : async fn acquire_owned(
731 32 : &self,
732 32 : kind: RequestKind,
733 32 : ) -> Result<tokio::sync::OwnedSemaphorePermit, tokio::sync::AcquireError> {
734 32 : Arc::clone(self.for_kind(kind)).acquire_owned().await
735 32 : }
736 :
737 51 : fn new(limit: usize) -> ConcurrencyLimiter {
738 51 : Self {
739 51 : read: Arc::new(Semaphore::new(limit)),
740 51 : write: Arc::new(Semaphore::new(limit)),
741 51 : }
742 51 : }
743 : }
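
// A sketch of how the limiter wraps an individual request in the S3 and Azure
// backends (`do_request` is hypothetical):
//
//     let _permit = limiter.acquire(RequestKind::Get).await?;
//     let response = do_request().await;
//
// Reads and writes draw from separate semaphores, so slow GETs cannot starve
// PUT/DELETE traffic, and vice versa.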
744 :
745 : #[cfg(test)]
746 : mod tests {
747 : use super::*;
748 :
749 : /// DownloadOpts::byte_range() should generate (inclusive, exclusive) ranges
750 : /// with optional end bound, or None when unbounded.
751 : #[test]
752 3 : fn download_opts_byte_range() {
753 3 : // Consider using test_case or a similar table-driven test framework.
754 3 : let cases = [
755 3 : // (byte_start, byte_end, expected)
756 3 : (Bound::Unbounded, Bound::Unbounded, None),
757 3 : (Bound::Unbounded, Bound::Included(7), Some((0, Some(8)))),
758 3 : (Bound::Unbounded, Bound::Excluded(7), Some((0, Some(7)))),
759 3 : (Bound::Included(3), Bound::Unbounded, Some((3, None))),
760 3 : (Bound::Included(3), Bound::Included(7), Some((3, Some(8)))),
761 3 : (Bound::Included(3), Bound::Excluded(7), Some((3, Some(7)))),
762 3 : (Bound::Excluded(3), Bound::Unbounded, Some((4, None))),
763 3 : (Bound::Excluded(3), Bound::Included(7), Some((4, Some(8)))),
764 3 : (Bound::Excluded(3), Bound::Excluded(7), Some((4, Some(7)))),
765 3 : // 1-sized ranges are fine, 0 aren't and will panic (separate test).
766 3 : (Bound::Included(3), Bound::Included(3), Some((3, Some(4)))),
767 3 : (Bound::Included(3), Bound::Excluded(4), Some((3, Some(4)))),
768 3 : ];
769 :
770 36 : for (byte_start, byte_end, expect) in cases {
771 33 : let opts = DownloadOpts {
772 33 : byte_start,
773 33 : byte_end,
774 33 : ..Default::default()
775 33 : };
776 33 : let result = opts.byte_range();
777 33 : assert_eq!(
778 : result, expect,
779 0 : "byte_start={byte_start:?} byte_end={byte_end:?}"
780 : );
781 :
782 : // Check generated HTTP header, which uses an inclusive range.
783 33 : let expect_header = expect.map(|(start, end)| match end {
784 24 : Some(end) => format!("bytes={start}-{}", end - 1), // inclusive end
785 6 : None => format!("bytes={start}-"),
786 33 : });
787 33 : assert_eq!(
788 33 : opts.byte_range_header(),
789 : expect_header,
790 0 : "byte_start={byte_start:?} byte_end={byte_end:?}"
791 : );
792 : }
793 3 : }
794 :
795 : /// DownloadOpts::byte_range() zero-sized byte range should panic.
796 : #[test]
797 : #[should_panic]
798 3 : fn download_opts_byte_range_zero() {
799 3 : DownloadOpts {
800 3 : byte_start: Bound::Included(3),
801 3 : byte_end: Bound::Excluded(3),
802 3 : ..Default::default()
803 3 : }
804 3 : .byte_range();
805 3 : }
806 :
807 : /// DownloadOpts::byte_range() negative byte range should panic.
808 : #[test]
809 : #[should_panic]
810 3 : fn download_opts_byte_range_negative() {
811 3 : DownloadOpts {
812 3 : byte_start: Bound::Included(3),
813 3 : byte_end: Bound::Included(2),
814 3 : ..Default::default()
815 3 : }
816 3 : .byte_range();
817 3 : }
818 :
819 : #[test]
820 3 : fn test_object_name() {
821 3 : let k = RemotePath::new(Utf8Path::new("a/b/c")).unwrap();
822 3 : assert_eq!(k.object_name(), Some("c"));
823 :
824 3 : let k = RemotePath::new(Utf8Path::new("a/b/c/")).unwrap();
825 3 : assert_eq!(k.object_name(), Some("c"));
826 :
827 3 : let k = RemotePath::new(Utf8Path::new("a/")).unwrap();
828 3 : assert_eq!(k.object_name(), Some("a"));
829 :
830 : // XXX is it impossible to have an empty key?
831 3 : let k = RemotePath::new(Utf8Path::new("")).unwrap();
832 3 : assert_eq!(k.object_name(), None);
833 3 : }
834 :
835 : #[test]
836 3 :     fn remote_path_cannot_be_created_from_absolute_ones() {
837 3 : let err = RemotePath::new(Utf8Path::new("/")).expect_err("Should fail on absolute paths");
838 3 : assert_eq!(err.to_string(), "Path \"/\" is not relative");
839 3 : }
840 : }
|