//! A set of generic storage abstractions for the page server to use when backing up and restoring its state from external storage.
//! No other modules from this tree are supposed to be used directly by external code.
//!
//! The [`RemoteStorage`] trait is a CRUD-like generic abstraction for adapting external storages, with a few implementations:
//! * [`local_fs`] uses the local file system as an external storage
//! * [`s3_bucket`] uses an AWS S3 bucket as an external storage
//! * [`azure_blob`] uses Azure Blob storage as an external storage
//!
#![deny(unsafe_code)]
#![deny(clippy::undocumented_unsafe_blocks)]

mod azure_blob;
mod config;
mod error;
mod local_fs;
mod metrics;
mod s3_bucket;
mod simulate_failures;
mod support;

use std::{
    collections::HashMap, fmt::Debug, num::NonZeroU32, pin::Pin, sync::Arc, time::SystemTime,
};

use anyhow::Context;
use camino::{Utf8Path, Utf8PathBuf};

use bytes::Bytes;
use futures::stream::Stream;
use serde::{Deserialize, Serialize};
use tokio::sync::Semaphore;
use tokio_util::sync::CancellationToken;
use tracing::info;

pub use self::{
    azure_blob::AzureBlobStorage, local_fs::LocalFs, s3_bucket::S3Bucket,
    simulate_failures::UnreliableWrapper,
};
use s3_bucket::RequestKind;

pub use crate::config::{AzureConfig, RemoteStorageConfig, RemoteStorageKind, S3Config};

/// Azure SDK's ETag type is a simple String wrapper: we re-export it instead of duplicating it here.
pub use azure_core::Etag;

pub use error::{DownloadError, TimeTravelError, TimeoutOrCancel};

/// Currently, sync happens with AWS S3, which has two limits on requests per second:
/// ~200 RPS for IAM services
/// <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/UsingWithRDS.IAMDBAuth.html>
/// ~3500 PUT/COPY/POST/DELETE or 5500 GET/HEAD S3 requests
/// <https://aws.amazon.com/premiumsupport/knowledge-center/s3-request-limit-avoid-throttling/>
pub const DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT: usize = 100;
/// Set this limit analogously to the S3 limit.
///
/// The link below mentions a limit of up to 20,000 concurrent connections:
/// <https://learn.microsoft.com/en-us/answers/questions/1301863/is-there-any-limitation-to-concurrent-connections>
pub const DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT: usize = 100;
/// No limit on the client side; AWS S3 currently caps this at 1000 keys per response.
/// <https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html#API_ListObjectsV2_RequestSyntax>
pub const DEFAULT_MAX_KEYS_PER_LIST_RESPONSE: Option<i32> = None;

/// The maximum number of keys a single S3 `DeleteObjects` request accepts, as defined in the S3 docs.
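///
/// # Example (illustrative sketch; `storage` and `cancel` are assumed to be a
/// [`GenericRemoteStorage`] and a `CancellationToken`)
///
/// ```ignore
/// // Callers deleting more keys than this are expected to batch:
/// for chunk in paths.chunks(MAX_KEYS_PER_DELETE) {
///     storage.delete_objects(chunk, &cancel).await?;
/// }
/// ```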
pub const MAX_KEYS_PER_DELETE: usize = 1000;

const REMOTE_STORAGE_PREFIX_SEPARATOR: char = '/';

/// A path on the remote storage, relative to some inner prefix.
/// The prefix is an implementation detail that allows representing local paths
/// as remote ones, stripping the local storage prefix away.
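///
/// # Example (illustrative sketch, assuming the crate is named `remote_storage`)
///
/// ```
/// use camino::Utf8Path;
/// use remote_storage::RemotePath;
///
/// let tenant = RemotePath::new(Utf8Path::new("tenants/42")).unwrap();
/// let layer = tenant.join("timelines/7/layer_0");
/// assert_eq!(layer.object_name(), Some("layer_0"));
/// // Map the remote path back onto a local storage root:
/// assert_eq!(
///     layer.with_base(Utf8Path::new("/local/root")).as_str(),
///     "/local/root/tenants/42/timelines/7/layer_0"
/// );
/// ```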
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct RemotePath(Utf8PathBuf);

impl Serialize for RemotePath {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.collect_str(self)
    }
}

impl<'de> Deserialize<'de> for RemotePath {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let str = String::deserialize(deserializer)?;
        Ok(Self(Utf8PathBuf::from(&str)))
    }
}

impl std::fmt::Display for RemotePath {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}

impl RemotePath {
    pub fn new(relative_path: &Utf8Path) -> anyhow::Result<Self> {
        anyhow::ensure!(
            relative_path.is_relative(),
            "Path {relative_path:?} is not relative"
        );
        Ok(Self(relative_path.to_path_buf()))
    }

    pub fn from_string(relative_path: &str) -> anyhow::Result<Self> {
        Self::new(Utf8Path::new(relative_path))
    }

    pub fn with_base(&self, base_path: &Utf8Path) -> Utf8PathBuf {
        base_path.join(&self.0)
    }

    pub fn object_name(&self) -> Option<&str> {
        self.0.file_name()
    }

    pub fn join(&self, path: impl AsRef<Utf8Path>) -> Self {
        Self(self.0.join(path))
    }

    pub fn get_path(&self) -> &Utf8PathBuf {
        &self.0
    }

    pub fn extension(&self) -> Option<&str> {
        self.0.extension()
    }

    pub fn strip_prefix(&self, p: &RemotePath) -> Result<&Utf8Path, std::path::StripPrefixError> {
        self.0.strip_prefix(&p.0)
    }

    pub fn add_trailing_slash(&self) -> Self {
        // Unwrap safety: the input is guaranteed to be valid UTF-8.
        Self(format!("{}/", self.0).try_into().unwrap())
    }
}

/// We don't need callers to be able to pass arbitrary delimiters: just control
/// whether listings will use a '/' separator or not.
///
/// The `WithDelimiter` mode will populate both `prefixes` and `keys` in the result. The
/// `NoDelimiter` mode will only populate `keys`.
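///
/// # Example (illustrative sketch; `storage`, `prefix` and `cancel` are assumed bindings)
///
/// ```ignore
/// // List one "directory" level under `prefix`: subdirectories end up in
/// // `listing.prefixes`, direct children in `listing.keys`.
/// let listing = storage
///     .list(Some(&prefix), ListingMode::WithDelimiter, None, &cancel)
///     .await?;
/// ```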
pub enum ListingMode {
    WithDelimiter,
    NoDelimiter,
}

#[derive(Default)]
pub struct Listing {
    pub prefixes: Vec<RemotePath>,
    pub keys: Vec<RemotePath>,
}

/// Storage (potentially remote) API to manage its state.
/// This storage tries to be unaware of any layered repository context,
/// providing basic CRUD operations for storage files.
#[allow(async_fn_in_trait)]
pub trait RemoteStorage: Send + Sync + 'static {
    /// List objects in remote storage, with semantics matching AWS S3's ListObjectsV2.
    /// (see `<https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html>`)
    ///
    /// Note that the prefix is relative to any `prefix_in_bucket` configured for the client, not
    /// to the absolute root of the bucket.
    ///
    /// `mode` configures whether to use a delimiter. Without a delimiter, all keys
    /// within the prefix are listed in the `keys` of the result. With a delimiter, any "directories" at the top level of
    /// the prefix are returned in the `prefixes` of the result, and keys in the top level of the prefix are
    /// returned in `keys`.
    ///
    /// `max_keys` controls the maximum number of keys that will be returned. If this is None, this function
    /// will iteratively call ListObjectsV2 until it runs out of keys. Note that this is not safe to use on
    /// buckets of unbounded size, as the full list of objects is allocated into a monolithic data structure.
    ///
    async fn list(
        &self,
        prefix: Option<&RemotePath>,
        _mode: ListingMode,
        max_keys: Option<NonZeroU32>,
        cancel: &CancellationToken,
    ) -> Result<Listing, DownloadError>;

    /// Streams the local file contents into the remote storage entry.
    ///
    /// If the operation fails because of timeout or cancellation, the root cause of the error will be
    /// set to `TimeoutOrCancel`.
    async fn upload(
        &self,
        from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
        // S3 PUT request requires the content length to be specified,
        // otherwise it starts to fail with the concurrent connection count increasing.
        data_size_bytes: usize,
        to: &RemotePath,
        metadata: Option<StorageMetadata>,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()>;

    /// Streams the remote storage entry contents.
    ///
    /// The returned download stream will obey initial timeout and cancellation signal by erroring
    /// on whichever happens first. Only one of the reasons will fail the stream, which is usually
    /// enough for `tokio::io::copy_buf` usage. If needed the error can be filtered out.
    ///
    /// Returns the metadata, if any was stored with the file previously.
    async fn download(
        &self,
        from: &RemotePath,
        cancel: &CancellationToken,
    ) -> Result<Download, DownloadError>;

    /// Streams a given byte range of the remote storage entry contents.
    ///
    /// The returned download stream will obey initial timeout and cancellation signal by erroring
    /// on whichever happens first. Only one of the reasons will fail the stream, which is usually
    /// enough for `tokio::io::copy_buf` usage. If needed the error can be filtered out.
    ///
    /// Returns the metadata, if any was stored with the file previously.
    async fn download_byte_range(
        &self,
        from: &RemotePath,
        start_inclusive: u64,
        end_exclusive: Option<u64>,
        cancel: &CancellationToken,
    ) -> Result<Download, DownloadError>;

    /// Delete a single path from remote storage.
    ///
    /// If the operation fails because of timeout or cancellation, the root cause of the error will be
    /// set to `TimeoutOrCancel`. In that situation it is unknown whether the deletion went through.
    async fn delete(&self, path: &RemotePath, cancel: &CancellationToken) -> anyhow::Result<()>;

    /// Delete multiple paths from remote storage.
    ///
    /// If the operation fails because of timeout or cancellation, the root cause of the error will be
    /// set to `TimeoutOrCancel`. In that situation it is unknown which deletions, if any, went
    /// through.
    async fn delete_objects<'a>(
        &self,
        paths: &'a [RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()>;

    /// Copy a remote object inside a bucket from one path to another.
    async fn copy(
        &self,
        from: &RemotePath,
        to: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()>;

    /// Resets the content of everything with the given prefix to its state at the given timestamp.
    async fn time_travel_recover(
        &self,
        prefix: Option<&RemotePath>,
        timestamp: SystemTime,
        done_if_after: SystemTime,
        cancel: &CancellationToken,
    ) -> Result<(), TimeTravelError>;
}

/// DownloadStream is sensitive to the timeout and cancellation used with the original
/// [`RemoteStorage::download`] request. The type yields `std::io::Result<Bytes>` to be compatible
/// with `tokio::io::copy_buf`.
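///
/// # Example (illustrative sketch; assumes tokio-util's `io` feature and a writable `dest`)
///
/// ```ignore
/// // Wrap the stream in a `StreamReader` to get `AsyncBufRead`, then copy it out.
/// let mut reader = tokio_util::io::StreamReader::new(download.download_stream);
/// tokio::io::copy_buf(&mut reader, &mut dest).await?;
/// ```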
// This has 'static because safekeepers do not use cancellation tokens (yet)
pub type DownloadStream =
    Pin<Box<dyn Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static>>;

pub struct Download {
    pub download_stream: DownloadStream,
    /// The last time the file was modified (`last-modified` HTTP header)
    pub last_modified: SystemTime,
    /// A way to identify this specific version of the resource (`etag` HTTP header)
    pub etag: Etag,
    /// Extra key-value data, associated with the current remote file.
    pub metadata: Option<StorageMetadata>,
}

impl Debug for Download {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Download")
            .field("metadata", &self.metadata)
            .finish()
    }
}

/// Every storage kind that is currently supported.
/// Serves as a simple way to pass around the [`RemoteStorage`] without dealing with generics.
#[derive(Clone)]
// Require Clone for `Other` due to https://github.com/rust-lang/rust/issues/26925
pub enum GenericRemoteStorage<Other: Clone = Arc<UnreliableWrapper>> {
    LocalFs(LocalFs),
    AwsS3(Arc<S3Bucket>),
    AzureBlob(Arc<AzureBlobStorage>),
    Unreliable(Other),
}

impl<Other: RemoteStorage> GenericRemoteStorage<Arc<Other>> {
    pub async fn list(
        &self,
        prefix: Option<&RemotePath>,
        mode: ListingMode,
        max_keys: Option<NonZeroU32>,
        cancel: &CancellationToken,
    ) -> anyhow::Result<Listing, DownloadError> {
        match self {
            Self::LocalFs(s) => s.list(prefix, mode, max_keys, cancel).await,
            Self::AwsS3(s) => s.list(prefix, mode, max_keys, cancel).await,
            Self::AzureBlob(s) => s.list(prefix, mode, max_keys, cancel).await,
            Self::Unreliable(s) => s.list(prefix, mode, max_keys, cancel).await,
        }
    }

    /// See [`RemoteStorage::upload`]
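    ///
    /// # Example (illustrative sketch; `storage`, `path` and `cancel` are assumed bindings)
    ///
    /// ```ignore
    /// use bytes::Bytes;
    /// use futures::stream;
    ///
    /// let body = Bytes::from_static(b"hello");
    /// let len = body.len();
    /// // A one-shot stream satisfies the `Stream<Item = std::io::Result<Bytes>>` bound.
    /// let body = stream::once(futures::future::ready(std::io::Result::Ok(body)));
    /// storage.upload(body, len, &path, None, &cancel).await?;
    /// ```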
    pub async fn upload(
        &self,
        from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
        data_size_bytes: usize,
        to: &RemotePath,
        metadata: Option<StorageMetadata>,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
            Self::AwsS3(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
            Self::AzureBlob(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
            Self::Unreliable(s) => s.upload(from, data_size_bytes, to, metadata, cancel).await,
        }
    }

    pub async fn download(
        &self,
        from: &RemotePath,
        cancel: &CancellationToken,
    ) -> Result<Download, DownloadError> {
        match self {
            Self::LocalFs(s) => s.download(from, cancel).await,
            Self::AwsS3(s) => s.download(from, cancel).await,
            Self::AzureBlob(s) => s.download(from, cancel).await,
            Self::Unreliable(s) => s.download(from, cancel).await,
        }
    }

    pub async fn download_byte_range(
        &self,
        from: &RemotePath,
        start_inclusive: u64,
        end_exclusive: Option<u64>,
        cancel: &CancellationToken,
    ) -> Result<Download, DownloadError> {
        match self {
            Self::LocalFs(s) => {
                s.download_byte_range(from, start_inclusive, end_exclusive, cancel)
                    .await
            }
            Self::AwsS3(s) => {
                s.download_byte_range(from, start_inclusive, end_exclusive, cancel)
                    .await
            }
            Self::AzureBlob(s) => {
                s.download_byte_range(from, start_inclusive, end_exclusive, cancel)
                    .await
            }
            Self::Unreliable(s) => {
                s.download_byte_range(from, start_inclusive, end_exclusive, cancel)
                    .await
            }
        }
    }

    /// See [`RemoteStorage::delete`]
    pub async fn delete(
        &self,
        path: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.delete(path, cancel).await,
            Self::AwsS3(s) => s.delete(path, cancel).await,
            Self::AzureBlob(s) => s.delete(path, cancel).await,
            Self::Unreliable(s) => s.delete(path, cancel).await,
        }
    }

    /// See [`RemoteStorage::delete_objects`]
    pub async fn delete_objects(
        &self,
        paths: &[RemotePath],
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.delete_objects(paths, cancel).await,
            Self::AwsS3(s) => s.delete_objects(paths, cancel).await,
            Self::AzureBlob(s) => s.delete_objects(paths, cancel).await,
            Self::Unreliable(s) => s.delete_objects(paths, cancel).await,
        }
    }

    /// See [`RemoteStorage::copy`]
    pub async fn copy_object(
        &self,
        from: &RemotePath,
        to: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        match self {
            Self::LocalFs(s) => s.copy(from, to, cancel).await,
            Self::AwsS3(s) => s.copy(from, to, cancel).await,
            Self::AzureBlob(s) => s.copy(from, to, cancel).await,
            Self::Unreliable(s) => s.copy(from, to, cancel).await,
        }
    }

    /// See [`RemoteStorage::time_travel_recover`].
    pub async fn time_travel_recover(
        &self,
        prefix: Option<&RemotePath>,
        timestamp: SystemTime,
        done_if_after: SystemTime,
        cancel: &CancellationToken,
    ) -> Result<(), TimeTravelError> {
        match self {
            Self::LocalFs(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
            Self::AwsS3(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
            Self::AzureBlob(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
            Self::Unreliable(s) => {
                s.time_travel_recover(prefix, timestamp, done_if_after, cancel)
                    .await
            }
        }
    }
}

impl GenericRemoteStorage {
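    /// Constructs a concrete client from the given [`RemoteStorageConfig`].
    ///
    /// # Example (illustrative sketch; assumes the config fields shown here are
    /// public and a `local_path: Utf8PathBuf` binding)
    ///
    /// ```ignore
    /// let config = RemoteStorageConfig {
    ///     storage: RemoteStorageKind::LocalFs { local_path },
    ///     timeout: std::time::Duration::from_secs(120),
    /// };
    /// let storage = GenericRemoteStorage::from_config(&config)?;
    /// ```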
    pub fn from_config(storage_config: &RemoteStorageConfig) -> anyhow::Result<Self> {
        let timeout = storage_config.timeout;
        Ok(match &storage_config.storage {
            RemoteStorageKind::LocalFs { local_path: path } => {
                info!("Using fs root '{path}' as a remote storage");
                Self::LocalFs(LocalFs::new(path.clone(), timeout)?)
            }
            RemoteStorageKind::AwsS3(s3_config) => {
                // The profile and access key id are only printed here for debugging purposes;
                // their values don't determine which auth mechanism is eventually chosen.
                let profile = std::env::var("AWS_PROFILE").unwrap_or_else(|_| "<none>".into());
                let access_key_id =
                    std::env::var("AWS_ACCESS_KEY_ID").unwrap_or_else(|_| "<none>".into());
                info!("Using s3 bucket '{}' in region '{}' as a remote storage, prefix in bucket: '{:?}', bucket endpoint: '{:?}', profile: {profile}, access_key_id: {access_key_id}",
                      s3_config.bucket_name, s3_config.bucket_region, s3_config.prefix_in_bucket, s3_config.endpoint);
                Self::AwsS3(Arc::new(S3Bucket::new(s3_config, timeout)?))
            }
            RemoteStorageKind::AzureContainer(azure_config) => {
                let storage_account = azure_config
                    .storage_account
                    .as_deref()
                    .unwrap_or("<AZURE_STORAGE_ACCOUNT>");
                info!("Using azure container '{}' in account '{storage_account}' in region '{}' as a remote storage, prefix in container: '{:?}'",
                      azure_config.container_name, azure_config.container_region, azure_config.prefix_in_container);
                Self::AzureBlob(Arc::new(AzureBlobStorage::new(azure_config, timeout)?))
            }
        })
    }

    pub fn unreliable_wrapper(s: Self, fail_first: u64) -> Self {
        Self::Unreliable(Arc::new(UnreliableWrapper::new(s, fail_first)))
    }

    /// See [`RemoteStorage::upload`], which this method calls with `None` as metadata.
    pub async fn upload_storage_object(
        &self,
        from: impl Stream<Item = std::io::Result<Bytes>> + Send + Sync + 'static,
        from_size_bytes: usize,
        to: &RemotePath,
        cancel: &CancellationToken,
    ) -> anyhow::Result<()> {
        self.upload(from, from_size_bytes, to, None, cancel)
            .await
            .with_context(|| {
                format!("Failed to upload data of length {from_size_bytes} to storage path {to:?}")
            })
    }

    /// Downloads the storage object from the path provided, returning a [`Download`] stream.
    /// `byte_range` can be specified to download only a part of the file, if needed.
    pub async fn download_storage_object(
        &self,
        byte_range: Option<(u64, Option<u64>)>,
        from: &RemotePath,
        cancel: &CancellationToken,
    ) -> Result<Download, DownloadError> {
        match byte_range {
            Some((start, end)) => self.download_byte_range(from, start, end, cancel).await,
            None => self.download(from, cancel).await,
        }
    }
}

/// Extra set of key-value pairs that contain arbitrary metadata about the storage entry.
/// Immutable: it cannot be changed once the file is created.
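///
/// # Example (illustrative sketch)
///
/// ```ignore
/// // Build metadata from string pairs via the `From` impl below.
/// let metadata = StorageMetadata::from([("compression", "zstd"), ("version", "1")]);
/// ```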
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct StorageMetadata(HashMap<String, String>);

impl<const N: usize> From<[(&str, &str); N]> for StorageMetadata {
    fn from(arr: [(&str, &str); N]) -> Self {
        let map: HashMap<String, String> = arr
            .iter()
            .map(|(k, v)| (k.to_string(), v.to_string()))
            .collect();
        Self(map)
    }
}

struct ConcurrencyLimiter {
    // Every request to S3 can be throttled or cancelled if a certain number of requests per second is exceeded.
    // The same goes for IAM, which is queried before every S3 request, if enabled. IAM has an even lower RPS threshold.
    // This helps to ensure we don't exceed those thresholds.
    write: Arc<Semaphore>,
    read: Arc<Semaphore>,
}

impl ConcurrencyLimiter {
    fn for_kind(&self, kind: RequestKind) -> &Arc<Semaphore> {
        match kind {
            RequestKind::Get => &self.read,
            RequestKind::Put => &self.write,
            RequestKind::List => &self.read,
            RequestKind::Delete => &self.write,
            RequestKind::Copy => &self.write,
            RequestKind::TimeTravel => &self.write,
        }
    }

    async fn acquire(
        &self,
        kind: RequestKind,
    ) -> Result<tokio::sync::SemaphorePermit<'_>, tokio::sync::AcquireError> {
        self.for_kind(kind).acquire().await
    }

    async fn acquire_owned(
        &self,
        kind: RequestKind,
    ) -> Result<tokio::sync::OwnedSemaphorePermit, tokio::sync::AcquireError> {
        Arc::clone(self.for_kind(kind)).acquire_owned().await
    }

    fn new(limit: usize) -> ConcurrencyLimiter {
        Self {
            read: Arc::new(Semaphore::new(limit)),
            write: Arc::new(Semaphore::new(limit)),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_object_name() {
        let k = RemotePath::new(Utf8Path::new("a/b/c")).unwrap();
        assert_eq!(k.object_name(), Some("c"));

        let k = RemotePath::new(Utf8Path::new("a/b/c/")).unwrap();
        assert_eq!(k.object_name(), Some("c"));

        let k = RemotePath::new(Utf8Path::new("a/")).unwrap();
        assert_eq!(k.object_name(), Some("a"));

        // XXX is it impossible to have an empty key?
        let k = RemotePath::new(Utf8Path::new("")).unwrap();
        assert_eq!(k.object_name(), None);
    }

    #[test]
    fn remote_path_cannot_be_created_from_absolute_ones() {
        let err = RemotePath::new(Utf8Path::new("/")).expect_err("Should fail on absolute paths");
        assert_eq!(err.to_string(), "Path \"/\" is not relative");
    }
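
    // Additional illustrative tests (sketches) exercising the path helpers and
    // the private `ConcurrencyLimiter` defined above.
    #[test]
    fn add_trailing_slash_appends_separator() {
        let p = RemotePath::new(Utf8Path::new("a/b")).unwrap();
        assert_eq!(p.add_trailing_slash().get_path().as_str(), "a/b/");
    }

    #[test]
    fn strip_prefix_returns_remainder() {
        let base = RemotePath::new(Utf8Path::new("tenant")).unwrap();
        let full = base.join("timeline/layer");
        assert_eq!(
            full.strip_prefix(&base).unwrap(),
            Utf8Path::new("timeline/layer")
        );
    }

    #[test]
    fn concurrency_limiter_uses_separate_read_and_write_pools() {
        let limiter = ConcurrencyLimiter::new(2);
        // Reads and writes draw permits from independent semaphores.
        assert_eq!(limiter.for_kind(RequestKind::Get).available_permits(), 2);
        assert_eq!(limiter.for_kind(RequestKind::Put).available_permits(), 2);
    }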
}