LCOV - code coverage report
Current view: top level - libs/remote_storage/src - config.rs (source / functions)
Test:       f8d8f5b90fa487a9e82c42da223f012f5d4fece7.info
Test Date:  2024-09-19 20:36:02
Coverage:   Lines: 79.9 % (127 of 159)   Functions: 37.0 % (34 of 92)

            Line data    Source code
       1              : use std::{fmt::Debug, num::NonZeroUsize, str::FromStr, time::Duration};
       2              : 
       3              : use aws_sdk_s3::types::StorageClass;
       4              : use camino::Utf8PathBuf;
       5              : 
       6              : use serde::{Deserialize, Serialize};
       7              : 
       8              : use crate::{
       9              :     DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT,
      10              :     DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT,
      11              : };
      12              : 
       13              : /// External backup storage configuration, sufficient for creating a client for that storage.
      14           53 : #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
      15              : pub struct RemoteStorageConfig {
      16              :     /// The storage connection configuration.
      17              :     #[serde(flatten)]
      18              :     pub storage: RemoteStorageKind,
       19              :     /// A common timeout enforced for all requests after a concurrency limiter permit has been
      20              :     /// acquired.
      21              :     #[serde(
      22              :         with = "humantime_serde",
      23              :         default = "default_timeout",
      24              :         skip_serializing_if = "is_default_timeout"
      25              :     )]
      26              :     pub timeout: Duration,
      27              : }
      28              : 
      29            1 : fn default_timeout() -> Duration {
      30            1 :     RemoteStorageConfig::DEFAULT_TIMEOUT
      31            1 : }
      32              : 
      33            0 : fn is_default_timeout(d: &Duration) -> bool {
      34            0 :     *d == RemoteStorageConfig::DEFAULT_TIMEOUT
      35            0 : }
      36              : 
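
The timeout plumbing above can be exercised with a short sketch, written in the style of the test module at the bottom of this file and assuming its dependencies (toml_edit, serde_json, anyhow); the helper name is hypothetical. It shows that an omitted `timeout` falls back to `DEFAULT_TIMEOUT` (120 seconds, via `default_timeout`), and that a default timeout is dropped on serialization because of `skip_serializing_if = "is_default_timeout"`.

    // Hypothetical sketch, not part of config.rs: default and skipped `timeout`.
    fn timeout_defaults_sketch() -> anyhow::Result<()> {
        // No `timeout` key: serde falls back to `default_timeout`, i.e. 120 seconds.
        let doc = "local_path = '.'".parse::<toml_edit::DocumentMut>()?;
        let config = RemoteStorageConfig::from_toml(doc.as_item())?;
        assert_eq!(config.timeout, RemoteStorageConfig::DEFAULT_TIMEOUT);

        // `skip_serializing_if = "is_default_timeout"`: a default timeout is not
        // written back out; only the flattened storage fields appear.
        let json = serde_json::to_string(&config)?;
        assert!(json.contains(r#""local_path""#));
        assert!(!json.contains("timeout"));
        Ok(())
    }
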
       37              : /// The kind of remote storage to connect to, together with its connection configuration.
      38           35 : #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
      39              : #[serde(untagged)]
      40              : pub enum RemoteStorageKind {
       41              :     /// Storage based on the local file system.
      42              :     /// Specify a root folder to place all stored files into.
      43              :     LocalFs { local_path: Utf8PathBuf },
       44              :     /// AWS S3-based storage, storing all files in the S3 bucket
       45              :     /// specified by the config.
      46              :     AwsS3(S3Config),
       47              :     /// Azure Blob-based storage, storing all files in the container
       48              :     /// specified by the config.
      49              :     AzureContainer(AzureConfig),
      50              : }
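
Because `RemoteStorageKind` is `#[serde(untagged)]` and flattened into `RemoteStorageConfig`, the variant is selected purely by which keys are present in the input. A minimal sketch (hypothetical helper, same assumptions as the test module below) of the three cases:

    // Hypothetical sketch: variant selection by key names.
    fn untagged_kind_sketch() -> anyhow::Result<()> {
        // `local_path` selects `RemoteStorageKind::LocalFs`.
        let doc = "local_path = '/tmp/backups'".parse::<toml_edit::DocumentMut>()?;
        let config = RemoteStorageConfig::from_toml(doc.as_item())?;
        assert!(matches!(config.storage, RemoteStorageKind::LocalFs { .. }));

        // `bucket_name` + `bucket_region` select `RemoteStorageKind::AwsS3`.
        let doc = "bucket_name = 'b'\nbucket_region = 'eu-central-1'"
            .parse::<toml_edit::DocumentMut>()?;
        let config = RemoteStorageConfig::from_toml(doc.as_item())?;
        assert!(matches!(config.storage, RemoteStorageKind::AwsS3(_)));

        // `container_name` + `container_region` select `RemoteStorageKind::AzureContainer`.
        let doc = "container_name = 'c'\ncontainer_region = 'westeurope'"
            .parse::<toml_edit::DocumentMut>()?;
        let config = RemoteStorageConfig::from_toml(doc.as_item())?;
        assert!(matches!(config.storage, RemoteStorageKind::AzureContainer(_)));
        Ok(())
    }

The flip side of the untagged representation is that a malformed config surfaces as a generic "data did not match any variant" error rather than a variant-specific one.
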
      51              : 
      52              : /// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write).
      53           35 : #[derive(Clone, PartialEq, Eq, Deserialize, Serialize)]
      54              : pub struct S3Config {
      55              :     /// Name of the bucket to connect to.
      56              :     pub bucket_name: String,
       57              :     /// The region where the bucket is located.
      58              :     pub bucket_region: String,
       59              :     /// A "subfolder" in the bucket, allowing multiple remote storage users to share the same bucket without interfering with each other.
      60              :     pub prefix_in_bucket: Option<String>,
      61              :     /// A base URL to send S3 requests to.
       62              :     /// By default, the endpoint is derived from the region name, assuming it is
       63              :     /// a valid AWS S3 region name; an unrecognized region name is an error.
       64              :     /// Setting an explicit endpoint allows targeting other S3-compatible services and their regions.
      65              :     ///
      66              :     /// Example: `http://127.0.0.1:5000`
      67              :     pub endpoint: Option<String>,
       68              :     /// AWS S3 imposes various limits on its API calls; we must not exceed them.
      69              :     /// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details.
      70              :     #[serde(default = "default_remote_storage_s3_concurrency_limit")]
      71              :     pub concurrency_limit: NonZeroUsize,
      72              :     #[serde(default = "default_max_keys_per_list_response")]
      73              :     pub max_keys_per_list_response: Option<i32>,
      74              :     #[serde(
      75              :         deserialize_with = "deserialize_storage_class",
      76              :         serialize_with = "serialize_storage_class",
      77              :         default
      78              :     )]
      79              :     pub upload_storage_class: Option<StorageClass>,
      80              : }
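
The `endpoint` field is what makes S3-compatible services usable. A sketch of how such a config parses (hypothetical helper; bucket, region, and prefix values are illustrative, the endpoint is the one from the doc comment above):

    // Hypothetical sketch: S3-compatible endpoint plus a prefix inside the bucket.
    fn s3_custom_endpoint_sketch() -> anyhow::Result<()> {
        let toml = "\
    bucket_name = 'my-bucket'
    bucket_region = 'us-east-1'
    prefix_in_bucket = 'pageserver/'
    endpoint = 'http://127.0.0.1:5000'
    ";
        let doc = toml.parse::<toml_edit::DocumentMut>()?;
        let config = RemoteStorageConfig::from_toml(doc.as_item())?;
        match config.storage {
            RemoteStorageKind::AwsS3(s3) => {
                assert_eq!(s3.endpoint.as_deref(), Some("http://127.0.0.1:5000"));
                assert_eq!(s3.prefix_in_bucket.as_deref(), Some("pageserver/"));
                // Fields not present in the input fall back to their defaults.
                assert_eq!(
                    s3.concurrency_limit,
                    default_remote_storage_s3_concurrency_limit()
                );
            }
            other => panic!("expected AwsS3, got {other:?}"),
        }
        Ok(())
    }
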
      81              : 
      82            7 : fn default_remote_storage_s3_concurrency_limit() -> NonZeroUsize {
      83            7 :     DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT
      84            7 :         .try_into()
      85            7 :         .unwrap()
      86            7 : }
      87              : 
      88            7 : fn default_max_keys_per_list_response() -> Option<i32> {
      89            7 :     DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
      90            7 : }
      91              : 
      92              : impl Debug for S3Config {
      93            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      94            0 :         f.debug_struct("S3Config")
      95            0 :             .field("bucket_name", &self.bucket_name)
      96            0 :             .field("bucket_region", &self.bucket_region)
      97            0 :             .field("prefix_in_bucket", &self.prefix_in_bucket)
      98            0 :             .field("concurrency_limit", &self.concurrency_limit)
      99            0 :             .field(
     100            0 :                 "max_keys_per_list_response",
     101            0 :                 &self.max_keys_per_list_response,
     102            0 :             )
     103            0 :             .finish()
     104            0 :     }
     105              : }
     106              : 
      107              : /// Azure container coordinates and access credentials to manage the container contents (read and write).
     108           12 : #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
     109              : pub struct AzureConfig {
     110              :     /// Name of the container to connect to.
     111              :     pub container_name: String,
      112              :     /// Name of the storage account that the container belongs to.
     113              :     pub storage_account: Option<String>,
      114              :     /// The region where the container is located.
     115              :     pub container_region: String,
      116              :     /// A "subfolder" in the container, allowing multiple remote storage users to share the same container without interfering with each other.
     117              :     pub prefix_in_container: Option<String>,
      118              :     /// Azure imposes various limits on its API calls; we must not exceed them.
     119              :     /// See [`DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT`] for more details.
     120              :     #[serde(default = "default_remote_storage_azure_concurrency_limit")]
     121              :     pub concurrency_limit: NonZeroUsize,
     122              :     #[serde(default = "default_max_keys_per_list_response")]
     123              :     pub max_keys_per_list_response: Option<i32>,
     124              : }
     125              : 
     126            6 : fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize {
     127            6 :     NonZeroUsize::new(DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT).unwrap()
     128            6 : }
     129              : 
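
For completeness, an Azure counterpart of the test at the bottom of the file, as a hypothetical sketch with illustrative account, container, and prefix names, exercising the optional `storage_account` and `prefix_in_container` fields:

    // Hypothetical sketch: Azure container config with an explicit storage account.
    fn azure_config_sketch() -> anyhow::Result<()> {
        let toml = "\
    container_name = 'my-container'
    container_region = 'westeurope'
    storage_account = 'mystorageaccount'
    prefix_in_container = 'safekeeper/'
    ";
        let doc = toml.parse::<toml_edit::DocumentMut>()?;
        let config = RemoteStorageConfig::from_toml(doc.as_item())?;
        match config.storage {
            RemoteStorageKind::AzureContainer(azure) => {
                assert_eq!(azure.storage_account.as_deref(), Some("mystorageaccount"));
                assert_eq!(azure.prefix_in_container.as_deref(), Some("safekeeper/"));
            }
            other => panic!("expected AzureContainer, got {other:?}"),
        }
        Ok(())
    }
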
     130              : impl Debug for AzureConfig {
     131            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     132            0 :         f.debug_struct("AzureConfig")
      133            0 :             .field("container_name", &self.container_name)
      134            0 :             .field("storage_account", &self.storage_account)
      135            0 :             .field("container_region", &self.container_region)
     136            0 :             .field("prefix_in_container", &self.prefix_in_container)
     137            0 :             .field("concurrency_limit", &self.concurrency_limit)
     138            0 :             .field(
     139            0 :                 "max_keys_per_list_response",
     140            0 :                 &self.max_keys_per_list_response,
     141            0 :             )
     142            0 :             .finish()
     143            0 :     }
     144              : }
     145              : 
     146           15 : fn deserialize_storage_class<'de, D: serde::Deserializer<'de>>(
     147           15 :     deserializer: D,
     148           15 : ) -> Result<Option<StorageClass>, D::Error> {
     149           15 :     Option::<String>::deserialize(deserializer).and_then(|s| {
     150           15 :         if let Some(s) = s {
     151              :             use serde::de::Error;
     152           12 :             let storage_class = StorageClass::from_str(&s).expect("infallible");
     153              :             #[allow(deprecated)]
     154           12 :             if matches!(storage_class, StorageClass::Unknown(_)) {
     155            0 :                 return Err(D::Error::custom(format!(
     156            0 :                     "Specified storage class unknown to SDK: '{s}'. Allowed values: {:?}",
     157            0 :                     StorageClass::values()
     158            0 :                 )));
     159           12 :             }
     160           12 :             Ok(Some(storage_class))
     161              :         } else {
     162            3 :             Ok(None)
     163              :         }
     164           15 :     })
     165           15 : }
     166              : 
     167            9 : fn serialize_storage_class<S: serde::Serializer>(
     168            9 :     val: &Option<StorageClass>,
     169            9 :     serializer: S,
     170            9 : ) -> Result<S::Ok, S::Error> {
     171            9 :     let val = val.as_ref().map(StorageClass::as_str);
     172            9 :     Option::<&str>::serialize(&val, serializer)
     173            9 : }
     174              : 
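
Taken together, `deserialize_storage_class` accepts any storage class name the SDK knows and rejects strings the SDK maps to `StorageClass::Unknown`. A hypothetical sketch of both paths (same assumptions as the test module below; 'STANDARD_IA' is one of the SDK's known values, the other bucket/region values are illustrative):

    // Hypothetical sketch: storage-class validation through the full config parse.
    fn storage_class_validation_sketch() {
        // Accepted: a storage class known to the SDK.
        let ok = "\
    bucket_name = 'b'
    bucket_region = 'us-east-1'
    upload_storage_class = 'STANDARD_IA'
    ";
        let doc = ok.parse::<toml_edit::DocumentMut>().unwrap();
        assert!(RemoteStorageConfig::from_toml(doc.as_item()).is_ok());

        // Rejected: the SDK maps an unrecognized string to `StorageClass::Unknown`,
        // which `deserialize_storage_class` refuses, so the overall parse fails.
        let bad = "\
    bucket_name = 'b'
    bucket_region = 'us-east-1'
    upload_storage_class = 'NOT_A_REAL_CLASS'
    ";
        let doc = bad.parse::<toml_edit::DocumentMut>().unwrap();
        assert!(RemoteStorageConfig::from_toml(doc.as_item()).is_err());
    }
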
     175              : impl RemoteStorageConfig {
     176              :     pub const DEFAULT_TIMEOUT: Duration = std::time::Duration::from_secs(120);
     177              : 
     178           10 :     pub fn from_toml(toml: &toml_edit::Item) -> anyhow::Result<RemoteStorageConfig> {
     179           10 :         Ok(utils::toml_edit_ext::deserialize_item(toml)?)
     180           10 :     }
     181              : }
     182              : 
     183              : #[cfg(test)]
     184              : mod tests {
     185              :     use super::*;
     186              : 
     187            9 :     fn parse(input: &str) -> anyhow::Result<RemoteStorageConfig> {
     188            9 :         let toml = input.parse::<toml_edit::DocumentMut>().unwrap();
     189            9 :         RemoteStorageConfig::from_toml(toml.as_item())
     190            9 :     }
     191              : 
     192              :     #[test]
     193            3 :     fn parse_localfs_config_with_timeout() {
     194            3 :         let input = "local_path = '.'
     195            3 : timeout = '5s'";
     196            3 : 
     197            3 :         let config = parse(input).unwrap();
     198            3 : 
     199            3 :         assert_eq!(
     200            3 :             config,
     201            3 :             RemoteStorageConfig {
     202            3 :                 storage: RemoteStorageKind::LocalFs {
     203            3 :                     local_path: Utf8PathBuf::from(".")
     204            3 :                 },
     205            3 :                 timeout: Duration::from_secs(5)
     206            3 :             }
     207            3 :         );
     208            3 :     }
     209              : 
     210              :     #[test]
     211            3 :     fn test_s3_parsing() {
     212            3 :         let toml = "\
     213            3 :     bucket_name = 'foo-bar'
     214            3 :     bucket_region = 'eu-central-1'
     215            3 :     upload_storage_class = 'INTELLIGENT_TIERING'
     216            3 :     timeout = '7s'
     217            3 :     ";
     218            3 : 
     219            3 :         let config = parse(toml).unwrap();
     220            3 : 
     221            3 :         assert_eq!(
     222            3 :             config,
     223            3 :             RemoteStorageConfig {
     224            3 :                 storage: RemoteStorageKind::AwsS3(S3Config {
     225            3 :                     bucket_name: "foo-bar".into(),
     226            3 :                     bucket_region: "eu-central-1".into(),
     227            3 :                     prefix_in_bucket: None,
     228            3 :                     endpoint: None,
     229            3 :                     concurrency_limit: default_remote_storage_s3_concurrency_limit(),
     230            3 :                     max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
     231            3 :                     upload_storage_class: Some(StorageClass::IntelligentTiering),
     232            3 :                 }),
     233            3 :                 timeout: Duration::from_secs(7)
     234            3 :             }
     235            3 :         );
     236            3 :     }
     237              : 
     238              :     #[test]
     239            3 :     fn test_storage_class_serde_roundtrip() {
     240            3 :         let classes = [
     241            3 :             None,
     242            3 :             Some(StorageClass::Standard),
     243            3 :             Some(StorageClass::IntelligentTiering),
     244            3 :         ];
     245           12 :         for class in classes {
     246           27 :             #[derive(Serialize, Deserialize)]
     247            9 :             struct Wrapper {
     248              :                 #[serde(
     249              :                     deserialize_with = "deserialize_storage_class",
     250              :                     serialize_with = "serialize_storage_class"
     251              :                 )]
     252              :                 class: Option<StorageClass>,
     253              :             }
     254            9 :             let wrapped = Wrapper {
     255            9 :                 class: class.clone(),
     256            9 :             };
     257            9 :             let serialized = serde_json::to_string(&wrapped).unwrap();
     258            9 :             let deserialized: Wrapper = serde_json::from_str(&serialized).unwrap();
     259            9 :             assert_eq!(class, deserialized.class);
     260              :         }
     261            3 :     }
     262              : 
     263              :     #[test]
     264            3 :     fn test_azure_parsing() {
     265            3 :         let toml = "\
     266            3 :     container_name = 'foo-bar'
     267            3 :     container_region = 'westeurope'
     268            3 :     upload_storage_class = 'INTELLIGENT_TIERING'
     269            3 :     timeout = '7s'
     270            3 :     ";
     271            3 : 
     272            3 :         let config = parse(toml).unwrap();
     273            3 : 
     274            3 :         assert_eq!(
     275            3 :             config,
     276            3 :             RemoteStorageConfig {
     277            3 :                 storage: RemoteStorageKind::AzureContainer(AzureConfig {
     278            3 :                     container_name: "foo-bar".into(),
     279            3 :                     storage_account: None,
     280            3 :                     container_region: "westeurope".into(),
     281            3 :                     prefix_in_container: None,
     282            3 :                     concurrency_limit: default_remote_storage_azure_concurrency_limit(),
     283            3 :                     max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
     284            3 :                 }),
     285            3 :                 timeout: Duration::from_secs(7)
     286            3 :             }
     287            3 :         );
     288            3 :     }
     289              : }
        

Generated by: LCOV version 2.1-beta