use std::{fmt::Debug, num::NonZeroUsize, str::FromStr, time::Duration};

use aws_sdk_s3::types::StorageClass;
use camino::Utf8PathBuf;

use serde::{Deserialize, Serialize};

use crate::{
    DEFAULT_MAX_KEYS_PER_LIST_RESPONSE, DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT,
    DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT, DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT,
};

/// External backup storage configuration, enough to create a client for that storage.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
pub struct RemoteStorageConfig {
    /// The storage connection configuration.
    #[serde(flatten)]
    pub storage: RemoteStorageKind,
    /// A common timeout enforced for all requests after a concurrency limiter permit
    /// has been acquired.
    #[serde(
        with = "humantime_serde",
        default = "default_timeout",
        skip_serializing_if = "is_default_timeout"
    )]
    pub timeout: Duration,
    /// An alternative timeout used for metadata objects, which are expected to be small.
    #[serde(
        with = "humantime_serde",
        default = "default_small_timeout",
        skip_serializing_if = "is_default_small_timeout"
    )]
    pub small_timeout: Duration,
}
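
// A minimal sketch (illustrative commentary, not from the original file) of a
// TOML fragment that deserializes into `RemoteStorageConfig` with the `LocalFs`
// kind; the timeout values use the humantime format handled by
// `humantime_serde`, and both fall back to the defaults defined below when
// omitted:
//
//   local_path = '/some/path'   # hypothetical path
//   timeout = '120s'
//   small_timeout = '30s'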

impl RemoteStorageKind {
    /// Returns the remote bucket or container name, if this storage kind has one.
    pub fn bucket_name(&self) -> Option<&str> {
        match self {
            RemoteStorageKind::LocalFs { .. } => None,
            RemoteStorageKind::AwsS3(config) => Some(&config.bucket_name),
            RemoteStorageKind::AzureContainer(config) => Some(&config.container_name),
        }
    }
}

impl RemoteStorageConfig {
    /// Helper to fetch the configured concurrency limit.
    pub fn concurrency_limit(&self) -> usize {
        match &self.storage {
            RemoteStorageKind::LocalFs { .. } => DEFAULT_REMOTE_STORAGE_LOCALFS_CONCURRENCY_LIMIT,
            RemoteStorageKind::AwsS3(c) => c.concurrency_limit.into(),
            RemoteStorageKind::AzureContainer(c) => c.concurrency_limit.into(),
        }
    }
}

fn default_timeout() -> Duration {
    RemoteStorageConfig::DEFAULT_TIMEOUT
}

fn default_small_timeout() -> Duration {
    RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT
}

fn is_default_timeout(d: &Duration) -> bool {
    *d == RemoteStorageConfig::DEFAULT_TIMEOUT
}

fn is_default_small_timeout(d: &Duration) -> bool {
    *d == RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT
}

/// A kind of remote storage to connect to, with its connection configuration.
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize)]
#[serde(untagged)]
pub enum RemoteStorageKind {
    /// Storage based on the local file system.
    /// Specify a root folder to place all stored files into.
    LocalFs { local_path: Utf8PathBuf },
    /// AWS S3 based storage, storing all files in the S3 bucket
    /// specified by the config.
    AwsS3(S3Config),
    /// Azure Blob based storage, storing all files in the container
    /// specified by the config.
    AzureContainer(AzureConfig),
}
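
// Commentary (added): because of `#[serde(untagged)]`, serde selects the
// variant by trying each one in declaration order against the input fields,
// not by reading an explicit tag. In practice, a `local_path` key yields
// `LocalFs`, `bucket_name`/`bucket_region` yield `AwsS3`, and
// `container_name`/`container_region` yield `AzureContainer`.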

/// AWS S3 bucket coordinates and access credentials to manage the bucket contents (read and write).
#[derive(Clone, PartialEq, Eq, Deserialize, Serialize)]
pub struct S3Config {
    /// Name of the bucket to connect to.
    pub bucket_name: String,
    /// The region where the bucket is located.
    pub bucket_region: String,
    /// A "subfolder" in the bucket that lets multiple remote storage users share the same bucket.
    pub prefix_in_bucket: Option<String>,
    /// A base URL to send S3 requests to.
    /// By default, the endpoint is derived from the region name, assuming it is
    /// an AWS S3 region; an invalid region name results in an error.
    /// The endpoint provides a way to support other S3 flavors and their regions.
    ///
    /// Example: `http://127.0.0.1:5000`
    pub endpoint: Option<String>,
    /// AWS S3 imposes various limits on its API calls; we must not exceed them.
    /// See [`DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT`] for more details.
    #[serde(default = "default_remote_storage_s3_concurrency_limit")]
    pub concurrency_limit: NonZeroUsize,
    #[serde(default = "default_max_keys_per_list_response")]
    pub max_keys_per_list_response: Option<i32>,
    #[serde(
        deserialize_with = "deserialize_storage_class",
        serialize_with = "serialize_storage_class",
        default
    )]
    pub upload_storage_class: Option<StorageClass>,
}

fn default_remote_storage_s3_concurrency_limit() -> NonZeroUsize {
    DEFAULT_REMOTE_STORAGE_S3_CONCURRENCY_LIMIT
        .try_into()
        .unwrap()
}

fn default_max_keys_per_list_response() -> Option<i32> {
    DEFAULT_MAX_KEYS_PER_LIST_RESPONSE
}

fn default_azure_conn_pool_size() -> usize {
    // By default, the Azure SDK does no connection pooling, due to historical reports of
    // hard-to-reproduce issues (https://github.com/hyperium/hyper/issues/2312).
    //
    // However, connection pooling is important to avoid exhausting client ports when
    // issuing huge numbers of requests (https://github.com/neondatabase/cloud/issues/20971).
    //
    // We therefore enable a modest pool size by default; this may be configured to zero
    // if issues like the alleged upstream hyper issue appear.
    8
}

impl Debug for S3Config {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("S3Config")
            .field("bucket_name", &self.bucket_name)
            .field("bucket_region", &self.bucket_region)
            .field("prefix_in_bucket", &self.prefix_in_bucket)
            .field("concurrency_limit", &self.concurrency_limit)
            .field(
                "max_keys_per_list_response",
                &self.max_keys_per_list_response,
            )
            .finish()
    }
}

/// Azure container coordinates and access credentials to manage the container contents (read and write).
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct AzureConfig {
    /// Name of the container to connect to.
    pub container_name: String,
    /// Name of the storage account the container belongs to.
    pub storage_account: Option<String>,
    /// The region where the container is located.
    pub container_region: String,
    /// A "subfolder" in the container that lets multiple remote storage users share the same container.
    pub prefix_in_container: Option<String>,
    /// Azure imposes various limits on its API calls; we must not exceed them.
    /// See [`DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT`] for more details.
    #[serde(default = "default_remote_storage_azure_concurrency_limit")]
    pub concurrency_limit: NonZeroUsize,
    #[serde(default = "default_max_keys_per_list_response")]
    pub max_keys_per_list_response: Option<i32>,
    #[serde(default = "default_azure_conn_pool_size")]
    pub conn_pool_size: usize,
}

fn default_remote_storage_azure_concurrency_limit() -> NonZeroUsize {
    NonZeroUsize::new(DEFAULT_REMOTE_STORAGE_AZURE_CONCURRENCY_LIMIT).unwrap()
}

impl Debug for AzureConfig {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AzureConfig")
            .field("container_name", &self.container_name)
            .field("storage_account", &self.storage_account)
            .field("container_region", &self.container_region)
            .field("prefix_in_container", &self.prefix_in_container)
            .field("concurrency_limit", &self.concurrency_limit)
            .field(
                "max_keys_per_list_response",
                &self.max_keys_per_list_response,
            )
            .finish()
    }
}

fn deserialize_storage_class<'de, D: serde::Deserializer<'de>>(
    deserializer: D,
) -> Result<Option<StorageClass>, D::Error> {
    Option::<String>::deserialize(deserializer).and_then(|s| {
        if let Some(s) = s {
            use serde::de::Error;
            // `StorageClass::from_str` cannot fail: unrecognized strings are mapped
            // to `StorageClass::Unknown`, which is rejected explicitly below.
            let storage_class = StorageClass::from_str(&s).expect("infallible");
            #[allow(deprecated)]
            if matches!(storage_class, StorageClass::Unknown(_)) {
                return Err(D::Error::custom(format!(
                    "Specified storage class unknown to SDK: '{s}'. Allowed values: {:?}",
                    StorageClass::values()
                )));
            }
            Ok(Some(storage_class))
        } else {
            Ok(None)
        }
    })
}
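
// Illustrative behavior (added commentary): strings the AWS SDK recognizes,
// such as 'STANDARD' or 'INTELLIGENT_TIERING', parse into known `StorageClass`
// variants, while an unrecognized string like 'NOT_A_CLASS' maps to
// `StorageClass::Unknown(_)` and is rejected by the check above.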

fn serialize_storage_class<S: serde::Serializer>(
    val: &Option<StorageClass>,
    serializer: S,
) -> Result<S::Ok, S::Error> {
    let val = val.as_ref().map(StorageClass::as_str);
    Option::<&str>::serialize(&val, serializer)
}

impl RemoteStorageConfig {
    pub const DEFAULT_TIMEOUT: Duration = std::time::Duration::from_secs(120);
    pub const DEFAULT_SMALL_TIMEOUT: Duration = std::time::Duration::from_secs(30);

    /// Deserializes the config from an already parsed TOML item.
    pub fn from_toml(toml: &toml_edit::Item) -> anyhow::Result<RemoteStorageConfig> {
        Ok(utils::toml_edit_ext::deserialize_item(toml)?)
    }

    /// Parses the config from a TOML string, preferring a `remote_storage` table
    /// if one is present and falling back to the document root otherwise.
    pub fn from_toml_str(input: &str) -> anyhow::Result<RemoteStorageConfig> {
        let toml_document = toml_edit::DocumentMut::from_str(input)?;
        if let Some(item) = toml_document.get("remote_storage") {
            return Self::from_toml(item);
        }
        Self::from_toml(toml_document.as_item())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    fn parse(input: &str) -> anyhow::Result<RemoteStorageConfig> {
        RemoteStorageConfig::from_toml_str(input)
    }
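
    // A hedged sketch, not part of the original suite: exercises the
    // `remote_storage` table lookup branch of `from_toml_str`, which the
    // tests below only cover for top-level documents.
    #[test]
    fn parse_nested_remote_storage_table() {
        let input = "[remote_storage]
        local_path = '.'";

        let config = parse(input).unwrap();

        assert_eq!(
            config.storage,
            RemoteStorageKind::LocalFs {
                local_path: Utf8PathBuf::from(".")
            }
        );
    }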

    #[test]
    fn parse_localfs_config_with_timeout() {
        let input = "local_path = '.'
        timeout = '5s'";

        let config = parse(input).unwrap();

        assert_eq!(
            config,
            RemoteStorageConfig {
                storage: RemoteStorageKind::LocalFs {
                    local_path: Utf8PathBuf::from(".")
                },
                timeout: Duration::from_secs(5),
                small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT
            }
        );
    }

    #[test]
    fn test_s3_parsing() {
        let toml = "\
            bucket_name = 'foo-bar'
            bucket_region = 'eu-central-1'
            upload_storage_class = 'INTELLIGENT_TIERING'
            timeout = '7s'
        ";

        let config = parse(toml).unwrap();

        assert_eq!(
            config,
            RemoteStorageConfig {
                storage: RemoteStorageKind::AwsS3(S3Config {
                    bucket_name: "foo-bar".into(),
                    bucket_region: "eu-central-1".into(),
                    prefix_in_bucket: None,
                    endpoint: None,
                    concurrency_limit: default_remote_storage_s3_concurrency_limit(),
                    max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
                    upload_storage_class: Some(StorageClass::IntelligentTiering),
                }),
                timeout: Duration::from_secs(7),
                small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT
            }
        );
    }

    #[test]
    fn test_storage_class_serde_roundtrip() {
        let classes = [
            None,
            Some(StorageClass::Standard),
            Some(StorageClass::IntelligentTiering),
        ];
        for class in classes {
            #[derive(Serialize, Deserialize)]
            struct Wrapper {
                #[serde(
                    deserialize_with = "deserialize_storage_class",
                    serialize_with = "serialize_storage_class"
                )]
                class: Option<StorageClass>,
            }
            let wrapped = Wrapper {
                class: class.clone(),
            };
            let serialized = serde_json::to_string(&wrapped).unwrap();
            let deserialized: Wrapper = serde_json::from_str(&serialized).unwrap();
            assert_eq!(class, deserialized.class);
        }
    }

    #[test]
    fn test_azure_parsing() {
        let toml = "\
            container_name = 'foo-bar'
            container_region = 'westeurope'
            upload_storage_class = 'INTELLIGENT_TIERING'
            timeout = '7s'
            conn_pool_size = 8
        ";

        let config = parse(toml).unwrap();

        assert_eq!(
            config,
            RemoteStorageConfig {
                storage: RemoteStorageKind::AzureContainer(AzureConfig {
                    container_name: "foo-bar".into(),
                    storage_account: None,
                    container_region: "westeurope".into(),
                    prefix_in_container: None,
                    concurrency_limit: default_remote_storage_azure_concurrency_limit(),
                    max_keys_per_list_response: DEFAULT_MAX_KEYS_PER_LIST_RESPONSE,
                    conn_pool_size: 8,
                }),
                timeout: Duration::from_secs(7),
                small_timeout: RemoteStorageConfig::DEFAULT_SMALL_TIMEOUT
            }
        );
    }
}