//! Functions for handling per-tenant configuration options
//!
//! If a tenant is created with the `--config` option,
//! the tenant-specific config is stored in the tenant's directory.
//! Otherwise, the global pageserver config is used.
//!
//! If the tenant config file is corrupted, the tenant is disabled.
//! We cannot fall back to the global or default config instead, because
//! wrong settings may lead to data loss.
//!
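//! Tenant-specific options are stored as a TOML table whose keys mirror the
//! fields of [`TenantConfOpt`], with durations given as humantime strings.
//! A minimal, hypothetical example:
//!
//! ```text
//! gc_horizon = 67108864
//! compaction_period = "20 s"
//! pitr_interval = "7 days"
//! ```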
use anyhow::bail;
use pageserver_api::models;
use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize};
use serde::de::IntoDeserializer;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::num::NonZeroU64;
use std::time::Duration;
use utils::generation::Generation;

pub mod defaults {
    // FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB
    // would be more appropriate. But a low value forces the code to be exercised more,
    // which is good for now to trigger bugs.
    // This parameter actually determines L0 layer file size.
    pub const DEFAULT_CHECKPOINT_DISTANCE: u64 = 256 * 1024 * 1024;
    pub const DEFAULT_CHECKPOINT_TIMEOUT: &str = "10 m";

    // Target file size, when creating image and delta layers.
    // This parameter determines L1 layer file size.
    pub const DEFAULT_COMPACTION_TARGET_SIZE: u64 = 128 * 1024 * 1024;

    pub const DEFAULT_COMPACTION_PERIOD: &str = "20 s";
    pub const DEFAULT_COMPACTION_THRESHOLD: usize = 10;

    pub const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;

    // A large DEFAULT_GC_PERIOD is fine as long as PITR_INTERVAL is larger.
    // If there's a need to decrease this value, first make sure that GC
    // doesn't hold a layer map write lock for non-trivial operations.
    // Relevant: https://github.com/neondatabase/neon/issues/3394
    pub const DEFAULT_GC_PERIOD: &str = "1 hr";
    pub const DEFAULT_IMAGE_CREATION_THRESHOLD: usize = 3;
    pub const DEFAULT_PITR_INTERVAL: &str = "7 days";
    pub const DEFAULT_WALRECEIVER_CONNECT_TIMEOUT: &str = "10 seconds";
    pub const DEFAULT_WALRECEIVER_LAGGING_WAL_TIMEOUT: &str = "10 seconds";
    pub const DEFAULT_MAX_WALRECEIVER_LSN_WAL_LAG: u64 = 10 * 1024 * 1024;
    pub const DEFAULT_EVICTIONS_LOW_RESIDENCE_DURATION_METRIC_THRESHOLD: &str = "24 hour";

    pub const DEFAULT_INGEST_BATCH_SIZE: u64 = 100;
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) enum AttachmentMode {
    /// Our generation is current as far as we know, and as far as we know we are the only attached
    /// pageserver. This is the "normal" attachment mode.
    Single,
    /// Our generation number is current as far as we know, but we are advised that another
    /// pageserver is still attached, and therefore to avoid executing deletions. This is
    /// the attachment mode of a pageserver that is the destination of a migration.
    Multi,
    /// Our generation number is superseded, or about to be superseded. We are advised
    /// to avoid remote storage writes if possible, and to avoid sending billing data. This
    /// is the attachment mode of a pageserver that is the origin of a migration.
    Stale,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) struct AttachedLocationConfig {
    pub(crate) generation: Generation,
    pub(crate) attach_mode: AttachmentMode,
    // TODO: add a flag to override AttachmentMode's policies under
    // disk pressure (i.e. unblock uploads under disk pressure in Stale
    // state, unblock deletions after timeout in Multi state)
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) struct SecondaryLocationConfig {
    /// If true, keep the local cache warm by polling remote storage
    pub(crate) warm: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub(crate) enum LocationMode {
    Attached(AttachedLocationConfig),
    Secondary(SecondaryLocationConfig),
}

/// Per-tenant, per-pageserver configuration. All pageservers use the same TenantConf,
/// but have distinct LocationConf.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub(crate) struct LocationConf {
    /// The location-specific part of the configuration, describes the operating
    /// mode of this pageserver for this tenant.
    pub(crate) mode: LocationMode,

    /// The detailed shard identity. This structure is already scoped within
    /// a TenantShardId, but we need the full ShardIdentity to enable calculating
    /// key->shard mappings.
    #[serde(default = "ShardIdentity::unsharded")]
    #[serde(skip_serializing_if = "ShardIdentity::is_unsharded")]
    pub(crate) shard: ShardIdentity,

    /// The pan-cluster tenant configuration, the same on all locations
    pub(crate) tenant_conf: TenantConfOpt,
}

impl std::fmt::Debug for LocationConf {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.mode {
            LocationMode::Attached(conf) => {
                write!(
                    f,
                    "Attached {:?}, gen={:?}",
                    conf.attach_mode, conf.generation
                )
            }
            LocationMode::Secondary(conf) => {
                write!(f, "Secondary, warm={}", conf.warm)
            }
        }
    }
}

impl AttachedLocationConfig {
    /// Consult attachment mode to determine whether we are currently permitted
    /// to delete layers. This is only advisory, not required for data safety.
    /// See [`AttachmentMode`] for more context.
    pub(crate) fn may_delete_layers_hint(&self) -> bool {
        // TODO: add an override for disk pressure in AttachedLocationConfig,
        // and respect it here.
        match &self.attach_mode {
            AttachmentMode::Single => true,
            AttachmentMode::Multi | AttachmentMode::Stale => {
                // In Multi mode we avoid doing deletions because some other
                // attached pageserver might get a 404 while trying to read
                // a layer we delete which is still referenced in their metadata.
                //
                // In Stale mode, we avoid doing deletions because we expect
                // that they would ultimately fail validation in the deletion
                // queue due to our stale generation.
                false
            }
        }
    }

    /// Whether we are currently hinted that it is worthwhile to upload layers.
    /// This is only advisory, not required for data safety.
    /// See [`AttachmentMode`] for more context.
    pub(crate) fn may_upload_layers_hint(&self) -> bool {
        // TODO: add an override for disk pressure in AttachedLocationConfig,
        // and respect it here.
        match &self.attach_mode {
            AttachmentMode::Single | AttachmentMode::Multi => true,
            AttachmentMode::Stale => {
                // In Stale mode, we avoid doing uploads because we expect that
                // our replacement pageserver will already have started its own
                // IndexPart that will never reference layers we upload: it is
                // wasteful.
                false
            }
        }
    }
}
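
// Example (sketch): callers consult these hints before scheduling deletions
// or uploads; hypothetical usage for a stale location:
//
//     let stale = AttachedLocationConfig {
//         generation: Generation::none(),
//         attach_mode: AttachmentMode::Stale,
//     };
//     assert!(!stale.may_delete_layers_hint());
//     assert!(!stale.may_upload_layers_hint());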

impl LocationConf {
    /// For use when loading from a legacy configuration: presence of a tenant
    /// implies it is in AttachmentMode::Single, which used to be the only
    /// possible state. This function should eventually be removed.
    pub(crate) fn attached_single(tenant_conf: TenantConfOpt, generation: Generation) -> Self {
        Self {
            mode: LocationMode::Attached(AttachedLocationConfig {
                generation,
                attach_mode: AttachmentMode::Single,
            }),
            // Legacy configuration loads are always from tenants created before sharding existed.
            shard: ShardIdentity::unsharded(),
            tenant_conf,
        }
    }

    /// For use when attaching/re-attaching: update the generation stored in this
    /// structure. If we were in a secondary state, promote to attached (possession
    /// of a fresh generation implies this).
    pub(crate) fn attach_in_generation(&mut self, generation: Generation) {
        match &mut self.mode {
            LocationMode::Attached(attach_conf) => {
                attach_conf.generation = generation;
            }
            LocationMode::Secondary(_) => {
                // We are promoted to attached by the control plane's re-attach response
                self.mode = LocationMode::Attached(AttachedLocationConfig {
                    generation,
                    attach_mode: AttachmentMode::Single,
                })
            }
        }
    }
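
    // Example (sketch): promoting a secondary location during re-attach.
    // `Generation::new(5)` stands in for a generation number received from
    // the control plane's re-attach response (the value is hypothetical):
    //
    //     let mut conf = LocationConf {
    //         mode: LocationMode::Secondary(SecondaryLocationConfig { warm: true }),
    //         shard: ShardIdentity::unsharded(),
    //         tenant_conf: TenantConfOpt::default(),
    //     };
    //     conf.attach_in_generation(Generation::new(5));
    //     assert!(matches!(conf.mode, LocationMode::Attached(_)));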

    pub(crate) fn try_from(conf: &'_ models::LocationConfig) -> anyhow::Result<Self> {
        let tenant_conf = TenantConfOpt::try_from(&conf.tenant_conf)?;

        fn get_generation(conf: &'_ models::LocationConfig) -> Result<Generation, anyhow::Error> {
            conf.generation
                .map(Generation::new)
                .ok_or_else(|| anyhow::anyhow!("Generation must be set when attaching"))
        }

        let mode = match &conf.mode {
            models::LocationConfigMode::AttachedMulti => {
                LocationMode::Attached(AttachedLocationConfig {
                    generation: get_generation(conf)?,
                    attach_mode: AttachmentMode::Multi,
                })
            }
            models::LocationConfigMode::AttachedSingle => {
                LocationMode::Attached(AttachedLocationConfig {
                    generation: get_generation(conf)?,
                    attach_mode: AttachmentMode::Single,
                })
            }
            models::LocationConfigMode::AttachedStale => {
                LocationMode::Attached(AttachedLocationConfig {
                    generation: get_generation(conf)?,
                    attach_mode: AttachmentMode::Stale,
                })
            }
            models::LocationConfigMode::Secondary => {
                anyhow::ensure!(conf.generation.is_none());

                let warm = conf
                    .secondary_conf
                    .as_ref()
                    .map(|c| c.warm)
                    .unwrap_or(false);
                LocationMode::Secondary(SecondaryLocationConfig { warm })
            }
            models::LocationConfigMode::Detached => {
                // Should not have been called: API code should translate this mode
                // into a detach rather than trying to decode it as a LocationConf
                return Err(anyhow::anyhow!("Cannot decode a Detached configuration"));
            }
        };

        let shard = if conf.shard_count == 0 {
            ShardIdentity::unsharded()
        } else {
            ShardIdentity::new(
                ShardNumber(conf.shard_number),
                ShardCount(conf.shard_count),
                ShardStripeSize(conf.shard_stripe_size),
            )?
        };

        Ok(Self {
            shard,
            mode,
            tenant_conf,
        })
    }
}

impl Default for LocationConf {
    // TODO: this should be removed once tenant loading can guarantee that we are never
    // loading from a directory without a configuration.
    // => tech debt since https://github.com/neondatabase/neon/issues/1555
    fn default() -> Self {
        Self {
            mode: LocationMode::Attached(AttachedLocationConfig {
                generation: Generation::none(),
                attach_mode: AttachmentMode::Single,
            }),
            tenant_conf: TenantConfOpt::default(),
            shard: ShardIdentity::unsharded(),
        }
    }
}

/// A tenant's calculated configuration, which is the result of merging a
/// tenant's TenantConfOpt with the global TenantConf from PageServerConf.
///
/// For storing and transmitting an individual tenant's configuration, see
/// TenantConfOpt.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct TenantConf {
    // Flush out an inmemory layer if it's holding WAL older than this.
    // This puts a backstop on how much WAL needs to be re-digested if the
    // page server crashes.
    // This parameter actually determines L0 layer file size.
    pub checkpoint_distance: u64,
    // An inmemory layer is also flushed at least once per checkpoint_timeout,
    // to eventually upload WAL after activity has stopped.
    #[serde(with = "humantime_serde")]
    pub checkpoint_timeout: Duration,
    // Target file size, when creating image and delta layers.
    // This parameter determines L1 layer file size.
    pub compaction_target_size: u64,
    // How often to check if there's compaction work to be done.
    // Duration::ZERO means automatic compaction is disabled.
    #[serde(with = "humantime_serde")]
    pub compaction_period: Duration,
    // Level0 delta layer threshold for compaction.
    pub compaction_threshold: usize,
    // Determines how much history is retained, to allow
    // branching and read replicas at an older point in time.
    // The unit is # of bytes of WAL.
    // Page versions older than this are garbage collected away.
    pub gc_horizon: u64,
    // Interval at which garbage collection is triggered.
    // Duration::ZERO means automatic GC is disabled.
    #[serde(with = "humantime_serde")]
    pub gc_period: Duration,
    // Delta layer churn threshold to create L1 image layers.
    pub image_creation_threshold: usize,
    // Determines how much history is retained, to allow
    // branching and read replicas at an older point in time.
    // The unit is time.
    // Page versions older than this are garbage collected away.
    #[serde(with = "humantime_serde")]
    pub pitr_interval: Duration,
    /// Maximum amount of time to wait while opening a connection to receive wal, before erroring.
    #[serde(with = "humantime_serde")]
    pub walreceiver_connect_timeout: Duration,
    /// Considers safekeepers stalled after no WAL updates were received longer than this threshold.
    /// A stalled safekeeper will be changed to a newer one when it appears.
    #[serde(with = "humantime_serde")]
    pub lagging_wal_timeout: Duration,
    /// Considers safekeepers lagging when their WAL is behind another safekeeper for more than this threshold.
    /// A lagging safekeeper will be changed after `lagging_wal_timeout` time elapses since the last WAL update,
    /// to avoid eager reconnects.
    pub max_lsn_wal_lag: NonZeroU64,
    pub trace_read_requests: bool,
    pub eviction_policy: EvictionPolicy,
    pub min_resident_size_override: Option<u64>,
    // See the corresponding metric's help string.
    #[serde(with = "humantime_serde")]
    pub evictions_low_residence_duration_metric_threshold: Duration,
    pub gc_feedback: bool,

    /// If non-zero, the period between uploads of a heatmap from attached tenants. This
    /// may be disabled if a Tenant will not have secondary locations: only secondary
    /// locations will use the heatmap uploaded by attached locations.
    pub heatmap_period: Duration,
}

/// Same as TenantConf, but this struct preserves the information about
/// which parameters are set and which are not.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct TenantConfOpt {
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub checkpoint_distance: Option<u64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub checkpoint_timeout: Option<Duration>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub compaction_target_size: Option<u64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub compaction_period: Option<Duration>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub compaction_threshold: Option<usize>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub gc_horizon: Option<u64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub gc_period: Option<Duration>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub image_creation_threshold: Option<usize>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub pitr_interval: Option<Duration>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub walreceiver_connect_timeout: Option<Duration>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub lagging_wal_timeout: Option<Duration>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub max_lsn_wal_lag: Option<NonZeroU64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub trace_read_requests: Option<bool>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub eviction_policy: Option<EvictionPolicy>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub min_resident_size_override: Option<u64>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub evictions_low_residence_duration_metric_threshold: Option<Duration>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub gc_feedback: Option<bool>,

    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "humantime_serde")]
    #[serde(default)]
    pub heatmap_period: Option<Duration>,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(tag = "kind")]
pub enum EvictionPolicy {
    NoEviction,
    LayerAccessThreshold(EvictionPolicyLayerAccessThreshold),
}

impl EvictionPolicy {
    pub fn discriminant_str(&self) -> &'static str {
        match self {
            EvictionPolicy::NoEviction => "NoEviction",
            EvictionPolicy::LayerAccessThreshold(_) => "LayerAccessThreshold",
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub struct EvictionPolicyLayerAccessThreshold {
    #[serde(with = "humantime_serde")]
    pub period: Duration,
    #[serde(with = "humantime_serde")]
    pub threshold: Duration,
}
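
// Example (sketch): with `#[serde(tag = "kind")]`, an eviction policy is
// (de)serialized as an internally tagged object, and the inner struct's
// durations are humantime strings. Hypothetical JSON forms:
//
//     {"kind":"NoEviction"}
//     {"kind":"LayerAccessThreshold","period":"20s","threshold":"24h"}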

impl TenantConfOpt {
    pub fn merge(&self, global_conf: TenantConf) -> TenantConf {
        TenantConf {
            checkpoint_distance: self
                .checkpoint_distance
                .unwrap_or(global_conf.checkpoint_distance),
            checkpoint_timeout: self
                .checkpoint_timeout
                .unwrap_or(global_conf.checkpoint_timeout),
            compaction_target_size: self
                .compaction_target_size
                .unwrap_or(global_conf.compaction_target_size),
            compaction_period: self
                .compaction_period
                .unwrap_or(global_conf.compaction_period),
            compaction_threshold: self
                .compaction_threshold
                .unwrap_or(global_conf.compaction_threshold),
            gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon),
            gc_period: self.gc_period.unwrap_or(global_conf.gc_period),
            image_creation_threshold: self
                .image_creation_threshold
                .unwrap_or(global_conf.image_creation_threshold),
            pitr_interval: self.pitr_interval.unwrap_or(global_conf.pitr_interval),
            walreceiver_connect_timeout: self
                .walreceiver_connect_timeout
                .unwrap_or(global_conf.walreceiver_connect_timeout),
            lagging_wal_timeout: self
                .lagging_wal_timeout
                .unwrap_or(global_conf.lagging_wal_timeout),
            max_lsn_wal_lag: self.max_lsn_wal_lag.unwrap_or(global_conf.max_lsn_wal_lag),
            trace_read_requests: self
                .trace_read_requests
                .unwrap_or(global_conf.trace_read_requests),
            eviction_policy: self.eviction_policy.unwrap_or(global_conf.eviction_policy),
            min_resident_size_override: self
                .min_resident_size_override
                .or(global_conf.min_resident_size_override),
            evictions_low_residence_duration_metric_threshold: self
                .evictions_low_residence_duration_metric_threshold
                .unwrap_or(global_conf.evictions_low_residence_duration_metric_threshold),
            gc_feedback: self.gc_feedback.unwrap_or(global_conf.gc_feedback),
            heatmap_period: self.heatmap_period.unwrap_or(global_conf.heatmap_period),
        }
    }
}
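
// Example (sketch): per-tenant overrides win and unset fields fall back to
// the global config (here taken to be `TenantConf::default()`):
//
//     let overrides = TenantConfOpt {
//         gc_horizon: Some(1024),
//         ..TenantConfOpt::default()
//     };
//     let effective = overrides.merge(TenantConf::default());
//     assert_eq!(effective.gc_horizon, 1024);
//     assert_eq!(
//         effective.compaction_threshold,
//         defaults::DEFAULT_COMPACTION_THRESHOLD
//     );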

impl Default for TenantConf {
    fn default() -> Self {
        use defaults::*;
        Self {
            checkpoint_distance: DEFAULT_CHECKPOINT_DISTANCE,
            checkpoint_timeout: humantime::parse_duration(DEFAULT_CHECKPOINT_TIMEOUT)
                .expect("cannot parse default checkpoint timeout"),
            compaction_target_size: DEFAULT_COMPACTION_TARGET_SIZE,
            compaction_period: humantime::parse_duration(DEFAULT_COMPACTION_PERIOD)
                .expect("cannot parse default compaction period"),
            compaction_threshold: DEFAULT_COMPACTION_THRESHOLD,
            gc_horizon: DEFAULT_GC_HORIZON,
            gc_period: humantime::parse_duration(DEFAULT_GC_PERIOD)
                .expect("cannot parse default gc period"),
            image_creation_threshold: DEFAULT_IMAGE_CREATION_THRESHOLD,
            pitr_interval: humantime::parse_duration(DEFAULT_PITR_INTERVAL)
                .expect("cannot parse default PITR interval"),
            walreceiver_connect_timeout: humantime::parse_duration(
                DEFAULT_WALRECEIVER_CONNECT_TIMEOUT,
            )
            .expect("cannot parse default walreceiver connect timeout"),
            lagging_wal_timeout: humantime::parse_duration(DEFAULT_WALRECEIVER_LAGGING_WAL_TIMEOUT)
                .expect("cannot parse default walreceiver lagging wal timeout"),
            max_lsn_wal_lag: NonZeroU64::new(DEFAULT_MAX_WALRECEIVER_LSN_WAL_LAG)
                .expect("cannot parse default max walreceiver Lsn wal lag"),
            trace_read_requests: false,
            eviction_policy: EvictionPolicy::NoEviction,
            min_resident_size_override: None,
            evictions_low_residence_duration_metric_threshold: humantime::parse_duration(
                DEFAULT_EVICTIONS_LOW_RESIDENCE_DURATION_METRIC_THRESHOLD,
            )
            .expect("cannot parse default evictions_low_residence_duration_metric_threshold"),
            gc_feedback: false,
            heatmap_period: Duration::ZERO,
        }
    }
}

impl TryFrom<&'_ models::TenantConfig> for TenantConfOpt {
    type Error = anyhow::Error;

    fn try_from(request_data: &'_ models::TenantConfig) -> Result<Self, Self::Error> {
        // Convert the request_data to a JSON Value
        let json_value: Value = serde_json::to_value(request_data)?;

        // Create a Deserializer from the JSON Value
        let deserializer = json_value.into_deserializer();

        // Use serde_path_to_error to deserialize the JSON Value into TenantConfOpt
        let tenant_conf: TenantConfOpt = serde_path_to_error::deserialize(deserializer)?;

        Ok(tenant_conf)
    }
}

impl TryFrom<toml_edit::Item> for TenantConfOpt {
    type Error = anyhow::Error;

    fn try_from(item: toml_edit::Item) -> Result<Self, Self::Error> {
        match item {
            toml_edit::Item::Value(value) => {
                let d = value.into_deserializer();
                return serde_path_to_error::deserialize(d)
                    .map_err(|e| anyhow::anyhow!("{}: {}", e.path(), e.inner().message()));
            }
            toml_edit::Item::Table(table) => {
                let deserializer = toml_edit::de::Deserializer::new(table.into());
                return serde_path_to_error::deserialize(deserializer)
                    .map_err(|e| anyhow::anyhow!("{}: {}", e.path(), e.inner().message()));
            }
            _ => {
                bail!("expected non-inline table but found {item}")
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use models::TenantConfig;

    #[test]
    fn de_serializing_pageserver_config_omits_empty_values() {
        let small_conf = TenantConfOpt {
            gc_horizon: Some(42),
            ..TenantConfOpt::default()
        };

        let toml_form = toml_edit::ser::to_string(&small_conf).unwrap();
        assert_eq!(toml_form, "gc_horizon = 42\n");
        assert_eq!(small_conf, toml_edit::de::from_str(&toml_form).unwrap());

        let json_form = serde_json::to_string(&small_conf).unwrap();
        assert_eq!(json_form, "{\"gc_horizon\":42}");
        assert_eq!(small_conf, serde_json::from_str(&json_form).unwrap());
    }

    #[test]
    fn test_try_from_models_tenant_config_err() {
        let tenant_config = models::TenantConfig {
            lagging_wal_timeout: Some("5a".to_string()),
            ..TenantConfig::default()
        };

        let tenant_conf_opt = TenantConfOpt::try_from(&tenant_config);

        assert!(
            tenant_conf_opt.is_err(),
            "Succeeded in converting TenantConfig to TenantConfOpt"
        );

        let expected_error_str =
            "lagging_wal_timeout: invalid value: string \"5a\", expected a duration";
        assert_eq!(tenant_conf_opt.unwrap_err().to_string(), expected_error_str);
    }

    #[test]
    fn test_try_from_models_tenant_config_success() {
        let tenant_config = models::TenantConfig {
            lagging_wal_timeout: Some("5s".to_string()),
            ..TenantConfig::default()
        };

        let tenant_conf_opt = TenantConfOpt::try_from(&tenant_config).unwrap();

        assert_eq!(
            tenant_conf_opt.lagging_wal_timeout,
            Some(Duration::from_secs(5))
        );
    }
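
    // A sketch of parsing a per-tenant TOML table via `TryFrom<toml_edit::Item>`,
    // assuming the `toml_edit::Document` API; humantime strings like "20 s"
    // deserialize into `Duration`s.
    #[test]
    fn test_try_from_toml_item_table() {
        let doc: toml_edit::Document = "gc_horizon = 42\ncompaction_period = '20 s'\n"
            .parse()
            .unwrap();
        let item = toml_edit::Item::Table(doc.as_table().clone());

        let conf = TenantConfOpt::try_from(item).unwrap();

        assert_eq!(conf.gc_horizon, Some(42));
        assert_eq!(conf.compaction_period, Some(Duration::from_secs(20)));
    }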
}