Line data Source code
1 : //! Functions for handling per-tenant configuration options
2 : //!
3 : //! If a tenant is created with the --config option,
4 : //! the tenant-specific config is stored in the tenant's directory.
5 : //! Otherwise, the global pageserver config is used.
6 : //!
7 : //! If the tenant config file is corrupted, the tenant will be disabled.
8 : //! We cannot fall back to the global or default config instead, because
9 : //! wrong settings may lead to data loss.
10 : //!
11 : use anyhow::bail;
12 : use pageserver_api::models;
13 : use pageserver_api::models::EvictionPolicy;
14 : use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize};
15 : use serde::de::IntoDeserializer;
16 : use serde::{Deserialize, Serialize};
17 : use serde_json::Value;
18 : use std::num::NonZeroU64;
19 : use std::time::Duration;
20 : use utils::generation::Generation;
21 :
22 : pub mod defaults {
23 : // FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB
24 : // would be more appropriate. But a low value forces the code to be exercised more,
25 : // which is good for now to trigger bugs.
26 : // This parameter actually determines L0 layer file size.
27 : pub const DEFAULT_CHECKPOINT_DISTANCE: u64 = 256 * 1024 * 1024;
28 : pub const DEFAULT_CHECKPOINT_TIMEOUT: &str = "10 m";
29 :
30 : // Target file size, when creating image and delta layers.
31 : // This parameter determines L1 layer file size.
32 : pub const DEFAULT_COMPACTION_TARGET_SIZE: u64 = 128 * 1024 * 1024;
33 :
34 : pub const DEFAULT_COMPACTION_PERIOD: &str = "20 s";
35 : pub const DEFAULT_COMPACTION_THRESHOLD: usize = 10;
36 :
37 : pub const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;
38 :
39 : // Large DEFAULT_GC_PERIOD is fine as long as PITR_INTERVAL is larger.
40 : // If there's a need to decrease this value, first make sure that GC
41 : // doesn't hold a layer map write lock for non-trivial operations.
42 : // Relevant: https://github.com/neondatabase/neon/issues/3394
43 : pub const DEFAULT_GC_PERIOD: &str = "1 hr";
44 : pub const DEFAULT_IMAGE_CREATION_THRESHOLD: usize = 3;
45 : pub const DEFAULT_PITR_INTERVAL: &str = "7 days";
46 : pub const DEFAULT_WALRECEIVER_CONNECT_TIMEOUT: &str = "10 seconds";
47 : pub const DEFAULT_WALRECEIVER_LAGGING_WAL_TIMEOUT: &str = "10 seconds";
48 : pub const DEFAULT_MAX_WALRECEIVER_LSN_WAL_LAG: u64 = 10 * 1024 * 1024;
49 : pub const DEFAULT_EVICTIONS_LOW_RESIDENCE_DURATION_METRIC_THRESHOLD: &str = "24 hour";
50 :
51 : pub const DEFAULT_INGEST_BATCH_SIZE: u64 = 100;
52 : }
53 :
54 1660 : #[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
55 : pub(crate) enum AttachmentMode {
56 : /// Our generation is current as far as we know, and as far as we know we are the only attached
57 : /// pageserver. This is the "normal" attachment mode.
58 : Single,
59 : /// Our generation number is current as far as we know, but we are advised that another
60 : /// pageserver is still attached, and therefore to avoid executing deletions. This is
61 : /// the attachment mode of a pageserver that is the destination of a migration.
62 : Multi,
63 : /// Our generation number is superseded, or about to be superseded. We are advised
64 : /// to avoid remote storage writes if possible, and to avoid sending billing data. This
65 : /// is the attachment mode of a pageserver that is the origin of a migration.
66 : Stale,
67 : }
68 :
69 1175 : #[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
70 : pub(crate) struct AttachedLocationConfig {
71 : pub(crate) generation: Generation,
72 : pub(crate) attach_mode: AttachmentMode,
73 : // TODO: add a flag to override AttachmentMode's policies under
74 : // disk pressure (i.e. unblock uploads under disk pressure in Stale
75 : // state, unblock deletions after timeout in Multi state)
76 : }
77 :
78 39 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
79 : pub(crate) struct SecondaryLocationConfig {
80 : /// If true, keep the local cache warm by polling remote storage
81 : pub(crate) warm: bool,
82 : }
83 :
84 983 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
85 : pub(crate) enum LocationMode {
86 : Attached(AttachedLocationConfig),
87 : Secondary(SecondaryLocationConfig),
88 : }
89 :
90 : /// Per-tenant, per-pageserver configuration. All pageservers use the same TenantConf,
91 : /// but each has a distinct LocationConf.
92 1224 : #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
93 : pub(crate) struct LocationConf {
94 : /// The location-specific part of the configuration, describes the operating
95 : /// mode of this pageserver for this tenant.
96 : pub(crate) mode: LocationMode,
97 :
98 : /// The detailed shard identity. This structure is already scoped within
99 : /// a TenantShardId, but we need the full ShardIdentity to enable calculating
100 : /// key->shard mappings.
101 : #[serde(default = "ShardIdentity::unsharded")]
102 : #[serde(skip_serializing_if = "ShardIdentity::is_unsharded")]
103 : pub(crate) shard: ShardIdentity,
104 :
105 : /// The pan-cluster tenant configuration, the same on all locations
106 : pub(crate) tenant_conf: TenantConfOpt,
107 : }
108 :
109 : impl std::fmt::Debug for LocationConf {
110 800 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
111 800 : match &self.mode {
112 766 : LocationMode::Attached(conf) => {
113 766 : write!(
114 766 : f,
115 766 : "Attached {:?}, gen={:?}",
116 766 : conf.attach_mode, conf.generation
117 766 : )
118 : }
119 34 : LocationMode::Secondary(conf) => {
120 34 : write!(f, "Secondary, warm={}", conf.warm)
121 : }
122 : }
123 800 : }
124 : }
125 :
126 : impl AttachedLocationConfig {
127 : /// Consult attachment mode to determine whether we are currently permitted
128 : /// to delete layers. This is only advisory, not required for data safety.
129 : /// See [`AttachmentMode`] for more context.
130 766 : pub(crate) fn may_delete_layers_hint(&self) -> bool {
131 766 : // TODO: add an override for disk pressure in AttachedLocationConfig,
132 766 : // and respect it here.
133 766 : match &self.attach_mode {
134 756 : AttachmentMode::Single => true,
135 : AttachmentMode::Multi | AttachmentMode::Stale => {
136 : // In Multi mode we avoid doing deletions because some other
137 : // attached pageserver might get 404 while trying to read
138 : // a layer we delete which is still referenced in their metadata.
139 : //
140 : // In Stale mode, we avoid doing deletions because we expect
141 : // that they would ultimately fail validation in the deletion
142 : // queue due to our stale generation.
143 10 : false
144 : }
145 : }
146 766 : }
147 :
148 : /// Whether we are currently hinted that it is worthwhile to upload layers.
149 : /// This is only advisory, not required for data safety.
150 : /// See [`AttachmentMode`] for more context.
151 399 : pub(crate) fn may_upload_layers_hint(&self) -> bool {
152 399 : // TODO: add an override for disk pressure in AttachedLocationConfig,
153 399 : // and respect it here.
154 399 : match &self.attach_mode {
155 399 : AttachmentMode::Single | AttachmentMode::Multi => true,
156 : AttachmentMode::Stale => {
157 : // In Stale mode, we avoid doing uploads because we expect that
158 : // our replacement pageserver will already have started its own
159 : // IndexPart that will never reference layers we upload: it is
160 : // wasteful.
161 0 : false
162 : }
163 : }
164 399 : }
165 : }
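
// Illustrative sketch (not part of the original file): the hints follow directly
// from the attachment mode. `Single` may both delete and upload, `Multi` may
// upload but not delete, and `Stale` may do neither.
//
//     let stale = AttachedLocationConfig {
//         generation: Generation::new(1),
//         attach_mode: AttachmentMode::Stale,
//     };
//     assert!(!stale.may_delete_layers_hint());
//     assert!(!stale.may_upload_layers_hint());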
166 :
167 : impl LocationConf {
168 : /// For use when loading from a legacy configuration: presence of a tenant
169 : /// implies it is in AttachmentMode::Single, which used to be the only
170 : /// possible state. This function should eventually be removed.
171 275 : pub(crate) fn attached_single(
172 275 : tenant_conf: TenantConfOpt,
173 275 : generation: Generation,
174 275 : shard_params: &models::ShardParameters,
175 275 : ) -> Self {
176 275 : Self {
177 275 : mode: LocationMode::Attached(AttachedLocationConfig {
178 275 : generation,
179 275 : attach_mode: AttachmentMode::Single,
180 275 : }),
181 275 : shard: ShardIdentity::from_params(ShardNumber(0), shard_params),
182 275 : tenant_conf,
183 275 : }
184 275 : }
185 :
186 : /// For use when attaching/re-attaching: update the generation stored in this
187 : /// structure. If we were in a secondary state, promote to attached (possession
188 : /// of a fresh generation implies this).
189 221 : pub(crate) fn attach_in_generation(&mut self, generation: Generation) {
190 221 : match &mut self.mode {
191 221 : LocationMode::Attached(attach_conf) => {
192 221 : attach_conf.generation = generation;
193 221 : }
194 : LocationMode::Secondary(_) => {
195 : // We are promoted to attached by the control plane's re-attach response
196 0 : self.mode = LocationMode::Attached(AttachedLocationConfig {
197 0 : generation,
198 0 : attach_mode: AttachmentMode::Single,
199 0 : })
200 : }
201 : }
202 221 : }
203 :
204 631 : pub(crate) fn try_from(conf: &'_ models::LocationConfig) -> anyhow::Result<Self> {
205 631 : let tenant_conf = TenantConfOpt::try_from(&conf.tenant_conf)?;
206 :
207 597 : fn get_generation(conf: &'_ models::LocationConfig) -> Result<Generation, anyhow::Error> {
208 597 : conf.generation
209 597 : .map(Generation::new)
210 597 : .ok_or_else(|| anyhow::anyhow!("Generation must be set when attaching"))
211 597 : }
212 :
213 631 : let mode = match &conf.mode {
214 : models::LocationConfigMode::AttachedMulti => {
215 : LocationMode::Attached(AttachedLocationConfig {
216 43 : generation: get_generation(conf)?,
217 43 : attach_mode: AttachmentMode::Multi,
218 : })
219 : }
220 : models::LocationConfigMode::AttachedSingle => {
221 : LocationMode::Attached(AttachedLocationConfig {
222 528 : generation: get_generation(conf)?,
223 528 : attach_mode: AttachmentMode::Single,
224 : })
225 : }
226 : models::LocationConfigMode::AttachedStale => {
227 : LocationMode::Attached(AttachedLocationConfig {
228 26 : generation: get_generation(conf)?,
229 26 : attach_mode: AttachmentMode::Stale,
230 : })
231 : }
232 : models::LocationConfigMode::Secondary => {
233 34 : anyhow::ensure!(conf.generation.is_none());
234 :
235 34 : let warm = conf
236 34 : .secondary_conf
237 34 : .as_ref()
238 34 : .map(|c| c.warm)
239 34 : .unwrap_or(false);
240 34 : LocationMode::Secondary(SecondaryLocationConfig { warm })
241 : }
242 : models::LocationConfigMode::Detached => {
243 : // Should not have been called: API code should translate this mode
244 : // into a detach rather than trying to decode it as a LocationConf
245 0 : return Err(anyhow::anyhow!("Cannot decode a Detached configuration"));
246 : }
247 : };
248 :
249 631 : let shard = if conf.shard_count == 0 {
250 565 : ShardIdentity::unsharded()
251 : } else {
252 66 : ShardIdentity::new(
253 66 : ShardNumber(conf.shard_number),
254 66 : ShardCount(conf.shard_count),
255 66 : ShardStripeSize(conf.shard_stripe_size),
256 66 : )?
257 : };
258 :
259 631 : Ok(Self {
260 631 : shard,
261 631 : mode,
262 631 : tenant_conf,
263 631 : })
264 631 : }
265 : }
266 :
267 : impl Default for LocationConf {
268 : // TODO: this should be removed once tenant loading can guarantee that we are never
269 : // loading from a directory without a configuration.
270 : // => tech debt since https://github.com/neondatabase/neon/issues/1555
271 4 : fn default() -> Self {
272 4 : Self {
273 4 : mode: LocationMode::Attached(AttachedLocationConfig {
274 4 : generation: Generation::none(),
275 4 : attach_mode: AttachmentMode::Single,
276 4 : }),
277 4 : tenant_conf: TenantConfOpt::default(),
278 4 : shard: ShardIdentity::unsharded(),
279 4 : }
280 4 : }
281 : }
282 :
283 : /// A tenant's calculated configuration, which is the result of merging a
284 : /// tenant's TenantConfOpt with the global TenantConf from PageServerConf.
285 : ///
286 : /// For storing and transmitting an individual tenant's configuration, see
287 : /// TenantConfOpt.
288 45 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
289 : pub struct TenantConf {
290 : // Flush out an inmemory layer if it is holding WAL older than this.
291 : // This puts a backstop on how much WAL needs to be re-digested if the
292 : // page server crashes.
293 : // This parameter actually determines L0 layer file size.
294 : pub checkpoint_distance: u64,
295 : // The inmemory layer is also flushed at least once per checkpoint_timeout,
296 : // so that WAL is eventually uploaded after activity stops.
297 : #[serde(with = "humantime_serde")]
298 : pub checkpoint_timeout: Duration,
299 : // Target file size, when creating image and delta layers.
300 : // This parameter determines L1 layer file size.
301 : pub compaction_target_size: u64,
302 : // How often to check if there's compaction work to be done.
303 : // Duration::ZERO means automatic compaction is disabled.
304 : #[serde(with = "humantime_serde")]
305 : pub compaction_period: Duration,
306 : // Level0 delta layer threshold for compaction.
307 : pub compaction_threshold: usize,
308 : // Determines how much history is retained, to allow
309 : // branching and read replicas at an older point in time.
310 : // The unit is #of bytes of WAL.
311 : // Page versions older than this are garbage collected away.
312 : pub gc_horizon: u64,
313 : // Interval at which garbage collection is triggered.
314 : // Duration::ZERO means automatic GC is disabled
315 : #[serde(with = "humantime_serde")]
316 : pub gc_period: Duration,
317 : // Delta layer churn threshold to create L1 image layers.
318 : pub image_creation_threshold: usize,
319 : // Determines how much history is retained, to allow
320 : // branching and read replicas at an older point in time.
321 : // The unit is time.
322 : // Page versions older than this are garbage collected away.
323 : #[serde(with = "humantime_serde")]
324 : pub pitr_interval: Duration,
325 : /// Maximum amount of time to wait while opening a connection to receive wal, before erroring.
326 : #[serde(with = "humantime_serde")]
327 : pub walreceiver_connect_timeout: Duration,
328 : /// Considers safekeepers stalled when no WAL updates have been received for longer than this threshold.
329 : /// A stalled safekeeper will be swapped for a newer one when it appears.
330 : #[serde(with = "humantime_serde")]
331 : pub lagging_wal_timeout: Duration,
332 : /// Considers safekeepers lagging when their WAL is behind another safekeeper for more than this threshold.
333 : /// A lagging safekeeper will be changed after `lagging_wal_timeout` elapses since the last WAL update,
334 : /// to avoid eager reconnects.
335 : pub max_lsn_wal_lag: NonZeroU64,
336 : pub trace_read_requests: bool,
337 : pub eviction_policy: EvictionPolicy,
338 : pub min_resident_size_override: Option<u64>,
339 : // See the corresponding metric's help string.
340 : #[serde(with = "humantime_serde")]
341 : pub evictions_low_residence_duration_metric_threshold: Duration,
342 : pub gc_feedback: bool,
343 :
344 : /// If non-zero, the period between uploads of a heatmap from attached tenants. This
345 : /// may be disabled if a Tenant will not have secondary locations: only secondary
346 : /// locations will use the heatmap uploaded by attached locations.
347 : pub heatmap_period: Duration,
348 :
349 : /// If true, SLRU segments are downloaded on demand; if false, SLRU segments are included in the basebackup.
350 : pub lazy_slru_download: bool,
351 : }
352 :
353 : /// Same as TenantConf, but this struct preserves the information about
354 : /// which parameters are set and which are not.
355 40839 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
356 : pub struct TenantConfOpt {
357 : #[serde(skip_serializing_if = "Option::is_none")]
358 : #[serde(default)]
359 : pub checkpoint_distance: Option<u64>,
360 :
361 : #[serde(skip_serializing_if = "Option::is_none")]
362 : #[serde(with = "humantime_serde")]
363 : #[serde(default)]
364 : pub checkpoint_timeout: Option<Duration>,
365 :
366 : #[serde(skip_serializing_if = "Option::is_none")]
367 : #[serde(default)]
368 : pub compaction_target_size: Option<u64>,
369 :
370 : #[serde(skip_serializing_if = "Option::is_none")]
371 : #[serde(with = "humantime_serde")]
372 : #[serde(default)]
373 : pub compaction_period: Option<Duration>,
374 :
375 : #[serde(skip_serializing_if = "Option::is_none")]
376 : #[serde(default)]
377 : pub compaction_threshold: Option<usize>,
378 :
379 : #[serde(skip_serializing_if = "Option::is_none")]
380 : #[serde(default)]
381 : pub gc_horizon: Option<u64>,
382 :
383 : #[serde(skip_serializing_if = "Option::is_none")]
384 : #[serde(with = "humantime_serde")]
385 : #[serde(default)]
386 : pub gc_period: Option<Duration>,
387 :
388 : #[serde(skip_serializing_if = "Option::is_none")]
389 : #[serde(default)]
390 : pub image_creation_threshold: Option<usize>,
391 :
392 : #[serde(skip_serializing_if = "Option::is_none")]
393 : #[serde(with = "humantime_serde")]
394 : #[serde(default)]
395 : pub pitr_interval: Option<Duration>,
396 :
397 : #[serde(skip_serializing_if = "Option::is_none")]
398 : #[serde(with = "humantime_serde")]
399 : #[serde(default)]
400 : pub walreceiver_connect_timeout: Option<Duration>,
401 :
402 : #[serde(skip_serializing_if = "Option::is_none")]
403 : #[serde(with = "humantime_serde")]
404 : #[serde(default)]
405 : pub lagging_wal_timeout: Option<Duration>,
406 :
407 : #[serde(skip_serializing_if = "Option::is_none")]
408 : #[serde(default)]
409 : pub max_lsn_wal_lag: Option<NonZeroU64>,
410 :
411 : #[serde(skip_serializing_if = "Option::is_none")]
412 : #[serde(default)]
413 : pub trace_read_requests: Option<bool>,
414 :
415 : #[serde(skip_serializing_if = "Option::is_none")]
416 : #[serde(default)]
417 : pub eviction_policy: Option<EvictionPolicy>,
418 :
419 : #[serde(skip_serializing_if = "Option::is_none")]
420 : #[serde(default)]
421 : pub min_resident_size_override: Option<u64>,
422 :
423 : #[serde(skip_serializing_if = "Option::is_none")]
424 : #[serde(with = "humantime_serde")]
425 : #[serde(default)]
426 : pub evictions_low_residence_duration_metric_threshold: Option<Duration>,
427 :
428 : #[serde(skip_serializing_if = "Option::is_none")]
429 : #[serde(default)]
430 : pub gc_feedback: Option<bool>,
431 :
432 : #[serde(skip_serializing_if = "Option::is_none")]
433 : #[serde(with = "humantime_serde")]
434 : #[serde(default)]
435 : pub heatmap_period: Option<Duration>,
436 :
437 : #[serde(skip_serializing_if = "Option::is_none")]
438 : #[serde(default)]
439 : pub lazy_slru_download: Option<bool>,
440 : }
441 :
442 : impl TenantConfOpt {
443 1087 : pub fn merge(&self, global_conf: TenantConf) -> TenantConf {
444 1087 : TenantConf {
445 1087 : checkpoint_distance: self
446 1087 : .checkpoint_distance
447 1087 : .unwrap_or(global_conf.checkpoint_distance),
448 1087 : checkpoint_timeout: self
449 1087 : .checkpoint_timeout
450 1087 : .unwrap_or(global_conf.checkpoint_timeout),
451 1087 : compaction_target_size: self
452 1087 : .compaction_target_size
453 1087 : .unwrap_or(global_conf.compaction_target_size),
454 1087 : compaction_period: self
455 1087 : .compaction_period
456 1087 : .unwrap_or(global_conf.compaction_period),
457 1087 : compaction_threshold: self
458 1087 : .compaction_threshold
459 1087 : .unwrap_or(global_conf.compaction_threshold),
460 1087 : gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon),
461 1087 : gc_period: self.gc_period.unwrap_or(global_conf.gc_period),
462 1087 : image_creation_threshold: self
463 1087 : .image_creation_threshold
464 1087 : .unwrap_or(global_conf.image_creation_threshold),
465 1087 : pitr_interval: self.pitr_interval.unwrap_or(global_conf.pitr_interval),
466 1087 : walreceiver_connect_timeout: self
467 1087 : .walreceiver_connect_timeout
468 1087 : .unwrap_or(global_conf.walreceiver_connect_timeout),
469 1087 : lagging_wal_timeout: self
470 1087 : .lagging_wal_timeout
471 1087 : .unwrap_or(global_conf.lagging_wal_timeout),
472 1087 : max_lsn_wal_lag: self.max_lsn_wal_lag.unwrap_or(global_conf.max_lsn_wal_lag),
473 1087 : trace_read_requests: self
474 1087 : .trace_read_requests
475 1087 : .unwrap_or(global_conf.trace_read_requests),
476 1087 : eviction_policy: self.eviction_policy.unwrap_or(global_conf.eviction_policy),
477 1087 : min_resident_size_override: self
478 1087 : .min_resident_size_override
479 1087 : .or(global_conf.min_resident_size_override),
480 1087 : evictions_low_residence_duration_metric_threshold: self
481 1087 : .evictions_low_residence_duration_metric_threshold
482 1087 : .unwrap_or(global_conf.evictions_low_residence_duration_metric_threshold),
483 1087 : gc_feedback: self.gc_feedback.unwrap_or(global_conf.gc_feedback),
484 1087 : heatmap_period: self.heatmap_period.unwrap_or(global_conf.heatmap_period),
485 1087 : lazy_slru_download: self
486 1087 : .lazy_slru_download
487 1087 : .unwrap_or(global_conf.lazy_slru_download),
488 1087 : }
489 1087 : }
490 : }
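
// Illustrative sketch (not part of the original file): fields that are set in the
// per-tenant `TenantConfOpt` override the global `TenantConf`; unset fields fall
// back to the global value.
//
//     let global = TenantConf::default();
//     let overrides = TenantConfOpt {
//         gc_horizon: Some(1024),
//         ..TenantConfOpt::default()
//     };
//     let effective = overrides.merge(global);
//     assert_eq!(effective.gc_horizon, 1024);
//     assert_eq!(effective.compaction_threshold, global.compaction_threshold);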
491 :
492 : impl Default for TenantConf {
493 2264 : fn default() -> Self {
494 2264 : use defaults::*;
495 2264 : Self {
496 2264 : checkpoint_distance: DEFAULT_CHECKPOINT_DISTANCE,
497 2264 : checkpoint_timeout: humantime::parse_duration(DEFAULT_CHECKPOINT_TIMEOUT)
498 2264 : .expect("cannot parse default checkpoint timeout"),
499 2264 : compaction_target_size: DEFAULT_COMPACTION_TARGET_SIZE,
500 2264 : compaction_period: humantime::parse_duration(DEFAULT_COMPACTION_PERIOD)
501 2264 : .expect("cannot parse default compaction period"),
502 2264 : compaction_threshold: DEFAULT_COMPACTION_THRESHOLD,
503 2264 : gc_horizon: DEFAULT_GC_HORIZON,
504 2264 : gc_period: humantime::parse_duration(DEFAULT_GC_PERIOD)
505 2264 : .expect("cannot parse default gc period"),
506 2264 : image_creation_threshold: DEFAULT_IMAGE_CREATION_THRESHOLD,
507 2264 : pitr_interval: humantime::parse_duration(DEFAULT_PITR_INTERVAL)
508 2264 : .expect("cannot parse default PITR interval"),
509 2264 : walreceiver_connect_timeout: humantime::parse_duration(
510 2264 : DEFAULT_WALRECEIVER_CONNECT_TIMEOUT,
511 2264 : )
512 2264 : .expect("cannot parse default walreceiver connect timeout"),
513 2264 : lagging_wal_timeout: humantime::parse_duration(DEFAULT_WALRECEIVER_LAGGING_WAL_TIMEOUT)
514 2264 : .expect("cannot parse default walreceiver lagging wal timeout"),
515 2264 : max_lsn_wal_lag: NonZeroU64::new(DEFAULT_MAX_WALRECEIVER_LSN_WAL_LAG)
516 2264 : .expect("cannot parse default max walreceiver Lsn wal lag"),
517 2264 : trace_read_requests: false,
518 2264 : eviction_policy: EvictionPolicy::NoEviction,
519 2264 : min_resident_size_override: None,
520 2264 : evictions_low_residence_duration_metric_threshold: humantime::parse_duration(
521 2264 : DEFAULT_EVICTIONS_LOW_RESIDENCE_DURATION_METRIC_THRESHOLD,
522 2264 : )
523 2264 : .expect("cannot parse default evictions_low_residence_duration_metric_threshold"),
524 2264 : gc_feedback: false,
525 2264 : heatmap_period: Duration::ZERO,
526 2264 : lazy_slru_download: false,
527 2264 : }
528 2264 : }
529 : }
530 :
531 : impl TryFrom<&'_ models::TenantConfig> for TenantConfOpt {
532 : type Error = anyhow::Error;
533 :
534 824 : fn try_from(request_data: &'_ models::TenantConfig) -> Result<Self, Self::Error> {
535 : // Convert the request_data to a JSON Value
536 824 : let json_value: Value = serde_json::to_value(request_data)?;
537 :
538 : // Create a Deserializer from the JSON Value
539 824 : let deserializer = json_value.into_deserializer();
540 :
541 : // Use serde_path_to_error to deserialize the JSON Value into TenantConfOpt
542 824 : let tenant_conf: TenantConfOpt = serde_path_to_error::deserialize(deserializer)?;
543 :
544 822 : Ok(tenant_conf)
545 824 : }
546 : }
547 :
548 : impl TryFrom<toml_edit::Item> for TenantConfOpt {
549 : type Error = anyhow::Error;
550 :
551 1035 : fn try_from(item: toml_edit::Item) -> Result<Self, Self::Error> {
552 1035 : match item {
553 28 : toml_edit::Item::Value(value) => {
554 28 : let d = value.into_deserializer();
555 28 : return serde_path_to_error::deserialize(d)
556 28 : .map_err(|e| anyhow::anyhow!("{}: {}", e.path(), e.inner().message()));
557 : }
558 1007 : toml_edit::Item::Table(table) => {
559 1007 : let deserializer = toml_edit::de::Deserializer::new(table.into());
560 1007 : return serde_path_to_error::deserialize(deserializer)
561 1007 : .map_err(|e| anyhow::anyhow!("{}: {}", e.path(), e.inner().message()));
562 : }
563 : _ => {
564 0 : bail!("expected non-inline table but found {item}")
565 : }
566 : }
567 1035 : }
568 : }
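
// Illustrative sketch (not part of the original file): decoding a per-tenant config
// fragment from TOML. The `[tenant_config]` key and the `toml_edit::Document` parse
// step are assumptions for the example; the conversion accepts either an inline
// value or a table.
//
//     let doc: toml_edit::Document = "[tenant_config]\ngc_horizon = 42\n".parse()?;
//     let conf = TenantConfOpt::try_from(doc["tenant_config"].clone())?;
//     assert_eq!(conf.gc_horizon, Some(42));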
569 :
570 : /// This is a conversion from our internal tenant config object to the one used
571 : /// in external APIs.
572 : impl From<TenantConfOpt> for models::TenantConfig {
573 5 : fn from(value: TenantConfOpt) -> Self {
574 5 : fn humantime(d: Duration) -> String {
575 3 : format!("{}s", d.as_secs())
576 3 : }
577 5 : Self {
578 5 : checkpoint_distance: value.checkpoint_distance,
579 5 : checkpoint_timeout: value.checkpoint_timeout.map(humantime),
580 5 : compaction_target_size: value.compaction_target_size,
581 5 : compaction_period: value.compaction_period.map(humantime),
582 5 : compaction_threshold: value.compaction_threshold,
583 5 : gc_horizon: value.gc_horizon,
584 5 : gc_period: value.gc_period.map(humantime),
585 5 : image_creation_threshold: value.image_creation_threshold,
586 5 : pitr_interval: value.pitr_interval.map(humantime),
587 5 : walreceiver_connect_timeout: value.walreceiver_connect_timeout.map(humantime),
588 5 : lagging_wal_timeout: value.lagging_wal_timeout.map(humantime),
589 5 : max_lsn_wal_lag: value.max_lsn_wal_lag,
590 5 : trace_read_requests: value.trace_read_requests,
591 5 : eviction_policy: value.eviction_policy,
592 5 : min_resident_size_override: value.min_resident_size_override,
593 5 : evictions_low_residence_duration_metric_threshold: value
594 5 : .evictions_low_residence_duration_metric_threshold
595 5 : .map(humantime),
596 5 : gc_feedback: value.gc_feedback,
597 5 : heatmap_period: value.heatmap_period.map(humantime),
598 5 : lazy_slru_download: value.lazy_slru_download,
599 5 : }
600 5 : }
601 : }
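
// Illustrative sketch (not part of the original file): durations are rendered as
// whole seconds when converting to the external API model.
//
//     let conf = TenantConfOpt {
//         checkpoint_timeout: Some(Duration::from_secs(90)),
//         ..TenantConfOpt::default()
//     };
//     let api: models::TenantConfig = conf.into();
//     assert_eq!(api.checkpoint_timeout.as_deref(), Some("90s"));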
602 :
603 : #[cfg(test)]
604 : mod tests {
605 : use super::*;
606 : use models::TenantConfig;
607 :
608 2 : #[test]
609 2 : fn de_serializing_pageserver_config_omits_empty_values() {
610 2 : let small_conf = TenantConfOpt {
611 2 : gc_horizon: Some(42),
612 2 : ..TenantConfOpt::default()
613 2 : };
614 2 :
615 2 : let toml_form = toml_edit::ser::to_string(&small_conf).unwrap();
616 2 : assert_eq!(toml_form, "gc_horizon = 42\n");
617 2 : assert_eq!(small_conf, toml_edit::de::from_str(&toml_form).unwrap());
618 :
619 2 : let json_form = serde_json::to_string(&small_conf).unwrap();
620 2 : assert_eq!(json_form, "{\"gc_horizon\":42}");
621 2 : assert_eq!(small_conf, serde_json::from_str(&json_form).unwrap());
622 2 : }
623 :
624 2 : #[test]
625 2 : fn test_try_from_models_tenant_config_err() {
626 2 : let tenant_config = models::TenantConfig {
627 2 : lagging_wal_timeout: Some("5a".to_string()),
628 2 : ..TenantConfig::default()
629 2 : };
630 2 :
631 2 : let tenant_conf_opt = TenantConfOpt::try_from(&tenant_config);
632 2 :
633 2 : assert!(
634 2 : tenant_conf_opt.is_err(),
635 0 : "Suceeded to convert TenantConfig to TenantConfOpt"
636 : );
637 :
638 2 : let expected_error_str =
639 2 : "lagging_wal_timeout: invalid value: string \"5a\", expected a duration";
640 2 : assert_eq!(tenant_conf_opt.unwrap_err().to_string(), expected_error_str);
641 2 : }
642 :
643 2 : #[test]
644 2 : fn test_try_from_models_tenant_config_success() {
645 2 : let tenant_config = models::TenantConfig {
646 2 : lagging_wal_timeout: Some("5s".to_string()),
647 2 : ..TenantConfig::default()
648 2 : };
649 2 :
650 2 : let tenant_conf_opt = TenantConfOpt::try_from(&tenant_config).unwrap();
651 2 :
652 2 : assert_eq!(
653 2 : tenant_conf_opt.lagging_wal_timeout,
654 2 : Some(Duration::from_secs(5))
655 2 : );
656 2 : }
657 : }