Line data Source code
1 : //! Functions for handling per-tenant configuration options
2 : //!
3 : //! If a tenant is created with the --config option,
4 : //! the tenant-specific config will be stored in the tenant's directory.
5 : //! Otherwise, the global pageserver config is used.
6 : //!
7 : //! If the tenant config file is corrupted, the tenant will be disabled.
8 : //! We cannot use global or default config instead, because wrong settings
9 : //! may lead to data loss.
10 : //!
11 : use anyhow::bail;
12 : use pageserver_api::models::CompactionAlgorithm;
13 : use pageserver_api::models::EvictionPolicy;
14 : use pageserver_api::models::{self, ThrottleConfig};
15 : use pageserver_api::shard::{ShardCount, ShardIdentity, ShardNumber, ShardStripeSize};
16 : use serde::de::IntoDeserializer;
17 : use serde::{Deserialize, Serialize};
18 : use serde_json::Value;
19 : use std::num::NonZeroU64;
20 : use std::time::Duration;
21 : use utils::generation::Generation;
22 :
23 : pub mod defaults {
24 :
25 : // FIXME: This current value is very low. I would imagine something like 1 GB or 10 GB
26 : // would be more appropriate. But a low value forces the code to be exercised more,
27 : // which is good for now to trigger bugs.
28 : // This parameter actually determines L0 layer file size.
29 : pub const DEFAULT_CHECKPOINT_DISTANCE: u64 = 256 * 1024 * 1024;
30 : pub const DEFAULT_CHECKPOINT_TIMEOUT: &str = "10 m";
31 :
32 : // FIXME: the configs below are only used by the legacy algorithm. The new
33 : // algorithm has different parameters.
34 :
35 : // Target file size, when creating image and delta layers.
36 : // This parameter determines L1 layer file size.
37 : pub const DEFAULT_COMPACTION_TARGET_SIZE: u64 = 128 * 1024 * 1024;
38 :
39 : pub const DEFAULT_COMPACTION_PERIOD: &str = "20 s";
40 : pub const DEFAULT_COMPACTION_THRESHOLD: usize = 10;
41 : pub const DEFAULT_COMPACTION_ALGORITHM: super::CompactionAlgorithm =
42 : super::CompactionAlgorithm::Legacy;
43 :
44 : pub const DEFAULT_GC_HORIZON: u64 = 64 * 1024 * 1024;
45 :
46 : // Large DEFAULT_GC_PERIOD is fine as long as PITR_INTERVAL is larger.
47 : // If there's a need to decrease this value, first make sure that GC
48 : // doesn't hold a layer map write lock for non-trivial operations.
49 : // Relevant: https://github.com/neondatabase/neon/issues/3394
50 : pub const DEFAULT_GC_PERIOD: &str = "1 hr";
51 : pub const DEFAULT_IMAGE_CREATION_THRESHOLD: usize = 3;
52 : pub const DEFAULT_PITR_INTERVAL: &str = "7 days";
53 : pub const DEFAULT_WALRECEIVER_CONNECT_TIMEOUT: &str = "10 seconds";
54 : pub const DEFAULT_WALRECEIVER_LAGGING_WAL_TIMEOUT: &str = "10 seconds";
55 : // The default limit on WAL lag should be set to avoid causing disconnects under high throughput
56 : // scenarios: since the broker stats are updated ~1/s, a value of 1GiB should be sufficient for
57 : // throughputs up to 1GiB/s per timeline.
58 : pub const DEFAULT_MAX_WALRECEIVER_LSN_WAL_LAG: u64 = 1024 * 1024 * 1024;
59 : pub const DEFAULT_EVICTIONS_LOW_RESIDENCE_DURATION_METRIC_THRESHOLD: &str = "24 hour";
60 : // By default, ingest enough WAL for two new L0 layers before checking if new
61 : // image layers should be created.
62 : pub const DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD: u8 = 2;
63 :
64 : pub const DEFAULT_INGEST_BATCH_SIZE: u64 = 100;
65 : }
66 :
67 0 : #[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
68 : pub(crate) enum AttachmentMode {
69 : /// Our generation is current as far as we know, and as far as we know we are the only attached
70 : /// pageserver. This is the "normal" attachment mode.
71 : Single,
72 : /// Our generation number is current as far as we know, but we are advised that another
73 : /// pageserver is still attached, and therefore to avoid executing deletions. This is
74 : /// the attachment mode of a pageserver that is the destination of a migration.
75 : Multi,
76 : /// Our generation number is superseded, or about to be superseded. We are advised
77 : /// to avoid remote storage writes if possible, and to avoid sending billing data. This
78 : /// is the attachment mode of a pageserver that is the origin of a migration.
79 : Stale,
80 : }
81 :
82 0 : #[derive(Debug, Copy, Clone, Serialize, Deserialize, PartialEq, Eq)]
83 : pub(crate) struct AttachedLocationConfig {
84 : pub(crate) generation: Generation,
85 : pub(crate) attach_mode: AttachmentMode,
86 : // TODO: add a flag to override AttachmentMode's policies under
87 : // disk pressure (i.e. unblock uploads under disk pressure in Stale
88 : // state, unblock deletions after timeout in Multi state)
89 : }
90 :
91 0 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
92 : pub(crate) struct SecondaryLocationConfig {
93 : /// If true, keep the local cache warm by polling remote storage
94 : pub(crate) warm: bool,
95 : }
96 :
97 0 : #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
98 : pub(crate) enum LocationMode {
99 : Attached(AttachedLocationConfig),
100 : Secondary(SecondaryLocationConfig),
101 : }
102 :
103 : /// Per-tenant, per-pageserver configuration. All pageservers use the same TenantConf,
104 : /// but have distinct LocationConf.
105 0 : #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
106 : pub(crate) struct LocationConf {
107 : /// The location-specific part of the configuration, describes the operating
108 : /// mode of this pageserver for this tenant.
109 : pub(crate) mode: LocationMode,
110 :
111 : /// The detailed shard identity. This structure is already scoped within
112 : /// a TenantShardId, but we need the full ShardIdentity to enable calculating
113 : /// key->shard mappings.
114 : #[serde(default = "ShardIdentity::unsharded")]
115 : #[serde(skip_serializing_if = "ShardIdentity::is_unsharded")]
116 : pub(crate) shard: ShardIdentity,
117 :
118 : /// The pan-cluster tenant configuration, the same on all locations
119 : pub(crate) tenant_conf: TenantConfOpt,
120 : }
121 :
122 : impl std::fmt::Debug for LocationConf {
123 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
124 0 : match &self.mode {
125 0 : LocationMode::Attached(conf) => {
126 0 : write!(
127 0 : f,
128 0 : "Attached {:?}, gen={:?}",
129 0 : conf.attach_mode, conf.generation
130 0 : )
131 : }
132 0 : LocationMode::Secondary(conf) => {
133 0 : write!(f, "Secondary, warm={}", conf.warm)
134 : }
135 : }
136 0 : }
137 : }
138 :
139 : impl AttachedLocationConfig {
140 : /// Consult attachment mode to determine whether we are currently permitted
141 : /// to delete layers. This is only advisory, not required for data safety.
142 : /// See [`AttachmentMode`] for more context.
143 8 : pub(crate) fn may_delete_layers_hint(&self) -> bool {
144 8 : // TODO: add an override for disk pressure in AttachedLocationConfig,
145 8 : // and respect it here.
146 8 : match &self.attach_mode {
147 8 : AttachmentMode::Single => true,
148 : AttachmentMode::Multi | AttachmentMode::Stale => {
149 : // In Multi mode we avoid doing deletions because some other
150 : // attached pageserver might get a 404 while trying to read
151 : // a layer we delete that is still referenced in its metadata.
152 : //
153 : // In Stale mode, we avoid doing deletions because we expect
154 : // that they would ultimately fail validation in the deletion
155 : // queue due to our stale generation.
156 0 : false
157 : }
158 : }
159 8 : }
160 :
161 : /// Whether we are currently hinted that it is worthwhile to upload layers.
162 : /// This is only advisory, not required for data safety.
163 : /// See [`AttachmentMode`] for more context.
164 0 : pub(crate) fn may_upload_layers_hint(&self) -> bool {
165 0 : // TODO: add an override for disk pressure in AttachedLocationConfig,
166 0 : // and respect it here.
167 0 : match &self.attach_mode {
168 0 : AttachmentMode::Single | AttachmentMode::Multi => true,
169 : AttachmentMode::Stale => {
170 : // In Stale mode, we avoid doing uploads because we expect that
171 : // our replacement pageserver will already have started its own
172 : // IndexPart that will never reference layers we upload: it is
173 : // wasteful.
174 0 : false
175 : }
176 : }
177 0 : }
178 : }
179 :
180 : impl LocationConf {
181 : /// For use when loading from a legacy configuration: presence of a tenant
182 : /// implies it is in AttachmentMode::Single, which used to be the only
183 : /// possible state. This function should eventually be removed.
184 108 : pub(crate) fn attached_single(
185 108 : tenant_conf: TenantConfOpt,
186 108 : generation: Generation,
187 108 : shard_params: &models::ShardParameters,
188 108 : ) -> Self {
189 108 : Self {
190 108 : mode: LocationMode::Attached(AttachedLocationConfig {
191 108 : generation,
192 108 : attach_mode: AttachmentMode::Single,
193 108 : }),
194 108 : shard: ShardIdentity::from_params(ShardNumber(0), shard_params),
195 108 : tenant_conf,
196 108 : }
197 108 : }
198 :
199 : /// For use when attaching/re-attaching: update the generation stored in this
200 : /// structure. If we were in a secondary state, promote to attached (posession
201 : /// of a fresh generation implies this).
202 0 : pub(crate) fn attach_in_generation(&mut self, mode: AttachmentMode, generation: Generation) {
203 0 : match &mut self.mode {
204 0 : LocationMode::Attached(attach_conf) => {
205 0 : attach_conf.generation = generation;
206 0 : attach_conf.attach_mode = mode;
207 0 : }
208 : LocationMode::Secondary(_) => {
209 : // We are promoted to attached by the control plane's re-attach response
210 0 : self.mode = LocationMode::Attached(AttachedLocationConfig {
211 0 : generation,
212 0 : attach_mode: mode,
213 0 : })
214 : }
215 : }
216 0 : }
217 :
218 0 : pub(crate) fn try_from(conf: &'_ models::LocationConfig) -> anyhow::Result<Self> {
219 0 : let tenant_conf = TenantConfOpt::try_from(&conf.tenant_conf)?;
220 :
221 0 : fn get_generation(conf: &'_ models::LocationConfig) -> Result<Generation, anyhow::Error> {
222 0 : conf.generation
223 0 : .map(Generation::new)
224 0 : .ok_or_else(|| anyhow::anyhow!("Generation must be set when attaching"))
225 0 : }
226 :
227 0 : let mode = match &conf.mode {
228 : models::LocationConfigMode::AttachedMulti => {
229 : LocationMode::Attached(AttachedLocationConfig {
230 0 : generation: get_generation(conf)?,
231 0 : attach_mode: AttachmentMode::Multi,
232 : })
233 : }
234 : models::LocationConfigMode::AttachedSingle => {
235 : LocationMode::Attached(AttachedLocationConfig {
236 0 : generation: get_generation(conf)?,
237 0 : attach_mode: AttachmentMode::Single,
238 : })
239 : }
240 : models::LocationConfigMode::AttachedStale => {
241 : LocationMode::Attached(AttachedLocationConfig {
242 0 : generation: get_generation(conf)?,
243 0 : attach_mode: AttachmentMode::Stale,
244 : })
245 : }
246 : models::LocationConfigMode::Secondary => {
247 0 : anyhow::ensure!(conf.generation.is_none());
248 :
249 0 : let warm = conf
250 0 : .secondary_conf
251 0 : .as_ref()
252 0 : .map(|c| c.warm)
253 0 : .unwrap_or(false);
254 0 : LocationMode::Secondary(SecondaryLocationConfig { warm })
255 : }
256 : models::LocationConfigMode::Detached => {
257 : // Should not have been called: API code should translate this mode
258 : // into a detach rather than trying to decode it as a LocationConf
259 0 : return Err(anyhow::anyhow!("Cannot decode a Detached configuration"));
260 : }
261 : };
262 :
263 0 : let shard = if conf.shard_count == 0 {
264 0 : ShardIdentity::unsharded()
265 : } else {
266 0 : ShardIdentity::new(
267 0 : ShardNumber(conf.shard_number),
268 0 : ShardCount::new(conf.shard_count),
269 0 : ShardStripeSize(conf.shard_stripe_size),
270 0 : )?
271 : };
272 :
273 0 : Ok(Self {
274 0 : shard,
275 0 : mode,
276 0 : tenant_conf,
277 0 : })
278 0 : }
279 : }
280 :
281 : impl Default for LocationConf {
282 : // TODO: this should be removed once tenant loading can guarantee that we are never
283 : // loading from a directory without a configuration.
284 : // => tech debt since https://github.com/neondatabase/neon/issues/1555
285 0 : fn default() -> Self {
286 0 : Self {
287 0 : mode: LocationMode::Attached(AttachedLocationConfig {
288 0 : generation: Generation::none(),
289 0 : attach_mode: AttachmentMode::Single,
290 0 : }),
291 0 : tenant_conf: TenantConfOpt::default(),
292 0 : shard: ShardIdentity::unsharded(),
293 0 : }
294 0 : }
295 : }
296 :
297 : /// A tenant's calculated configuration, which is the result of merging a
298 : /// tenant's TenantConfOpt with the global TenantConf from PageServerConf.
299 : ///
300 : /// For storing and transmitting an individual tenant's configuration, see
301 : /// TenantConfOpt.
302 0 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
303 : pub struct TenantConf {
304 : // Flush out an in-memory layer if it's holding WAL older than this.
305 : // This puts a backstop on how much WAL needs to be re-digested if the
306 : // page server crashes.
307 : // This parameter actually determines L0 layer file size.
308 : pub checkpoint_distance: u64,
309 : // The in-memory layer is also flushed at least once per checkpoint_timeout,
310 : // to eventually upload WAL after activity has stopped.
311 : #[serde(with = "humantime_serde")]
312 : pub checkpoint_timeout: Duration,
313 : // Target file size, when creating image and delta layers.
314 : // This parameter determines L1 layer file size.
315 : pub compaction_target_size: u64,
316 : // How often to check if there's compaction work to be done.
317 : // Duration::ZERO means automatic compaction is disabled.
318 : #[serde(with = "humantime_serde")]
319 : pub compaction_period: Duration,
320 : // Level0 delta layer threshold for compaction.
321 : pub compaction_threshold: usize,
322 : pub compaction_algorithm: CompactionAlgorithm,
323 : // Determines how much history is retained, to allow
324 : // branching and read replicas at an older point in time.
325 : // The unit is #of bytes of WAL.
326 : // Page versions older than this are garbage collected away.
327 : pub gc_horizon: u64,
328 : // Interval at which garbage collection is triggered.
329 : // Duration::ZERO means automatic GC is disabled
330 : #[serde(with = "humantime_serde")]
331 : pub gc_period: Duration,
332 : // Delta layer churn threshold to create L1 image layers.
333 : pub image_creation_threshold: usize,
334 : // Determines how much history is retained, to allow
335 : // branching and read replicas at an older point in time.
336 : // The unit is time.
337 : // Page versions older than this are garbage collected away.
338 : #[serde(with = "humantime_serde")]
339 : pub pitr_interval: Duration,
340 : /// Maximum amount of time to wait while opening a connection to receive wal, before erroring.
341 : #[serde(with = "humantime_serde")]
342 : pub walreceiver_connect_timeout: Duration,
343 : /// Consider a safekeeper stalled if no WAL updates have been received for longer than this threshold.
344 : /// A stalled safekeeper will be swapped for a more up-to-date one when one appears.
345 : #[serde(with = "humantime_serde")]
346 : pub lagging_wal_timeout: Duration,
347 : /// Consider a safekeeper lagging when its WAL is behind another safekeeper's by more than this threshold.
348 : /// A lagging safekeeper will only be changed after `lagging_wal_timeout` has elapsed since the last WAL update,
349 : /// to avoid eager reconnects.
350 : pub max_lsn_wal_lag: NonZeroU64,
351 : pub trace_read_requests: bool,
352 : pub eviction_policy: EvictionPolicy,
353 : pub min_resident_size_override: Option<u64>,
354 : // See the corresponding metric's help string.
355 : #[serde(with = "humantime_serde")]
356 : pub evictions_low_residence_duration_metric_threshold: Duration,
357 :
358 : /// If non-zero, the period between uploads of a heatmap from attached tenants. This
359 : /// may be disabled if a Tenant will not have secondary locations: only secondary
360 : /// locations will use the heatmap uploaded by attached locations.
361 : #[serde(with = "humantime_serde")]
362 : pub heatmap_period: Duration,
363 :
364 : /// If true, SLRU segments are downloaded on demand; if false, SLRU segments are included in the basebackup.
365 : pub lazy_slru_download: bool,
366 :
367 : pub timeline_get_throttle: pageserver_api::models::ThrottleConfig,
368 :
369 : // How much WAL must be ingested before checking again whether a new image layer is required.
370 : // Expressed in multiples of checkpoint distance.
371 : pub image_layer_creation_check_threshold: u8,
372 : }
373 :
374 : /// Same as TenantConf, but this struct preserves the information about
375 : /// which parameters are set and which are not.
376 230 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
377 : pub struct TenantConfOpt {
378 : #[serde(skip_serializing_if = "Option::is_none")]
379 : #[serde(default)]
380 : pub checkpoint_distance: Option<u64>,
381 :
382 : #[serde(skip_serializing_if = "Option::is_none")]
383 : #[serde(with = "humantime_serde")]
384 : #[serde(default)]
385 : pub checkpoint_timeout: Option<Duration>,
386 :
387 : #[serde(skip_serializing_if = "Option::is_none")]
388 : #[serde(default)]
389 : pub compaction_target_size: Option<u64>,
390 :
391 : #[serde(skip_serializing_if = "Option::is_none")]
392 : #[serde(with = "humantime_serde")]
393 : #[serde(default)]
394 : pub compaction_period: Option<Duration>,
395 :
396 : #[serde(skip_serializing_if = "Option::is_none")]
397 : #[serde(default)]
398 : pub compaction_threshold: Option<usize>,
399 :
400 : #[serde(skip_serializing_if = "Option::is_none")]
401 : #[serde(default)]
402 : pub compaction_algorithm: Option<CompactionAlgorithm>,
403 :
404 : #[serde(skip_serializing_if = "Option::is_none")]
405 : #[serde(default)]
406 : pub gc_horizon: Option<u64>,
407 :
408 : #[serde(skip_serializing_if = "Option::is_none")]
409 : #[serde(with = "humantime_serde")]
410 : #[serde(default)]
411 : pub gc_period: Option<Duration>,
412 :
413 : #[serde(skip_serializing_if = "Option::is_none")]
414 : #[serde(default)]
415 : pub image_creation_threshold: Option<usize>,
416 :
417 : #[serde(skip_serializing_if = "Option::is_none")]
418 : #[serde(with = "humantime_serde")]
419 : #[serde(default)]
420 : pub pitr_interval: Option<Duration>,
421 :
422 : #[serde(skip_serializing_if = "Option::is_none")]
423 : #[serde(with = "humantime_serde")]
424 : #[serde(default)]
425 : pub walreceiver_connect_timeout: Option<Duration>,
426 :
427 : #[serde(skip_serializing_if = "Option::is_none")]
428 : #[serde(with = "humantime_serde")]
429 : #[serde(default)]
430 : pub lagging_wal_timeout: Option<Duration>,
431 :
432 : #[serde(skip_serializing_if = "Option::is_none")]
433 : #[serde(default)]
434 : pub max_lsn_wal_lag: Option<NonZeroU64>,
435 :
436 : #[serde(skip_serializing_if = "Option::is_none")]
437 : #[serde(default)]
438 : pub trace_read_requests: Option<bool>,
439 :
440 : #[serde(skip_serializing_if = "Option::is_none")]
441 : #[serde(default)]
442 : pub eviction_policy: Option<EvictionPolicy>,
443 :
444 : #[serde(skip_serializing_if = "Option::is_none")]
445 : #[serde(default)]
446 : pub min_resident_size_override: Option<u64>,
447 :
448 : #[serde(skip_serializing_if = "Option::is_none")]
449 : #[serde(with = "humantime_serde")]
450 : #[serde(default)]
451 : pub evictions_low_residence_duration_metric_threshold: Option<Duration>,
452 :
453 : #[serde(skip_serializing_if = "Option::is_none")]
454 : #[serde(with = "humantime_serde")]
455 : #[serde(default)]
456 : pub heatmap_period: Option<Duration>,
457 :
458 : #[serde(skip_serializing_if = "Option::is_none")]
459 : #[serde(default)]
460 : pub lazy_slru_download: Option<bool>,
461 :
462 : #[serde(skip_serializing_if = "Option::is_none")]
463 : pub timeline_get_throttle: Option<pageserver_api::models::ThrottleConfig>,
464 :
465 : #[serde(skip_serializing_if = "Option::is_none")]
466 : pub image_layer_creation_check_threshold: Option<u8>,
467 : }
468 :
469 : impl TenantConfOpt {
470 18 : pub fn merge(&self, global_conf: TenantConf) -> TenantConf {
471 18 : TenantConf {
472 18 : checkpoint_distance: self
473 18 : .checkpoint_distance
474 18 : .unwrap_or(global_conf.checkpoint_distance),
475 18 : checkpoint_timeout: self
476 18 : .checkpoint_timeout
477 18 : .unwrap_or(global_conf.checkpoint_timeout),
478 18 : compaction_target_size: self
479 18 : .compaction_target_size
480 18 : .unwrap_or(global_conf.compaction_target_size),
481 18 : compaction_period: self
482 18 : .compaction_period
483 18 : .unwrap_or(global_conf.compaction_period),
484 18 : compaction_threshold: self
485 18 : .compaction_threshold
486 18 : .unwrap_or(global_conf.compaction_threshold),
487 18 : compaction_algorithm: self
488 18 : .compaction_algorithm
489 18 : .unwrap_or(global_conf.compaction_algorithm),
490 18 : gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon),
491 18 : gc_period: self.gc_period.unwrap_or(global_conf.gc_period),
492 18 : image_creation_threshold: self
493 18 : .image_creation_threshold
494 18 : .unwrap_or(global_conf.image_creation_threshold),
495 18 : pitr_interval: self.pitr_interval.unwrap_or(global_conf.pitr_interval),
496 18 : walreceiver_connect_timeout: self
497 18 : .walreceiver_connect_timeout
498 18 : .unwrap_or(global_conf.walreceiver_connect_timeout),
499 18 : lagging_wal_timeout: self
500 18 : .lagging_wal_timeout
501 18 : .unwrap_or(global_conf.lagging_wal_timeout),
502 18 : max_lsn_wal_lag: self.max_lsn_wal_lag.unwrap_or(global_conf.max_lsn_wal_lag),
503 18 : trace_read_requests: self
504 18 : .trace_read_requests
505 18 : .unwrap_or(global_conf.trace_read_requests),
506 18 : eviction_policy: self.eviction_policy.unwrap_or(global_conf.eviction_policy),
507 18 : min_resident_size_override: self
508 18 : .min_resident_size_override
509 18 : .or(global_conf.min_resident_size_override),
510 18 : evictions_low_residence_duration_metric_threshold: self
511 18 : .evictions_low_residence_duration_metric_threshold
512 18 : .unwrap_or(global_conf.evictions_low_residence_duration_metric_threshold),
513 18 : heatmap_period: self.heatmap_period.unwrap_or(global_conf.heatmap_period),
514 18 : lazy_slru_download: self
515 18 : .lazy_slru_download
516 18 : .unwrap_or(global_conf.lazy_slru_download),
517 18 : timeline_get_throttle: self
518 18 : .timeline_get_throttle
519 18 : .clone()
520 18 : .unwrap_or(global_conf.timeline_get_throttle),
521 18 : image_layer_creation_check_threshold: self
522 18 : .image_layer_creation_check_threshold
523 18 : .unwrap_or(global_conf.image_layer_creation_check_threshold),
524 18 : }
525 18 : }
526 : }
527 :
528 : impl Default for TenantConf {
529 264 : fn default() -> Self {
530 264 : use defaults::*;
531 264 : Self {
532 264 : checkpoint_distance: DEFAULT_CHECKPOINT_DISTANCE,
533 264 : checkpoint_timeout: humantime::parse_duration(DEFAULT_CHECKPOINT_TIMEOUT)
534 264 : .expect("cannot parse default checkpoint timeout"),
535 264 : compaction_target_size: DEFAULT_COMPACTION_TARGET_SIZE,
536 264 : compaction_period: humantime::parse_duration(DEFAULT_COMPACTION_PERIOD)
537 264 : .expect("cannot parse default compaction period"),
538 264 : compaction_threshold: DEFAULT_COMPACTION_THRESHOLD,
539 264 : compaction_algorithm: DEFAULT_COMPACTION_ALGORITHM,
540 264 : gc_horizon: DEFAULT_GC_HORIZON,
541 264 : gc_period: humantime::parse_duration(DEFAULT_GC_PERIOD)
542 264 : .expect("cannot parse default gc period"),
543 264 : image_creation_threshold: DEFAULT_IMAGE_CREATION_THRESHOLD,
544 264 : pitr_interval: humantime::parse_duration(DEFAULT_PITR_INTERVAL)
545 264 : .expect("cannot parse default PITR interval"),
546 264 : walreceiver_connect_timeout: humantime::parse_duration(
547 264 : DEFAULT_WALRECEIVER_CONNECT_TIMEOUT,
548 264 : )
549 264 : .expect("cannot parse default walreceiver connect timeout"),
550 264 : lagging_wal_timeout: humantime::parse_duration(DEFAULT_WALRECEIVER_LAGGING_WAL_TIMEOUT)
551 264 : .expect("cannot parse default walreceiver lagging wal timeout"),
552 264 : max_lsn_wal_lag: NonZeroU64::new(DEFAULT_MAX_WALRECEIVER_LSN_WAL_LAG)
553 264 : .expect("cannot parse default max walreceiver Lsn wal lag"),
554 264 : trace_read_requests: false,
555 264 : eviction_policy: EvictionPolicy::NoEviction,
556 264 : min_resident_size_override: None,
557 264 : evictions_low_residence_duration_metric_threshold: humantime::parse_duration(
558 264 : DEFAULT_EVICTIONS_LOW_RESIDENCE_DURATION_METRIC_THRESHOLD,
559 264 : )
560 264 : .expect("cannot parse default evictions_low_residence_duration_metric_threshold"),
561 264 : heatmap_period: Duration::ZERO,
562 264 : lazy_slru_download: false,
563 264 : timeline_get_throttle: crate::tenant::throttle::Config::disabled(),
564 264 : image_layer_creation_check_threshold: DEFAULT_IMAGE_LAYER_CREATION_CHECK_THRESHOLD,
565 264 : }
566 264 : }
567 : }
568 :
569 : impl TryFrom<&'_ models::TenantConfig> for TenantConfOpt {
570 : type Error = anyhow::Error;
571 :
572 4 : fn try_from(request_data: &'_ models::TenantConfig) -> Result<Self, Self::Error> {
573 : // Convert the request_data to a JSON Value
574 4 : let json_value: Value = serde_json::to_value(request_data)?;
575 :
576 : // Create a Deserializer from the JSON Value
577 4 : let deserializer = json_value.into_deserializer();
578 :
579 : // Use serde_path_to_error to deserialize the JSON Value into TenantConfOpt
580 4 : let tenant_conf: TenantConfOpt = serde_path_to_error::deserialize(deserializer)?;
581 :
582 2 : Ok(tenant_conf)
583 4 : }
584 : }
585 :
586 : impl TryFrom<toml_edit::Item> for TenantConfOpt {
587 : type Error = anyhow::Error;
588 :
589 10 : fn try_from(item: toml_edit::Item) -> Result<Self, Self::Error> {
590 10 : match item {
591 2 : toml_edit::Item::Value(value) => {
592 2 : let d = value.into_deserializer();
593 2 : return serde_path_to_error::deserialize(d)
594 2 : .map_err(|e| anyhow::anyhow!("{}: {}", e.path(), e.inner().message()));
595 : }
596 8 : toml_edit::Item::Table(table) => {
597 8 : let deserializer = toml_edit::de::Deserializer::new(table.into());
598 8 : return serde_path_to_error::deserialize(deserializer)
599 8 : .map_err(|e| anyhow::anyhow!("{}: {}", e.path(), e.inner().message()));
600 : }
601 : _ => {
602 0 : bail!("expected non-inline table but found {item}")
603 : }
604 : }
605 10 : }
606 : }
607 :
608 : /// This is a conversion from our internal tenant config object to the one used
609 : /// in external APIs.
610 : impl From<TenantConfOpt> for models::TenantConfig {
611 0 : fn from(value: TenantConfOpt) -> Self {
612 0 : fn humantime(d: Duration) -> String {
613 0 : format!("{}s", d.as_secs())
614 0 : }
615 0 : Self {
616 0 : checkpoint_distance: value.checkpoint_distance,
617 0 : checkpoint_timeout: value.checkpoint_timeout.map(humantime),
618 0 : compaction_algorithm: value.compaction_algorithm,
619 0 : compaction_target_size: value.compaction_target_size,
620 0 : compaction_period: value.compaction_period.map(humantime),
621 0 : compaction_threshold: value.compaction_threshold,
622 0 : gc_horizon: value.gc_horizon,
623 0 : gc_period: value.gc_period.map(humantime),
624 0 : image_creation_threshold: value.image_creation_threshold,
625 0 : pitr_interval: value.pitr_interval.map(humantime),
626 0 : walreceiver_connect_timeout: value.walreceiver_connect_timeout.map(humantime),
627 0 : lagging_wal_timeout: value.lagging_wal_timeout.map(humantime),
628 0 : max_lsn_wal_lag: value.max_lsn_wal_lag,
629 0 : trace_read_requests: value.trace_read_requests,
630 0 : eviction_policy: value.eviction_policy,
631 0 : min_resident_size_override: value.min_resident_size_override,
632 0 : evictions_low_residence_duration_metric_threshold: value
633 0 : .evictions_low_residence_duration_metric_threshold
634 0 : .map(humantime),
635 0 : heatmap_period: value.heatmap_period.map(humantime),
636 0 : lazy_slru_download: value.lazy_slru_download,
637 0 : timeline_get_throttle: value.timeline_get_throttle.map(ThrottleConfig::from),
638 0 : image_layer_creation_check_threshold: value.image_layer_creation_check_threshold,
639 0 : }
640 0 : }
641 : }
642 :
643 : #[cfg(test)]
644 : mod tests {
645 : use super::*;
646 : use models::TenantConfig;
647 :
648 : #[test]
649 2 : fn de_serializing_pageserver_config_omits_empty_values() {
650 2 : let small_conf = TenantConfOpt {
651 2 : gc_horizon: Some(42),
652 2 : ..TenantConfOpt::default()
653 2 : };
654 2 :
655 2 : let toml_form = toml_edit::ser::to_string(&small_conf).unwrap();
656 2 : assert_eq!(toml_form, "gc_horizon = 42\n");
657 2 : assert_eq!(small_conf, toml_edit::de::from_str(&toml_form).unwrap());
658 :
659 2 : let json_form = serde_json::to_string(&small_conf).unwrap();
660 2 : assert_eq!(json_form, "{\"gc_horizon\":42}");
661 2 : assert_eq!(small_conf, serde_json::from_str(&json_form).unwrap());
662 2 : }
663 :
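    // A sketch added for illustration (not part of the original suite): `merge`
    // should prefer per-tenant overrides and fall back to the global `TenantConf`
    // for any field that is unset. The override value is illustrative.
    #[test]
    fn merge_prefers_tenant_overrides_over_global_defaults() {
        let global_conf = TenantConf::default();
        let tenant_conf = TenantConfOpt {
            gc_horizon: Some(1024),
            ..TenantConfOpt::default()
        };

        let merged = tenant_conf.merge(global_conf.clone());

        // The overridden field comes from the tenant-specific config...
        assert_eq!(merged.gc_horizon, 1024);
        // ...while unset fields fall back to the global configuration.
        assert_eq!(
            merged.compaction_threshold,
            global_conf.compaction_threshold
        );
    }
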
664 : #[test]
665 2 : fn test_try_from_models_tenant_config_err() {
666 2 : let tenant_config = models::TenantConfig {
667 2 : lagging_wal_timeout: Some("5a".to_string()),
668 2 : ..TenantConfig::default()
669 2 : };
670 2 :
671 2 : let tenant_conf_opt = TenantConfOpt::try_from(&tenant_config);
672 2 :
673 2 : assert!(
674 2 : tenant_conf_opt.is_err(),
675 0 : "Suceeded to convert TenantConfig to TenantConfOpt"
676 : );
677 :
678 2 : let expected_error_str =
679 2 : "lagging_wal_timeout: invalid value: string \"5a\", expected a duration";
680 2 : assert_eq!(tenant_conf_opt.unwrap_err().to_string(), expected_error_str);
681 2 : }
682 :
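    // A sketch added for illustration (not part of the original suite): the
    // `From<TenantConfOpt>` conversion into the external `models::TenantConfig`
    // renders set durations as whole-second humantime strings and leaves unset
    // fields as `None`. The duration value is illustrative.
    #[test]
    fn tenant_conf_opt_into_models_config_formats_durations() {
        let conf = TenantConfOpt {
            checkpoint_timeout: Some(Duration::from_secs(600)),
            ..TenantConfOpt::default()
        };

        let api_conf: TenantConfig = conf.into();

        assert_eq!(api_conf.checkpoint_timeout.as_deref(), Some("600s"));
        assert_eq!(api_conf.gc_period, None);
    }
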
683 : #[test]
684 2 : fn test_try_from_models_tenant_config_success() {
685 2 : let tenant_config = models::TenantConfig {
686 2 : lagging_wal_timeout: Some("5s".to_string()),
687 2 : ..TenantConfig::default()
688 2 : };
689 2 :
690 2 : let tenant_conf_opt = TenantConfOpt::try_from(&tenant_config).unwrap();
691 2 :
692 2 : assert_eq!(
693 2 : tenant_conf_opt.lagging_wal_timeout,
694 2 : Some(Duration::from_secs(5))
695 2 : );
696 2 : }
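
    // A sketch added for illustration (not part of the original suite): the
    // advisory hints follow the policies documented on `AttachmentMode`: only
    // `Single` may delete layers, and only `Stale` is advised to stop uploading.
    #[test]
    fn attachment_mode_hints_follow_documented_policies() {
        let conf = |attach_mode| AttachedLocationConfig {
            generation: Generation::none(),
            attach_mode,
        };

        assert!(conf(AttachmentMode::Single).may_delete_layers_hint());
        assert!(conf(AttachmentMode::Single).may_upload_layers_hint());

        assert!(!conf(AttachmentMode::Multi).may_delete_layers_hint());
        assert!(conf(AttachmentMode::Multi).may_upload_layers_hint());

        assert!(!conf(AttachmentMode::Stale).may_delete_layers_hint());
        assert!(!conf(AttachmentMode::Stale).may_upload_layers_hint());
    }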
697 : }