Line data Source code
1 : pub mod detach_ancestor;
2 : pub mod partitioning;
3 : pub mod utilization;
4 :
5 : use core::ops::Range;
6 : use std::collections::HashMap;
7 : use std::fmt::Display;
8 : use std::num::{NonZeroU32, NonZeroU64, NonZeroUsize};
9 : use std::str::FromStr;
10 : use std::time::{Duration, SystemTime};
11 :
12 : #[cfg(feature = "testing")]
13 : use camino::Utf8PathBuf;
14 : use postgres_versioninfo::PgMajorVersion;
15 : use serde::{Deserialize, Deserializer, Serialize, Serializer};
16 : use serde_with::serde_as;
17 : pub use utilization::PageserverUtilization;
18 : use utils::id::{NodeId, TenantId, TimelineId};
19 : use utils::lsn::Lsn;
20 : use utils::{completion, serde_system_time};
21 :
22 : use crate::config::Ratio;
23 : use crate::key::{CompactKey, Key};
24 : use crate::shard::{
25 : DEFAULT_STRIPE_SIZE, ShardCount, ShardIdentity, ShardStripeSize, TenantShardId,
26 : };
27 :
28 : /// The state of a tenant in this pageserver.
29 : ///
30 : /// ```mermaid
31 : /// stateDiagram-v2
32 : ///
33 : /// [*] --> Attaching: spawn_attach()
34 : ///
35 : /// Attaching --> Activating: activate()
36 : /// Activating --> Active: infallible
37 : ///
38 : /// Attaching --> Broken: attach() failure
39 : ///
40 : /// Active --> Stopping: set_stopping(), part of shutdown & detach
41 : /// Stopping --> Broken: late error in remove_tenant_from_memory
42 : ///
43 : /// Broken --> [*]: ignore / detach / shutdown
44 : /// Stopping --> [*]: remove_from_memory complete
45 : ///
46 : /// Active --> Broken: cfg(testing)-only tenant break point
47 : /// ```
48 : #[derive(
49 : Clone,
50 : PartialEq,
51 : Eq,
52 0 : serde::Serialize,
53 0 : serde::Deserialize,
54 : strum_macros::Display,
55 : strum_macros::VariantNames,
56 : strum_macros::AsRefStr,
57 : strum_macros::IntoStaticStr,
58 : )]
59 : #[serde(tag = "slug", content = "data")]
60 : pub enum TenantState {
61 : /// This tenant is being attached to the pageserver.
62 : ///
63 : /// `set_stopping()` and `set_broken()` do not work in this state and wait for it to pass.
64 : Attaching,
65 : /// The tenant is transitioning from Loading/Attaching to Active.
66 : ///
67 : /// While in this state, the individual timelines are being activated.
68 : ///
69 : /// `set_stopping()` and `set_broken()` do not work in this state and wait for it to pass.
70 : Activating(ActivatingFrom),
71 : /// The tenant has finished activating and is open for business.
72 : ///
73 : /// Transitions out of this state are possible through `set_stopping()` and `set_broken()`.
74 : Active,
75 : /// The tenant is recognized by pageserver, but it is being detached or the
76 : /// system is being shut down.
77 : ///
78 : /// Transitions out of this state are possible through `set_broken()`.
79 : Stopping {
80 : /// The barrier can be used to wait for shutdown to complete. The first caller to set
81 : /// Some(Barrier) is responsible for driving shutdown to completion. Subsequent callers
82 : /// will wait for the first caller's existing barrier.
83 : ///
84 :     ///     None is set when an attach is cancelled, to signal to shutdown that the attach has in
85 :     ///     fact been cancelled:
86 : ///
87 : /// 1. `shutdown` sees `TenantState::Attaching`, and cancels the tenant.
88 : /// 2. `attach` sets `TenantState::Stopping(None)` and exits.
89 : /// 3. `set_stopping` waits for `TenantState::Stopping(None)` and sets
90 : /// `TenantState::Stopping(Some)` to claim the barrier as the shutdown owner.
91 : //
92 : // Because of https://github.com/serde-rs/serde/issues/2105 this has to be a named field,
93 : // otherwise it will not be skipped during deserialization
94 : #[serde(skip)]
95 : progress: Option<completion::Barrier>,
96 : },
97 : /// The tenant is recognized by the pageserver, but can no longer be used for
98 : /// any operations.
99 : ///
100 : /// If the tenant fails to load or attach, it will transition to this state
101 : /// and it is guaranteed that no background tasks are running in its name.
102 : ///
103 : /// The other way to transition into this state is from `Stopping` state
104 : /// through `set_broken()` called from `remove_tenant_from_memory()`. That happens
105 : /// if the cleanup future executed by `remove_tenant_from_memory()` fails.
106 : Broken { reason: String, backtrace: String },
107 : }
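// The serde attributes above use adjacent tagging, so the external representation keeps the
// variant name under "slug" and any payload under "data". A minimal sketch of the resulting
// JSON, assuming serde_json (as used in the tests further below):
//
//     // unit variants carry only the tag:
//     //   {"slug": "Attaching"}
//     // struct variants carry their fields under "data":
//     //   {"slug": "Broken", "data": {"reason": "...", "backtrace": "..."}}
//     let v = serde_json::to_value(&TenantState::Attaching).unwrap();
//     assert_eq!(v, serde_json::json!({"slug": "Attaching"}));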
108 :
109 : impl TenantState {
110 0 : pub fn attachment_status(&self) -> TenantAttachmentStatus {
111 : use TenantAttachmentStatus::*;
112 :
113 :         // Below, TenantState::Activating is treated as a "transient" or "transparent" state
114 :         // when determining the attachment_status.
115 0 : match self {
116 : // The attach procedure writes the marker file before adding the Attaching tenant to the tenants map.
117 : // So, technically, we can return Attached here.
118 : // However, as soon as Console observes Attached, it will proceed with the Postgres-level health check.
119 : // But, our attach task might still be fetching the remote timelines, etc.
120 : // So, return `Maybe` while Attaching, making Console wait for the attach task to finish.
121 0 : Self::Attaching | Self::Activating(ActivatingFrom::Attaching) => Maybe,
122 : // We only reach Active after successful load / attach.
123 :             // So, call the attachment status Attached.
124 0 : Self::Active => Attached,
125 : // If the (initial or resumed) attach procedure fails, the tenant becomes Broken.
126 : // However, it also becomes Broken if the regular load fails.
127 : // From Console's perspective there's no practical difference
128 : // because attachment_status is polled by console only during attach operation execution.
129 0 : Self::Broken { reason, .. } => Failed {
130 0 : reason: reason.to_owned(),
131 0 : },
132 : // Why is Stopping a Maybe case? Because, during pageserver shutdown,
133 : // we set the Stopping state irrespective of whether the tenant
134 : // has finished attaching or not.
135 0 : Self::Stopping { .. } => Maybe,
136 : }
137 0 : }
138 :
139 0 : pub fn broken_from_reason(reason: String) -> Self {
140 0 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
141 0 : Self::Broken {
142 0 : reason,
143 0 : backtrace: backtrace_str,
144 0 : }
145 0 : }
146 : }
147 :
148 : impl std::fmt::Debug for TenantState {
149 2 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
150 2 : match self {
151 2 : Self::Broken { reason, backtrace } if !reason.is_empty() => {
152 2 : write!(f, "Broken due to: {reason}. Backtrace:\n{backtrace}")
153 : }
154 0 : _ => write!(f, "{self}"),
155 : }
156 2 : }
157 : }
158 :
159 : /// A temporary lease to a specific lsn inside a timeline.
160 : /// Access to the lsn is guaranteed by the pageserver until the expiration indicated by `valid_until`.
161 : #[serde_as]
162 : #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
163 : pub struct LsnLease {
164 : #[serde_as(as = "SystemTimeAsRfc3339Millis")]
165 : pub valid_until: SystemTime,
166 : }
167 :
168 : serde_with::serde_conv!(
169 : SystemTimeAsRfc3339Millis,
170 : SystemTime,
171 0 : |time: &SystemTime| humantime::format_rfc3339_millis(*time).to_string(),
172 0 : |value: String| -> Result<_, humantime::TimestampError> { humantime::parse_rfc3339(&value) }
173 : );
174 :
175 : impl LsnLease {
176 : /// The default length for an explicit LSN lease request (10 minutes).
177 : pub const DEFAULT_LENGTH: Duration = Duration::from_secs(10 * 60);
178 :
179 : /// The default length for an implicit LSN lease granted during
180 :     /// `get_lsn_by_timestamp` request (1 minute).
181 : pub const DEFAULT_LENGTH_FOR_TS: Duration = Duration::from_secs(60);
182 :
183 : /// Checks whether the lease is expired.
184 3 : pub fn is_expired(&self, now: &SystemTime) -> bool {
185 3 : now > &self.valid_until
186 3 : }
187 : }
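// A small expiry sketch (timestamps are illustrative):
//
//     let now = SystemTime::now();
//     let lease = LsnLease { valid_until: now + LsnLease::DEFAULT_LENGTH };
//     assert!(!lease.is_expired(&now));
//     assert!(lease.is_expired(&(now + Duration::from_secs(11 * 60))));
//
// On the wire, `valid_until` is rendered as RFC 3339 with millisecond precision by the
// serde_conv above, e.g. `{"valid_until": "2025-01-01T00:00:00.000Z"}`.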
188 :
189 : /// Controls the detach ancestor behavior.
190 : /// - When set to `NoAncestorAndReparent`, we will only detach a branch if its ancestor is a root branch. It will automatically reparent any children of the ancestor before and at the branch point.
191 : /// - When set to `MultiLevelAndNoReparent`, we will detach a branch from multiple levels of ancestors, and no reparenting will happen at all.
192 : #[derive(Debug, Clone, Copy, Default)]
193 : pub enum DetachBehavior {
194 : #[default]
195 : NoAncestorAndReparent,
196 : MultiLevelAndNoReparent,
197 : }
198 :
199 : impl std::str::FromStr for DetachBehavior {
200 : type Err = &'static str;
201 :
202 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
203 0 : match s {
204 0 : "no_ancestor_and_reparent" => Ok(DetachBehavior::NoAncestorAndReparent),
205 0 : "multi_level_and_no_reparent" => Ok(DetachBehavior::MultiLevelAndNoReparent),
206 0 : "v1" => Ok(DetachBehavior::NoAncestorAndReparent),
207 0 : "v2" => Ok(DetachBehavior::MultiLevelAndNoReparent),
208 0 : _ => Err("cannot parse detach behavior"),
209 : }
210 0 : }
211 : }
212 :
213 : impl std::fmt::Display for DetachBehavior {
214 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
215 0 : match self {
216 0 : DetachBehavior::NoAncestorAndReparent => write!(f, "no_ancestor_and_reparent"),
217 0 : DetachBehavior::MultiLevelAndNoReparent => write!(f, "multi_level_and_no_reparent"),
218 : }
219 0 : }
220 : }
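// Parsing accepts both the long spellings and the legacy "v1"/"v2" aliases, while Display
// always emits the long spelling; a small round-trip sketch:
//
//     let behavior: DetachBehavior = "v2".parse().unwrap();
//     assert!(matches!(behavior, DetachBehavior::MultiLevelAndNoReparent));
//     assert_eq!(behavior.to_string(), "multi_level_and_no_reparent");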
221 :
222 : /// The only [`TenantState`] variants we could be `TenantState::Activating` from.
223 : ///
224 : /// XXX: We used to have more variants here, but now it's just one, which makes this rather
225 : /// useless. Remove, once we've checked that there's no client code left that looks at this.
226 0 : #[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
227 : pub enum ActivatingFrom {
228 : /// Arrived to [`TenantState::Activating`] from [`TenantState::Attaching`]
229 : Attaching,
230 : }
231 :
232 : /// A state of a timeline in pageserver's memory.
233 0 : #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
234 : pub enum TimelineState {
235 : /// The timeline is recognized by the pageserver but is not yet operational.
236 : /// In particular, the walreceiver connection loop is not running for this timeline.
237 : /// It will eventually transition to state Active or Broken.
238 : Loading,
239 : /// The timeline is fully operational.
240 : /// It can be queried, and the walreceiver connection loop is running.
241 : Active,
242 : /// The timeline was previously Loading or Active but is shutting down.
243 : /// It cannot transition back into any other state.
244 : Stopping,
245 : /// The timeline is broken and not operational (previous states: Loading or Active).
246 : Broken { reason: String, backtrace: String },
247 : }
248 :
249 : #[serde_with::serde_as]
250 0 : #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
251 : pub struct CompactLsnRange {
252 : pub start: Lsn,
253 : pub end: Lsn,
254 : }
255 :
256 : #[serde_with::serde_as]
257 : #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
258 : pub struct CompactKeyRange {
259 : #[serde_as(as = "serde_with::DisplayFromStr")]
260 : pub start: Key,
261 : #[serde_as(as = "serde_with::DisplayFromStr")]
262 : pub end: Key,
263 : }
264 :
265 : impl From<Range<Lsn>> for CompactLsnRange {
266 3 : fn from(range: Range<Lsn>) -> Self {
267 3 : Self {
268 3 : start: range.start,
269 3 : end: range.end,
270 3 : }
271 3 : }
272 : }
273 :
274 : impl From<Range<Key>> for CompactKeyRange {
275 8 : fn from(range: Range<Key>) -> Self {
276 8 : Self {
277 8 : start: range.start,
278 8 : end: range.end,
279 8 : }
280 8 : }
281 : }
282 :
283 : impl From<CompactLsnRange> for Range<Lsn> {
284 5 : fn from(range: CompactLsnRange) -> Self {
285 5 : range.start..range.end
286 5 : }
287 : }
288 :
289 : impl From<CompactKeyRange> for Range<Key> {
290 8 : fn from(range: CompactKeyRange) -> Self {
291 8 : range.start..range.end
292 8 : }
293 : }
294 :
295 : impl CompactLsnRange {
296 2 : pub fn above(lsn: Lsn) -> Self {
297 2 : Self {
298 2 : start: lsn,
299 2 : end: Lsn::MAX,
300 2 : }
301 2 : }
302 : }
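// These compact range types convert to and from standard `Range`s; a sketch of building an
// open-ended LSN range for a manual compaction request (the LSN value is illustrative):
//
//     let compact_range = CompactLsnRange::above(Lsn(0x10_0000));
//     let as_range: Range<Lsn> = compact_range.into();
//     assert_eq!(as_range.end, Lsn::MAX);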
303 :
304 : #[derive(Debug, Clone, Serialize)]
305 : pub struct CompactInfoResponse {
306 : pub compact_key_range: Option<CompactKeyRange>,
307 : pub compact_lsn_range: Option<CompactLsnRange>,
308 : pub sub_compaction: bool,
309 : pub running: bool,
310 : pub job_id: usize,
311 : }
312 :
313 0 : #[derive(Serialize, Deserialize, Clone)]
314 : pub struct TimelineCreateRequest {
315 : pub new_timeline_id: TimelineId,
316 : #[serde(flatten)]
317 : pub mode: TimelineCreateRequestMode,
318 : }
319 :
320 : impl TimelineCreateRequest {
321 0 : pub fn mode_tag(&self) -> &'static str {
322 0 : match &self.mode {
323 0 : TimelineCreateRequestMode::Branch { .. } => "branch",
324 0 : TimelineCreateRequestMode::ImportPgdata { .. } => "import",
325 0 : TimelineCreateRequestMode::Bootstrap { .. } => "bootstrap",
326 : }
327 0 : }
328 :
329 0 : pub fn is_import(&self) -> bool {
330 0 : matches!(self.mode, TimelineCreateRequestMode::ImportPgdata { .. })
331 0 : }
332 : }
333 :
334 0 : #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
335 : pub enum ShardImportStatus {
336 : InProgress(Option<ShardImportProgress>),
337 : Done,
338 : Error(String),
339 : }
340 :
341 0 : #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
342 : pub enum ShardImportProgress {
343 : V1(ShardImportProgressV1),
344 : }
345 :
346 0 : #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
347 : pub struct ShardImportProgressV1 {
348 : /// Total number of jobs in the import plan
349 : pub jobs: usize,
350 : /// Number of jobs completed
351 : pub completed: usize,
352 : /// Hash of the plan
353 : pub import_plan_hash: u64,
354 : /// Soft limit for the job size
355 : /// This needs to remain constant throughout the import
356 : pub job_soft_size_limit: usize,
357 : }
358 :
359 : impl ShardImportStatus {
360 0 : pub fn is_terminal(&self) -> bool {
361 0 : match self {
362 0 : ShardImportStatus::InProgress(_) => false,
363 0 : ShardImportStatus::Done | ShardImportStatus::Error(_) => true,
364 : }
365 0 : }
366 : }
367 :
368 : /// Storage controller specific extensions to [`TimelineInfo`].
369 0 : #[derive(Serialize, Deserialize, Clone)]
370 : pub struct TimelineCreateResponseStorcon {
371 : #[serde(flatten)]
372 : pub timeline_info: TimelineInfo,
373 :
374 : pub safekeepers: Option<SafekeepersInfo>,
375 : }
376 :
377 : /// Safekeepers as returned in timeline creation request to storcon or pushed to
378 : /// cplane in the post migration hook.
379 0 : #[derive(Serialize, Deserialize, Clone)]
380 : pub struct SafekeepersInfo {
381 : pub tenant_id: TenantId,
382 : pub timeline_id: TimelineId,
383 : pub generation: u32,
384 : pub safekeepers: Vec<SafekeeperInfo>,
385 : }
386 :
387 0 : #[derive(Serialize, Deserialize, Clone)]
388 : pub struct SafekeeperInfo {
389 : pub id: NodeId,
390 : pub hostname: String,
391 : }
392 :
393 0 : #[derive(Serialize, Deserialize, Clone)]
394 : #[serde(untagged)]
395 : pub enum TimelineCreateRequestMode {
396 : Branch {
397 : ancestor_timeline_id: TimelineId,
398 : #[serde(default)]
399 : ancestor_start_lsn: Option<Lsn>,
400 :         // TODO: cplane sets this, but the branching code always
401 :         // inherits the ancestor's pg_version. Earlier code wasn't
402 :         // using a flattened enum, so this was an accepted field, and
403 :         // we continue to accept it by having it here.
404 : pg_version: Option<PgMajorVersion>,
405 : #[serde(default, skip_serializing_if = "std::ops::Not::not")]
406 : read_only: bool,
407 : },
408 : ImportPgdata {
409 : import_pgdata: TimelineCreateRequestModeImportPgdata,
410 : },
411 :     // NB: Bootstrap is all-optional, so with serde(untagged) it acts as the catch-all variant
412 :     // (serde picks the first matching enum variant, in declaration order), and must stay declared last.
413 : Bootstrap {
414 : #[serde(default)]
415 : existing_initdb_timeline_id: Option<TimelineId>,
416 : pg_version: Option<PgMajorVersion>,
417 : },
418 : }
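// Because the mode enum is untagged and flattened into the request, the shape of the JSON
// body selects the variant. A sketch, assuming serde_json and made-up timeline ids:
//
//     let req: TimelineCreateRequest = serde_json::from_value(serde_json::json!({
//         "new_timeline_id": "11223344556677881122334455667788",
//         "ancestor_timeline_id": "99aabbccddeeff0099aabbccddeeff00",
//         "pg_version": null,
//     }))
//     .unwrap();
//     assert_eq!(req.mode_tag(), "branch");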
419 :
420 0 : #[derive(Serialize, Deserialize, Clone)]
421 : pub struct TimelineCreateRequestModeImportPgdata {
422 : pub location: ImportPgdataLocation,
423 : pub idempotency_key: ImportPgdataIdempotencyKey,
424 : }
425 :
426 0 : #[derive(Serialize, Deserialize, Clone, Debug)]
427 : pub enum ImportPgdataLocation {
428 : #[cfg(feature = "testing")]
429 : LocalFs { path: Utf8PathBuf },
430 : AwsS3 {
431 : region: String,
432 : bucket: String,
433 : /// A better name for this would be `prefix`; changing requires coordination with cplane.
434 : /// See <https://github.com/neondatabase/cloud/issues/20646>.
435 : key: String,
436 : },
437 : }
438 :
439 : #[derive(Serialize, Deserialize, Clone)]
440 : #[serde(transparent)]
441 : pub struct ImportPgdataIdempotencyKey(pub String);
442 :
443 : impl ImportPgdataIdempotencyKey {
444 0 : pub fn random() -> Self {
445 : use rand::Rng;
446 : use rand::distributions::Alphanumeric;
447 0 : Self(
448 0 : rand::thread_rng()
449 0 : .sample_iter(&Alphanumeric)
450 0 : .take(20)
451 0 : .map(char::from)
452 0 : .collect(),
453 0 : )
454 0 : }
455 : }
456 :
457 0 : #[derive(Serialize, Deserialize, Clone)]
458 : pub struct LsnLeaseRequest {
459 : pub lsn: Lsn,
460 : }
461 :
462 0 : #[derive(Serialize, Deserialize)]
463 : pub struct TenantShardSplitRequest {
464 : pub new_shard_count: u8,
465 :
466 :     // A tenant's stripe size is only meaningful the first time its shard count goes
467 : // above 1: therefore during a split from 1->N shards, we may modify the stripe size.
468 : //
469 :     // If this is set while the shard count is being increased from an already >1 value,
470 : // then the request will fail with 400.
471 : pub new_stripe_size: Option<ShardStripeSize>,
472 : }
473 :
474 0 : #[derive(Serialize, Deserialize)]
475 : pub struct TenantShardSplitResponse {
476 : pub new_shards: Vec<TenantShardId>,
477 : }
478 :
479 : /// Parameters that apply to all shards in a tenant. Used during tenant creation.
480 0 : #[derive(Clone, Copy, Serialize, Deserialize, Debug)]
481 : #[serde(deny_unknown_fields)]
482 : pub struct ShardParameters {
483 : pub count: ShardCount,
484 : pub stripe_size: ShardStripeSize,
485 : }
486 :
487 : impl ShardParameters {
488 0 : pub fn is_unsharded(&self) -> bool {
489 0 : self.count.is_unsharded()
490 0 : }
491 : }
492 :
493 : impl Default for ShardParameters {
494 119 : fn default() -> Self {
495 119 : Self {
496 119 : count: ShardCount::new(0),
497 119 : stripe_size: DEFAULT_STRIPE_SIZE,
498 119 : }
499 119 : }
500 : }
501 :
502 : impl From<ShardIdentity> for ShardParameters {
503 0 : fn from(identity: ShardIdentity) -> Self {
504 0 : Self {
505 0 : count: identity.count,
506 0 : stripe_size: identity.stripe_size,
507 0 : }
508 0 : }
509 : }
510 :
511 : #[derive(Debug, Default, Clone, Eq, PartialEq)]
512 : pub enum FieldPatch<T> {
513 : Upsert(T),
514 : Remove,
515 : #[default]
516 : Noop,
517 : }
518 :
519 : impl<T> FieldPatch<T> {
520 76 : fn is_noop(&self) -> bool {
521 76 : matches!(self, FieldPatch::Noop)
522 76 : }
523 :
524 38 : pub fn apply(self, target: &mut Option<T>) {
525 38 : match self {
526 1 : Self::Upsert(v) => *target = Some(v),
527 1 : Self::Remove => *target = None,
528 36 : Self::Noop => {}
529 : }
530 38 : }
531 :
532 10 : pub fn map<U, E, F: FnOnce(T) -> Result<U, E>>(self, map: F) -> Result<FieldPatch<U>, E> {
533 10 : match self {
534 0 : Self::Upsert(v) => Ok(FieldPatch::<U>::Upsert(map(v)?)),
535 0 : Self::Remove => Ok(FieldPatch::<U>::Remove),
536 10 : Self::Noop => Ok(FieldPatch::<U>::Noop),
537 : }
538 10 : }
539 : }
540 :
541 : impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
542 2 : fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
543 2 : where
544 2 : D: Deserializer<'de>,
545 : {
546 2 : Option::deserialize(deserializer).map(|opt| match opt {
547 1 : None => FieldPatch::Remove,
548 1 : Some(val) => FieldPatch::Upsert(val),
549 2 : })
550 2 : }
551 : }
552 :
553 : impl<T: Serialize> Serialize for FieldPatch<T> {
554 2 : fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
555 2 : where
556 2 : S: Serializer,
557 : {
558 2 : match self {
559 1 : FieldPatch::Upsert(val) => serializer.serialize_some(val),
560 1 : FieldPatch::Remove => serializer.serialize_none(),
561 0 : FieldPatch::Noop => unreachable!(),
562 : }
563 2 : }
564 : }
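// Taken together, the impls above give `FieldPatch` tri-state semantics in JSON: an absent
// field is Noop, an explicit `null` is Remove, and any other value is Upsert. A sketch of
// applying each case to an existing optional setting:
//
//     let mut current: Option<u64> = Some(256 * 1024 * 1024);
//     FieldPatch::Noop.apply(&mut current);             // field omitted: unchanged
//     FieldPatch::Upsert(1024u64).apply(&mut current);  // field present: overwritten
//     FieldPatch::<u64>::Remove.apply(&mut current);    // field was null: cleared
//     assert_eq!(current, None);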
565 :
566 0 : #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
567 : #[serde(default)]
568 : pub struct TenantConfigPatch {
569 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
570 : pub checkpoint_distance: FieldPatch<u64>,
571 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
572 : pub checkpoint_timeout: FieldPatch<String>,
573 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
574 : pub compaction_target_size: FieldPatch<u64>,
575 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
576 : pub compaction_period: FieldPatch<String>,
577 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
578 : pub compaction_threshold: FieldPatch<usize>,
579 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
580 : pub compaction_upper_limit: FieldPatch<usize>,
581 : // defer parsing compaction_algorithm, like eviction_policy
582 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
583 : pub compaction_algorithm: FieldPatch<CompactionAlgorithmSettings>,
584 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
585 : pub compaction_shard_ancestor: FieldPatch<bool>,
586 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
587 : pub compaction_l0_first: FieldPatch<bool>,
588 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
589 : pub compaction_l0_semaphore: FieldPatch<bool>,
590 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
591 : pub l0_flush_delay_threshold: FieldPatch<usize>,
592 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
593 : pub l0_flush_stall_threshold: FieldPatch<usize>,
594 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
595 : pub gc_horizon: FieldPatch<u64>,
596 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
597 : pub gc_period: FieldPatch<String>,
598 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
599 : pub image_creation_threshold: FieldPatch<usize>,
600 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
601 : pub pitr_interval: FieldPatch<String>,
602 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
603 : pub walreceiver_connect_timeout: FieldPatch<String>,
604 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
605 : pub lagging_wal_timeout: FieldPatch<String>,
606 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
607 : pub max_lsn_wal_lag: FieldPatch<NonZeroU64>,
608 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
609 : pub eviction_policy: FieldPatch<EvictionPolicy>,
610 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
611 : pub min_resident_size_override: FieldPatch<u64>,
612 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
613 : pub evictions_low_residence_duration_metric_threshold: FieldPatch<String>,
614 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
615 : pub heatmap_period: FieldPatch<String>,
616 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
617 : pub lazy_slru_download: FieldPatch<bool>,
618 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
619 : pub timeline_get_throttle: FieldPatch<ThrottleConfig>,
620 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
621 : pub image_layer_creation_check_threshold: FieldPatch<u8>,
622 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
623 : pub image_creation_preempt_threshold: FieldPatch<usize>,
624 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
625 : pub lsn_lease_length: FieldPatch<String>,
626 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
627 : pub lsn_lease_length_for_ts: FieldPatch<String>,
628 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
629 : pub timeline_offloading: FieldPatch<bool>,
630 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
631 : pub rel_size_v2_enabled: FieldPatch<bool>,
632 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
633 : pub gc_compaction_enabled: FieldPatch<bool>,
634 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
635 : pub gc_compaction_verification: FieldPatch<bool>,
636 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
637 : pub gc_compaction_initial_threshold_kb: FieldPatch<u64>,
638 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
639 : pub gc_compaction_ratio_percent: FieldPatch<u64>,
640 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
641 : pub sampling_ratio: FieldPatch<Option<Ratio>>,
642 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
643 : pub relsize_snapshot_cache_capacity: FieldPatch<usize>,
644 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
645 : pub basebackup_cache_enabled: FieldPatch<bool>,
646 : }
647 :
648 : /// Like [`crate::config::TenantConfigToml`], but preserves the information
649 : /// about which parameters are set and which are not.
650 : ///
651 : /// Used in many places, including durably stored ones.
652 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
653 : #[serde(default)] // this maps omitted fields in deserialization to None
654 : pub struct TenantConfig {
655 : #[serde(skip_serializing_if = "Option::is_none")]
656 : pub checkpoint_distance: Option<u64>,
657 :
658 : #[serde(skip_serializing_if = "Option::is_none")]
659 : #[serde(with = "humantime_serde")]
660 : pub checkpoint_timeout: Option<Duration>,
661 :
662 : #[serde(skip_serializing_if = "Option::is_none")]
663 : pub compaction_target_size: Option<u64>,
664 :
665 : #[serde(skip_serializing_if = "Option::is_none")]
666 : #[serde(with = "humantime_serde")]
667 : pub compaction_period: Option<Duration>,
668 :
669 : #[serde(skip_serializing_if = "Option::is_none")]
670 : pub compaction_threshold: Option<usize>,
671 :
672 : #[serde(skip_serializing_if = "Option::is_none")]
673 : pub compaction_upper_limit: Option<usize>,
674 :
675 : #[serde(skip_serializing_if = "Option::is_none")]
676 : pub compaction_algorithm: Option<CompactionAlgorithmSettings>,
677 :
678 : #[serde(skip_serializing_if = "Option::is_none")]
679 : pub compaction_shard_ancestor: Option<bool>,
680 :
681 : #[serde(skip_serializing_if = "Option::is_none")]
682 : pub compaction_l0_first: Option<bool>,
683 :
684 : #[serde(skip_serializing_if = "Option::is_none")]
685 : pub compaction_l0_semaphore: Option<bool>,
686 :
687 : #[serde(skip_serializing_if = "Option::is_none")]
688 : pub l0_flush_delay_threshold: Option<usize>,
689 :
690 : #[serde(skip_serializing_if = "Option::is_none")]
691 : pub l0_flush_stall_threshold: Option<usize>,
692 :
693 : #[serde(skip_serializing_if = "Option::is_none")]
694 : pub gc_horizon: Option<u64>,
695 :
696 : #[serde(skip_serializing_if = "Option::is_none")]
697 : #[serde(with = "humantime_serde")]
698 : pub gc_period: Option<Duration>,
699 :
700 : #[serde(skip_serializing_if = "Option::is_none")]
701 : pub image_creation_threshold: Option<usize>,
702 :
703 : #[serde(skip_serializing_if = "Option::is_none")]
704 : #[serde(with = "humantime_serde")]
705 : pub pitr_interval: Option<Duration>,
706 :
707 : #[serde(skip_serializing_if = "Option::is_none")]
708 : #[serde(with = "humantime_serde")]
709 : pub walreceiver_connect_timeout: Option<Duration>,
710 :
711 : #[serde(skip_serializing_if = "Option::is_none")]
712 : #[serde(with = "humantime_serde")]
713 : pub lagging_wal_timeout: Option<Duration>,
714 :
715 : #[serde(skip_serializing_if = "Option::is_none")]
716 : pub max_lsn_wal_lag: Option<NonZeroU64>,
717 :
718 : #[serde(skip_serializing_if = "Option::is_none")]
719 : pub eviction_policy: Option<EvictionPolicy>,
720 :
721 : #[serde(skip_serializing_if = "Option::is_none")]
722 : pub min_resident_size_override: Option<u64>,
723 :
724 : #[serde(skip_serializing_if = "Option::is_none")]
725 : #[serde(with = "humantime_serde")]
726 : pub evictions_low_residence_duration_metric_threshold: Option<Duration>,
727 :
728 : #[serde(skip_serializing_if = "Option::is_none")]
729 : #[serde(with = "humantime_serde")]
730 : pub heatmap_period: Option<Duration>,
731 :
732 : #[serde(skip_serializing_if = "Option::is_none")]
733 : pub lazy_slru_download: Option<bool>,
734 :
735 : #[serde(skip_serializing_if = "Option::is_none")]
736 : pub timeline_get_throttle: Option<ThrottleConfig>,
737 :
738 : #[serde(skip_serializing_if = "Option::is_none")]
739 : pub image_layer_creation_check_threshold: Option<u8>,
740 :
741 : #[serde(skip_serializing_if = "Option::is_none")]
742 : pub image_creation_preempt_threshold: Option<usize>,
743 :
744 : #[serde(skip_serializing_if = "Option::is_none")]
745 : #[serde(with = "humantime_serde")]
746 : pub lsn_lease_length: Option<Duration>,
747 :
748 : #[serde(skip_serializing_if = "Option::is_none")]
749 : #[serde(with = "humantime_serde")]
750 : pub lsn_lease_length_for_ts: Option<Duration>,
751 :
752 : #[serde(skip_serializing_if = "Option::is_none")]
753 : pub timeline_offloading: Option<bool>,
754 :
755 : #[serde(skip_serializing_if = "Option::is_none")]
756 : pub rel_size_v2_enabled: Option<bool>,
757 :
758 : #[serde(skip_serializing_if = "Option::is_none")]
759 : pub gc_compaction_enabled: Option<bool>,
760 :
761 : #[serde(skip_serializing_if = "Option::is_none")]
762 : pub gc_compaction_verification: Option<bool>,
763 :
764 : #[serde(skip_serializing_if = "Option::is_none")]
765 : pub gc_compaction_initial_threshold_kb: Option<u64>,
766 :
767 : #[serde(skip_serializing_if = "Option::is_none")]
768 : pub gc_compaction_ratio_percent: Option<u64>,
769 :
770 : #[serde(skip_serializing_if = "Option::is_none")]
771 : pub sampling_ratio: Option<Option<Ratio>>,
772 :
773 : #[serde(skip_serializing_if = "Option::is_none")]
774 : pub relsize_snapshot_cache_capacity: Option<usize>,
775 :
776 : #[serde(skip_serializing_if = "Option::is_none")]
777 : pub basebackup_cache_enabled: Option<bool>,
778 : }
779 :
780 : impl TenantConfig {
781 1 : pub fn apply_patch(
782 1 : self,
783 1 : patch: TenantConfigPatch,
784 1 : ) -> Result<TenantConfig, humantime::DurationError> {
785 : let Self {
786 1 : mut checkpoint_distance,
787 1 : mut checkpoint_timeout,
788 1 : mut compaction_target_size,
789 1 : mut compaction_period,
790 1 : mut compaction_threshold,
791 1 : mut compaction_upper_limit,
792 1 : mut compaction_algorithm,
793 1 : mut compaction_shard_ancestor,
794 1 : mut compaction_l0_first,
795 1 : mut compaction_l0_semaphore,
796 1 : mut l0_flush_delay_threshold,
797 1 : mut l0_flush_stall_threshold,
798 1 : mut gc_horizon,
799 1 : mut gc_period,
800 1 : mut image_creation_threshold,
801 1 : mut pitr_interval,
802 1 : mut walreceiver_connect_timeout,
803 1 : mut lagging_wal_timeout,
804 1 : mut max_lsn_wal_lag,
805 1 : mut eviction_policy,
806 1 : mut min_resident_size_override,
807 1 : mut evictions_low_residence_duration_metric_threshold,
808 1 : mut heatmap_period,
809 1 : mut lazy_slru_download,
810 1 : mut timeline_get_throttle,
811 1 : mut image_layer_creation_check_threshold,
812 1 : mut image_creation_preempt_threshold,
813 1 : mut lsn_lease_length,
814 1 : mut lsn_lease_length_for_ts,
815 1 : mut timeline_offloading,
816 1 : mut rel_size_v2_enabled,
817 1 : mut gc_compaction_enabled,
818 1 : mut gc_compaction_verification,
819 1 : mut gc_compaction_initial_threshold_kb,
820 1 : mut gc_compaction_ratio_percent,
821 1 : mut sampling_ratio,
822 1 : mut relsize_snapshot_cache_capacity,
823 1 : mut basebackup_cache_enabled,
824 1 : } = self;
825 :
826 1 : patch.checkpoint_distance.apply(&mut checkpoint_distance);
827 1 : patch
828 1 : .checkpoint_timeout
829 1 : .map(|v| humantime::parse_duration(&v))?
830 1 : .apply(&mut checkpoint_timeout);
831 1 : patch
832 1 : .compaction_target_size
833 1 : .apply(&mut compaction_target_size);
834 1 : patch
835 1 : .compaction_period
836 1 : .map(|v| humantime::parse_duration(&v))?
837 1 : .apply(&mut compaction_period);
838 1 : patch.compaction_threshold.apply(&mut compaction_threshold);
839 1 : patch
840 1 : .compaction_upper_limit
841 1 : .apply(&mut compaction_upper_limit);
842 1 : patch.compaction_algorithm.apply(&mut compaction_algorithm);
843 1 : patch
844 1 : .compaction_shard_ancestor
845 1 : .apply(&mut compaction_shard_ancestor);
846 1 : patch.compaction_l0_first.apply(&mut compaction_l0_first);
847 1 : patch
848 1 : .compaction_l0_semaphore
849 1 : .apply(&mut compaction_l0_semaphore);
850 1 : patch
851 1 : .l0_flush_delay_threshold
852 1 : .apply(&mut l0_flush_delay_threshold);
853 1 : patch
854 1 : .l0_flush_stall_threshold
855 1 : .apply(&mut l0_flush_stall_threshold);
856 1 : patch.gc_horizon.apply(&mut gc_horizon);
857 1 : patch
858 1 : .gc_period
859 1 : .map(|v| humantime::parse_duration(&v))?
860 1 : .apply(&mut gc_period);
861 1 : patch
862 1 : .image_creation_threshold
863 1 : .apply(&mut image_creation_threshold);
864 1 : patch
865 1 : .pitr_interval
866 1 : .map(|v| humantime::parse_duration(&v))?
867 1 : .apply(&mut pitr_interval);
868 1 : patch
869 1 : .walreceiver_connect_timeout
870 1 : .map(|v| humantime::parse_duration(&v))?
871 1 : .apply(&mut walreceiver_connect_timeout);
872 1 : patch
873 1 : .lagging_wal_timeout
874 1 : .map(|v| humantime::parse_duration(&v))?
875 1 : .apply(&mut lagging_wal_timeout);
876 1 : patch.max_lsn_wal_lag.apply(&mut max_lsn_wal_lag);
877 1 : patch.eviction_policy.apply(&mut eviction_policy);
878 1 : patch
879 1 : .min_resident_size_override
880 1 : .apply(&mut min_resident_size_override);
881 1 : patch
882 1 : .evictions_low_residence_duration_metric_threshold
883 1 : .map(|v| humantime::parse_duration(&v))?
884 1 : .apply(&mut evictions_low_residence_duration_metric_threshold);
885 1 : patch
886 1 : .heatmap_period
887 1 : .map(|v| humantime::parse_duration(&v))?
888 1 : .apply(&mut heatmap_period);
889 1 : patch.lazy_slru_download.apply(&mut lazy_slru_download);
890 1 : patch
891 1 : .timeline_get_throttle
892 1 : .apply(&mut timeline_get_throttle);
893 1 : patch
894 1 : .image_layer_creation_check_threshold
895 1 : .apply(&mut image_layer_creation_check_threshold);
896 1 : patch
897 1 : .image_creation_preempt_threshold
898 1 : .apply(&mut image_creation_preempt_threshold);
899 1 : patch
900 1 : .lsn_lease_length
901 1 : .map(|v| humantime::parse_duration(&v))?
902 1 : .apply(&mut lsn_lease_length);
903 1 : patch
904 1 : .lsn_lease_length_for_ts
905 1 : .map(|v| humantime::parse_duration(&v))?
906 1 : .apply(&mut lsn_lease_length_for_ts);
907 1 : patch.timeline_offloading.apply(&mut timeline_offloading);
908 1 : patch.rel_size_v2_enabled.apply(&mut rel_size_v2_enabled);
909 1 : patch
910 1 : .gc_compaction_enabled
911 1 : .apply(&mut gc_compaction_enabled);
912 1 : patch
913 1 : .gc_compaction_verification
914 1 : .apply(&mut gc_compaction_verification);
915 1 : patch
916 1 : .gc_compaction_initial_threshold_kb
917 1 : .apply(&mut gc_compaction_initial_threshold_kb);
918 1 : patch
919 1 : .gc_compaction_ratio_percent
920 1 : .apply(&mut gc_compaction_ratio_percent);
921 1 : patch.sampling_ratio.apply(&mut sampling_ratio);
922 1 : patch
923 1 : .relsize_snapshot_cache_capacity
924 1 : .apply(&mut relsize_snapshot_cache_capacity);
925 1 : patch
926 1 : .basebackup_cache_enabled
927 1 : .apply(&mut basebackup_cache_enabled);
928 :
929 1 : Ok(Self {
930 1 : checkpoint_distance,
931 1 : checkpoint_timeout,
932 1 : compaction_target_size,
933 1 : compaction_period,
934 1 : compaction_threshold,
935 1 : compaction_upper_limit,
936 1 : compaction_algorithm,
937 1 : compaction_shard_ancestor,
938 1 : compaction_l0_first,
939 1 : compaction_l0_semaphore,
940 1 : l0_flush_delay_threshold,
941 1 : l0_flush_stall_threshold,
942 1 : gc_horizon,
943 1 : gc_period,
944 1 : image_creation_threshold,
945 1 : pitr_interval,
946 1 : walreceiver_connect_timeout,
947 1 : lagging_wal_timeout,
948 1 : max_lsn_wal_lag,
949 1 : eviction_policy,
950 1 : min_resident_size_override,
951 1 : evictions_low_residence_duration_metric_threshold,
952 1 : heatmap_period,
953 1 : lazy_slru_download,
954 1 : timeline_get_throttle,
955 1 : image_layer_creation_check_threshold,
956 1 : image_creation_preempt_threshold,
957 1 : lsn_lease_length,
958 1 : lsn_lease_length_for_ts,
959 1 : timeline_offloading,
960 1 : rel_size_v2_enabled,
961 1 : gc_compaction_enabled,
962 1 : gc_compaction_verification,
963 1 : gc_compaction_initial_threshold_kb,
964 1 : gc_compaction_ratio_percent,
965 1 : sampling_ratio,
966 1 : relsize_snapshot_cache_capacity,
967 1 : basebackup_cache_enabled,
968 1 : })
969 1 : }
970 :
971 0 : pub fn merge(
972 0 : &self,
973 0 : global_conf: crate::config::TenantConfigToml,
974 0 : ) -> crate::config::TenantConfigToml {
975 0 : crate::config::TenantConfigToml {
976 0 : checkpoint_distance: self
977 0 : .checkpoint_distance
978 0 : .unwrap_or(global_conf.checkpoint_distance),
979 0 : checkpoint_timeout: self
980 0 : .checkpoint_timeout
981 0 : .unwrap_or(global_conf.checkpoint_timeout),
982 0 : compaction_target_size: self
983 0 : .compaction_target_size
984 0 : .unwrap_or(global_conf.compaction_target_size),
985 0 : compaction_period: self
986 0 : .compaction_period
987 0 : .unwrap_or(global_conf.compaction_period),
988 0 : compaction_threshold: self
989 0 : .compaction_threshold
990 0 : .unwrap_or(global_conf.compaction_threshold),
991 0 : compaction_upper_limit: self
992 0 : .compaction_upper_limit
993 0 : .unwrap_or(global_conf.compaction_upper_limit),
994 0 : compaction_algorithm: self
995 0 : .compaction_algorithm
996 0 : .as_ref()
997 0 : .unwrap_or(&global_conf.compaction_algorithm)
998 0 : .clone(),
999 0 : compaction_shard_ancestor: self
1000 0 : .compaction_shard_ancestor
1001 0 : .unwrap_or(global_conf.compaction_shard_ancestor),
1002 0 : compaction_l0_first: self
1003 0 : .compaction_l0_first
1004 0 : .unwrap_or(global_conf.compaction_l0_first),
1005 0 : compaction_l0_semaphore: self
1006 0 : .compaction_l0_semaphore
1007 0 : .unwrap_or(global_conf.compaction_l0_semaphore),
1008 0 : l0_flush_delay_threshold: self
1009 0 : .l0_flush_delay_threshold
1010 0 : .or(global_conf.l0_flush_delay_threshold),
1011 0 : l0_flush_stall_threshold: self
1012 0 : .l0_flush_stall_threshold
1013 0 : .or(global_conf.l0_flush_stall_threshold),
1014 0 : gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon),
1015 0 : gc_period: self.gc_period.unwrap_or(global_conf.gc_period),
1016 0 : image_creation_threshold: self
1017 0 : .image_creation_threshold
1018 0 : .unwrap_or(global_conf.image_creation_threshold),
1019 0 : pitr_interval: self.pitr_interval.unwrap_or(global_conf.pitr_interval),
1020 0 : walreceiver_connect_timeout: self
1021 0 : .walreceiver_connect_timeout
1022 0 : .unwrap_or(global_conf.walreceiver_connect_timeout),
1023 0 : lagging_wal_timeout: self
1024 0 : .lagging_wal_timeout
1025 0 : .unwrap_or(global_conf.lagging_wal_timeout),
1026 0 : max_lsn_wal_lag: self.max_lsn_wal_lag.unwrap_or(global_conf.max_lsn_wal_lag),
1027 0 : eviction_policy: self.eviction_policy.unwrap_or(global_conf.eviction_policy),
1028 0 : min_resident_size_override: self
1029 0 : .min_resident_size_override
1030 0 : .or(global_conf.min_resident_size_override),
1031 0 : evictions_low_residence_duration_metric_threshold: self
1032 0 : .evictions_low_residence_duration_metric_threshold
1033 0 : .unwrap_or(global_conf.evictions_low_residence_duration_metric_threshold),
1034 0 : heatmap_period: self.heatmap_period.unwrap_or(global_conf.heatmap_period),
1035 0 : lazy_slru_download: self
1036 0 : .lazy_slru_download
1037 0 : .unwrap_or(global_conf.lazy_slru_download),
1038 0 : timeline_get_throttle: self
1039 0 : .timeline_get_throttle
1040 0 : .clone()
1041 0 : .unwrap_or(global_conf.timeline_get_throttle),
1042 0 : image_layer_creation_check_threshold: self
1043 0 : .image_layer_creation_check_threshold
1044 0 : .unwrap_or(global_conf.image_layer_creation_check_threshold),
1045 0 : image_creation_preempt_threshold: self
1046 0 : .image_creation_preempt_threshold
1047 0 : .unwrap_or(global_conf.image_creation_preempt_threshold),
1048 0 : lsn_lease_length: self
1049 0 : .lsn_lease_length
1050 0 : .unwrap_or(global_conf.lsn_lease_length),
1051 0 : lsn_lease_length_for_ts: self
1052 0 : .lsn_lease_length_for_ts
1053 0 : .unwrap_or(global_conf.lsn_lease_length_for_ts),
1054 0 : timeline_offloading: self
1055 0 : .timeline_offloading
1056 0 : .unwrap_or(global_conf.timeline_offloading),
1057 0 : rel_size_v2_enabled: self
1058 0 : .rel_size_v2_enabled
1059 0 : .unwrap_or(global_conf.rel_size_v2_enabled),
1060 0 : gc_compaction_enabled: self
1061 0 : .gc_compaction_enabled
1062 0 : .unwrap_or(global_conf.gc_compaction_enabled),
1063 0 : gc_compaction_verification: self
1064 0 : .gc_compaction_verification
1065 0 : .unwrap_or(global_conf.gc_compaction_verification),
1066 0 : gc_compaction_initial_threshold_kb: self
1067 0 : .gc_compaction_initial_threshold_kb
1068 0 : .unwrap_or(global_conf.gc_compaction_initial_threshold_kb),
1069 0 : gc_compaction_ratio_percent: self
1070 0 : .gc_compaction_ratio_percent
1071 0 : .unwrap_or(global_conf.gc_compaction_ratio_percent),
1072 0 : sampling_ratio: self.sampling_ratio.unwrap_or(global_conf.sampling_ratio),
1073 0 : relsize_snapshot_cache_capacity: self
1074 0 : .relsize_snapshot_cache_capacity
1075 0 : .unwrap_or(global_conf.relsize_snapshot_cache_capacity),
1076 0 : basebackup_cache_enabled: self
1077 0 : .basebackup_cache_enabled
1078 0 : .unwrap_or(global_conf.basebackup_cache_enabled),
1079 0 : }
1080 0 : }
1081 : }
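// A minimal patching sketch, assuming serde_json: omitted fields stay as they are, `null`
// clears a per-tenant override (so `merge` falls back to the global default), and present
// values overwrite, with humantime strings parsed inside `apply_patch`:
//
//     let base = TenantConfig {
//         gc_horizon: Some(1024),
//         ..TenantConfig::default()
//     };
//     let patch: TenantConfigPatch = serde_json::from_value(serde_json::json!({
//         "gc_horizon": null,
//         "checkpoint_timeout": "10m",
//     }))
//     .unwrap();
//     let patched = base.apply_patch(patch).unwrap();
//     assert_eq!(patched.gc_horizon, None);
//     assert_eq!(patched.checkpoint_timeout, Some(Duration::from_secs(600)));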
1082 :
1083 : /// The policy for the aux file storage.
1084 : ///
1085 : /// It can be switched through `switch_aux_file_policy` tenant config.
1086 : /// When the first aux file is written, the policy will be persisted in the
1087 : /// `index_part.json` file and has a limited migration path.
1088 : ///
1089 : /// Currently, we only allow the following migration path:
1090 : ///
1091 : /// Unset -> V1
1092 : /// -> V2
1093 : /// -> CrossValidation -> V2
1094 : #[derive(
1095 : Eq,
1096 : PartialEq,
1097 : Debug,
1098 : Copy,
1099 : Clone,
1100 : strum_macros::EnumString,
1101 : strum_macros::Display,
1102 0 : serde_with::DeserializeFromStr,
1103 : serde_with::SerializeDisplay,
1104 : )]
1105 : #[strum(serialize_all = "kebab-case")]
1106 : pub enum AuxFilePolicy {
1107 : /// V1 aux file policy: store everything in AUX_FILE_KEY
1108 : #[strum(ascii_case_insensitive)]
1109 : V1,
1110 : /// V2 aux file policy: store in the AUX_FILE keyspace
1111 : #[strum(ascii_case_insensitive)]
1112 : V2,
1113 : /// Cross validation runs both formats on the write path and does validation
1114 : /// on the read path.
1115 : #[strum(ascii_case_insensitive)]
1116 : CrossValidation,
1117 : }
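// With the strum attributes above, values parse case-insensitively from their kebab-case
// names; a small sketch:
//
//     assert_eq!("v2".parse::<AuxFilePolicy>().unwrap(), AuxFilePolicy::V2);
//     assert_eq!(
//         "cross-validation".parse::<AuxFilePolicy>().unwrap(),
//         AuxFilePolicy::CrossValidation
//     );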
1118 :
1119 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
1120 : #[serde(tag = "kind")]
1121 : pub enum EvictionPolicy {
1122 : NoEviction,
1123 : LayerAccessThreshold(EvictionPolicyLayerAccessThreshold),
1124 : OnlyImitiate(EvictionPolicyLayerAccessThreshold),
1125 : }
1126 :
1127 : impl EvictionPolicy {
1128 0 : pub fn discriminant_str(&self) -> &'static str {
1129 0 : match self {
1130 0 : EvictionPolicy::NoEviction => "NoEviction",
1131 0 : EvictionPolicy::LayerAccessThreshold(_) => "LayerAccessThreshold",
1132 0 : EvictionPolicy::OnlyImitiate(_) => "OnlyImitiate",
1133 : }
1134 0 : }
1135 : }
1136 :
1137 : #[derive(
1138 : Eq,
1139 : PartialEq,
1140 : Debug,
1141 : Copy,
1142 : Clone,
1143 : strum_macros::EnumString,
1144 : strum_macros::Display,
1145 0 : serde_with::DeserializeFromStr,
1146 : serde_with::SerializeDisplay,
1147 : )]
1148 : #[strum(serialize_all = "kebab-case")]
1149 : pub enum CompactionAlgorithm {
1150 : Legacy,
1151 : Tiered,
1152 : }
1153 :
1154 : #[derive(
1155 0 : Debug, Clone, Copy, PartialEq, Eq, serde_with::DeserializeFromStr, serde_with::SerializeDisplay,
1156 : )]
1157 : pub enum ImageCompressionAlgorithm {
1158 : // Disabled for writes, support decompressing during read path
1159 :     // Disabled for writes; decompression is still supported on the read path
1160 :     /// Zstandard compression. Level 0 and None mean the same (the default level). Levels can be negative as well.
1161 : /// For details, see the [manual](http://facebook.github.io/zstd/zstd_manual.html).
1162 : Zstd {
1163 : level: Option<i8>,
1164 : },
1165 : }
1166 :
1167 : impl FromStr for ImageCompressionAlgorithm {
1168 : type Err = anyhow::Error;
1169 8 : fn from_str(s: &str) -> Result<Self, Self::Err> {
1170 8 : let mut components = s.split(['(', ')']);
1171 8 : let first = components
1172 8 : .next()
1173 8 : .ok_or_else(|| anyhow::anyhow!("empty string"))?;
1174 8 : match first {
1175 8 : "disabled" => Ok(ImageCompressionAlgorithm::Disabled),
1176 6 : "zstd" => {
1177 6 : let level = if let Some(v) = components.next() {
1178 4 : let v: i8 = v.parse()?;
1179 4 : Some(v)
1180 : } else {
1181 2 : None
1182 : };
1183 :
1184 6 : Ok(ImageCompressionAlgorithm::Zstd { level })
1185 : }
1186 0 : _ => anyhow::bail!("invalid specifier '{first}'"),
1187 : }
1188 8 : }
1189 : }
1190 :
1191 : impl Display for ImageCompressionAlgorithm {
1192 12 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1193 12 : match self {
1194 3 : ImageCompressionAlgorithm::Disabled => write!(f, "disabled"),
1195 9 : ImageCompressionAlgorithm::Zstd { level } => {
1196 9 : if let Some(level) = level {
1197 6 : write!(f, "zstd({level})")
1198 : } else {
1199 3 : write!(f, "zstd")
1200 : }
1201 : }
1202 : }
1203 12 : }
1204 : }
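// `FromStr` and `Display` round-trip; a small sketch of the accepted spellings:
//
//     assert_eq!("disabled".parse::<ImageCompressionAlgorithm>().unwrap().to_string(), "disabled");
//     assert_eq!("zstd".parse::<ImageCompressionAlgorithm>().unwrap().to_string(), "zstd");
//     assert_eq!("zstd(1)".parse::<ImageCompressionAlgorithm>().unwrap().to_string(), "zstd(1)");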
1205 :
1206 0 : #[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)]
1207 : pub struct CompactionAlgorithmSettings {
1208 : pub kind: CompactionAlgorithm,
1209 : }
1210 :
1211 0 : #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
1212 : #[serde(tag = "mode", rename_all = "kebab-case")]
1213 : pub enum L0FlushConfig {
1214 : #[serde(rename_all = "snake_case")]
1215 : Direct { max_concurrency: NonZeroUsize },
1216 : }
1217 :
1218 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
1219 : pub struct EvictionPolicyLayerAccessThreshold {
1220 : #[serde(with = "humantime_serde")]
1221 : pub period: Duration,
1222 : #[serde(with = "humantime_serde")]
1223 : pub threshold: Duration,
1224 : }
1225 :
1226 : #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
1227 : pub struct ThrottleConfig {
1228 : /// See [`ThrottleConfigTaskKinds`] for why we do the serde `rename`.
1229 : #[serde(rename = "task_kinds")]
1230 : pub enabled: ThrottleConfigTaskKinds,
1231 : pub initial: u32,
1232 : #[serde(with = "humantime_serde")]
1233 : pub refill_interval: Duration,
1234 : pub refill_amount: NonZeroU32,
1235 : pub max: u32,
1236 : }
1237 :
1238 : /// Before <https://github.com/neondatabase/neon/pull/9962>
1239 : /// the throttle was applied per `Timeline::get`/`Timeline::get_vectored` call.
1240 : /// The `task_kinds` field controlled which Pageserver "Task Kind"s
1241 : /// were subject to the throttle.
1242 : ///
1243 : /// After that PR, the throttle is applied at pagestream request level
1244 : /// and the `task_kinds` field does not apply since the only task kind
1245 : /// that is subject to the throttle is that of the page service.
1246 : ///
1247 : /// However, we don't want to make a breaking config change right now
1248 : /// because it means we have to migrate all the tenant configs.
1249 : /// This will be done in a future PR.
1250 : ///
1251 : /// In the meantime, we use emptiness / non-emptiness of the `task_kinds`
1252 : /// field to determine if the throttle is enabled or not.
1253 : #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
1254 : #[serde(transparent)]
1255 : pub struct ThrottleConfigTaskKinds(Vec<String>);
1256 :
1257 : impl ThrottleConfigTaskKinds {
1258 136 : pub fn disabled() -> Self {
1259 136 : Self(vec![])
1260 136 : }
1261 120 : pub fn is_enabled(&self) -> bool {
1262 120 : !self.0.is_empty()
1263 120 : }
1264 : }
1265 :
1266 : impl ThrottleConfig {
1267 136 : pub fn disabled() -> Self {
1268 136 : Self {
1269 136 : enabled: ThrottleConfigTaskKinds::disabled(),
1270 136 :             // other values don't matter with empty `task_kinds`.
1271 136 : initial: 0,
1272 136 : refill_interval: Duration::from_millis(1),
1273 136 : refill_amount: NonZeroU32::new(1).unwrap(),
1274 136 : max: 1,
1275 136 : }
1276 136 : }
1277 : /// The requests per second allowed by the given config.
1278 0 : pub fn steady_rps(&self) -> f64 {
1279 0 : (self.refill_amount.get() as f64) / (self.refill_interval.as_secs_f64())
1280 0 : }
1281 : }
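// steady_rps is refill_amount divided by refill_interval: for example, refilling 1000
// tokens every 50ms sustains roughly 20_000 requests per second. A sketch:
//
//     let config = ThrottleConfig {
//         refill_interval: Duration::from_millis(50),
//         refill_amount: NonZeroU32::new(1000).unwrap(),
//         ..ThrottleConfig::disabled()
//     };
//     assert!((config.steady_rps() - 20_000.0).abs() < 1e-9);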
1282 :
1283 : #[cfg(test)]
1284 : mod throttle_config_tests {
1285 : use super::*;
1286 :
1287 : #[test]
1288 1 : fn test_disabled_is_disabled() {
1289 1 : let config = ThrottleConfig::disabled();
1290 1 : assert!(!config.enabled.is_enabled());
1291 1 : }
1292 : #[test]
1293 1 : fn test_enabled_backwards_compat() {
1294 1 : let input = serde_json::json!({
1295 1 : "task_kinds": ["PageRequestHandler"],
1296 1 : "initial": 40000,
1297 1 : "refill_interval": "50ms",
1298 1 : "refill_amount": 1000,
1299 1 : "max": 40000,
1300 1 : "fair": true
1301 : });
1302 1 : let config: ThrottleConfig = serde_json::from_value(input).unwrap();
1303 1 : assert!(config.enabled.is_enabled());
1304 1 : }
1305 : }
1306 :
1307 : /// A flattened analog of a `pageserver::tenant::LocationMode`, which
1308 : /// lists out all possible states (and the virtual "Detached" state)
1309 : /// in a flat form rather than using rust-style enums.
1310 0 : #[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
1311 : pub enum LocationConfigMode {
1312 : AttachedSingle,
1313 : AttachedMulti,
1314 : AttachedStale,
1315 : Secondary,
1316 : Detached,
1317 : }
1318 :
1319 0 : #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
1320 : pub struct LocationConfigSecondary {
1321 : pub warm: bool,
1322 : }
1323 :
1324 : /// An alternative representation of `pageserver::tenant::LocationConf`,
1325 : /// for use in external-facing APIs.
1326 0 : #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
1327 : pub struct LocationConfig {
1328 : pub mode: LocationConfigMode,
1329 : /// If attaching, in what generation?
1330 : #[serde(default)]
1331 : pub generation: Option<u32>,
1332 :
1333 : // If requesting mode `Secondary`, configuration for that.
1334 : #[serde(default)]
1335 : pub secondary_conf: Option<LocationConfigSecondary>,
1336 :
1337 : // Shard parameters: if shard_count is nonzero, then other shard_* fields
1338 : // must be set accurately.
1339 : #[serde(default)]
1340 : pub shard_number: u8,
1341 : #[serde(default)]
1342 : pub shard_count: u8,
1343 : #[serde(default)]
1344 : pub shard_stripe_size: u32,
1345 :
1346 : // This configuration only affects attached mode, but should be provided irrespective
1347 : // of the mode, as a secondary location might transition on startup if the response
1348 : // to the `/re-attach` control plane API requests it.
1349 : pub tenant_conf: TenantConfig,
1350 : }
1351 :
1352 0 : #[derive(Serialize, Deserialize)]
1353 : pub struct LocationConfigListResponse {
1354 : pub tenant_shards: Vec<(TenantShardId, Option<LocationConfig>)>,
1355 : }
1356 :
1357 : #[derive(Serialize)]
1358 : pub struct StatusResponse {
1359 : pub id: NodeId,
1360 : }
1361 :
1362 0 : #[derive(Serialize, Deserialize, Debug)]
1363 : #[serde(deny_unknown_fields)]
1364 : pub struct TenantLocationConfigRequest {
1365 : #[serde(flatten)]
1366 : pub config: LocationConfig, // as we have a flattened field, we should reject all unknown fields in it
1367 : }
1368 :
1369 0 : #[derive(Serialize, Deserialize, Debug)]
1370 : #[serde(deny_unknown_fields)]
1371 : pub struct TenantTimeTravelRequest {
1372 : pub shard_counts: Vec<ShardCount>,
1373 : }
1374 :
1375 0 : #[derive(Serialize, Deserialize, Debug)]
1376 : #[serde(deny_unknown_fields)]
1377 : pub struct TenantShardLocation {
1378 : pub shard_id: TenantShardId,
1379 : pub node_id: NodeId,
1380 : }
1381 :
1382 0 : #[derive(Serialize, Deserialize, Debug)]
1383 : #[serde(deny_unknown_fields)]
1384 : pub struct TenantLocationConfigResponse {
1385 : pub shards: Vec<TenantShardLocation>,
1386 : // If the shards' ShardCount count is >1, stripe_size will be set.
1387 :     // If the shard count is >1, stripe_size will be set.
1388 : }
1389 :
1390 0 : #[derive(Serialize, Deserialize, Debug)]
1391 : #[serde(deny_unknown_fields)]
1392 : pub struct TenantConfigRequest {
1393 : pub tenant_id: TenantId,
1394 : #[serde(flatten)]
1395 : pub config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it
1396 : }
1397 :
1398 : impl std::ops::Deref for TenantConfigRequest {
1399 : type Target = TenantConfig;
1400 :
1401 0 : fn deref(&self) -> &Self::Target {
1402 0 : &self.config
1403 0 : }
1404 : }
1405 :
1406 : impl TenantConfigRequest {
1407 0 : pub fn new(tenant_id: TenantId) -> TenantConfigRequest {
1408 0 : let config = TenantConfig::default();
1409 0 : TenantConfigRequest { tenant_id, config }
1410 0 : }
1411 : }
1412 :
1413 0 : #[derive(Serialize, Deserialize, Debug)]
1414 : #[serde(deny_unknown_fields)]
1415 : pub struct TenantConfigPatchRequest {
1416 : pub tenant_id: TenantId,
1417 : #[serde(flatten)]
1418 : pub config: TenantConfigPatch, // as we have a flattened field, we should reject all unknown fields in it
1419 : }
1420 :
1421 0 : #[derive(Serialize, Deserialize, Debug)]
1422 : pub struct TenantWaitLsnRequest {
1423 : #[serde(flatten)]
1424 : pub timelines: HashMap<TimelineId, Lsn>,
1425 : pub timeout: Duration,
1426 : }
1427 :
1428 : /// See [`TenantState::attachment_status`] and the OpenAPI docs for context.
1429 0 : #[derive(Serialize, Deserialize, Clone)]
1430 : #[serde(tag = "slug", content = "data", rename_all = "snake_case")]
1431 : pub enum TenantAttachmentStatus {
1432 : Maybe,
1433 : Attached,
1434 : Failed { reason: String },
1435 : }
1436 :
1437 0 : #[derive(Serialize, Deserialize, Clone)]
1438 : pub struct TenantInfo {
1439 : pub id: TenantShardId,
1440 : // NB: intentionally not part of OpenAPI, we don't want to commit to a specific set of TenantState's
1441 : pub state: TenantState,
1442 : /// Sum of the size of all layer files.
1443 : /// If a layer is present in both local FS and S3, it counts only once.
1444 : pub current_physical_size: Option<u64>, // physical size is only included in `tenant_status` endpoint
1445 : pub attachment_status: TenantAttachmentStatus,
1446 : pub generation: u32,
1447 :
1448 : /// Opaque explanation if gc is being blocked.
1449 : ///
1450 : /// Only looked up for the individual tenant detail, not the listing.
1451 : #[serde(skip_serializing_if = "Option::is_none")]
1452 : pub gc_blocking: Option<String>,
1453 : }
1454 :
1455 0 : #[derive(Serialize, Deserialize, Clone)]
1456 : pub struct TenantDetails {
1457 : #[serde(flatten)]
1458 : pub tenant_info: TenantInfo,
1459 :
1460 : pub walredo: Option<WalRedoManagerStatus>,
1461 :
1462 : pub timelines: Vec<TimelineId>,
1463 : }
1464 :
1465 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Copy, Debug)]
1466 : pub enum TimelineArchivalState {
1467 : Archived,
1468 : Unarchived,
1469 : }
1470 :
1471 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
1472 : pub enum TimelineVisibilityState {
1473 : Visible,
1474 : Invisible,
1475 : }
1476 :
1477 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
1478 : pub struct TimelineArchivalConfigRequest {
1479 : pub state: TimelineArchivalState,
1480 : }
1481 :
1482 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
1483 : pub struct TimelinePatchIndexPartRequest {
1484 : pub rel_size_migration: Option<RelSizeMigration>,
1485 : pub gc_compaction_last_completed_lsn: Option<Lsn>,
1486 : pub applied_gc_cutoff_lsn: Option<Lsn>,
1487 : #[serde(default)]
1488 : pub force_index_update: bool,
1489 : }
1490 :
1491 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1492 : pub struct TimelinesInfoAndOffloaded {
1493 : pub timelines: Vec<TimelineInfo>,
1494 : pub offloaded: Vec<OffloadedTimelineInfo>,
1495 : }
1496 :
1497 : /// Analog of [`TimelineInfo`] for offloaded timelines.
1498 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1499 : pub struct OffloadedTimelineInfo {
1500 : pub tenant_id: TenantShardId,
1501 : pub timeline_id: TimelineId,
1502 : /// Whether the timeline has a parent it has been branched off from or not
1503 : pub ancestor_timeline_id: Option<TimelineId>,
1504 : /// Whether to retain the branch lsn at the ancestor or not
1505 : pub ancestor_retain_lsn: Option<Lsn>,
1506 : /// The time point when the timeline was archived
1507 : pub archived_at: chrono::DateTime<chrono::Utc>,
1508 : }
1509 :
1510 0 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
1511 : #[serde(rename_all = "camelCase")]
1512 : pub enum RelSizeMigration {
1513 : /// The tenant is using the old rel_size format.
1514 : /// Note that this enum is persisted as `Option<RelSizeMigration>` in the index part, so
1515 : /// `None` is the same as `Some(RelSizeMigration::Legacy)`.
1516 : Legacy,
1517 : /// The tenant is migrating to the new rel_size format. Both old and new rel_size format are
1518 : /// persisted in the index part. The read path will read both formats and merge them.
1519 : Migrating,
1520 : /// The tenant has migrated to the new rel_size format. Only the new rel_size format is persisted
1521 : /// in the index part, and the read path will not read the old format.
1522 : Migrated,
1523 : }
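// Illustrative sketch, not part of the original source: since a missing value in the
// index part means the same thing as `Legacy`, a reader of `Option<RelSizeMigration>`
// might normalize it like this (the helper name is hypothetical):
//
//     fn effective_rel_size_migration(v: Option<RelSizeMigration>) -> RelSizeMigration {
//         v.unwrap_or(RelSizeMigration::Legacy)
//     }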
1524 :
1525 : /// This represents the output of the "timeline_detail" and "timeline_list" API calls.
1526 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1527 : pub struct TimelineInfo {
1528 : pub tenant_id: TenantShardId,
1529 : pub timeline_id: TimelineId,
1530 :
1531 : pub ancestor_timeline_id: Option<TimelineId>,
1532 : pub ancestor_lsn: Option<Lsn>,
1533 : pub last_record_lsn: Lsn,
1534 : pub prev_record_lsn: Option<Lsn>,
1535 :
1536 : /// The LSN up to which GC has advanced: older data may still exist but it is not available for clients.
1537 : /// This LSN is not suitable for deciding where to create branches etc: use [`TimelineInfo::min_readable_lsn`] instead,
1538 : /// as it is easier to reason about.
1539 : #[serde(default)]
1540 : pub applied_gc_cutoff_lsn: Lsn,
1541 :
1542 : /// The upper bound of data which is either already GC'ed, or eligible to be GC'ed at any time based on the PITR interval.
1543 : /// This LSN represents the "end of history" for this timeline, and callers should use it to figure out the oldest
1544 : /// LSN at which it is legal to create a branch or ephemeral endpoint.
1545 : ///
1546 : /// Note that holders of valid LSN leases may be able to create branches and read pages earlier
1547 : /// than this LSN, but new leases may not be taken out earlier than this LSN.
1548 : #[serde(default)]
1549 : pub min_readable_lsn: Lsn,
1550 :
1551 : pub disk_consistent_lsn: Lsn,
1552 :
1553 : /// The LSN that we have successfully uploaded to remote storage
1554 : pub remote_consistent_lsn: Lsn,
1555 :
1556 : /// The LSN that we are advertising to safekeepers
1557 : pub remote_consistent_lsn_visible: Lsn,
1558 :
1559 : /// The LSN from the start of the root timeline (never changes)
1560 : pub initdb_lsn: Lsn,
1561 :
1562 : pub current_logical_size: u64,
1563 : pub current_logical_size_is_accurate: bool,
1564 :
1565 : pub directory_entries_counts: Vec<u64>,
1566 :
1567 : /// Sum of the size of all layer files.
1568 : /// If a layer is present in both local FS and S3, it counts only once.
1569 : pub current_physical_size: Option<u64>, // is None when timeline is Unloaded
1570 : pub current_logical_size_non_incremental: Option<u64>,
1571 :
1572 : /// How many bytes of WAL are within this branch's pitr_interval. If the pitr_interval goes
1573 : /// beyond the branch's branch point, we only count up to the branch point.
1574 : pub pitr_history_size: u64,
1575 :
1576 : /// Whether this branch's branch point is within its ancestor's PITR interval (i.e. any
1577 : /// ancestor data used by this branch would have been retained anyway). If this is false, then
1578 : /// this branch may be imposing a cost on the ancestor by causing it to retain layers that it would
1579 : /// otherwise be able to GC.
1580 : pub within_ancestor_pitr: bool,
1581 :
1582 : pub timeline_dir_layer_file_size_sum: Option<u64>,
1583 :
1584 : pub wal_source_connstr: Option<String>,
1585 : pub last_received_msg_lsn: Option<Lsn>,
1586 : /// The timestamp (in microseconds) of the last received message
1587 : pub last_received_msg_ts: Option<u128>,
1588 : pub pg_version: PgMajorVersion,
1589 :
1590 : pub state: TimelineState,
1591 :
1592 : pub walreceiver_status: String,
1593 :
1594 : // ALWAYS add new fields at the end of the struct with `Option` to ensure forward/backward compatibility.
1595 : // Backward compatibility: you may receive a JSON that does not contain the newly-added field.
1596 : // Forward compatibility: a previous version of the pageserver may receive a JSON that does contain it.
1597 : // serde::Deserialize does not deny unknown fields by default, so it is safe to set the field to some
1598 : // value even though an older version will simply not read it.
1599 : /// Whether the timeline is archived.
1600 : pub is_archived: Option<bool>,
1601 :
1602 : /// The status of the rel_size migration.
1603 : pub rel_size_migration: Option<RelSizeMigration>,
1604 :
1605 : /// Whether the timeline is invisible in synthetic size calculations.
1606 : pub is_invisible: Option<bool>,
1607 : }
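// Illustrative only, not part of the original source: the compatibility rule above
// relies on the trailing fields being Options. JSON produced by an older pageserver
// that lacks `is_archived`, `rel_size_migration` and `is_invisible` still deserializes,
// with those fields ending up as None; conversely, an older deserializer simply ignores
// the extra keys, because unknown fields are not denied on this struct.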
1608 :
1609 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1610 : pub struct LayerMapInfo {
1611 : pub in_memory_layers: Vec<InMemoryLayerInfo>,
1612 : pub historic_layers: Vec<HistoricLayerInfo>,
1613 : }
1614 :
1615 : /// The residence status of a layer
1616 0 : #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
1617 : pub enum LayerResidenceStatus {
1618 : /// Residence status for a layer file that exists locally.
1619 : /// It may also exist on the remote, we don't care here.
1620 : Resident,
1621 : /// Residence status for a layer file that only exists on the remote.
1622 : Evicted,
1623 : }
1624 :
1625 : #[serde_as]
1626 : #[derive(Debug, Clone, Serialize, Deserialize)]
1627 : pub struct LayerAccessStats {
1628 : #[serde_as(as = "serde_with::TimestampMilliSeconds")]
1629 : pub access_time: SystemTime,
1630 :
1631 : #[serde_as(as = "serde_with::TimestampMilliSeconds")]
1632 : pub residence_time: SystemTime,
1633 :
1634 : pub visible: bool,
1635 : }
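// Illustrative only, not part of the original source: with the serde_with adapters
// above, both timestamps travel as integer milliseconds since the Unix epoch, e.g.
//
//     {"access_time": 1700000000000, "residence_time": 1700000000000, "visible": true}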
1636 :
1637 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1638 : #[serde(tag = "kind")]
1639 : pub enum InMemoryLayerInfo {
1640 : Open { lsn_start: Lsn },
1641 : Frozen { lsn_start: Lsn, lsn_end: Lsn },
1642 : }
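// Illustrative only, not part of the original source: internal tagging via "kind"
// means the variant name travels inside the object, roughly
//
//     {"kind": "Open",   "lsn_start": ...}
//     {"kind": "Frozen", "lsn_start": ..., "lsn_end": ...}
//
// (the exact LSN encoding is whatever `utils::lsn::Lsn`'s serde impl produces).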
1643 :
1644 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1645 : #[serde(tag = "kind")]
1646 : pub enum HistoricLayerInfo {
1647 : Delta {
1648 : layer_file_name: String,
1649 : layer_file_size: u64,
1650 :
1651 : lsn_start: Lsn,
1652 : lsn_end: Lsn,
1653 : remote: bool,
1654 : access_stats: LayerAccessStats,
1655 :
1656 : l0: bool,
1657 : },
1658 : Image {
1659 : layer_file_name: String,
1660 : layer_file_size: u64,
1661 :
1662 : lsn_start: Lsn,
1663 : remote: bool,
1664 : access_stats: LayerAccessStats,
1665 : },
1666 : }
1667 :
1668 : impl HistoricLayerInfo {
1669 0 : pub fn layer_file_name(&self) -> &str {
1670 0 : match self {
1671 : HistoricLayerInfo::Delta {
1672 0 : layer_file_name, ..
1673 0 : } => layer_file_name,
1674 : HistoricLayerInfo::Image {
1675 0 : layer_file_name, ..
1676 0 : } => layer_file_name,
1677 : }
1678 0 : }
1679 0 : pub fn is_remote(&self) -> bool {
1680 0 : match self {
1681 0 : HistoricLayerInfo::Delta { remote, .. } => *remote,
1682 0 : HistoricLayerInfo::Image { remote, .. } => *remote,
1683 : }
1684 0 : }
1685 0 : pub fn set_remote(&mut self, value: bool) {
1686 0 : let field = match self {
1687 0 : HistoricLayerInfo::Delta { remote, .. } => remote,
1688 0 : HistoricLayerInfo::Image { remote, .. } => remote,
1689 : };
1690 0 : *field = value;
1691 0 : }
1692 0 : pub fn layer_file_size(&self) -> u64 {
1693 0 : match self {
1694 : HistoricLayerInfo::Delta {
1695 0 : layer_file_size, ..
1696 0 : } => *layer_file_size,
1697 : HistoricLayerInfo::Image {
1698 0 : layer_file_size, ..
1699 0 : } => *layer_file_size,
1700 : }
1701 0 : }
1702 : }
1703 :
1704 0 : #[derive(Debug, Serialize, Deserialize)]
1705 : pub struct DownloadRemoteLayersTaskSpawnRequest {
1706 : pub max_concurrent_downloads: NonZeroUsize,
1707 : }
1708 :
1709 0 : #[derive(Debug, Serialize, Deserialize)]
1710 : pub struct IngestAuxFilesRequest {
1711 : pub aux_files: HashMap<String, String>,
1712 : }
1713 :
1714 0 : #[derive(Debug, Serialize, Deserialize)]
1715 : pub struct ListAuxFilesRequest {
1716 : pub lsn: Lsn,
1717 : }
1718 :
1719 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1720 : pub struct DownloadRemoteLayersTaskInfo {
1721 : pub task_id: String,
1722 : pub state: DownloadRemoteLayersTaskState,
1723 : pub total_layer_count: u64, // stable once `completed`
1724 : pub successful_download_count: u64, // stable once `completed`
1725 : pub failed_download_count: u64, // stable once `completed`
1726 : }
1727 :
1728 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1729 : pub enum DownloadRemoteLayersTaskState {
1730 : Running,
1731 : Completed,
1732 : ShutDown,
1733 : }
1734 :
1735 0 : #[derive(Debug, Serialize, Deserialize)]
1736 : pub struct TimelineGcRequest {
1737 : pub gc_horizon: Option<u64>,
1738 : }
1739 :
1740 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1741 : pub struct WalRedoManagerProcessStatus {
1742 : pub pid: u32,
1743 : }
1744 :
1745 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1746 : pub struct WalRedoManagerStatus {
1747 : pub last_redo_at: Option<chrono::DateTime<chrono::Utc>>,
1748 : pub process: Option<WalRedoManagerProcessStatus>,
1749 : }
1750 :
1751 : /// The progress of a secondary tenant.
1752 : ///
1753 : /// It is mostly useful for long-running downloads: e.g. after initiating a download
1754 : /// job and timing out while waiting for it to complete, this status can be inspected
1755 : /// to understand what is happening.
1756 0 : #[derive(Default, Debug, Serialize, Deserialize, Clone)]
1757 : pub struct SecondaryProgress {
1758 : /// The remote storage LastModified time of the heatmap object we last downloaded.
1759 : pub heatmap_mtime: Option<serde_system_time::SystemTime>,
1760 :
1761 : /// The number of layers currently on-disk
1762 : pub layers_downloaded: usize,
1763 : /// The number of layers in the most recently seen heatmap
1764 : pub layers_total: usize,
1765 :
1766 : /// The number of layer bytes currently on-disk
1767 : pub bytes_downloaded: u64,
1768 : /// The number of layer bytes in the most recently seen heatmap
1769 : pub bytes_total: u64,
1770 : }
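// Illustrative sketch, not part of the original source: a caller polling this status
// during a long download might derive a rough completion ratio from the byte counters,
// guarding against a zero total before the first heatmap has been seen
// (`download_progress_ratio` is a hypothetical helper):
//
//     fn download_progress_ratio(p: &SecondaryProgress) -> f64 {
//         if p.bytes_total == 0 {
//             0.0
//         } else {
//             p.bytes_downloaded as f64 / p.bytes_total as f64
//         }
//     }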
1771 :
1772 0 : #[derive(Serialize, Deserialize, Debug)]
1773 : pub struct TenantScanRemoteStorageShard {
1774 : pub tenant_shard_id: TenantShardId,
1775 : pub generation: Option<u32>,
1776 : pub stripe_size: Option<ShardStripeSize>,
1777 : }
1778 :
1779 0 : #[derive(Serialize, Deserialize, Debug, Default)]
1780 : pub struct TenantScanRemoteStorageResponse {
1781 : pub shards: Vec<TenantScanRemoteStorageShard>,
1782 : }
1783 :
1784 0 : #[derive(Serialize, Deserialize, Debug, Clone)]
1785 : #[serde(rename_all = "snake_case")]
1786 : pub enum TenantSorting {
1787 : /// Total size of layers on local disk for all timelines in a shard.
1788 : ResidentSize,
1789 : /// The logical size of the largest timeline within a _tenant_ (not shard). Only tracked on
1790 : /// shard 0, contains the sum across all shards.
1791 : MaxLogicalSize,
1792 : /// The logical size of the largest timeline within a _tenant_ (not shard), divided by number of
1793 : /// shards. Only tracked on shard 0, and estimates the per-shard logical size.
1794 : MaxLogicalSizePerShard,
1795 : }
1796 :
1797 : impl Default for TenantSorting {
1798 0 : fn default() -> Self {
1799 0 : Self::ResidentSize
1800 0 : }
1801 : }
1802 :
1803 0 : #[derive(Serialize, Deserialize, Debug, Clone)]
1804 : pub struct TopTenantShardsRequest {
1805 : // How would you like to sort the tenants?
1806 : pub order_by: TenantSorting,
1807 :
1808 : // How many results?
1809 : pub limit: usize,
1810 :
1811 : // Omit tenants with this many shards or more (e.g. if this is the max number of shards
1812 : // that the caller would ever split to)
1813 : pub where_shards_lt: Option<ShardCount>,
1814 :
1815 : // Omit tenants where the ordering metric is less than this (this is an optimization to
1816 : // let us quickly exclude numerous tiny shards)
1817 : pub where_gt: Option<u64>,
1818 : }
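// Illustrative only, not part of the original source: a request asking for the ten
// largest shards by resident size, skipping anything under 1 GiB (`order_by` uses the
// snake_case variant names of TenantSorting):
//
//     {"order_by": "resident_size", "limit": 10, "where_gt": 1073741824}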
1819 :
1820 0 : #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
1821 : pub struct TopTenantShardItem {
1822 : pub id: TenantShardId,
1823 :
1824 : /// Total size of layers on local disk for all timelines in this shard.
1825 : pub resident_size: u64,
1826 :
1827 : /// Total size of layers in remote storage for all timelines in this shard.
1828 : pub physical_size: u64,
1829 :
1830 : /// The largest logical size of a timeline within this _tenant_ (not shard). This is only
1831 : /// tracked on shard 0, and contains the sum of the logical size across all shards.
1832 : pub max_logical_size: u64,
1833 :
1834 : /// The largest logical size of a timeline within this _tenant_ (not shard) divided by number of
1835 : /// shards. This is only tracked on shard 0, and is only an estimate as we divide it evenly by
1836 : /// shard count, rounded up.
1837 : pub max_logical_size_per_shard: u64,
1838 : }
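// Illustrative sketch, not part of the original source: "divided evenly by shard count,
// rounded up" is ordinary ceiling division, e.g.
//
//     fn per_shard_estimate(max_logical_size: u64, shard_count: u64) -> u64 {
//         max_logical_size.div_ceil(shard_count.max(1))
//     }
//
// so a 10 GiB `max_logical_size` on a 4-shard tenant yields a 2.5 GiB per-shard estimate.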
1839 :
1840 0 : #[derive(Serialize, Deserialize, Debug, Default)]
1841 : pub struct TopTenantShardsResponse {
1842 : pub shards: Vec<TopTenantShardItem>,
1843 : }
1844 :
1845 : pub mod virtual_file {
1846 :
1847 : #[derive(
1848 : Copy,
1849 : Clone,
1850 : PartialEq,
1851 : Eq,
1852 : Hash,
1853 : strum_macros::EnumString,
1854 : strum_macros::Display,
1855 0 : serde_with::DeserializeFromStr,
1856 : serde_with::SerializeDisplay,
1857 : Debug,
1858 : )]
1859 : #[strum(serialize_all = "kebab-case")]
1860 : pub enum IoEngineKind {
1861 : StdFs,
1862 : #[cfg(target_os = "linux")]
1863 : TokioEpollUring,
1864 : }
1865 :
1866 : /// Direct IO modes for a pageserver.
1867 : #[derive(
1868 : Copy,
1869 : Clone,
1870 : PartialEq,
1871 : Eq,
1872 : Hash,
1873 : strum_macros::EnumString,
1874 : strum_macros::EnumIter,
1875 : strum_macros::Display,
1876 0 : serde_with::DeserializeFromStr,
1877 : serde_with::SerializeDisplay,
1878 : Debug,
1879 : )]
1880 : #[strum(serialize_all = "kebab-case")]
1881 : #[repr(u8)]
1882 : pub enum IoMode {
1883 : /// Uses buffered IO.
1884 : Buffered,
1885 : /// Uses direct IO for reads only.
1886 : Direct,
1887 : /// Uses direct IO for reads and writes.
1888 : DirectRw,
1889 : }
1890 :
1891 : impl IoMode {
1892 256 : pub fn preferred() -> Self {
1893 256 : IoMode::DirectRw
1894 256 : }
1895 : }
1896 :
1897 : impl TryFrom<u8> for IoMode {
1898 : type Error = u8;
1899 :
1900 2601 : fn try_from(value: u8) -> Result<Self, Self::Error> {
1901 2601 : Ok(match value {
1902 2601 : v if v == (IoMode::Buffered as u8) => IoMode::Buffered,
1903 2601 : v if v == (IoMode::Direct as u8) => IoMode::Direct,
1904 2601 : v if v == (IoMode::DirectRw as u8) => IoMode::DirectRw,
1905 0 : x => return Err(x),
1906 : })
1907 2601 : }
1908 : }
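// Illustrative only, not part of the original source: the #[repr(u8)] discriminants
// round-trip through this TryFrom impl, so a mode stored elsewhere as a raw byte can
// be recovered, e.g.
//
//     assert_eq!(IoMode::try_from(IoMode::DirectRw as u8), Ok(IoMode::DirectRw));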
1909 : }
1910 :
1911 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1912 : pub struct ScanDisposableKeysResponse {
1913 : pub disposable_count: usize,
1914 : pub not_disposable_count: usize,
1915 : }
1916 :
1917 : // This is a cut-down version of TenantHistorySize from the pageserver crate, omitting fields
1918 : // that require pageserver-internal types. It is sufficient to get the total size.
1919 0 : #[derive(Serialize, Deserialize, Debug)]
1920 : pub struct TenantHistorySize {
1921 : pub id: TenantId,
1922 : /// Size is a mixture of WAL and logical size, so the unit is bytes.
1923 : ///
1924 : /// Will be `None` if `?inputs_only=true` was given.
1925 : pub size: Option<u64>,
1926 : }
1927 :
1928 0 : #[derive(Debug, Serialize, Deserialize)]
1929 : pub struct PageTraceEvent {
1930 : pub key: CompactKey,
1931 : pub effective_lsn: Lsn,
1932 : pub time: SystemTime,
1933 : }
1934 :
1935 : impl Default for PageTraceEvent {
1936 0 : fn default() -> Self {
1937 0 : Self {
1938 0 : key: Default::default(),
1939 0 : effective_lsn: Default::default(),
1940 0 : time: std::time::UNIX_EPOCH,
1941 0 : }
1942 0 : }
1943 : }
1944 :
1945 : #[cfg(test)]
1946 : mod tests {
1947 : use std::str::FromStr;
1948 :
1949 : use serde_json::json;
1950 :
1951 : use super::*;
1952 :
1953 : #[test]
1954 1 : fn test_tenantinfo_serde() {
1955 : // Test serialization/deserialization of TenantInfo
1956 1 : let original_active = TenantInfo {
1957 1 : id: TenantShardId::unsharded(TenantId::generate()),
1958 1 : state: TenantState::Active,
1959 1 : current_physical_size: Some(42),
1960 1 : attachment_status: TenantAttachmentStatus::Attached,
1961 1 : generation: 1,
1962 1 : gc_blocking: None,
1963 1 : };
1964 1 : let expected_active = json!({
1965 1 : "id": original_active.id.to_string(),
1966 1 : "state": {
1967 1 : "slug": "Active",
1968 : },
1969 1 : "current_physical_size": 42,
1970 1 : "attachment_status": {
1971 1 : "slug":"attached",
1972 : },
1973 1 : "generation" : 1
1974 : });
1975 :
1976 1 : let original_broken = TenantInfo {
1977 1 : id: TenantShardId::unsharded(TenantId::generate()),
1978 1 : state: TenantState::Broken {
1979 1 : reason: "reason".into(),
1980 1 : backtrace: "backtrace info".into(),
1981 1 : },
1982 1 : current_physical_size: Some(42),
1983 1 : attachment_status: TenantAttachmentStatus::Attached,
1984 1 : generation: 1,
1985 1 : gc_blocking: None,
1986 1 : };
1987 1 : let expected_broken = json!({
1988 1 : "id": original_broken.id.to_string(),
1989 1 : "state": {
1990 1 : "slug": "Broken",
1991 1 : "data": {
1992 1 : "backtrace": "backtrace info",
1993 1 : "reason": "reason",
1994 : }
1995 : },
1996 1 : "current_physical_size": 42,
1997 1 : "attachment_status": {
1998 1 : "slug":"attached",
1999 : },
2000 1 : "generation" : 1
2001 : });
2002 :
2003 1 : assert_eq!(
2004 1 : serde_json::to_value(&original_active).unwrap(),
2005 : expected_active
2006 : );
2007 :
2008 1 : assert_eq!(
2009 1 : serde_json::to_value(&original_broken).unwrap(),
2010 : expected_broken
2011 : );
2012 1 : assert!(format!("{:?}", &original_broken.state).contains("reason"));
2013 1 : assert!(format!("{:?}", &original_broken.state).contains("backtrace info"));
2014 1 : }
2015 :
2016 : #[test]
2017 1 : fn test_reject_unknown_field() {
2018 1 : let id = TenantId::generate();
2019 1 : let config_request = json!({
2020 1 : "tenant_id": id.to_string(),
2021 1 : "unknown_field": "unknown_value".to_string(),
2022 : });
2023 1 : let err = serde_json::from_value::<TenantConfigRequest>(config_request).unwrap_err();
2024 1 : assert!(
2025 1 : err.to_string().contains("unknown field `unknown_field`"),
2026 0 : "expect unknown field `unknown_field` error, got: {err}"
2027 : );
2028 1 : }
2029 :
2030 : #[test]
2031 1 : fn tenantstatus_activating_serde() {
2032 1 : let states = [TenantState::Activating(ActivatingFrom::Attaching)];
2033 1 : let expected = "[{\"slug\":\"Activating\",\"data\":\"Attaching\"}]";
2034 :
2035 1 : let actual = serde_json::to_string(&states).unwrap();
2036 :
2037 1 : assert_eq!(actual, expected);
2038 :
2039 1 : let parsed = serde_json::from_str::<Vec<TenantState>>(&actual).unwrap();
2040 :
2041 1 : assert_eq!(states.as_slice(), &parsed);
2042 1 : }
2043 :
2044 : #[test]
2045 1 : fn tenantstatus_activating_strum() {
2046 : // tests added, because we use these for metrics
2047 1 : let examples = [
2048 1 : (line!(), TenantState::Attaching, "Attaching"),
2049 1 : (
2050 1 : line!(),
2051 1 : TenantState::Activating(ActivatingFrom::Attaching),
2052 1 : "Activating",
2053 1 : ),
2054 1 : (line!(), TenantState::Active, "Active"),
2055 1 : (
2056 1 : line!(),
2057 1 : TenantState::Stopping { progress: None },
2058 1 : "Stopping",
2059 1 : ),
2060 1 : (
2061 1 : line!(),
2062 1 : TenantState::Stopping {
2063 1 : progress: Some(completion::Barrier::default()),
2064 1 : },
2065 1 : "Stopping",
2066 1 : ),
2067 1 : (
2068 1 : line!(),
2069 1 : TenantState::Broken {
2070 1 : reason: "Example".into(),
2071 1 : backtrace: "Looooong backtrace".into(),
2072 1 : },
2073 1 : "Broken",
2074 1 : ),
2075 1 : ];
2076 :
2077 7 : for (line, rendered, expected) in examples {
2078 6 : let actual: &'static str = rendered.into();
2079 6 : assert_eq!(actual, expected, "example on {line}");
2080 : }
2081 1 : }
2082 :
2083 : #[test]
2084 1 : fn test_image_compression_algorithm_parsing() {
2085 : use ImageCompressionAlgorithm::*;
2086 1 : let cases = [
2087 1 : ("disabled", Disabled),
2088 1 : ("zstd", Zstd { level: None }),
2089 1 : ("zstd(18)", Zstd { level: Some(18) }),
2090 1 : ("zstd(-3)", Zstd { level: Some(-3) }),
2091 1 : ];
2092 :
2093 5 : for (display, expected) in cases {
2094 4 : assert_eq!(
2095 4 : ImageCompressionAlgorithm::from_str(display).unwrap(),
2096 : expected,
2097 0 : "parsing works"
2098 : );
2099 4 : assert_eq!(format!("{expected}"), display, "Display FromStr roundtrip");
2100 :
2101 4 : let ser = serde_json::to_string(&expected).expect("serialization");
2102 4 : assert_eq!(
2103 4 : serde_json::from_str::<ImageCompressionAlgorithm>(&ser).unwrap(),
2104 : expected,
2105 0 : "serde roundtrip"
2106 : );
2107 :
2108 4 : assert_eq!(
2109 4 : serde_json::Value::String(display.to_string()),
2110 4 : serde_json::to_value(expected).unwrap(),
2111 0 : "Display is the serde serialization"
2112 : );
2113 : }
2114 1 : }
2115 :
2116 : #[test]
2117 1 : fn test_tenant_config_patch_request_serde() {
2118 1 : let patch_request = TenantConfigPatchRequest {
2119 1 : tenant_id: TenantId::from_str("17c6d121946a61e5ab0fe5a2fd4d8215").unwrap(),
2120 1 : config: TenantConfigPatch {
2121 1 : checkpoint_distance: FieldPatch::Upsert(42),
2122 1 : gc_horizon: FieldPatch::Remove,
2123 1 : compaction_threshold: FieldPatch::Noop,
2124 1 : ..TenantConfigPatch::default()
2125 1 : },
2126 1 : };
2127 :
2128 1 : let json = serde_json::to_string(&patch_request).unwrap();
2129 :
2130 1 : let expected = r#"{"tenant_id":"17c6d121946a61e5ab0fe5a2fd4d8215","checkpoint_distance":42,"gc_horizon":null}"#;
2131 1 : assert_eq!(json, expected);
2132 :
2133 1 : let decoded: TenantConfigPatchRequest = serde_json::from_str(&json).unwrap();
2134 1 : assert_eq!(decoded.tenant_id, patch_request.tenant_id);
2135 1 : assert_eq!(decoded.config, patch_request.config);
2136 :
2137 : // Now apply the patch to a config to demonstrate semantics
2138 :
2139 1 : let base = TenantConfig {
2140 1 : checkpoint_distance: Some(28),
2141 1 : gc_horizon: Some(100),
2142 1 : compaction_target_size: Some(1024),
2143 1 : ..Default::default()
2144 1 : };
2145 :
2146 1 : let expected = TenantConfig {
2147 1 : checkpoint_distance: Some(42),
2148 1 : gc_horizon: None,
2149 1 : ..base.clone()
2150 1 : };
2151 :
2152 1 : let patched = base.apply_patch(decoded.config).unwrap();
2153 :
2154 1 : assert_eq!(patched, expected);
2155 1 : }
2156 : }
|