1 : pub mod detach_ancestor;
2 : pub mod partitioning;
3 : pub mod utilization;
4 :
5 : use core::ops::Range;
6 : use std::collections::HashMap;
7 : use std::fmt::Display;
8 : use std::io::{BufRead, Read};
9 : use std::num::{NonZeroU32, NonZeroU64, NonZeroUsize};
10 : use std::str::FromStr;
11 : use std::time::{Duration, SystemTime};
12 :
13 : use byteorder::{BigEndian, ReadBytesExt};
14 : use bytes::{Buf, BufMut, Bytes, BytesMut};
15 : #[cfg(feature = "testing")]
16 : use camino::Utf8PathBuf;
17 : use postgres_ffi::BLCKSZ;
18 : use serde::{Deserialize, Deserializer, Serialize, Serializer};
19 : use serde_with::serde_as;
20 : pub use utilization::PageserverUtilization;
21 : use utils::id::{NodeId, TenantId, TimelineId};
22 : use utils::lsn::Lsn;
23 : use utils::postgres_client::PostgresClientProtocol;
24 : use utils::{completion, serde_system_time};
25 :
26 : use crate::key::{CompactKey, Key};
27 : use crate::reltag::RelTag;
28 : use crate::shard::{ShardCount, ShardStripeSize, TenantShardId};
29 :
30 : /// The state of a tenant in this pageserver.
31 : ///
32 : /// ```mermaid
33 : /// stateDiagram-v2
34 : ///
35 : /// [*] --> Attaching: spawn_attach()
36 : ///
37 : /// Attaching --> Activating: activate()
38 : /// Activating --> Active: infallible
39 : ///
40 : /// Attaching --> Broken: attach() failure
41 : ///
42 : /// Active --> Stopping: set_stopping(), part of shutdown & detach
43 : /// Stopping --> Broken: late error in remove_tenant_from_memory
44 : ///
45 : /// Broken --> [*]: ignore / detach / shutdown
46 : /// Stopping --> [*]: remove_from_memory complete
47 : ///
48 : /// Active --> Broken: cfg(testing)-only tenant break point
49 : /// ```
50 : #[derive(
51 : Clone,
52 : PartialEq,
53 : Eq,
54 0 : serde::Serialize,
55 1 : serde::Deserialize,
56 : strum_macros::Display,
57 : strum_macros::VariantNames,
58 : strum_macros::AsRefStr,
59 : strum_macros::IntoStaticStr,
60 : )]
61 : #[serde(tag = "slug", content = "data")]
62 : pub enum TenantState {
63 : /// This tenant is being attached to the pageserver.
64 : ///
65 : /// `set_stopping()` and `set_broken()` do not work in this state and wait for it to pass.
66 : Attaching,
67 : /// The tenant is transitioning from Loading/Attaching to Active.
68 : ///
69 : /// While in this state, the individual timelines are being activated.
70 : ///
71 : /// `set_stopping()` and `set_broken()` do not work in this state and wait for it to pass.
72 : Activating(ActivatingFrom),
73 : /// The tenant has finished activating and is open for business.
74 : ///
75 : /// Transitions out of this state are possible through `set_stopping()` and `set_broken()`.
76 : Active,
77 : /// The tenant is recognized by pageserver, but it is being detached or the
78 : /// system is being shut down.
79 : ///
80 : /// Transitions out of this state are possible through `set_broken()`.
81 : Stopping {
82 : // Because of https://github.com/serde-rs/serde/issues/2105 this has to be a named field,
83 : // otherwise it will not be skipped during deserialization
84 : #[serde(skip)]
85 : progress: completion::Barrier,
86 : },
87 : /// The tenant is recognized by the pageserver, but can no longer be used for
88 : /// any operations.
89 : ///
90 : /// If the tenant fails to load or attach, it will transition to this state
91 : /// and it is guaranteed that no background tasks are running in its name.
92 : ///
93 : /// The other way to transition into this state is from `Stopping` state
94 : /// through `set_broken()` called from `remove_tenant_from_memory()`. That happens
95 : /// if the cleanup future executed by `remove_tenant_from_memory()` fails.
96 : Broken { reason: String, backtrace: String },
97 : }
98 :
99 : impl TenantState {
100 0 : pub fn attachment_status(&self) -> TenantAttachmentStatus {
101 : use TenantAttachmentStatus::*;
102 :
103 :         // Below, TenantState::Activating is treated as a "transient" or "transparent" state when
104 :         // determining the attachment_status.
105 0 : match self {
106 : // The attach procedure writes the marker file before adding the Attaching tenant to the tenants map.
107 : // So, technically, we can return Attached here.
108 : // However, as soon as Console observes Attached, it will proceed with the Postgres-level health check.
109 : // But, our attach task might still be fetching the remote timelines, etc.
110 : // So, return `Maybe` while Attaching, making Console wait for the attach task to finish.
111 0 : Self::Attaching | Self::Activating(ActivatingFrom::Attaching) => Maybe,
112 : // We only reach Active after successful load / attach.
113 :             // So, call the attachment status Attached.
114 0 : Self::Active => Attached,
115 : // If the (initial or resumed) attach procedure fails, the tenant becomes Broken.
116 : // However, it also becomes Broken if the regular load fails.
117 : // From Console's perspective there's no practical difference
118 :             // because the Console polls attachment_status only while an attach operation is executing.
119 0 : Self::Broken { reason, .. } => Failed {
120 0 : reason: reason.to_owned(),
121 0 : },
122 : // Why is Stopping a Maybe case? Because, during pageserver shutdown,
123 : // we set the Stopping state irrespective of whether the tenant
124 : // has finished attaching or not.
125 0 : Self::Stopping { .. } => Maybe,
126 : }
127 0 : }
128 :
129 0 : pub fn broken_from_reason(reason: String) -> Self {
130 0 : let backtrace_str: String = format!("{}", std::backtrace::Backtrace::force_capture());
131 0 : Self::Broken {
132 0 : reason,
133 0 : backtrace: backtrace_str,
134 0 : }
135 0 : }
136 : }
137 :
138 : impl std::fmt::Debug for TenantState {
139 2 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
140 2 : match self {
141 2 : Self::Broken { reason, backtrace } if !reason.is_empty() => {
142 2 : write!(f, "Broken due to: {reason}. Backtrace:\n{backtrace}")
143 : }
144 0 : _ => write!(f, "{self}"),
145 : }
146 2 : }
147 : }
148 :
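// Illustrative sketch (not from the original source): how the states above map
// onto the externally visible `TenantAttachmentStatus`.
#[cfg(test)]
mod tenant_state_examples {
    use super::*;

    #[test]
    fn attachment_status_mapping() {
        // Attaching is reported as Maybe so that Console keeps polling.
        assert!(matches!(
            TenantState::Attaching.attachment_status(),
            TenantAttachmentStatus::Maybe
        ));
        // Only a fully activated tenant reports Attached.
        assert!(matches!(
            TenantState::Active.attachment_status(),
            TenantAttachmentStatus::Attached
        ));
        // A broken tenant carries its reason into the Failed status.
        let broken = TenantState::broken_from_reason("disk full".to_string());
        assert!(matches!(
            broken.attachment_status(),
            TenantAttachmentStatus::Failed { .. }
        ));
    }
}
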
149 : /// A temporary lease to a specific lsn inside a timeline.
150 : /// Access to the lsn is guaranteed by the pageserver until the expiration indicated by `valid_until`.
151 : #[serde_as]
152 0 : #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
153 : pub struct LsnLease {
154 : #[serde_as(as = "SystemTimeAsRfc3339Millis")]
155 : pub valid_until: SystemTime,
156 : }
157 :
158 : serde_with::serde_conv!(
159 : SystemTimeAsRfc3339Millis,
160 : SystemTime,
161 0 : |time: &SystemTime| humantime::format_rfc3339_millis(*time).to_string(),
162 0 : |value: String| -> Result<_, humantime::TimestampError> { humantime::parse_rfc3339(&value) }
163 : );
164 :
165 : impl LsnLease {
166 : /// The default length for an explicit LSN lease request (10 minutes).
167 : pub const DEFAULT_LENGTH: Duration = Duration::from_secs(10 * 60);
168 :
169 : /// The default length for an implicit LSN lease granted during
170 :     /// `get_lsn_by_timestamp` request (1 minute).
171 : pub const DEFAULT_LENGTH_FOR_TS: Duration = Duration::from_secs(60);
172 :
173 : /// Checks whether the lease is expired.
174 12 : pub fn is_expired(&self, now: &SystemTime) -> bool {
175 12 : now > &self.valid_until
176 12 : }
177 : }
178 :
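// Illustrative sketch (not from the original source): constructing a lease from
// `DEFAULT_LENGTH` and checking expiry with `is_expired`.
#[cfg(test)]
mod lsn_lease_examples {
    use super::*;

    #[test]
    fn lease_expires_after_default_length() {
        let now = SystemTime::now();
        let lease = LsnLease {
            valid_until: now + LsnLease::DEFAULT_LENGTH,
        };
        // A freshly granted lease is still valid.
        assert!(!lease.is_expired(&now));
        // One second past the 10-minute default it has lapsed.
        let later = now + LsnLease::DEFAULT_LENGTH + Duration::from_secs(1);
        assert!(lease.is_expired(&later));
    }
}
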
179 : /// Controls the detach ancestor behavior.
180 : /// - When set to `NoAncestorAndReparent`, we will only detach a branch if its ancestor is a root branch. It will automatically reparent any children of the ancestor before and at the branch point.
181 : /// - When set to `MultiLevelAndNoReparent`, we will detach a branch from multiple levels of ancestors, and no reparenting will happen at all.
182 : #[derive(Debug, Clone, Copy, Default)]
183 : pub enum DetachBehavior {
184 : #[default]
185 : NoAncestorAndReparent,
186 : MultiLevelAndNoReparent,
187 : }
188 :
189 : impl std::str::FromStr for DetachBehavior {
190 : type Err = &'static str;
191 :
192 0 : fn from_str(s: &str) -> Result<Self, Self::Err> {
193 0 : match s {
194 0 : "no_ancestor_and_reparent" => Ok(DetachBehavior::NoAncestorAndReparent),
195 0 : "multi_level_and_no_reparent" => Ok(DetachBehavior::MultiLevelAndNoReparent),
196 0 : "v1" => Ok(DetachBehavior::NoAncestorAndReparent),
197 0 : "v2" => Ok(DetachBehavior::MultiLevelAndNoReparent),
198 0 : _ => Err("cannot parse detach behavior"),
199 : }
200 0 : }
201 : }
202 :
203 : impl std::fmt::Display for DetachBehavior {
204 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
205 0 : match self {
206 0 : DetachBehavior::NoAncestorAndReparent => write!(f, "no_ancestor_and_reparent"),
207 0 : DetachBehavior::MultiLevelAndNoReparent => write!(f, "multi_level_and_no_reparent"),
208 : }
209 0 : }
210 : }
211 :
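// Illustrative sketch (not from the original source): the spellings accepted by
// `DetachBehavior::from_str` and the canonical `Display` form.
#[cfg(test)]
mod detach_behavior_examples {
    use super::*;

    #[test]
    fn parse_and_display_detach_behavior() {
        // The long name and the "v1" alias both map to the default behavior.
        assert!(matches!(
            "no_ancestor_and_reparent".parse::<DetachBehavior>(),
            Ok(DetachBehavior::NoAncestorAndReparent)
        ));
        assert!(matches!(
            "v1".parse::<DetachBehavior>(),
            Ok(DetachBehavior::NoAncestorAndReparent)
        ));
        // Display always produces the long form, regardless of the input alias.
        assert_eq!(
            "v2".parse::<DetachBehavior>().unwrap().to_string(),
            "multi_level_and_no_reparent"
        );
    }
}
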
212 : /// The only [`TenantState`] variants we could be `TenantState::Activating` from.
213 : ///
214 : /// XXX: We used to have more variants here, but now it's just one, which makes this rather
215 : /// useless. Remove, once we've checked that there's no client code left that looks at this.
216 1 : #[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
217 : pub enum ActivatingFrom {
218 : /// Arrived to [`TenantState::Activating`] from [`TenantState::Attaching`]
219 : Attaching,
220 : }
221 :
222 : /// A state of a timeline in pageserver's memory.
223 0 : #[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
224 : pub enum TimelineState {
225 : /// The timeline is recognized by the pageserver but is not yet operational.
226 : /// In particular, the walreceiver connection loop is not running for this timeline.
227 : /// It will eventually transition to state Active or Broken.
228 : Loading,
229 : /// The timeline is fully operational.
230 : /// It can be queried, and the walreceiver connection loop is running.
231 : Active,
232 : /// The timeline was previously Loading or Active but is shutting down.
233 : /// It cannot transition back into any other state.
234 : Stopping,
235 : /// The timeline is broken and not operational (previous states: Loading or Active).
236 : Broken { reason: String, backtrace: String },
237 : }
238 :
239 : #[serde_with::serde_as]
240 0 : #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
241 : pub struct CompactLsnRange {
242 : pub start: Lsn,
243 : pub end: Lsn,
244 : }
245 :
246 : #[serde_with::serde_as]
247 0 : #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
248 : pub struct CompactKeyRange {
249 : #[serde_as(as = "serde_with::DisplayFromStr")]
250 : pub start: Key,
251 : #[serde_as(as = "serde_with::DisplayFromStr")]
252 : pub end: Key,
253 : }
254 :
255 : impl From<Range<Lsn>> for CompactLsnRange {
256 12 : fn from(range: Range<Lsn>) -> Self {
257 12 : Self {
258 12 : start: range.start,
259 12 : end: range.end,
260 12 : }
261 12 : }
262 : }
263 :
264 : impl From<Range<Key>> for CompactKeyRange {
265 32 : fn from(range: Range<Key>) -> Self {
266 32 : Self {
267 32 : start: range.start,
268 32 : end: range.end,
269 32 : }
270 32 : }
271 : }
272 :
273 : impl From<CompactLsnRange> for Range<Lsn> {
274 20 : fn from(range: CompactLsnRange) -> Self {
275 20 : range.start..range.end
276 20 : }
277 : }
278 :
279 : impl From<CompactKeyRange> for Range<Key> {
280 32 : fn from(range: CompactKeyRange) -> Self {
281 32 : range.start..range.end
282 32 : }
283 : }
284 :
285 : impl CompactLsnRange {
286 8 : pub fn above(lsn: Lsn) -> Self {
287 8 : Self {
288 8 : start: lsn,
289 8 : end: Lsn::MAX,
290 8 : }
291 8 : }
292 : }
293 :
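// Illustrative sketch (not from the original source): `CompactLsnRange::above`
// yields a range that is open-ended at the top, and the `From` impls convert to
// and from the std `Range` types.
#[cfg(test)]
mod compact_range_examples {
    use super::*;

    #[test]
    fn compact_lsn_range_above_and_conversions() {
        let above = CompactLsnRange::above(Lsn(0x10));
        assert_eq!(above.start, Lsn(0x10));
        assert_eq!(above.end, Lsn::MAX);

        // Round-trip through the std `Range<Lsn>` representation.
        let roundtrip: Range<Lsn> = CompactLsnRange::from(Lsn(0x10)..Lsn(0x20)).into();
        assert_eq!(roundtrip, Lsn(0x10)..Lsn(0x20));
    }
}
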
294 : #[derive(Debug, Clone, Serialize)]
295 : pub struct CompactInfoResponse {
296 : pub compact_key_range: Option<CompactKeyRange>,
297 : pub compact_lsn_range: Option<CompactLsnRange>,
298 : pub sub_compaction: bool,
299 : pub running: bool,
300 : pub job_id: usize,
301 : }
302 :
303 0 : #[derive(Serialize, Deserialize, Clone)]
304 : pub struct TimelineCreateRequest {
305 : pub new_timeline_id: TimelineId,
306 : #[serde(flatten)]
307 : pub mode: TimelineCreateRequestMode,
308 : }
309 :
310 : /// Storage controller specific extensions to [`TimelineInfo`].
311 0 : #[derive(Serialize, Deserialize, Clone)]
312 : pub struct TimelineCreateResponseStorcon {
313 : #[serde(flatten)]
314 : pub timeline_info: TimelineInfo,
315 :
316 : pub safekeepers: Option<SafekeepersInfo>,
317 : }
318 :
319 : /// Safekeepers as returned in timeline creation request to storcon or pushed to
320 : /// cplane in the post migration hook.
321 0 : #[derive(Serialize, Deserialize, Clone)]
322 : pub struct SafekeepersInfo {
323 : pub tenant_id: TenantId,
324 : pub timeline_id: TimelineId,
325 : pub generation: u32,
326 : pub safekeepers: Vec<SafekeeperInfo>,
327 : }
328 :
329 0 : #[derive(Serialize, Deserialize, Clone)]
330 : pub struct SafekeeperInfo {
331 : pub id: NodeId,
332 : pub hostname: String,
333 : }
334 :
335 0 : #[derive(Serialize, Deserialize, Clone)]
336 : #[serde(untagged)]
337 : pub enum TimelineCreateRequestMode {
338 : Branch {
339 : ancestor_timeline_id: TimelineId,
340 : #[serde(default)]
341 : ancestor_start_lsn: Option<Lsn>,
342 :         // TODO: cplane sets this, but the branching code always
343 : // inherits the ancestor's pg_version. Earlier code wasn't
344 :         // using a flattened enum, so it was an accepted field, and
345 : // we continue to accept it by having it here.
346 : pg_version: Option<u32>,
347 : },
348 : ImportPgdata {
349 : import_pgdata: TimelineCreateRequestModeImportPgdata,
350 : },
351 :     // NB: Bootstrap is all-optional, so with serde(untagged) any request that doesn't match an earlier variant falls through to Bootstrap.
352 :     // (serde tries the variants in declaration order and picks the first that matches; see the example after this enum.)
353 : Bootstrap {
354 : #[serde(default)]
355 : existing_initdb_timeline_id: Option<TimelineId>,
356 : pg_version: Option<u32>,
357 : },
358 : }
359 :
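// Illustrative sketch (not from the original source; the timeline id below is an
// arbitrary example value): how the untagged representation selects a variant. A
// body with `ancestor_timeline_id` matches Branch; a body that matches nothing
// earlier falls through to the all-optional Bootstrap.
#[cfg(test)]
mod timeline_create_mode_examples {
    use super::*;

    #[test]
    fn untagged_variant_selection() {
        let branch: TimelineCreateRequestMode = serde_json::from_value(serde_json::json!({
            "ancestor_timeline_id": "11223344556677881122334455667788"
        }))
        .unwrap();
        assert!(matches!(branch, TimelineCreateRequestMode::Branch { .. }));

        // An empty object matches neither Branch nor ImportPgdata, so serde
        // falls through to Bootstrap.
        let bootstrap: TimelineCreateRequestMode =
            serde_json::from_value(serde_json::json!({})).unwrap();
        assert!(matches!(bootstrap, TimelineCreateRequestMode::Bootstrap { .. }));
    }
}
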
360 0 : #[derive(Serialize, Deserialize, Clone)]
361 : pub struct TimelineCreateRequestModeImportPgdata {
362 : pub location: ImportPgdataLocation,
363 : pub idempotency_key: ImportPgdataIdempotencyKey,
364 : }
365 :
366 0 : #[derive(Serialize, Deserialize, Clone, Debug)]
367 : pub enum ImportPgdataLocation {
368 : #[cfg(feature = "testing")]
369 : LocalFs { path: Utf8PathBuf },
370 : AwsS3 {
371 : region: String,
372 : bucket: String,
373 : /// A better name for this would be `prefix`; changing requires coordination with cplane.
374 : /// See <https://github.com/neondatabase/cloud/issues/20646>.
375 : key: String,
376 : },
377 : }
378 :
379 0 : #[derive(Serialize, Deserialize, Clone)]
380 : #[serde(transparent)]
381 : pub struct ImportPgdataIdempotencyKey(pub String);
382 :
383 : impl ImportPgdataIdempotencyKey {
384 0 : pub fn random() -> Self {
385 : use rand::Rng;
386 : use rand::distributions::Alphanumeric;
387 0 : Self(
388 0 : rand::thread_rng()
389 0 : .sample_iter(&Alphanumeric)
390 0 : .take(20)
391 0 : .map(char::from)
392 0 : .collect(),
393 0 : )
394 0 : }
395 : }
396 :
397 0 : #[derive(Serialize, Deserialize, Clone)]
398 : pub struct LsnLeaseRequest {
399 : pub lsn: Lsn,
400 : }
401 :
402 0 : #[derive(Serialize, Deserialize)]
403 : pub struct TenantShardSplitRequest {
404 : pub new_shard_count: u8,
405 :
406 : // A tenant's stripe size is only meaningful the first time their shard count goes
407 : // above 1: therefore during a split from 1->N shards, we may modify the stripe size.
408 : //
409 :     // If this is set while the shard count is being increased from an already >1 value,
410 : // then the request will fail with 400.
411 : pub new_stripe_size: Option<ShardStripeSize>,
412 : }
413 :
414 0 : #[derive(Serialize, Deserialize)]
415 : pub struct TenantShardSplitResponse {
416 : pub new_shards: Vec<TenantShardId>,
417 : }
418 :
419 : /// Parameters that apply to all shards in a tenant. Used during tenant creation.
420 0 : #[derive(Serialize, Deserialize, Debug)]
421 : #[serde(deny_unknown_fields)]
422 : pub struct ShardParameters {
423 : pub count: ShardCount,
424 : pub stripe_size: ShardStripeSize,
425 : }
426 :
427 : impl ShardParameters {
428 : pub const DEFAULT_STRIPE_SIZE: ShardStripeSize = ShardStripeSize(256 * 1024 / 8);
429 :
430 0 : pub fn is_unsharded(&self) -> bool {
431 0 : self.count.is_unsharded()
432 0 : }
433 : }
434 :
435 : impl Default for ShardParameters {
436 453 : fn default() -> Self {
437 453 : Self {
438 453 : count: ShardCount::new(0),
439 453 : stripe_size: Self::DEFAULT_STRIPE_SIZE,
440 453 : }
441 453 : }
442 : }
443 :
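// Illustrative sketch (not from the original source): `DEFAULT_STRIPE_SIZE` is
// expressed in 8 KiB pages, so 256 * 1024 / 8 = 32768 pages, i.e. 256 MiB of key
// space per stripe.
#[cfg(test)]
mod shard_parameters_examples {
    use super::*;

    #[test]
    fn default_shard_parameters() {
        let params = ShardParameters::default();
        // A shard count of zero means the tenant is unsharded.
        assert!(params.is_unsharded());
        assert_eq!(params.stripe_size.0, 32768);
        // 32768 pages * 8 KiB per page = 256 MiB.
        assert_eq!(
            params.stripe_size.0 as u64 * BLCKSZ as u64,
            256 * 1024 * 1024
        );
    }
}
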
444 : #[derive(Debug, Default, Clone, Eq, PartialEq)]
445 : pub enum FieldPatch<T> {
446 : Upsert(T),
447 : Remove,
448 : #[default]
449 : Noop,
450 : }
451 :
452 : impl<T> FieldPatch<T> {
453 70 : fn is_noop(&self) -> bool {
454 70 : matches!(self, FieldPatch::Noop)
455 70 : }
456 :
457 35 : pub fn apply(self, target: &mut Option<T>) {
458 35 : match self {
459 1 : Self::Upsert(v) => *target = Some(v),
460 1 : Self::Remove => *target = None,
461 33 : Self::Noop => {}
462 : }
463 35 : }
464 :
465 10 : pub fn map<U, E, F: FnOnce(T) -> Result<U, E>>(self, map: F) -> Result<FieldPatch<U>, E> {
466 10 : match self {
467 0 : Self::Upsert(v) => Ok(FieldPatch::<U>::Upsert(map(v)?)),
468 0 : Self::Remove => Ok(FieldPatch::<U>::Remove),
469 10 : Self::Noop => Ok(FieldPatch::<U>::Noop),
470 : }
471 10 : }
472 : }
473 :
474 : impl<'de, T: Deserialize<'de>> Deserialize<'de> for FieldPatch<T> {
475 2 : fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
476 2 : where
477 2 : D: Deserializer<'de>,
478 2 : {
479 2 : Option::deserialize(deserializer).map(|opt| match opt {
480 1 : None => FieldPatch::Remove,
481 1 : Some(val) => FieldPatch::Upsert(val),
482 2 : })
483 2 : }
484 : }
485 :
486 : impl<T: Serialize> Serialize for FieldPatch<T> {
487 2 : fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
488 2 : where
489 2 : S: Serializer,
490 2 : {
491 2 : match self {
492 1 : FieldPatch::Upsert(val) => serializer.serialize_some(val),
493 1 : FieldPatch::Remove => serializer.serialize_none(),
494 0 : FieldPatch::Noop => unreachable!(),
495 : }
496 2 : }
497 : }
498 :
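// Illustrative sketch (not from the original source): the patch semantics of
// `FieldPatch` as used by `TenantConfigPatch` below. With `#[serde(default)]`
// plus `skip_serializing_if = "FieldPatch::is_noop"`, an absent JSON field is
// Noop, an explicit `null` is Remove, and a value is Upsert.
#[cfg(test)]
mod field_patch_examples {
    use super::*;

    #[test]
    fn apply_semantics() {
        let mut target = Some(1u64);
        FieldPatch::Noop.apply(&mut target);
        assert_eq!(target, Some(1));
        FieldPatch::Upsert(2u64).apply(&mut target);
        assert_eq!(target, Some(2));
        FieldPatch::<u64>::Remove.apply(&mut target);
        assert_eq!(target, None);
    }

    #[test]
    fn json_mapping() {
        let patch: TenantConfigPatch =
            serde_json::from_value(serde_json::json!({ "gc_horizon": null })).unwrap();
        // `null` removes the field, while fields not mentioned stay Noop.
        assert_eq!(patch.gc_horizon, FieldPatch::Remove);
        assert_eq!(patch.checkpoint_distance, FieldPatch::Noop);
    }
}
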
499 2 : #[derive(Serialize, Deserialize, Debug, Default, Clone, Eq, PartialEq)]
500 : #[serde(default)]
501 : pub struct TenantConfigPatch {
502 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
503 : pub checkpoint_distance: FieldPatch<u64>,
504 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
505 : pub checkpoint_timeout: FieldPatch<String>,
506 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
507 : pub compaction_target_size: FieldPatch<u64>,
508 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
509 : pub compaction_period: FieldPatch<String>,
510 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
511 : pub compaction_threshold: FieldPatch<usize>,
512 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
513 : pub compaction_upper_limit: FieldPatch<usize>,
514 : // defer parsing compaction_algorithm, like eviction_policy
515 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
516 : pub compaction_algorithm: FieldPatch<CompactionAlgorithmSettings>,
517 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
518 : pub compaction_l0_first: FieldPatch<bool>,
519 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
520 : pub compaction_l0_semaphore: FieldPatch<bool>,
521 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
522 : pub l0_flush_delay_threshold: FieldPatch<usize>,
523 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
524 : pub l0_flush_stall_threshold: FieldPatch<usize>,
525 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
526 : pub l0_flush_wait_upload: FieldPatch<bool>,
527 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
528 : pub gc_horizon: FieldPatch<u64>,
529 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
530 : pub gc_period: FieldPatch<String>,
531 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
532 : pub image_creation_threshold: FieldPatch<usize>,
533 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
534 : pub pitr_interval: FieldPatch<String>,
535 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
536 : pub walreceiver_connect_timeout: FieldPatch<String>,
537 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
538 : pub lagging_wal_timeout: FieldPatch<String>,
539 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
540 : pub max_lsn_wal_lag: FieldPatch<NonZeroU64>,
541 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
542 : pub eviction_policy: FieldPatch<EvictionPolicy>,
543 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
544 : pub min_resident_size_override: FieldPatch<u64>,
545 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
546 : pub evictions_low_residence_duration_metric_threshold: FieldPatch<String>,
547 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
548 : pub heatmap_period: FieldPatch<String>,
549 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
550 : pub lazy_slru_download: FieldPatch<bool>,
551 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
552 : pub timeline_get_throttle: FieldPatch<ThrottleConfig>,
553 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
554 : pub image_layer_creation_check_threshold: FieldPatch<u8>,
555 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
556 : pub image_creation_preempt_threshold: FieldPatch<usize>,
557 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
558 : pub lsn_lease_length: FieldPatch<String>,
559 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
560 : pub lsn_lease_length_for_ts: FieldPatch<String>,
561 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
562 : pub timeline_offloading: FieldPatch<bool>,
563 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
564 : pub wal_receiver_protocol_override: FieldPatch<PostgresClientProtocol>,
565 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
566 : pub rel_size_v2_enabled: FieldPatch<bool>,
567 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
568 : pub gc_compaction_enabled: FieldPatch<bool>,
569 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
570 : pub gc_compaction_initial_threshold_kb: FieldPatch<u64>,
571 : #[serde(skip_serializing_if = "FieldPatch::is_noop")]
572 : pub gc_compaction_ratio_percent: FieldPatch<u64>,
573 : }
574 :
575 : /// Like [`crate::config::TenantConfigToml`], but preserves the information
576 : /// about which parameters are set and which are not.
577 : ///
578 : /// Used in many places, including durably stored ones.
579 8 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
580 : #[serde(default)] // this maps omitted fields in deserialization to None
581 : pub struct TenantConfig {
582 : #[serde(skip_serializing_if = "Option::is_none")]
583 : pub checkpoint_distance: Option<u64>,
584 :
585 : #[serde(skip_serializing_if = "Option::is_none")]
586 : #[serde(with = "humantime_serde")]
587 : pub checkpoint_timeout: Option<Duration>,
588 :
589 : #[serde(skip_serializing_if = "Option::is_none")]
590 : pub compaction_target_size: Option<u64>,
591 :
592 : #[serde(skip_serializing_if = "Option::is_none")]
593 : #[serde(with = "humantime_serde")]
594 : pub compaction_period: Option<Duration>,
595 :
596 : #[serde(skip_serializing_if = "Option::is_none")]
597 : pub compaction_threshold: Option<usize>,
598 :
599 : #[serde(skip_serializing_if = "Option::is_none")]
600 : pub compaction_upper_limit: Option<usize>,
601 :
602 : #[serde(skip_serializing_if = "Option::is_none")]
603 : pub compaction_algorithm: Option<CompactionAlgorithmSettings>,
604 :
605 : #[serde(skip_serializing_if = "Option::is_none")]
606 : pub compaction_l0_first: Option<bool>,
607 :
608 : #[serde(skip_serializing_if = "Option::is_none")]
609 : pub compaction_l0_semaphore: Option<bool>,
610 :
611 : #[serde(skip_serializing_if = "Option::is_none")]
612 : pub l0_flush_delay_threshold: Option<usize>,
613 :
614 : #[serde(skip_serializing_if = "Option::is_none")]
615 : pub l0_flush_stall_threshold: Option<usize>,
616 :
617 : #[serde(skip_serializing_if = "Option::is_none")]
618 : pub l0_flush_wait_upload: Option<bool>,
619 :
620 : #[serde(skip_serializing_if = "Option::is_none")]
621 : pub gc_horizon: Option<u64>,
622 :
623 : #[serde(skip_serializing_if = "Option::is_none")]
624 : #[serde(with = "humantime_serde")]
625 : pub gc_period: Option<Duration>,
626 :
627 : #[serde(skip_serializing_if = "Option::is_none")]
628 : pub image_creation_threshold: Option<usize>,
629 :
630 : #[serde(skip_serializing_if = "Option::is_none")]
631 : #[serde(with = "humantime_serde")]
632 : pub pitr_interval: Option<Duration>,
633 :
634 : #[serde(skip_serializing_if = "Option::is_none")]
635 : #[serde(with = "humantime_serde")]
636 : pub walreceiver_connect_timeout: Option<Duration>,
637 :
638 : #[serde(skip_serializing_if = "Option::is_none")]
639 : #[serde(with = "humantime_serde")]
640 : pub lagging_wal_timeout: Option<Duration>,
641 :
642 : #[serde(skip_serializing_if = "Option::is_none")]
643 : pub max_lsn_wal_lag: Option<NonZeroU64>,
644 :
645 : #[serde(skip_serializing_if = "Option::is_none")]
646 : pub eviction_policy: Option<EvictionPolicy>,
647 :
648 : #[serde(skip_serializing_if = "Option::is_none")]
649 : pub min_resident_size_override: Option<u64>,
650 :
651 : #[serde(skip_serializing_if = "Option::is_none")]
652 : #[serde(with = "humantime_serde")]
653 : pub evictions_low_residence_duration_metric_threshold: Option<Duration>,
654 :
655 : #[serde(skip_serializing_if = "Option::is_none")]
656 : #[serde(with = "humantime_serde")]
657 : pub heatmap_period: Option<Duration>,
658 :
659 : #[serde(skip_serializing_if = "Option::is_none")]
660 : pub lazy_slru_download: Option<bool>,
661 :
662 : #[serde(skip_serializing_if = "Option::is_none")]
663 : pub timeline_get_throttle: Option<ThrottleConfig>,
664 :
665 : #[serde(skip_serializing_if = "Option::is_none")]
666 : pub image_layer_creation_check_threshold: Option<u8>,
667 :
668 : #[serde(skip_serializing_if = "Option::is_none")]
669 : pub image_creation_preempt_threshold: Option<usize>,
670 :
671 : #[serde(skip_serializing_if = "Option::is_none")]
672 : #[serde(with = "humantime_serde")]
673 : pub lsn_lease_length: Option<Duration>,
674 :
675 : #[serde(skip_serializing_if = "Option::is_none")]
676 : #[serde(with = "humantime_serde")]
677 : pub lsn_lease_length_for_ts: Option<Duration>,
678 :
679 : #[serde(skip_serializing_if = "Option::is_none")]
680 : pub timeline_offloading: Option<bool>,
681 :
682 : #[serde(skip_serializing_if = "Option::is_none")]
683 : pub wal_receiver_protocol_override: Option<PostgresClientProtocol>,
684 :
685 : #[serde(skip_serializing_if = "Option::is_none")]
686 : pub rel_size_v2_enabled: Option<bool>,
687 :
688 : #[serde(skip_serializing_if = "Option::is_none")]
689 : pub gc_compaction_enabled: Option<bool>,
690 :
691 : #[serde(skip_serializing_if = "Option::is_none")]
692 : pub gc_compaction_initial_threshold_kb: Option<u64>,
693 :
694 : #[serde(skip_serializing_if = "Option::is_none")]
695 : pub gc_compaction_ratio_percent: Option<u64>,
696 : }
697 :
698 : impl TenantConfig {
699 1 : pub fn apply_patch(
700 1 : self,
701 1 : patch: TenantConfigPatch,
702 1 : ) -> Result<TenantConfig, humantime::DurationError> {
703 1 : let Self {
704 1 : mut checkpoint_distance,
705 1 : mut checkpoint_timeout,
706 1 : mut compaction_target_size,
707 1 : mut compaction_period,
708 1 : mut compaction_threshold,
709 1 : mut compaction_upper_limit,
710 1 : mut compaction_algorithm,
711 1 : mut compaction_l0_first,
712 1 : mut compaction_l0_semaphore,
713 1 : mut l0_flush_delay_threshold,
714 1 : mut l0_flush_stall_threshold,
715 1 : mut l0_flush_wait_upload,
716 1 : mut gc_horizon,
717 1 : mut gc_period,
718 1 : mut image_creation_threshold,
719 1 : mut pitr_interval,
720 1 : mut walreceiver_connect_timeout,
721 1 : mut lagging_wal_timeout,
722 1 : mut max_lsn_wal_lag,
723 1 : mut eviction_policy,
724 1 : mut min_resident_size_override,
725 1 : mut evictions_low_residence_duration_metric_threshold,
726 1 : mut heatmap_period,
727 1 : mut lazy_slru_download,
728 1 : mut timeline_get_throttle,
729 1 : mut image_layer_creation_check_threshold,
730 1 : mut image_creation_preempt_threshold,
731 1 : mut lsn_lease_length,
732 1 : mut lsn_lease_length_for_ts,
733 1 : mut timeline_offloading,
734 1 : mut wal_receiver_protocol_override,
735 1 : mut rel_size_v2_enabled,
736 1 : mut gc_compaction_enabled,
737 1 : mut gc_compaction_initial_threshold_kb,
738 1 : mut gc_compaction_ratio_percent,
739 1 : } = self;
740 1 :
741 1 : patch.checkpoint_distance.apply(&mut checkpoint_distance);
742 1 : patch
743 1 : .checkpoint_timeout
744 1 : .map(|v| humantime::parse_duration(&v))?
745 1 : .apply(&mut checkpoint_timeout);
746 1 : patch
747 1 : .compaction_target_size
748 1 : .apply(&mut compaction_target_size);
749 1 : patch
750 1 : .compaction_period
751 1 : .map(|v| humantime::parse_duration(&v))?
752 1 : .apply(&mut compaction_period);
753 1 : patch.compaction_threshold.apply(&mut compaction_threshold);
754 1 : patch
755 1 : .compaction_upper_limit
756 1 : .apply(&mut compaction_upper_limit);
757 1 : patch.compaction_algorithm.apply(&mut compaction_algorithm);
758 1 : patch.compaction_l0_first.apply(&mut compaction_l0_first);
759 1 : patch
760 1 : .compaction_l0_semaphore
761 1 : .apply(&mut compaction_l0_semaphore);
762 1 : patch
763 1 : .l0_flush_delay_threshold
764 1 : .apply(&mut l0_flush_delay_threshold);
765 1 : patch
766 1 : .l0_flush_stall_threshold
767 1 : .apply(&mut l0_flush_stall_threshold);
768 1 : patch.l0_flush_wait_upload.apply(&mut l0_flush_wait_upload);
769 1 : patch.gc_horizon.apply(&mut gc_horizon);
770 1 : patch
771 1 : .gc_period
772 1 : .map(|v| humantime::parse_duration(&v))?
773 1 : .apply(&mut gc_period);
774 1 : patch
775 1 : .image_creation_threshold
776 1 : .apply(&mut image_creation_threshold);
777 1 : patch
778 1 : .pitr_interval
779 1 : .map(|v| humantime::parse_duration(&v))?
780 1 : .apply(&mut pitr_interval);
781 1 : patch
782 1 : .walreceiver_connect_timeout
783 1 : .map(|v| humantime::parse_duration(&v))?
784 1 : .apply(&mut walreceiver_connect_timeout);
785 1 : patch
786 1 : .lagging_wal_timeout
787 1 : .map(|v| humantime::parse_duration(&v))?
788 1 : .apply(&mut lagging_wal_timeout);
789 1 : patch.max_lsn_wal_lag.apply(&mut max_lsn_wal_lag);
790 1 : patch.eviction_policy.apply(&mut eviction_policy);
791 1 : patch
792 1 : .min_resident_size_override
793 1 : .apply(&mut min_resident_size_override);
794 1 : patch
795 1 : .evictions_low_residence_duration_metric_threshold
796 1 : .map(|v| humantime::parse_duration(&v))?
797 1 : .apply(&mut evictions_low_residence_duration_metric_threshold);
798 1 : patch
799 1 : .heatmap_period
800 1 : .map(|v| humantime::parse_duration(&v))?
801 1 : .apply(&mut heatmap_period);
802 1 : patch.lazy_slru_download.apply(&mut lazy_slru_download);
803 1 : patch
804 1 : .timeline_get_throttle
805 1 : .apply(&mut timeline_get_throttle);
806 1 : patch
807 1 : .image_layer_creation_check_threshold
808 1 : .apply(&mut image_layer_creation_check_threshold);
809 1 : patch
810 1 : .image_creation_preempt_threshold
811 1 : .apply(&mut image_creation_preempt_threshold);
812 1 : patch
813 1 : .lsn_lease_length
814 1 : .map(|v| humantime::parse_duration(&v))?
815 1 : .apply(&mut lsn_lease_length);
816 1 : patch
817 1 : .lsn_lease_length_for_ts
818 1 : .map(|v| humantime::parse_duration(&v))?
819 1 : .apply(&mut lsn_lease_length_for_ts);
820 1 : patch.timeline_offloading.apply(&mut timeline_offloading);
821 1 : patch
822 1 : .wal_receiver_protocol_override
823 1 : .apply(&mut wal_receiver_protocol_override);
824 1 : patch.rel_size_v2_enabled.apply(&mut rel_size_v2_enabled);
825 1 : patch
826 1 : .gc_compaction_enabled
827 1 : .apply(&mut gc_compaction_enabled);
828 1 : patch
829 1 : .gc_compaction_initial_threshold_kb
830 1 : .apply(&mut gc_compaction_initial_threshold_kb);
831 1 : patch
832 1 : .gc_compaction_ratio_percent
833 1 : .apply(&mut gc_compaction_ratio_percent);
834 1 :
835 1 : Ok(Self {
836 1 : checkpoint_distance,
837 1 : checkpoint_timeout,
838 1 : compaction_target_size,
839 1 : compaction_period,
840 1 : compaction_threshold,
841 1 : compaction_upper_limit,
842 1 : compaction_algorithm,
843 1 : compaction_l0_first,
844 1 : compaction_l0_semaphore,
845 1 : l0_flush_delay_threshold,
846 1 : l0_flush_stall_threshold,
847 1 : l0_flush_wait_upload,
848 1 : gc_horizon,
849 1 : gc_period,
850 1 : image_creation_threshold,
851 1 : pitr_interval,
852 1 : walreceiver_connect_timeout,
853 1 : lagging_wal_timeout,
854 1 : max_lsn_wal_lag,
855 1 : eviction_policy,
856 1 : min_resident_size_override,
857 1 : evictions_low_residence_duration_metric_threshold,
858 1 : heatmap_period,
859 1 : lazy_slru_download,
860 1 : timeline_get_throttle,
861 1 : image_layer_creation_check_threshold,
862 1 : image_creation_preempt_threshold,
863 1 : lsn_lease_length,
864 1 : lsn_lease_length_for_ts,
865 1 : timeline_offloading,
866 1 : wal_receiver_protocol_override,
867 1 : rel_size_v2_enabled,
868 1 : gc_compaction_enabled,
869 1 : gc_compaction_initial_threshold_kb,
870 1 : gc_compaction_ratio_percent,
871 1 : })
872 1 : }
873 :
874 0 : pub fn merge(
875 0 : &self,
876 0 : global_conf: crate::config::TenantConfigToml,
877 0 : ) -> crate::config::TenantConfigToml {
878 0 : crate::config::TenantConfigToml {
879 0 : checkpoint_distance: self
880 0 : .checkpoint_distance
881 0 : .unwrap_or(global_conf.checkpoint_distance),
882 0 : checkpoint_timeout: self
883 0 : .checkpoint_timeout
884 0 : .unwrap_or(global_conf.checkpoint_timeout),
885 0 : compaction_target_size: self
886 0 : .compaction_target_size
887 0 : .unwrap_or(global_conf.compaction_target_size),
888 0 : compaction_period: self
889 0 : .compaction_period
890 0 : .unwrap_or(global_conf.compaction_period),
891 0 : compaction_threshold: self
892 0 : .compaction_threshold
893 0 : .unwrap_or(global_conf.compaction_threshold),
894 0 : compaction_upper_limit: self
895 0 : .compaction_upper_limit
896 0 : .unwrap_or(global_conf.compaction_upper_limit),
897 0 : compaction_algorithm: self
898 0 : .compaction_algorithm
899 0 : .as_ref()
900 0 : .unwrap_or(&global_conf.compaction_algorithm)
901 0 : .clone(),
902 0 : compaction_l0_first: self
903 0 : .compaction_l0_first
904 0 : .unwrap_or(global_conf.compaction_l0_first),
905 0 : compaction_l0_semaphore: self
906 0 : .compaction_l0_semaphore
907 0 : .unwrap_or(global_conf.compaction_l0_semaphore),
908 0 : l0_flush_delay_threshold: self
909 0 : .l0_flush_delay_threshold
910 0 : .or(global_conf.l0_flush_delay_threshold),
911 0 : l0_flush_stall_threshold: self
912 0 : .l0_flush_stall_threshold
913 0 : .or(global_conf.l0_flush_stall_threshold),
914 0 : l0_flush_wait_upload: self
915 0 : .l0_flush_wait_upload
916 0 : .unwrap_or(global_conf.l0_flush_wait_upload),
917 0 : gc_horizon: self.gc_horizon.unwrap_or(global_conf.gc_horizon),
918 0 : gc_period: self.gc_period.unwrap_or(global_conf.gc_period),
919 0 : image_creation_threshold: self
920 0 : .image_creation_threshold
921 0 : .unwrap_or(global_conf.image_creation_threshold),
922 0 : pitr_interval: self.pitr_interval.unwrap_or(global_conf.pitr_interval),
923 0 : walreceiver_connect_timeout: self
924 0 : .walreceiver_connect_timeout
925 0 : .unwrap_or(global_conf.walreceiver_connect_timeout),
926 0 : lagging_wal_timeout: self
927 0 : .lagging_wal_timeout
928 0 : .unwrap_or(global_conf.lagging_wal_timeout),
929 0 : max_lsn_wal_lag: self.max_lsn_wal_lag.unwrap_or(global_conf.max_lsn_wal_lag),
930 0 : eviction_policy: self.eviction_policy.unwrap_or(global_conf.eviction_policy),
931 0 : min_resident_size_override: self
932 0 : .min_resident_size_override
933 0 : .or(global_conf.min_resident_size_override),
934 0 : evictions_low_residence_duration_metric_threshold: self
935 0 : .evictions_low_residence_duration_metric_threshold
936 0 : .unwrap_or(global_conf.evictions_low_residence_duration_metric_threshold),
937 0 : heatmap_period: self.heatmap_period.unwrap_or(global_conf.heatmap_period),
938 0 : lazy_slru_download: self
939 0 : .lazy_slru_download
940 0 : .unwrap_or(global_conf.lazy_slru_download),
941 0 : timeline_get_throttle: self
942 0 : .timeline_get_throttle
943 0 : .clone()
944 0 : .unwrap_or(global_conf.timeline_get_throttle),
945 0 : image_layer_creation_check_threshold: self
946 0 : .image_layer_creation_check_threshold
947 0 : .unwrap_or(global_conf.image_layer_creation_check_threshold),
948 0 : image_creation_preempt_threshold: self
949 0 : .image_creation_preempt_threshold
950 0 : .unwrap_or(global_conf.image_creation_preempt_threshold),
951 0 : lsn_lease_length: self
952 0 : .lsn_lease_length
953 0 : .unwrap_or(global_conf.lsn_lease_length),
954 0 : lsn_lease_length_for_ts: self
955 0 : .lsn_lease_length_for_ts
956 0 : .unwrap_or(global_conf.lsn_lease_length_for_ts),
957 0 : timeline_offloading: self
958 0 : .timeline_offloading
959 0 : .unwrap_or(global_conf.timeline_offloading),
960 0 : wal_receiver_protocol_override: self
961 0 : .wal_receiver_protocol_override
962 0 : .or(global_conf.wal_receiver_protocol_override),
963 0 : rel_size_v2_enabled: self
964 0 : .rel_size_v2_enabled
965 0 : .unwrap_or(global_conf.rel_size_v2_enabled),
966 0 : gc_compaction_enabled: self
967 0 : .gc_compaction_enabled
968 0 : .unwrap_or(global_conf.gc_compaction_enabled),
969 0 : gc_compaction_initial_threshold_kb: self
970 0 : .gc_compaction_initial_threshold_kb
971 0 : .unwrap_or(global_conf.gc_compaction_initial_threshold_kb),
972 0 : gc_compaction_ratio_percent: self
973 0 : .gc_compaction_ratio_percent
974 0 : .unwrap_or(global_conf.gc_compaction_ratio_percent),
975 0 : }
976 0 : }
977 : }
978 :
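// Illustrative sketch (not from the original source): applying a JSON-derived
// `TenantConfigPatch` on top of an existing `TenantConfig`. Duration-typed
// fields arrive in the patch as humantime strings and are parsed by
// `apply_patch`.
#[cfg(test)]
mod tenant_config_patch_examples {
    use super::*;

    #[test]
    fn apply_patch_upserts_and_removes() {
        let base = TenantConfig {
            gc_horizon: Some(1024),
            compaction_threshold: Some(10),
            ..TenantConfig::default()
        };
        let patch: TenantConfigPatch = serde_json::from_value(serde_json::json!({
            "gc_horizon": null,   // Remove
            "pitr_interval": "1h" // Upsert, parsed via humantime
        }))
        .unwrap();
        let updated = base.apply_patch(patch).unwrap();
        assert_eq!(updated.gc_horizon, None);
        assert_eq!(updated.pitr_interval, Some(Duration::from_secs(3600)));
        // Fields the patch does not mention keep their previous values.
        assert_eq!(updated.compaction_threshold, Some(10));
    }
}
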
979 : /// The policy for the aux file storage.
980 : ///
981 : /// It can be switched through `switch_aux_file_policy` tenant config.
982 : /// When the first aux file is written, the policy is persisted in the
983 : /// `index_part.json` file and has a limited migration path.
984 : ///
985 : /// Currently, we only allow the following migration path:
986 : ///
987 : /// Unset -> V1
988 : /// -> V2
989 : /// -> CrossValidation -> V2
990 : #[derive(
991 : Eq,
992 : PartialEq,
993 : Debug,
994 : Copy,
995 : Clone,
996 0 : strum_macros::EnumString,
997 : strum_macros::Display,
998 4 : serde_with::DeserializeFromStr,
999 : serde_with::SerializeDisplay,
1000 : )]
1001 : #[strum(serialize_all = "kebab-case")]
1002 : pub enum AuxFilePolicy {
1003 : /// V1 aux file policy: store everything in AUX_FILE_KEY
1004 : #[strum(ascii_case_insensitive)]
1005 : V1,
1006 : /// V2 aux file policy: store in the AUX_FILE keyspace
1007 : #[strum(ascii_case_insensitive)]
1008 : V2,
1009 : /// Cross validation runs both formats on the write path and does validation
1010 : /// on the read path.
1011 : #[strum(ascii_case_insensitive)]
1012 : CrossValidation,
1013 : }
1014 :
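// Illustrative sketch (not from the original source): the strum derives above
// make parsing case-insensitive and render variants in kebab-case.
#[cfg(test)]
mod aux_file_policy_examples {
    use super::*;

    #[test]
    fn parse_and_display() {
        assert_eq!("v2".parse::<AuxFilePolicy>().unwrap(), AuxFilePolicy::V2);
        assert_eq!("V2".parse::<AuxFilePolicy>().unwrap(), AuxFilePolicy::V2);
        assert_eq!(
            AuxFilePolicy::CrossValidation.to_string(),
            "cross-validation"
        );
    }
}
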
1015 0 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
1016 : #[serde(tag = "kind")]
1017 : pub enum EvictionPolicy {
1018 : NoEviction,
1019 : LayerAccessThreshold(EvictionPolicyLayerAccessThreshold),
1020 : OnlyImitiate(EvictionPolicyLayerAccessThreshold),
1021 : }
1022 :
1023 : impl EvictionPolicy {
1024 0 : pub fn discriminant_str(&self) -> &'static str {
1025 0 : match self {
1026 0 : EvictionPolicy::NoEviction => "NoEviction",
1027 0 : EvictionPolicy::LayerAccessThreshold(_) => "LayerAccessThreshold",
1028 0 : EvictionPolicy::OnlyImitiate(_) => "OnlyImitiate",
1029 : }
1030 0 : }
1031 : }
1032 :
1033 : #[derive(
1034 : Eq,
1035 : PartialEq,
1036 : Debug,
1037 : Copy,
1038 : Clone,
1039 0 : strum_macros::EnumString,
1040 : strum_macros::Display,
1041 0 : serde_with::DeserializeFromStr,
1042 : serde_with::SerializeDisplay,
1043 : )]
1044 : #[strum(serialize_all = "kebab-case")]
1045 : pub enum CompactionAlgorithm {
1046 : Legacy,
1047 : Tiered,
1048 : }
1049 :
1050 : #[derive(
1051 4 : Debug, Clone, Copy, PartialEq, Eq, serde_with::DeserializeFromStr, serde_with::SerializeDisplay,
1052 : )]
1053 : pub enum ImageCompressionAlgorithm {
1054 : // Disabled for writes, support decompressing during read path
1055 : Disabled,
1056 :     /// Zstandard compression. Level 0 and None both mean the default level. Levels can be negative as well.
1057 : /// For details, see the [manual](http://facebook.github.io/zstd/zstd_manual.html).
1058 : Zstd {
1059 : level: Option<i8>,
1060 : },
1061 : }
1062 :
1063 : impl FromStr for ImageCompressionAlgorithm {
1064 : type Err = anyhow::Error;
1065 8 : fn from_str(s: &str) -> Result<Self, Self::Err> {
1066 8 : let mut components = s.split(['(', ')']);
1067 8 : let first = components
1068 8 : .next()
1069 8 : .ok_or_else(|| anyhow::anyhow!("empty string"))?;
1070 8 : match first {
1071 8 : "disabled" => Ok(ImageCompressionAlgorithm::Disabled),
1072 6 : "zstd" => {
1073 6 : let level = if let Some(v) = components.next() {
1074 4 : let v: i8 = v.parse()?;
1075 4 : Some(v)
1076 : } else {
1077 2 : None
1078 : };
1079 :
1080 6 : Ok(ImageCompressionAlgorithm::Zstd { level })
1081 : }
1082 0 : _ => anyhow::bail!("invalid specifier '{first}'"),
1083 : }
1084 8 : }
1085 : }
1086 :
1087 : impl Display for ImageCompressionAlgorithm {
1088 12 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1089 12 : match self {
1090 3 : ImageCompressionAlgorithm::Disabled => write!(f, "disabled"),
1091 9 : ImageCompressionAlgorithm::Zstd { level } => {
1092 9 : if let Some(level) = level {
1093 6 : write!(f, "zstd({})", level)
1094 : } else {
1095 3 : write!(f, "zstd")
1096 : }
1097 : }
1098 : }
1099 12 : }
1100 : }
1101 :
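// Illustrative sketch (not from the original source): the textual forms accepted
// and produced by `ImageCompressionAlgorithm`.
#[cfg(test)]
mod image_compression_examples {
    use super::*;

    #[test]
    fn parse_and_display_roundtrip() {
        assert!(matches!(
            "disabled".parse::<ImageCompressionAlgorithm>(),
            Ok(ImageCompressionAlgorithm::Disabled)
        ));
        // "zstd" without a level leaves the level unset (library default).
        assert_eq!(
            "zstd".parse::<ImageCompressionAlgorithm>().unwrap(),
            ImageCompressionAlgorithm::Zstd { level: None }
        );
        // An explicit, possibly negative, level round-trips as "zstd(N)".
        let alg = "zstd(-5)".parse::<ImageCompressionAlgorithm>().unwrap();
        assert_eq!(alg, ImageCompressionAlgorithm::Zstd { level: Some(-5) });
        assert_eq!(alg.to_string(), "zstd(-5)");
    }
}
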
1102 0 : #[derive(Eq, PartialEq, Debug, Clone, Serialize, Deserialize)]
1103 : pub struct CompactionAlgorithmSettings {
1104 : pub kind: CompactionAlgorithm,
1105 : }
1106 :
1107 8 : #[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
1108 : #[serde(tag = "mode", rename_all = "kebab-case", deny_unknown_fields)]
1109 : pub enum L0FlushConfig {
1110 : #[serde(rename_all = "snake_case")]
1111 : Direct { max_concurrency: NonZeroUsize },
1112 : }
1113 :
1114 0 : #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
1115 : pub struct EvictionPolicyLayerAccessThreshold {
1116 : #[serde(with = "humantime_serde")]
1117 : pub period: Duration,
1118 : #[serde(with = "humantime_serde")]
1119 : pub threshold: Duration,
1120 : }
1121 :
1122 6 : #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
1123 : pub struct ThrottleConfig {
1124 : /// See [`ThrottleConfigTaskKinds`] for why we do the serde `rename`.
1125 : #[serde(rename = "task_kinds")]
1126 : pub enabled: ThrottleConfigTaskKinds,
1127 : pub initial: u32,
1128 : #[serde(with = "humantime_serde")]
1129 : pub refill_interval: Duration,
1130 : pub refill_amount: NonZeroU32,
1131 : pub max: u32,
1132 : }
1133 :
1134 : /// Before <https://github.com/neondatabase/neon/pull/9962>
1135 : /// the throttle was applied per `Timeline::get`/`Timeline::get_vectored` call.
1136 : /// The `task_kinds` field controlled which Pageserver "Task Kind"s
1137 : /// were subject to the throttle.
1138 : ///
1139 : /// After that PR, the throttle is applied at pagestream request level
1140 : /// and the `task_kinds` field does not apply since the only task kind
1141 : /// that is subject to the throttle is that of the page service.
1142 : ///
1143 : /// However, we don't want to make a breaking config change right now
1144 : /// because it means we have to migrate all the tenant configs.
1145 : /// This will be done in a future PR.
1146 : ///
1147 : /// In the meantime, we use the emptiness / non-emptiness of the `task_kinds`
1148 : /// field to determine if the throttle is enabled or not.
1149 1 : #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
1150 : #[serde(transparent)]
1151 : pub struct ThrottleConfigTaskKinds(Vec<String>);
1152 :
1153 : impl ThrottleConfigTaskKinds {
1154 489 : pub fn disabled() -> Self {
1155 489 : Self(vec![])
1156 489 : }
1157 454 : pub fn is_enabled(&self) -> bool {
1158 454 : !self.0.is_empty()
1159 454 : }
1160 : }
1161 :
1162 : impl ThrottleConfig {
1163 489 : pub fn disabled() -> Self {
1164 489 : Self {
1165 489 : enabled: ThrottleConfigTaskKinds::disabled(),
1166 489 :             // other values don't matter with empty `task_kinds`.
1167 489 : initial: 0,
1168 489 : refill_interval: Duration::from_millis(1),
1169 489 : refill_amount: NonZeroU32::new(1).unwrap(),
1170 489 : max: 1,
1171 489 : }
1172 489 : }
1173 : /// The requests per second allowed by the given config.
1174 0 : pub fn steady_rps(&self) -> f64 {
1175 0 : (self.refill_amount.get() as f64) / (self.refill_interval.as_secs_f64())
1176 0 : }
1177 : }
1178 :
1179 : #[cfg(test)]
1180 : mod throttle_config_tests {
1181 : use super::*;
1182 :
1183 : #[test]
1184 1 : fn test_disabled_is_disabled() {
1185 1 : let config = ThrottleConfig::disabled();
1186 1 : assert!(!config.enabled.is_enabled());
1187 1 : }
1188 : #[test]
1189 1 : fn test_enabled_backwards_compat() {
1190 1 : let input = serde_json::json!({
1191 1 : "task_kinds": ["PageRequestHandler"],
1192 1 : "initial": 40000,
1193 1 : "refill_interval": "50ms",
1194 1 : "refill_amount": 1000,
1195 1 : "max": 40000,
1196 1 : "fair": true
1197 1 : });
1198 1 : let config: ThrottleConfig = serde_json::from_value(input).unwrap();
1199 1 : assert!(config.enabled.is_enabled());
1200 1 : }
1201 : }
1202 :
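// Illustrative sketch (not from the original source): the steady-state rate
// implied by a throttle config is refill_amount / refill_interval, e.g. 1000
// tokens every 50ms is 20_000 requests per second.
#[cfg(test)]
mod throttle_rate_examples {
    use super::*;

    #[test]
    fn steady_rps_from_refill_parameters() {
        let config = ThrottleConfig {
            enabled: ThrottleConfigTaskKinds(vec!["PageRequestHandler".to_string()]),
            initial: 0,
            refill_interval: Duration::from_millis(50),
            refill_amount: NonZeroU32::new(1000).unwrap(),
            max: 40000,
        };
        assert!(config.enabled.is_enabled());
        assert!((config.steady_rps() - 20_000.0).abs() < 1e-6);
    }
}
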
1203 : /// A flattened analog of a `pageserver::tenant::LocationMode`, which
1204 : /// lists out all possible states (and the virtual "Detached" state)
1205 : /// in a flat form rather than using rust-style enums.
1206 0 : #[derive(Serialize, Deserialize, Debug, Clone, Copy, Eq, PartialEq)]
1207 : pub enum LocationConfigMode {
1208 : AttachedSingle,
1209 : AttachedMulti,
1210 : AttachedStale,
1211 : Secondary,
1212 : Detached,
1213 : }
1214 :
1215 0 : #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
1216 : pub struct LocationConfigSecondary {
1217 : pub warm: bool,
1218 : }
1219 :
1220 : /// An alternative representation of `pageserver::tenant::LocationConf`,
1221 : /// for use in external-facing APIs.
1222 0 : #[derive(Serialize, Deserialize, Debug, Clone, Eq, PartialEq)]
1223 : pub struct LocationConfig {
1224 : pub mode: LocationConfigMode,
1225 : /// If attaching, in what generation?
1226 : #[serde(default)]
1227 : pub generation: Option<u32>,
1228 :
1229 : // If requesting mode `Secondary`, configuration for that.
1230 : #[serde(default)]
1231 : pub secondary_conf: Option<LocationConfigSecondary>,
1232 :
1233 : // Shard parameters: if shard_count is nonzero, then other shard_* fields
1234 : // must be set accurately.
1235 : #[serde(default)]
1236 : pub shard_number: u8,
1237 : #[serde(default)]
1238 : pub shard_count: u8,
1239 : #[serde(default)]
1240 : pub shard_stripe_size: u32,
1241 :
1242 : // This configuration only affects attached mode, but should be provided irrespective
1243 : // of the mode, as a secondary location might transition on startup if the response
1244 : // to the `/re-attach` control plane API requests it.
1245 : pub tenant_conf: TenantConfig,
1246 : }
1247 :
1248 0 : #[derive(Serialize, Deserialize)]
1249 : pub struct LocationConfigListResponse {
1250 : pub tenant_shards: Vec<(TenantShardId, Option<LocationConfig>)>,
1251 : }
1252 :
1253 : #[derive(Serialize)]
1254 : pub struct StatusResponse {
1255 : pub id: NodeId,
1256 : }
1257 :
1258 0 : #[derive(Serialize, Deserialize, Debug)]
1259 : #[serde(deny_unknown_fields)]
1260 : pub struct TenantLocationConfigRequest {
1261 : #[serde(flatten)]
1262 : pub config: LocationConfig, // as we have a flattened field, we should reject all unknown fields in it
1263 : }
1264 :
1265 0 : #[derive(Serialize, Deserialize, Debug)]
1266 : #[serde(deny_unknown_fields)]
1267 : pub struct TenantTimeTravelRequest {
1268 : pub shard_counts: Vec<ShardCount>,
1269 : }
1270 :
1271 0 : #[derive(Serialize, Deserialize, Debug)]
1272 : #[serde(deny_unknown_fields)]
1273 : pub struct TenantShardLocation {
1274 : pub shard_id: TenantShardId,
1275 : pub node_id: NodeId,
1276 : }
1277 :
1278 0 : #[derive(Serialize, Deserialize, Debug)]
1279 : #[serde(deny_unknown_fields)]
1280 : pub struct TenantLocationConfigResponse {
1281 : pub shards: Vec<TenantShardLocation>,
1282 :     // If the shards' ShardCount is >1, stripe_size will be set.
1283 : pub stripe_size: Option<ShardStripeSize>,
1284 : }
1285 :
1286 2 : #[derive(Serialize, Deserialize, Debug)]
1287 : #[serde(deny_unknown_fields)]
1288 : pub struct TenantConfigRequest {
1289 : pub tenant_id: TenantId,
1290 : #[serde(flatten)]
1291 : pub config: TenantConfig, // as we have a flattened field, we should reject all unknown fields in it
1292 : }
1293 :
1294 : impl std::ops::Deref for TenantConfigRequest {
1295 : type Target = TenantConfig;
1296 :
1297 0 : fn deref(&self) -> &Self::Target {
1298 0 : &self.config
1299 0 : }
1300 : }
1301 :
1302 : impl TenantConfigRequest {
1303 0 : pub fn new(tenant_id: TenantId) -> TenantConfigRequest {
1304 0 : let config = TenantConfig::default();
1305 0 : TenantConfigRequest { tenant_id, config }
1306 0 : }
1307 : }
1308 :
1309 3 : #[derive(Serialize, Deserialize, Debug)]
1310 : #[serde(deny_unknown_fields)]
1311 : pub struct TenantConfigPatchRequest {
1312 : pub tenant_id: TenantId,
1313 : #[serde(flatten)]
1314 : pub config: TenantConfigPatch, // as we have a flattened field, we should reject all unknown fields in it
1315 : }
1316 :
1317 0 : #[derive(Serialize, Deserialize, Debug)]
1318 : pub struct TenantWaitLsnRequest {
1319 : #[serde(flatten)]
1320 : pub timelines: HashMap<TimelineId, Lsn>,
1321 : pub timeout: Duration,
1322 : }
1323 :
1324 : /// See [`TenantState::attachment_status`] and the OpenAPI docs for context.
1325 0 : #[derive(Serialize, Deserialize, Clone)]
1326 : #[serde(tag = "slug", content = "data", rename_all = "snake_case")]
1327 : pub enum TenantAttachmentStatus {
1328 : Maybe,
1329 : Attached,
1330 : Failed { reason: String },
1331 : }
1332 :
1333 0 : #[derive(Serialize, Deserialize, Clone)]
1334 : pub struct TenantInfo {
1335 : pub id: TenantShardId,
1336 : // NB: intentionally not part of OpenAPI, we don't want to commit to a specific set of TenantState's
1337 : pub state: TenantState,
1338 : /// Sum of the size of all layer files.
1339 : /// If a layer is present in both local FS and S3, it counts only once.
1340 : pub current_physical_size: Option<u64>, // physical size is only included in `tenant_status` endpoint
1341 : pub attachment_status: TenantAttachmentStatus,
1342 : pub generation: u32,
1343 :
1344 : /// Opaque explanation if gc is being blocked.
1345 : ///
1346 : /// Only looked up for the individual tenant detail, not the listing.
1347 : #[serde(skip_serializing_if = "Option::is_none")]
1348 : pub gc_blocking: Option<String>,
1349 : }
1350 :
1351 0 : #[derive(Serialize, Deserialize, Clone)]
1352 : pub struct TenantDetails {
1353 : #[serde(flatten)]
1354 : pub tenant_info: TenantInfo,
1355 :
1356 : pub walredo: Option<WalRedoManagerStatus>,
1357 :
1358 : pub timelines: Vec<TimelineId>,
1359 : }
1360 :
1361 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Copy, Debug)]
1362 : pub enum TimelineArchivalState {
1363 : Archived,
1364 : Unarchived,
1365 : }
1366 :
1367 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
1368 : pub enum TimelineVisibilityState {
1369 : Visible,
1370 : Invisible,
1371 : }
1372 :
1373 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
1374 : pub struct TimelineArchivalConfigRequest {
1375 : pub state: TimelineArchivalState,
1376 : }
1377 :
1378 0 : #[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
1379 : pub struct TimelinePatchIndexPartRequest {
1380 : pub rel_size_migration: Option<RelSizeMigration>,
1381 : pub gc_compaction_last_completed_lsn: Option<Lsn>,
1382 : pub applied_gc_cutoff_lsn: Option<Lsn>,
1383 : #[serde(default)]
1384 : pub force_index_update: bool,
1385 : }
1386 :
1387 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1388 : pub struct TimelinesInfoAndOffloaded {
1389 : pub timelines: Vec<TimelineInfo>,
1390 : pub offloaded: Vec<OffloadedTimelineInfo>,
1391 : }
1392 :
1393 : /// Analog of [`TimelineInfo`] for offloaded timelines.
1394 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1395 : pub struct OffloadedTimelineInfo {
1396 : pub tenant_id: TenantShardId,
1397 : pub timeline_id: TimelineId,
1398 : /// Whether the timeline has a parent it has been branched off from or not
1399 : pub ancestor_timeline_id: Option<TimelineId>,
1400 : /// Whether to retain the branch lsn at the ancestor or not
1401 : pub ancestor_retain_lsn: Option<Lsn>,
1402 : /// The time point when the timeline was archived
1403 : pub archived_at: chrono::DateTime<chrono::Utc>,
1404 : }
1405 :
1406 16 : #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
1407 : #[serde(rename_all = "camelCase")]
1408 : pub enum RelSizeMigration {
1409 : /// The tenant is using the old rel_size format.
1410 : /// Note that this enum is persisted as `Option<RelSizeMigration>` in the index part, so
1411 : /// `None` is the same as `Some(RelSizeMigration::Legacy)`.
1412 : Legacy,
1413 : /// The tenant is migrating to the new rel_size format. Both old and new rel_size format are
1414 : /// persisted in the index part. The read path will read both formats and merge them.
1415 : Migrating,
1416 : /// The tenant has migrated to the new rel_size format. Only the new rel_size format is persisted
1417 : /// in the index part, and the read path will not read the old format.
1418 : Migrated,
1419 : }
1420 :
1421 : /// This represents the output of the "timeline_detail" and "timeline_list" API calls.
1422 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1423 : pub struct TimelineInfo {
1424 : pub tenant_id: TenantShardId,
1425 : pub timeline_id: TimelineId,
1426 :
1427 : pub ancestor_timeline_id: Option<TimelineId>,
1428 : pub ancestor_lsn: Option<Lsn>,
1429 : pub last_record_lsn: Lsn,
1430 : pub prev_record_lsn: Option<Lsn>,
1431 :
1432 :     /// Legacy field, retained for one version to enable the old storage controller to
1433 : /// decode (it was a mandatory field).
1434 : #[serde(default, rename = "latest_gc_cutoff_lsn")]
1435 : pub _unused: Lsn,
1436 :
1437 : /// The LSN up to which GC has advanced: older data may still exist but it is not available for clients.
1438 : /// This LSN is not suitable for deciding where to create branches etc: use [`TimelineInfo::min_readable_lsn`] instead,
1439 : /// as it is easier to reason about.
1440 : #[serde(default)]
1441 : pub applied_gc_cutoff_lsn: Lsn,
1442 :
1443 :     /// The upper bound of data which is either already GC'ed, or eligible to be GC'ed at any time based on PITR interval.
1444 : /// This LSN represents the "end of history" for this timeline, and callers should use it to figure out the oldest
1445 : /// LSN at which it is legal to create a branch or ephemeral endpoint.
1446 : ///
1447 : /// Note that holders of valid LSN leases may be able to create branches and read pages earlier
1448 : /// than this LSN, but new leases may not be taken out earlier than this LSN.
1449 : #[serde(default)]
1450 : pub min_readable_lsn: Lsn,
1451 :
1452 : pub disk_consistent_lsn: Lsn,
1453 :
1454 :     /// The LSN that we have successfully uploaded to remote storage
1455 : pub remote_consistent_lsn: Lsn,
1456 :
1457 :     /// The LSN that we are advertising to safekeepers
1458 : pub remote_consistent_lsn_visible: Lsn,
1459 :
1460 : /// The LSN from the start of the root timeline (never changes)
1461 : pub initdb_lsn: Lsn,
1462 :
1463 : pub current_logical_size: u64,
1464 : pub current_logical_size_is_accurate: bool,
1465 :
1466 : pub directory_entries_counts: Vec<u64>,
1467 :
1468 : /// Sum of the size of all layer files.
1469 : /// If a layer is present in both local FS and S3, it counts only once.
1470 : pub current_physical_size: Option<u64>, // is None when timeline is Unloaded
1471 : pub current_logical_size_non_incremental: Option<u64>,
1472 :
1473 : /// How many bytes of WAL are within this branch's pitr_interval. If the pitr_interval goes
1474 : /// beyond the branch's branch point, we only count up to the branch point.
1475 : pub pitr_history_size: u64,
1476 :
1477 : /// Whether this branch's branch point is within its ancestor's PITR interval (i.e. any
1478 : /// ancestor data used by this branch would have been retained anyway). If this is false, then
1479 : /// this branch may be imposing a cost on the ancestor by causing it to retain layers that it would
1480 : /// otherwise be able to GC.
1481 : pub within_ancestor_pitr: bool,
1482 :
1483 : pub timeline_dir_layer_file_size_sum: Option<u64>,
1484 :
1485 : pub wal_source_connstr: Option<String>,
1486 : pub last_received_msg_lsn: Option<Lsn>,
1487 : /// the timestamp (in microseconds) of the last received message
1488 : pub last_received_msg_ts: Option<u128>,
1489 : pub pg_version: u32,
1490 :
1491 : pub state: TimelineState,
1492 :
1493 : pub walreceiver_status: String,
1494 :
1495 : // ALWAYS add new fields at the end of the struct with `Option` to ensure forward/backward compatibility.
1496 : // Backward compatibility: a reader may receive JSON that does not contain the newly-added field.
1497 : // Forward compatibility: a previous version of the pageserver may receive JSON that does contain it.
1498 : // serde::Deserialize does not deny unknown fields by default, so it is safe to set the field to some
1499 : // value; the older version simply won't read it.
1500 : /// Whether the timeline is archived.
1501 : pub is_archived: Option<bool>,
1502 :
1503 : /// The status of the rel_size migration.
1504 : pub rel_size_migration: Option<RelSizeMigration>,
1505 :
1506 : /// Whether the timeline is invisible in synthetic size calculations.
1507 : pub is_invisible: Option<bool>,
1508 : }
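// A minimal illustrative sketch (the helper name is hypothetical, not part of this crate):
// how a caller could use the LSN fields above to check that a proposed branch point is
// still readable on this timeline.
#[allow(dead_code)]
fn example_is_valid_branch_point(info: &TimelineInfo, lsn: Lsn) -> bool {
    // The oldest legal branch point is `min_readable_lsn`; nothing newer than
    // `last_record_lsn` has been ingested yet.
    lsn >= info.min_readable_lsn && lsn <= info.last_record_lsn
}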
1509 :
1510 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1511 : pub struct LayerMapInfo {
1512 : pub in_memory_layers: Vec<InMemoryLayerInfo>,
1513 : pub historic_layers: Vec<HistoricLayerInfo>,
1514 : }
1515 :
1516 : /// The residence status of a layer
1517 0 : #[derive(Debug, Clone, Copy, Serialize, Deserialize)]
1518 : pub enum LayerResidenceStatus {
1519 : /// Residence status for a layer file that exists locally.
1520 : /// It may also exist on the remote; we don't care here.
1521 : Resident,
1522 : /// Residence status for a layer file that only exists on the remote.
1523 : Evicted,
1524 : }
1525 :
1526 : #[serde_as]
1527 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1528 : pub struct LayerAccessStats {
1529 : #[serde_as(as = "serde_with::TimestampMilliSeconds")]
1530 : pub access_time: SystemTime,
1531 :
1532 : #[serde_as(as = "serde_with::TimestampMilliSeconds")]
1533 : pub residence_time: SystemTime,
1534 :
1535 : pub visible: bool,
1536 : }
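// Note: with the `serde_as` annotations above, `access_time` and `residence_time` serialize
// as integer milliseconds since the Unix epoch, so a serialized value looks roughly like
// (illustrative numbers only):
//
//     {"access_time": 1700000000000, "residence_time": 1699990000000, "visible": true}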
1537 :
1538 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1539 : #[serde(tag = "kind")]
1540 : pub enum InMemoryLayerInfo {
1541 : Open { lsn_start: Lsn },
1542 : Frozen { lsn_start: Lsn, lsn_end: Lsn },
1543 : }
1544 :
1545 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1546 : #[serde(tag = "kind")]
1547 : pub enum HistoricLayerInfo {
1548 : Delta {
1549 : layer_file_name: String,
1550 : layer_file_size: u64,
1551 :
1552 : lsn_start: Lsn,
1553 : lsn_end: Lsn,
1554 : remote: bool,
1555 : access_stats: LayerAccessStats,
1556 :
1557 : l0: bool,
1558 : },
1559 : Image {
1560 : layer_file_name: String,
1561 : layer_file_size: u64,
1562 :
1563 : lsn_start: Lsn,
1564 : remote: bool,
1565 : access_stats: LayerAccessStats,
1566 : },
1567 : }
1568 :
1569 : impl HistoricLayerInfo {
1570 0 : pub fn layer_file_name(&self) -> &str {
1571 0 : match self {
1572 : HistoricLayerInfo::Delta {
1573 0 : layer_file_name, ..
1574 0 : } => layer_file_name,
1575 : HistoricLayerInfo::Image {
1576 0 : layer_file_name, ..
1577 0 : } => layer_file_name,
1578 : }
1579 0 : }
1580 0 : pub fn is_remote(&self) -> bool {
1581 0 : match self {
1582 0 : HistoricLayerInfo::Delta { remote, .. } => *remote,
1583 0 : HistoricLayerInfo::Image { remote, .. } => *remote,
1584 : }
1585 0 : }
1586 0 : pub fn set_remote(&mut self, value: bool) {
1587 0 : let field = match self {
1588 0 : HistoricLayerInfo::Delta { remote, .. } => remote,
1589 0 : HistoricLayerInfo::Image { remote, .. } => remote,
1590 : };
1591 0 : *field = value;
1592 0 : }
1593 0 : pub fn layer_file_size(&self) -> u64 {
1594 0 : match self {
1595 : HistoricLayerInfo::Delta {
1596 0 : layer_file_size, ..
1597 0 : } => *layer_file_size,
1598 : HistoricLayerInfo::Image {
1599 0 : layer_file_size, ..
1600 0 : } => *layer_file_size,
1601 : }
1602 0 : }
1603 : }
1604 :
1605 0 : #[derive(Debug, Serialize, Deserialize)]
1606 : pub struct DownloadRemoteLayersTaskSpawnRequest {
1607 : pub max_concurrent_downloads: NonZeroUsize,
1608 : }
1609 :
1610 0 : #[derive(Debug, Serialize, Deserialize)]
1611 : pub struct IngestAuxFilesRequest {
1612 : pub aux_files: HashMap<String, String>,
1613 : }
1614 :
1615 0 : #[derive(Debug, Serialize, Deserialize)]
1616 : pub struct ListAuxFilesRequest {
1617 : pub lsn: Lsn,
1618 : }
1619 :
1620 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1621 : pub struct DownloadRemoteLayersTaskInfo {
1622 : pub task_id: String,
1623 : pub state: DownloadRemoteLayersTaskState,
1624 : pub total_layer_count: u64, // stable once `completed`
1625 : pub successful_download_count: u64, // stable once `completed`
1626 : pub failed_download_count: u64, // stable once `completed`
1627 : }
1628 :
1629 0 : #[derive(Debug, Serialize, Deserialize, Clone)]
1630 : pub enum DownloadRemoteLayersTaskState {
1631 : Running,
1632 : Completed,
1633 : ShutDown,
1634 : }
1635 :
1636 0 : #[derive(Debug, Serialize, Deserialize)]
1637 : pub struct TimelineGcRequest {
1638 : pub gc_horizon: Option<u64>,
1639 : }
1640 :
1641 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1642 : pub struct WalRedoManagerProcessStatus {
1643 : pub pid: u32,
1644 : }
1645 :
1646 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1647 : pub struct WalRedoManagerStatus {
1648 : pub last_redo_at: Option<chrono::DateTime<chrono::Utc>>,
1649 : pub process: Option<WalRedoManagerProcessStatus>,
1650 : }
1651 :
1652 : /// The progress of a secondary tenant.
1653 : ///
1654 : /// It is mostly useful when doing a long-running download: e.g. initiating
1655 : /// a download job, timing out while waiting for it to run, and then inspecting this status to understand
1656 : /// what's happening (see the illustrative sketch after this struct).
1657 0 : #[derive(Default, Debug, Serialize, Deserialize, Clone)]
1658 : pub struct SecondaryProgress {
1659 : /// The remote storage LastModified time of the heatmap object we last downloaded.
1660 : pub heatmap_mtime: Option<serde_system_time::SystemTime>,
1661 :
1662 : /// The number of layers currently on-disk
1663 : pub layers_downloaded: usize,
1664 : /// The number of layers in the most recently seen heatmap
1665 : pub layers_total: usize,
1666 :
1667 : /// The number of layer bytes currently on-disk
1668 : pub bytes_downloaded: u64,
1669 : /// The number of layer bytes in the most recently seen heatmap
1670 : pub bytes_total: u64,
1671 : }
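// A minimal illustrative sketch (hypothetical helper, not part of this crate): deriving a
// completion fraction from the byte counters above while polling a long-running download.
#[allow(dead_code)]
fn example_download_fraction(progress: &SecondaryProgress) -> f64 {
    if progress.bytes_total == 0 {
        // Nothing to download according to the most recently seen heatmap.
        1.0
    } else {
        progress.bytes_downloaded as f64 / progress.bytes_total as f64
    }
}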
1672 :
1673 0 : #[derive(Serialize, Deserialize, Debug)]
1674 : pub struct TenantScanRemoteStorageShard {
1675 : pub tenant_shard_id: TenantShardId,
1676 : pub generation: Option<u32>,
1677 : }
1678 :
1679 0 : #[derive(Serialize, Deserialize, Debug, Default)]
1680 : pub struct TenantScanRemoteStorageResponse {
1681 : pub shards: Vec<TenantScanRemoteStorageShard>,
1682 : }
1683 :
1684 0 : #[derive(Serialize, Deserialize, Debug, Clone)]
1685 : #[serde(rename_all = "snake_case")]
1686 : pub enum TenantSorting {
1687 : /// Total size of layers on local disk for all timelines in a shard.
1688 : ResidentSize,
1689 : /// The logical size of the largest timeline within a _tenant_ (not shard). Only tracked on
1690 : /// shard 0; contains the sum across all shards.
1691 : MaxLogicalSize,
1692 : /// The logical size of the largest timeline within a _tenant_ (not shard), divided by the number of
1693 : /// shards. Only tracked on shard 0, and estimates the per-shard logical size.
1694 : MaxLogicalSizePerShard,
1695 : }
1696 :
1697 : impl Default for TenantSorting {
1698 0 : fn default() -> Self {
1699 0 : Self::ResidentSize
1700 0 : }
1701 : }
1702 :
1703 0 : #[derive(Serialize, Deserialize, Debug, Clone)]
1704 : pub struct TopTenantShardsRequest {
1705 : // How would you like to sort the tenants?
1706 : pub order_by: TenantSorting,
1707 :
1708 : // How many results?
1709 : pub limit: usize,
1710 :
1711 : // Omit tenants whose shard count is not less than this (e.g. set this to the max number of
1712 : // shards that the caller would ever split to)
1713 : pub where_shards_lt: Option<ShardCount>,
1714 :
1715 : // Omit tenants where the ordering metric is less than this (this is an optimization to
1716 : // let us quickly exclude numerous tiny shards)
1717 : pub where_gt: Option<u64>,
1718 : }
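// An illustrative sketch (arbitrary values, hypothetical helper): requesting the ten largest
// shards by resident size while skipping anything whose ordering metric is below 1 MiB.
#[allow(dead_code)]
fn example_top_tenants_request() -> TopTenantShardsRequest {
    TopTenantShardsRequest {
        order_by: TenantSorting::ResidentSize,
        limit: 10,
        where_shards_lt: None,
        // Quickly exclude numerous tiny shards from the result.
        where_gt: Some(1024 * 1024),
    }
}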
1719 :
1720 0 : #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
1721 : pub struct TopTenantShardItem {
1722 : pub id: TenantShardId,
1723 :
1724 : /// Total size of layers on local disk for all timelines in this shard.
1725 : pub resident_size: u64,
1726 :
1727 : /// Total size of layers in remote storage for all timelines in this shard.
1728 : pub physical_size: u64,
1729 :
1730 : /// The largest logical size of a timeline within this _tenant_ (not shard). This is only
1731 : /// tracked on shard 0, and contains the sum of the logical size across all shards.
1732 : pub max_logical_size: u64,
1733 :
1734 : /// The largest logical size of a timeline within this _tenant_ (not shard) divided by the number of
1735 : /// shards. This is only tracked on shard 0, and is only an estimate as we divide it evenly by
1736 : /// shard count, rounded up.
1737 : pub max_logical_size_per_shard: u64,
1738 : }
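// An illustrative sketch of the "divided evenly, rounded up" estimate described above
// (hypothetical helper, not necessarily the crate's actual implementation):
#[allow(dead_code)]
fn example_per_shard_estimate(max_logical_size: u64, shard_count: u64) -> u64 {
    // Ceiling division; assumes shard_count > 0.
    (max_logical_size + shard_count - 1) / shard_count
}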
1739 :
1740 0 : #[derive(Serialize, Deserialize, Debug, Default)]
1741 : pub struct TopTenantShardsResponse {
1742 : pub shards: Vec<TopTenantShardItem>,
1743 : }
1744 :
1745 : pub mod virtual_file {
1746 : #[derive(
1747 : Copy,
1748 : Clone,
1749 : PartialEq,
1750 : Eq,
1751 : Hash,
1752 0 : strum_macros::EnumString,
1753 : strum_macros::Display,
1754 0 : serde_with::DeserializeFromStr,
1755 : serde_with::SerializeDisplay,
1756 : Debug,
1757 : )]
1758 : #[strum(serialize_all = "kebab-case")]
1759 : pub enum IoEngineKind {
1760 : StdFs,
1761 : #[cfg(target_os = "linux")]
1762 : TokioEpollUring,
1763 : }
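    // An illustrative sketch (hypothetical helper): because of `EnumString` with kebab-case
    // serialization above, the engine can be parsed from its config spelling, e.g. "std-fs"
    // (or "tokio-epoll-uring" on Linux).
    #[allow(dead_code)]
    fn example_parse_io_engine(s: &str) -> Option<IoEngineKind> {
        s.parse::<IoEngineKind>().ok()
    }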
1764 :
1765 : /// Direct IO modes for a pageserver.
1766 : #[derive(
1767 : Copy,
1768 : Clone,
1769 : PartialEq,
1770 : Eq,
1771 : Hash,
1772 0 : strum_macros::EnumString,
1773 : strum_macros::Display,
1774 0 : serde_with::DeserializeFromStr,
1775 : serde_with::SerializeDisplay,
1776 : Debug,
1777 : )]
1778 : #[strum(serialize_all = "kebab-case")]
1779 : #[repr(u8)]
1780 : pub enum IoMode {
1781 : /// Uses buffered IO.
1782 : Buffered,
1783 : /// Uses direct IO; errors out if the operation fails.
1784 : #[cfg(target_os = "linux")]
1785 : Direct,
1786 : }
1787 :
1788 : impl IoMode {
1789 488 : pub const fn preferred() -> Self {
1790 488 : Self::Buffered
1791 488 : }
1792 : }
1793 :
1794 : impl TryFrom<u8> for IoMode {
1795 : type Error = u8;
1796 :
1797 5096 : fn try_from(value: u8) -> Result<Self, Self::Error> {
1798 5096 : Ok(match value {
1799 5096 : v if v == (IoMode::Buffered as u8) => IoMode::Buffered,
1800 : #[cfg(target_os = "linux")]
1801 0 : v if v == (IoMode::Direct as u8) => IoMode::Direct,
1802 0 : x => return Err(x),
1803 : })
1804 5096 : }
1805 : }
1806 : }
1807 :
1808 0 : #[derive(Debug, Clone, Serialize, Deserialize)]
1809 : pub struct ScanDisposableKeysResponse {
1810 : pub disposable_count: usize,
1811 : pub not_disposable_count: usize,
1812 : }
1813 :
1814 : // Wrapped in libpq CopyData
1815 : #[derive(PartialEq, Eq, Debug)]
1816 : pub enum PagestreamFeMessage {
1817 : Exists(PagestreamExistsRequest),
1818 : Nblocks(PagestreamNblocksRequest),
1819 : GetPage(PagestreamGetPageRequest),
1820 : DbSize(PagestreamDbSizeRequest),
1821 : GetSlruSegment(PagestreamGetSlruSegmentRequest),
1822 : #[cfg(feature = "testing")]
1823 : Test(PagestreamTestRequest),
1824 : }
1825 :
1826 : // Wrapped in libpq CopyData
1827 : #[derive(strum_macros::EnumProperty)]
1828 : pub enum PagestreamBeMessage {
1829 : Exists(PagestreamExistsResponse),
1830 : Nblocks(PagestreamNblocksResponse),
1831 : GetPage(PagestreamGetPageResponse),
1832 : Error(PagestreamErrorResponse),
1833 : DbSize(PagestreamDbSizeResponse),
1834 : GetSlruSegment(PagestreamGetSlruSegmentResponse),
1835 : #[cfg(feature = "testing")]
1836 : Test(PagestreamTestResponse),
1837 : }
1838 :
1839 : // Keep in sync with `pagestore_client.h`
1840 : #[repr(u8)]
1841 : enum PagestreamFeMessageTag {
1842 : Exists = 0,
1843 : Nblocks = 1,
1844 : GetPage = 2,
1845 : DbSize = 3,
1846 : GetSlruSegment = 4,
1847 : /* future tags above this line */
1848 : /// For testing purposes, not available in production.
1849 : #[cfg(feature = "testing")]
1850 : Test = 99,
1851 : }
1852 :
1853 : // Keep in sync with `pagestore_client.h`
1854 : #[repr(u8)]
1855 : enum PagestreamBeMessageTag {
1856 : Exists = 100,
1857 : Nblocks = 101,
1858 : GetPage = 102,
1859 : Error = 103,
1860 : DbSize = 104,
1861 : GetSlruSegment = 105,
1862 : /* future tags above this line */
1863 : /// For testing purposes, not available in production.
1864 : #[cfg(feature = "testing")]
1865 : Test = 199,
1866 : }
1867 :
1868 : impl TryFrom<u8> for PagestreamFeMessageTag {
1869 : type Error = u8;
1870 4 : fn try_from(value: u8) -> Result<Self, u8> {
1871 4 : match value {
1872 1 : 0 => Ok(PagestreamFeMessageTag::Exists),
1873 1 : 1 => Ok(PagestreamFeMessageTag::Nblocks),
1874 1 : 2 => Ok(PagestreamFeMessageTag::GetPage),
1875 1 : 3 => Ok(PagestreamFeMessageTag::DbSize),
1876 0 : 4 => Ok(PagestreamFeMessageTag::GetSlruSegment),
1877 : #[cfg(feature = "testing")]
1878 0 : 99 => Ok(PagestreamFeMessageTag::Test),
1879 0 : _ => Err(value),
1880 : }
1881 4 : }
1882 : }
1883 :
1884 : impl TryFrom<u8> for PagestreamBeMessageTag {
1885 : type Error = u8;
1886 0 : fn try_from(value: u8) -> Result<Self, u8> {
1887 0 : match value {
1888 0 : 100 => Ok(PagestreamBeMessageTag::Exists),
1889 0 : 101 => Ok(PagestreamBeMessageTag::Nblocks),
1890 0 : 102 => Ok(PagestreamBeMessageTag::GetPage),
1891 0 : 103 => Ok(PagestreamBeMessageTag::Error),
1892 0 : 104 => Ok(PagestreamBeMessageTag::DbSize),
1893 0 : 105 => Ok(PagestreamBeMessageTag::GetSlruSegment),
1894 : #[cfg(feature = "testing")]
1895 0 : 199 => Ok(PagestreamBeMessageTag::Test),
1896 0 : _ => Err(value),
1897 : }
1898 0 : }
1899 : }
1900 :
1901 : // A GetPage request contains two LSN values:
1902 : //
1903 : // request_lsn: Get the page version at this point in time. Lsn::Max is a special value that means
1904 : // "get the latest version present". It's used by the primary server, which knows that no one else
1905 : // is writing WAL. 'not_modified_since' must be set to a proper value even if request_lsn is
1906 : // Lsn::Max. Standby servers use the current replay LSN as the request LSN.
1907 : //
1908 : // not_modified_since: Hint to the pageserver that the client knows that the page has not been
1909 : // modified between 'not_modified_since' and the request LSN. It's always correct to set
1910 : // 'not_modified_since' equal to 'request_lsn' (unless Lsn::Max is used as the 'request_lsn'), but
1911 : // passing an earlier LSN can speed up the request by allowing the pageserver to process the
1912 : // request without waiting for 'request_lsn' to arrive.
1913 : //
1914 : // The now-defunct V1 interface contained only one LSN, and a boolean 'latest' flag. The V1 interface was
1915 : // sufficient for the primary; the 'lsn' was equivalent to the 'not_modified_since' value, and
1916 : // 'latest' was set to true. The V2 interface was added because there was no correct way for a
1917 : // standby to request a page at a particular non-latest LSN, and also include the
1918 : // 'not_modified_since' hint. That led to an awkward choice of either using an old LSN in the
1919 : // request, if the standby knows that the page hasn't been modified since, and risk getting an error
1920 : // if that LSN has fallen behind the GC horizon, or requesting the current replay LSN, which could
1921 : // require the pageserver unnecessarily to wait for the WAL to arrive up to that point. The new V2
1922 : // interface allows sending both LSNs, and let the pageserver do the right thing. There was no
1923 : // difference in the responses between V1 and V2.
1924 : //
1925 : // The V3 protocol adds a request ID to every request. The request ID, along with the other request
1926 : // fields, is echoed back in the response, which lets the client verify that it received the response
1927 : // for its own request. Copying the request fields makes this check more reliable: the request ID is
1928 : // formed from the process ID and a local counter, so request IDs can in principle repeat if a PID is reused.
1929 : //
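// For example (illustrative values only): a primary that wants the latest version of a page,
// and knows the page has not been modified since LSN 0x10, would send a GetPage request
// shaped like this:
//
//   PagestreamGetPageRequest {
//       hdr: PagestreamRequest {
//           reqid: 1,
//           request_lsn: Lsn(u64::MAX),     // "Lsn::Max": return the latest version present
//           not_modified_since: Lsn(0x10),  // hint: the page is unchanged since this LSN
//       },
//       rel: RelTag { spcnode: 1663, dbnode: 16384, relnode: 16385, forknum: 0 },
//       blkno: 7,
//   }
//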
1930 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
1931 : pub enum PagestreamProtocolVersion {
1932 : V2,
1933 : V3,
1934 : }
1935 :
1936 : pub type RequestId = u64;
1937 :
1938 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
1939 : pub struct PagestreamRequest {
1940 : pub reqid: RequestId,
1941 : pub request_lsn: Lsn,
1942 : pub not_modified_since: Lsn,
1943 : }
1944 :
1945 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
1946 : pub struct PagestreamExistsRequest {
1947 : pub hdr: PagestreamRequest,
1948 : pub rel: RelTag,
1949 : }
1950 :
1951 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
1952 : pub struct PagestreamNblocksRequest {
1953 : pub hdr: PagestreamRequest,
1954 : pub rel: RelTag,
1955 : }
1956 :
1957 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
1958 : pub struct PagestreamGetPageRequest {
1959 : pub hdr: PagestreamRequest,
1960 : pub rel: RelTag,
1961 : pub blkno: u32,
1962 : }
1963 :
1964 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
1965 : pub struct PagestreamDbSizeRequest {
1966 : pub hdr: PagestreamRequest,
1967 : pub dbnode: u32,
1968 : }
1969 :
1970 : #[derive(Debug, PartialEq, Eq, Clone, Copy)]
1971 : pub struct PagestreamGetSlruSegmentRequest {
1972 : pub hdr: PagestreamRequest,
1973 : pub kind: u8,
1974 : pub segno: u32,
1975 : }
1976 :
1977 : #[derive(Debug)]
1978 : pub struct PagestreamExistsResponse {
1979 : pub req: PagestreamExistsRequest,
1980 : pub exists: bool,
1981 : }
1982 :
1983 : #[derive(Debug)]
1984 : pub struct PagestreamNblocksResponse {
1985 : pub req: PagestreamNblocksRequest,
1986 : pub n_blocks: u32,
1987 : }
1988 :
1989 : #[derive(Debug)]
1990 : pub struct PagestreamGetPageResponse {
1991 : pub req: PagestreamGetPageRequest,
1992 : pub page: Bytes,
1993 : }
1994 :
1995 : #[derive(Debug)]
1996 : pub struct PagestreamGetSlruSegmentResponse {
1997 : pub req: PagestreamGetSlruSegmentRequest,
1998 : pub segment: Bytes,
1999 : }
2000 :
2001 : #[derive(Debug)]
2002 : pub struct PagestreamErrorResponse {
2003 : pub req: PagestreamRequest,
2004 : pub message: String,
2005 : }
2006 :
2007 : #[derive(Debug)]
2008 : pub struct PagestreamDbSizeResponse {
2009 : pub req: PagestreamDbSizeRequest,
2010 : pub db_size: i64,
2011 : }
2012 :
2013 : #[cfg(feature = "testing")]
2014 : #[derive(Debug, PartialEq, Eq, Clone)]
2015 : pub struct PagestreamTestRequest {
2016 : pub hdr: PagestreamRequest,
2017 : pub batch_key: u64,
2018 : pub message: String,
2019 : }
2020 :
2021 : #[cfg(feature = "testing")]
2022 : #[derive(Debug)]
2023 : pub struct PagestreamTestResponse {
2024 : pub req: PagestreamTestRequest,
2025 : }
2026 :
2027 : // This is a cut-down version of TenantHistorySize from the pageserver crate, omitting fields
2028 : // that require pageserver-internal types. It is sufficient to get the total size.
2029 0 : #[derive(Serialize, Deserialize, Debug)]
2030 : pub struct TenantHistorySize {
2031 : pub id: TenantId,
2032 : /// Size is a mixture of WAL and logical size, so the unit is bytes.
2033 : ///
2034 : /// Will be none if `?inputs_only=true` was given.
2035 : pub size: Option<u64>,
2036 : }
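// For example (illustrative values; the tenant id is arbitrary), a response body of the form
//
//     {"id": "17c6d121946a61e5ab0fe5a2fd4d8215", "size": 1234567}
//
// deserializes into this struct; `size` is null when `?inputs_only=true` was given.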
2037 :
2038 : impl PagestreamFeMessage {
2039 : /// Serialize a compute -> pageserver message. This is currently only used in testing
2040 : /// tools. Always uses protocol version 3.
2041 4 : pub fn serialize(&self) -> Bytes {
2042 4 : let mut bytes = BytesMut::new();
2043 4 :
2044 4 : match self {
2045 1 : Self::Exists(req) => {
2046 1 : bytes.put_u8(PagestreamFeMessageTag::Exists as u8);
2047 1 : bytes.put_u64(req.hdr.reqid);
2048 1 : bytes.put_u64(req.hdr.request_lsn.0);
2049 1 : bytes.put_u64(req.hdr.not_modified_since.0);
2050 1 : bytes.put_u32(req.rel.spcnode);
2051 1 : bytes.put_u32(req.rel.dbnode);
2052 1 : bytes.put_u32(req.rel.relnode);
2053 1 : bytes.put_u8(req.rel.forknum);
2054 1 : }
2055 :
2056 1 : Self::Nblocks(req) => {
2057 1 : bytes.put_u8(PagestreamFeMessageTag::Nblocks as u8);
2058 1 : bytes.put_u64(req.hdr.reqid);
2059 1 : bytes.put_u64(req.hdr.request_lsn.0);
2060 1 : bytes.put_u64(req.hdr.not_modified_since.0);
2061 1 : bytes.put_u32(req.rel.spcnode);
2062 1 : bytes.put_u32(req.rel.dbnode);
2063 1 : bytes.put_u32(req.rel.relnode);
2064 1 : bytes.put_u8(req.rel.forknum);
2065 1 : }
2066 :
2067 1 : Self::GetPage(req) => {
2068 1 : bytes.put_u8(PagestreamFeMessageTag::GetPage as u8);
2069 1 : bytes.put_u64(req.hdr.reqid);
2070 1 : bytes.put_u64(req.hdr.request_lsn.0);
2071 1 : bytes.put_u64(req.hdr.not_modified_since.0);
2072 1 : bytes.put_u32(req.rel.spcnode);
2073 1 : bytes.put_u32(req.rel.dbnode);
2074 1 : bytes.put_u32(req.rel.relnode);
2075 1 : bytes.put_u8(req.rel.forknum);
2076 1 : bytes.put_u32(req.blkno);
2077 1 : }
2078 :
2079 1 : Self::DbSize(req) => {
2080 1 : bytes.put_u8(PagestreamFeMessageTag::DbSize as u8);
2081 1 : bytes.put_u64(req.hdr.reqid);
2082 1 : bytes.put_u64(req.hdr.request_lsn.0);
2083 1 : bytes.put_u64(req.hdr.not_modified_since.0);
2084 1 : bytes.put_u32(req.dbnode);
2085 1 : }
2086 :
2087 0 : Self::GetSlruSegment(req) => {
2088 0 : bytes.put_u8(PagestreamFeMessageTag::GetSlruSegment as u8);
2089 0 : bytes.put_u64(req.hdr.reqid);
2090 0 : bytes.put_u64(req.hdr.request_lsn.0);
2091 0 : bytes.put_u64(req.hdr.not_modified_since.0);
2092 0 : bytes.put_u8(req.kind);
2093 0 : bytes.put_u32(req.segno);
2094 0 : }
2095 : #[cfg(feature = "testing")]
2096 0 : Self::Test(req) => {
2097 0 : bytes.put_u8(PagestreamFeMessageTag::Test as u8);
2098 0 : bytes.put_u64(req.hdr.reqid);
2099 0 : bytes.put_u64(req.hdr.request_lsn.0);
2100 0 : bytes.put_u64(req.hdr.not_modified_since.0);
2101 0 : bytes.put_u64(req.batch_key);
2102 0 : let message = req.message.as_bytes();
2103 0 : bytes.put_u64(message.len() as u64);
2104 0 : bytes.put_slice(message);
2105 0 : }
2106 : }
2107 :
2108 4 : bytes.into()
2109 4 : }
2110 :
2111 4 : pub fn parse<R: std::io::Read>(
2112 4 : body: &mut R,
2113 4 : protocol_version: PagestreamProtocolVersion,
2114 4 : ) -> anyhow::Result<PagestreamFeMessage> {
2115 : // these correspond to the NeonMessageTag enum in pagestore_client.h
2116 : //
2117 : // TODO: consider using protobuf or serde bincode for less error-prone
2118 : // serialization.
2119 4 : let msg_tag = body.read_u8()?;
2120 4 : let (reqid, request_lsn, not_modified_since) = match protocol_version {
2121 : PagestreamProtocolVersion::V2 => (
2122 : 0,
2123 0 : Lsn::from(body.read_u64::<BigEndian>()?),
2124 0 : Lsn::from(body.read_u64::<BigEndian>()?),
2125 : ),
2126 : PagestreamProtocolVersion::V3 => (
2127 4 : body.read_u64::<BigEndian>()?,
2128 4 : Lsn::from(body.read_u64::<BigEndian>()?),
2129 4 : Lsn::from(body.read_u64::<BigEndian>()?),
2130 : ),
2131 : };
2132 :
2133 4 : match PagestreamFeMessageTag::try_from(msg_tag)
2134 4 : .map_err(|tag: u8| anyhow::anyhow!("invalid tag {tag}"))?
2135 : {
2136 : PagestreamFeMessageTag::Exists => {
2137 : Ok(PagestreamFeMessage::Exists(PagestreamExistsRequest {
2138 1 : hdr: PagestreamRequest {
2139 1 : reqid,
2140 1 : request_lsn,
2141 1 : not_modified_since,
2142 1 : },
2143 1 : rel: RelTag {
2144 1 : spcnode: body.read_u32::<BigEndian>()?,
2145 1 : dbnode: body.read_u32::<BigEndian>()?,
2146 1 : relnode: body.read_u32::<BigEndian>()?,
2147 1 : forknum: body.read_u8()?,
2148 : },
2149 : }))
2150 : }
2151 : PagestreamFeMessageTag::Nblocks => {
2152 : Ok(PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
2153 1 : hdr: PagestreamRequest {
2154 1 : reqid,
2155 1 : request_lsn,
2156 1 : not_modified_since,
2157 1 : },
2158 1 : rel: RelTag {
2159 1 : spcnode: body.read_u32::<BigEndian>()?,
2160 1 : dbnode: body.read_u32::<BigEndian>()?,
2161 1 : relnode: body.read_u32::<BigEndian>()?,
2162 1 : forknum: body.read_u8()?,
2163 : },
2164 : }))
2165 : }
2166 : PagestreamFeMessageTag::GetPage => {
2167 : Ok(PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
2168 1 : hdr: PagestreamRequest {
2169 1 : reqid,
2170 1 : request_lsn,
2171 1 : not_modified_since,
2172 1 : },
2173 1 : rel: RelTag {
2174 1 : spcnode: body.read_u32::<BigEndian>()?,
2175 1 : dbnode: body.read_u32::<BigEndian>()?,
2176 1 : relnode: body.read_u32::<BigEndian>()?,
2177 1 : forknum: body.read_u8()?,
2178 : },
2179 1 : blkno: body.read_u32::<BigEndian>()?,
2180 : }))
2181 : }
2182 : PagestreamFeMessageTag::DbSize => {
2183 : Ok(PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
2184 1 : hdr: PagestreamRequest {
2185 1 : reqid,
2186 1 : request_lsn,
2187 1 : not_modified_since,
2188 1 : },
2189 1 : dbnode: body.read_u32::<BigEndian>()?,
2190 : }))
2191 : }
2192 : PagestreamFeMessageTag::GetSlruSegment => Ok(PagestreamFeMessage::GetSlruSegment(
2193 : PagestreamGetSlruSegmentRequest {
2194 0 : hdr: PagestreamRequest {
2195 0 : reqid,
2196 0 : request_lsn,
2197 0 : not_modified_since,
2198 0 : },
2199 0 : kind: body.read_u8()?,
2200 0 : segno: body.read_u32::<BigEndian>()?,
2201 : },
2202 : )),
2203 : #[cfg(feature = "testing")]
2204 : PagestreamFeMessageTag::Test => Ok(PagestreamFeMessage::Test(PagestreamTestRequest {
2205 0 : hdr: PagestreamRequest {
2206 0 : reqid,
2207 0 : request_lsn,
2208 0 : not_modified_since,
2209 0 : },
2210 0 : batch_key: body.read_u64::<BigEndian>()?,
2211 : message: {
2212 0 : let len = body.read_u64::<BigEndian>()?;
2213 0 : let mut buf = vec![0; len as usize];
2214 0 : body.read_exact(&mut buf)?;
2215 0 : String::from_utf8(buf)?
2216 : },
2217 : })),
2218 : }
2219 4 : }
2220 : }
2221 :
2222 : impl PagestreamBeMessage {
2223 0 : pub fn serialize(&self, protocol_version: PagestreamProtocolVersion) -> Bytes {
2224 0 : let mut bytes = BytesMut::new();
2225 :
2226 : use PagestreamBeMessageTag as Tag;
2227 0 : match protocol_version {
2228 : PagestreamProtocolVersion::V2 => {
2229 0 : match self {
2230 0 : Self::Exists(resp) => {
2231 0 : bytes.put_u8(Tag::Exists as u8);
2232 0 : bytes.put_u8(resp.exists as u8);
2233 0 : }
2234 :
2235 0 : Self::Nblocks(resp) => {
2236 0 : bytes.put_u8(Tag::Nblocks as u8);
2237 0 : bytes.put_u32(resp.n_blocks);
2238 0 : }
2239 :
2240 0 : Self::GetPage(resp) => {
2241 0 : bytes.put_u8(Tag::GetPage as u8);
2242 0 : bytes.put(&resp.page[..])
2243 : }
2244 :
2245 0 : Self::Error(resp) => {
2246 0 : bytes.put_u8(Tag::Error as u8);
2247 0 : bytes.put(resp.message.as_bytes());
2248 0 : bytes.put_u8(0); // null terminator
2249 0 : }
2250 0 : Self::DbSize(resp) => {
2251 0 : bytes.put_u8(Tag::DbSize as u8);
2252 0 : bytes.put_i64(resp.db_size);
2253 0 : }
2254 :
2255 0 : Self::GetSlruSegment(resp) => {
2256 0 : bytes.put_u8(Tag::GetSlruSegment as u8);
2257 0 : bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32);
2258 0 : bytes.put(&resp.segment[..]);
2259 0 : }
2260 :
2261 : #[cfg(feature = "testing")]
2262 0 : Self::Test(resp) => {
2263 0 : bytes.put_u8(Tag::Test as u8);
2264 0 : bytes.put_u64(resp.req.batch_key);
2265 0 : let message = resp.req.message.as_bytes();
2266 0 : bytes.put_u64(message.len() as u64);
2267 0 : bytes.put_slice(message);
2268 0 : }
2269 : }
2270 : }
2271 : PagestreamProtocolVersion::V3 => {
2272 0 : match self {
2273 0 : Self::Exists(resp) => {
2274 0 : bytes.put_u8(Tag::Exists as u8);
2275 0 : bytes.put_u64(resp.req.hdr.reqid);
2276 0 : bytes.put_u64(resp.req.hdr.request_lsn.0);
2277 0 : bytes.put_u64(resp.req.hdr.not_modified_since.0);
2278 0 : bytes.put_u32(resp.req.rel.spcnode);
2279 0 : bytes.put_u32(resp.req.rel.dbnode);
2280 0 : bytes.put_u32(resp.req.rel.relnode);
2281 0 : bytes.put_u8(resp.req.rel.forknum);
2282 0 : bytes.put_u8(resp.exists as u8);
2283 0 : }
2284 :
2285 0 : Self::Nblocks(resp) => {
2286 0 : bytes.put_u8(Tag::Nblocks as u8);
2287 0 : bytes.put_u64(resp.req.hdr.reqid);
2288 0 : bytes.put_u64(resp.req.hdr.request_lsn.0);
2289 0 : bytes.put_u64(resp.req.hdr.not_modified_since.0);
2290 0 : bytes.put_u32(resp.req.rel.spcnode);
2291 0 : bytes.put_u32(resp.req.rel.dbnode);
2292 0 : bytes.put_u32(resp.req.rel.relnode);
2293 0 : bytes.put_u8(resp.req.rel.forknum);
2294 0 : bytes.put_u32(resp.n_blocks);
2295 0 : }
2296 :
2297 0 : Self::GetPage(resp) => {
2298 0 : bytes.put_u8(Tag::GetPage as u8);
2299 0 : bytes.put_u64(resp.req.hdr.reqid);
2300 0 : bytes.put_u64(resp.req.hdr.request_lsn.0);
2301 0 : bytes.put_u64(resp.req.hdr.not_modified_since.0);
2302 0 : bytes.put_u32(resp.req.rel.spcnode);
2303 0 : bytes.put_u32(resp.req.rel.dbnode);
2304 0 : bytes.put_u32(resp.req.rel.relnode);
2305 0 : bytes.put_u8(resp.req.rel.forknum);
2306 0 : bytes.put_u32(resp.req.blkno);
2307 0 : bytes.put(&resp.page[..])
2308 : }
2309 :
2310 0 : Self::Error(resp) => {
2311 0 : bytes.put_u8(Tag::Error as u8);
2312 0 : bytes.put_u64(resp.req.reqid);
2313 0 : bytes.put_u64(resp.req.request_lsn.0);
2314 0 : bytes.put_u64(resp.req.not_modified_since.0);
2315 0 : bytes.put(resp.message.as_bytes());
2316 0 : bytes.put_u8(0); // null terminator
2317 0 : }
2318 0 : Self::DbSize(resp) => {
2319 0 : bytes.put_u8(Tag::DbSize as u8);
2320 0 : bytes.put_u64(resp.req.hdr.reqid);
2321 0 : bytes.put_u64(resp.req.hdr.request_lsn.0);
2322 0 : bytes.put_u64(resp.req.hdr.not_modified_since.0);
2323 0 : bytes.put_u32(resp.req.dbnode);
2324 0 : bytes.put_i64(resp.db_size);
2325 0 : }
2326 :
2327 0 : Self::GetSlruSegment(resp) => {
2328 0 : bytes.put_u8(Tag::GetSlruSegment as u8);
2329 0 : bytes.put_u64(resp.req.hdr.reqid);
2330 0 : bytes.put_u64(resp.req.hdr.request_lsn.0);
2331 0 : bytes.put_u64(resp.req.hdr.not_modified_since.0);
2332 0 : bytes.put_u8(resp.req.kind);
2333 0 : bytes.put_u32(resp.req.segno);
2334 0 : bytes.put_u32((resp.segment.len() / BLCKSZ as usize) as u32);
2335 0 : bytes.put(&resp.segment[..]);
2336 0 : }
2337 :
2338 : #[cfg(feature = "testing")]
2339 0 : Self::Test(resp) => {
2340 0 : bytes.put_u8(Tag::Test as u8);
2341 0 : bytes.put_u64(resp.req.hdr.reqid);
2342 0 : bytes.put_u64(resp.req.hdr.request_lsn.0);
2343 0 : bytes.put_u64(resp.req.hdr.not_modified_since.0);
2344 0 : bytes.put_u64(resp.req.batch_key);
2345 0 : let message = resp.req.message.as_bytes();
2346 0 : bytes.put_u64(message.len() as u64);
2347 0 : bytes.put_slice(message);
2348 0 : }
2349 : }
2350 : }
2351 : }
2352 0 : bytes.into()
2353 0 : }
2354 :
2355 0 : pub fn deserialize(buf: Bytes) -> anyhow::Result<Self> {
2356 0 : let mut buf = buf.reader();
2357 0 : let msg_tag = buf.read_u8()?;
2358 :
2359 : use PagestreamBeMessageTag as Tag;
2360 0 : let ok =
2361 0 : match Tag::try_from(msg_tag).map_err(|tag: u8| anyhow::anyhow!("invalid tag {tag}"))? {
2362 : Tag::Exists => {
2363 0 : let reqid = buf.read_u64::<BigEndian>()?;
2364 0 : let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
2365 0 : let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
2366 0 : let rel = RelTag {
2367 0 : spcnode: buf.read_u32::<BigEndian>()?,
2368 0 : dbnode: buf.read_u32::<BigEndian>()?,
2369 0 : relnode: buf.read_u32::<BigEndian>()?,
2370 0 : forknum: buf.read_u8()?,
2371 : };
2372 0 : let exists = buf.read_u8()? != 0;
2373 0 : Self::Exists(PagestreamExistsResponse {
2374 0 : req: PagestreamExistsRequest {
2375 0 : hdr: PagestreamRequest {
2376 0 : reqid,
2377 0 : request_lsn,
2378 0 : not_modified_since,
2379 0 : },
2380 0 : rel,
2381 0 : },
2382 0 : exists,
2383 0 : })
2384 : }
2385 : Tag::Nblocks => {
2386 0 : let reqid = buf.read_u64::<BigEndian>()?;
2387 0 : let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
2388 0 : let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
2389 0 : let rel = RelTag {
2390 0 : spcnode: buf.read_u32::<BigEndian>()?,
2391 0 : dbnode: buf.read_u32::<BigEndian>()?,
2392 0 : relnode: buf.read_u32::<BigEndian>()?,
2393 0 : forknum: buf.read_u8()?,
2394 : };
2395 0 : let n_blocks = buf.read_u32::<BigEndian>()?;
2396 0 : Self::Nblocks(PagestreamNblocksResponse {
2397 0 : req: PagestreamNblocksRequest {
2398 0 : hdr: PagestreamRequest {
2399 0 : reqid,
2400 0 : request_lsn,
2401 0 : not_modified_since,
2402 0 : },
2403 0 : rel,
2404 0 : },
2405 0 : n_blocks,
2406 0 : })
2407 : }
2408 : Tag::GetPage => {
2409 0 : let reqid = buf.read_u64::<BigEndian>()?;
2410 0 : let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
2411 0 : let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
2412 0 : let rel = RelTag {
2413 0 : spcnode: buf.read_u32::<BigEndian>()?,
2414 0 : dbnode: buf.read_u32::<BigEndian>()?,
2415 0 : relnode: buf.read_u32::<BigEndian>()?,
2416 0 : forknum: buf.read_u8()?,
2417 : };
2418 0 : let blkno = buf.read_u32::<BigEndian>()?;
2419 0 : let mut page = vec![0; 8192]; // TODO: use MaybeUninit
2420 0 : buf.read_exact(&mut page)?;
2421 0 : Self::GetPage(PagestreamGetPageResponse {
2422 0 : req: PagestreamGetPageRequest {
2423 0 : hdr: PagestreamRequest {
2424 0 : reqid,
2425 0 : request_lsn,
2426 0 : not_modified_since,
2427 0 : },
2428 0 : rel,
2429 0 : blkno,
2430 0 : },
2431 0 : page: page.into(),
2432 0 : })
2433 : }
2434 : Tag::Error => {
2435 0 : let reqid = buf.read_u64::<BigEndian>()?;
2436 0 : let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
2437 0 : let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
2438 0 : let mut msg = Vec::new();
2439 0 : buf.read_until(0, &mut msg)?;
2440 0 : let cstring = std::ffi::CString::from_vec_with_nul(msg)?;
2441 0 : let rust_str = cstring.to_str()?;
2442 0 : Self::Error(PagestreamErrorResponse {
2443 0 : req: PagestreamRequest {
2444 0 : reqid,
2445 0 : request_lsn,
2446 0 : not_modified_since,
2447 0 : },
2448 0 : message: rust_str.to_owned(),
2449 0 : })
2450 : }
2451 : Tag::DbSize => {
2452 0 : let reqid = buf.read_u64::<BigEndian>()?;
2453 0 : let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
2454 0 : let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
2455 0 : let dbnode = buf.read_u32::<BigEndian>()?;
2456 0 : let db_size = buf.read_i64::<BigEndian>()?;
2457 0 : Self::DbSize(PagestreamDbSizeResponse {
2458 0 : req: PagestreamDbSizeRequest {
2459 0 : hdr: PagestreamRequest {
2460 0 : reqid,
2461 0 : request_lsn,
2462 0 : not_modified_since,
2463 0 : },
2464 0 : dbnode,
2465 0 : },
2466 0 : db_size,
2467 0 : })
2468 : }
2469 : Tag::GetSlruSegment => {
2470 0 : let reqid = buf.read_u64::<BigEndian>()?;
2471 0 : let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
2472 0 : let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
2473 0 : let kind = buf.read_u8()?;
2474 0 : let segno = buf.read_u32::<BigEndian>()?;
2475 0 : let n_blocks = buf.read_u32::<BigEndian>()?;
2476 0 : let mut segment = vec![0; n_blocks as usize * BLCKSZ as usize];
2477 0 : buf.read_exact(&mut segment)?;
2478 0 : Self::GetSlruSegment(PagestreamGetSlruSegmentResponse {
2479 0 : req: PagestreamGetSlruSegmentRequest {
2480 0 : hdr: PagestreamRequest {
2481 0 : reqid,
2482 0 : request_lsn,
2483 0 : not_modified_since,
2484 0 : },
2485 0 : kind,
2486 0 : segno,
2487 0 : },
2488 0 : segment: segment.into(),
2489 0 : })
2490 : }
2491 : #[cfg(feature = "testing")]
2492 : Tag::Test => {
2493 0 : let reqid = buf.read_u64::<BigEndian>()?;
2494 0 : let request_lsn = Lsn(buf.read_u64::<BigEndian>()?);
2495 0 : let not_modified_since = Lsn(buf.read_u64::<BigEndian>()?);
2496 0 : let batch_key = buf.read_u64::<BigEndian>()?;
2497 0 : let len = buf.read_u64::<BigEndian>()?;
2498 0 : let mut msg = vec![0; len as usize];
2499 0 : buf.read_exact(&mut msg)?;
2500 0 : let message = String::from_utf8(msg)?;
2501 0 : Self::Test(PagestreamTestResponse {
2502 0 : req: PagestreamTestRequest {
2503 0 : hdr: PagestreamRequest {
2504 0 : reqid,
2505 0 : request_lsn,
2506 0 : not_modified_since,
2507 0 : },
2508 0 : batch_key,
2509 0 : message,
2510 0 : },
2511 0 : })
2512 : }
2513 : };
2514 0 : let remaining = buf.into_inner();
2515 0 : if !remaining.is_empty() {
2516 0 : anyhow::bail!(
2517 0 : "remaining bytes in msg with tag={msg_tag}: {}",
2518 0 : remaining.len()
2519 0 : );
2520 0 : }
2521 0 : Ok(ok)
2522 0 : }
2523 :
2524 0 : pub fn kind(&self) -> &'static str {
2525 0 : match self {
2526 0 : Self::Exists(_) => "Exists",
2527 0 : Self::Nblocks(_) => "Nblocks",
2528 0 : Self::GetPage(_) => "GetPage",
2529 0 : Self::Error(_) => "Error",
2530 0 : Self::DbSize(_) => "DbSize",
2531 0 : Self::GetSlruSegment(_) => "GetSlruSegment",
2532 : #[cfg(feature = "testing")]
2533 0 : Self::Test(_) => "Test",
2534 : }
2535 0 : }
2536 : }
2537 :
2538 0 : #[derive(Debug, Serialize, Deserialize)]
2539 : pub struct PageTraceEvent {
2540 : pub key: CompactKey,
2541 : pub effective_lsn: Lsn,
2542 : pub time: SystemTime,
2543 : }
2544 :
2545 : impl Default for PageTraceEvent {
2546 0 : fn default() -> Self {
2547 0 : Self {
2548 0 : key: Default::default(),
2549 0 : effective_lsn: Default::default(),
2550 0 : time: std::time::UNIX_EPOCH,
2551 0 : }
2552 0 : }
2553 : }
2554 :
2555 : #[cfg(test)]
2556 : mod tests {
2557 : use std::str::FromStr;
2558 :
2559 : use serde_json::json;
2560 :
2561 : use super::*;
2562 :
2563 : #[test]
2564 1 : fn test_pagestream() {
2565 1 : // Test serialization/deserialization of PagestreamFeMessage
2566 1 : let messages = vec![
2567 1 : PagestreamFeMessage::Exists(PagestreamExistsRequest {
2568 1 : hdr: PagestreamRequest {
2569 1 : reqid: 0,
2570 1 : request_lsn: Lsn(4),
2571 1 : not_modified_since: Lsn(3),
2572 1 : },
2573 1 : rel: RelTag {
2574 1 : forknum: 1,
2575 1 : spcnode: 2,
2576 1 : dbnode: 3,
2577 1 : relnode: 4,
2578 1 : },
2579 1 : }),
2580 1 : PagestreamFeMessage::Nblocks(PagestreamNblocksRequest {
2581 1 : hdr: PagestreamRequest {
2582 1 : reqid: 0,
2583 1 : request_lsn: Lsn(4),
2584 1 : not_modified_since: Lsn(4),
2585 1 : },
2586 1 : rel: RelTag {
2587 1 : forknum: 1,
2588 1 : spcnode: 2,
2589 1 : dbnode: 3,
2590 1 : relnode: 4,
2591 1 : },
2592 1 : }),
2593 1 : PagestreamFeMessage::GetPage(PagestreamGetPageRequest {
2594 1 : hdr: PagestreamRequest {
2595 1 : reqid: 0,
2596 1 : request_lsn: Lsn(4),
2597 1 : not_modified_since: Lsn(3),
2598 1 : },
2599 1 : rel: RelTag {
2600 1 : forknum: 1,
2601 1 : spcnode: 2,
2602 1 : dbnode: 3,
2603 1 : relnode: 4,
2604 1 : },
2605 1 : blkno: 7,
2606 1 : }),
2607 1 : PagestreamFeMessage::DbSize(PagestreamDbSizeRequest {
2608 1 : hdr: PagestreamRequest {
2609 1 : reqid: 0,
2610 1 : request_lsn: Lsn(4),
2611 1 : not_modified_since: Lsn(3),
2612 1 : },
2613 1 : dbnode: 7,
2614 1 : }),
2615 1 : ];
2616 5 : for msg in messages {
2617 4 : let bytes = msg.serialize();
2618 4 : let reconstructed =
2619 4 : PagestreamFeMessage::parse(&mut bytes.reader(), PagestreamProtocolVersion::V3)
2620 4 : .unwrap();
2621 4 : assert!(msg == reconstructed);
2622 : }
2623 1 : }
2624 :
2625 : #[test]
2626 1 : fn test_tenantinfo_serde() {
2627 1 : // Test serialization/deserialization of TenantInfo
2628 1 : let original_active = TenantInfo {
2629 1 : id: TenantShardId::unsharded(TenantId::generate()),
2630 1 : state: TenantState::Active,
2631 1 : current_physical_size: Some(42),
2632 1 : attachment_status: TenantAttachmentStatus::Attached,
2633 1 : generation: 1,
2634 1 : gc_blocking: None,
2635 1 : };
2636 1 : let expected_active = json!({
2637 1 : "id": original_active.id.to_string(),
2638 1 : "state": {
2639 1 : "slug": "Active",
2640 1 : },
2641 1 : "current_physical_size": 42,
2642 1 : "attachment_status": {
2643 1 : "slug":"attached",
2644 1 : },
2645 1 : "generation" : 1
2646 1 : });
2647 1 :
2648 1 : let original_broken = TenantInfo {
2649 1 : id: TenantShardId::unsharded(TenantId::generate()),
2650 1 : state: TenantState::Broken {
2651 1 : reason: "reason".into(),
2652 1 : backtrace: "backtrace info".into(),
2653 1 : },
2654 1 : current_physical_size: Some(42),
2655 1 : attachment_status: TenantAttachmentStatus::Attached,
2656 1 : generation: 1,
2657 1 : gc_blocking: None,
2658 1 : };
2659 1 : let expected_broken = json!({
2660 1 : "id": original_broken.id.to_string(),
2661 1 : "state": {
2662 1 : "slug": "Broken",
2663 1 : "data": {
2664 1 : "backtrace": "backtrace info",
2665 1 : "reason": "reason",
2666 1 : }
2667 1 : },
2668 1 : "current_physical_size": 42,
2669 1 : "attachment_status": {
2670 1 : "slug":"attached",
2671 1 : },
2672 1 : "generation" : 1
2673 1 : });
2674 1 :
2675 1 : assert_eq!(
2676 1 : serde_json::to_value(&original_active).unwrap(),
2677 1 : expected_active
2678 1 : );
2679 :
2680 1 : assert_eq!(
2681 1 : serde_json::to_value(&original_broken).unwrap(),
2682 1 : expected_broken
2683 1 : );
2684 1 : assert!(format!("{:?}", &original_broken.state).contains("reason"));
2685 1 : assert!(format!("{:?}", &original_broken.state).contains("backtrace info"));
2686 1 : }
2687 :
2688 : #[test]
2689 1 : fn test_reject_unknown_field() {
2690 1 : let id = TenantId::generate();
2691 1 : let config_request = json!({
2692 1 : "tenant_id": id.to_string(),
2693 1 : "unknown_field": "unknown_value".to_string(),
2694 1 : });
2695 1 : let err = serde_json::from_value::<TenantConfigRequest>(config_request).unwrap_err();
2696 1 : assert!(
2697 1 : err.to_string().contains("unknown field `unknown_field`"),
2698 0 : "expect unknown field `unknown_field` error, got: {}",
2699 : err
2700 : );
2701 1 : }
2702 :
2703 : #[test]
2704 1 : fn tenantstatus_activating_serde() {
2705 1 : let states = [TenantState::Activating(ActivatingFrom::Attaching)];
2706 1 : let expected = "[{\"slug\":\"Activating\",\"data\":\"Attaching\"}]";
2707 1 :
2708 1 : let actual = serde_json::to_string(&states).unwrap();
2709 1 :
2710 1 : assert_eq!(actual, expected);
2711 :
2712 1 : let parsed = serde_json::from_str::<Vec<TenantState>>(&actual).unwrap();
2713 1 :
2714 1 : assert_eq!(states.as_slice(), &parsed);
2715 1 : }
2716 :
2717 : #[test]
2718 1 : fn tenantstatus_activating_strum() {
2719 1 : // tests added, because we use these for metrics
2720 1 : let examples = [
2721 1 : (line!(), TenantState::Attaching, "Attaching"),
2722 1 : (
2723 1 : line!(),
2724 1 : TenantState::Activating(ActivatingFrom::Attaching),
2725 1 : "Activating",
2726 1 : ),
2727 1 : (line!(), TenantState::Active, "Active"),
2728 1 : (
2729 1 : line!(),
2730 1 : TenantState::Stopping {
2731 1 : progress: utils::completion::Barrier::default(),
2732 1 : },
2733 1 : "Stopping",
2734 1 : ),
2735 1 : (
2736 1 : line!(),
2737 1 : TenantState::Broken {
2738 1 : reason: "Example".into(),
2739 1 : backtrace: "Looooong backtrace".into(),
2740 1 : },
2741 1 : "Broken",
2742 1 : ),
2743 1 : ];
2744 :
2745 6 : for (line, rendered, expected) in examples {
2746 5 : let actual: &'static str = rendered.into();
2747 5 : assert_eq!(actual, expected, "example on {line}");
2748 : }
2749 1 : }
2750 :
2751 : #[test]
2752 1 : fn test_image_compression_algorithm_parsing() {
2753 : use ImageCompressionAlgorithm::*;
2754 1 : let cases = [
2755 1 : ("disabled", Disabled),
2756 1 : ("zstd", Zstd { level: None }),
2757 1 : ("zstd(18)", Zstd { level: Some(18) }),
2758 1 : ("zstd(-3)", Zstd { level: Some(-3) }),
2759 1 : ];
2760 :
2761 5 : for (display, expected) in cases {
2762 4 : assert_eq!(
2763 4 : ImageCompressionAlgorithm::from_str(display).unwrap(),
2764 : expected,
2765 0 : "parsing works"
2766 : );
2767 4 : assert_eq!(format!("{expected}"), display, "Display FromStr roundtrip");
2768 :
2769 4 : let ser = serde_json::to_string(&expected).expect("serialization");
2770 4 : assert_eq!(
2771 4 : serde_json::from_str::<ImageCompressionAlgorithm>(&ser).unwrap(),
2772 : expected,
2773 0 : "serde roundtrip"
2774 : );
2775 :
2776 4 : assert_eq!(
2777 4 : serde_json::Value::String(display.to_string()),
2778 4 : serde_json::to_value(expected).unwrap(),
2779 0 : "Display is the serde serialization"
2780 : );
2781 : }
2782 1 : }
2783 :
2784 : #[test]
2785 1 : fn test_tenant_config_patch_request_serde() {
2786 1 : let patch_request = TenantConfigPatchRequest {
2787 1 : tenant_id: TenantId::from_str("17c6d121946a61e5ab0fe5a2fd4d8215").unwrap(),
2788 1 : config: TenantConfigPatch {
2789 1 : checkpoint_distance: FieldPatch::Upsert(42),
2790 1 : gc_horizon: FieldPatch::Remove,
2791 1 : compaction_threshold: FieldPatch::Noop,
2792 1 : ..TenantConfigPatch::default()
2793 1 : },
2794 1 : };
2795 1 :
2796 1 : let json = serde_json::to_string(&patch_request).unwrap();
2797 1 :
2798 1 : let expected = r#"{"tenant_id":"17c6d121946a61e5ab0fe5a2fd4d8215","checkpoint_distance":42,"gc_horizon":null}"#;
2799 1 : assert_eq!(json, expected);
2800 :
2801 1 : let decoded: TenantConfigPatchRequest = serde_json::from_str(&json).unwrap();
2802 1 : assert_eq!(decoded.tenant_id, patch_request.tenant_id);
2803 1 : assert_eq!(decoded.config, patch_request.config);
2804 :
2805 : // Now apply the patch to a config to demonstrate semantics
2806 :
2807 1 : let base = TenantConfig {
2808 1 : checkpoint_distance: Some(28),
2809 1 : gc_horizon: Some(100),
2810 1 : compaction_target_size: Some(1024),
2811 1 : ..Default::default()
2812 1 : };
2813 1 :
2814 1 : let expected = TenantConfig {
2815 1 : checkpoint_distance: Some(42),
2816 1 : gc_horizon: None,
2817 1 : ..base.clone()
2818 1 : };
2819 1 :
2820 1 : let patched = base.apply_patch(decoded.config).unwrap();
2821 1 :
2822 1 : assert_eq!(patched, expected);
2823 1 : }
2824 : }
|