//! Common traits and structs for layers

pub mod delta_layer;
mod filename;
pub mod image_layer;
mod inmemory_layer;
pub(crate) mod layer;
mod layer_desc;

use crate::context::{AccessStatsBehavior, RequestContext};
use crate::repository::Value;
use crate::task_mgr::TaskKind;
use crate::walrecord::NeonWalRecord;
use bytes::Bytes;
use enum_map::EnumMap;
use enumset::EnumSet;
use once_cell::sync::Lazy;
use pageserver_api::key::Key;
use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum};
use pageserver_api::models::{
    LayerAccessKind, LayerResidenceEvent, LayerResidenceEventReason, LayerResidenceStatus,
};
use std::cmp::{Ordering, Reverse};
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap};
use std::ops::Range;
use std::sync::Mutex;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tracing::warn;
use utils::history_buffer::HistoryBufferWithDropCounter;
use utils::rate_limit::RateLimit;

use utils::{id::TimelineId, lsn::Lsn};

pub use delta_layer::{DeltaLayer, DeltaLayerWriter, ValueRef};
pub use filename::{DeltaFileName, ImageFileName, LayerFileName};
pub use image_layer::{ImageLayer, ImageLayerWriter};
pub use inmemory_layer::InMemoryLayer;
pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey};

pub(crate) use layer::{EvictionError, Layer, ResidentLayer};

use super::layer_map::InMemoryLayerHandle;
use super::timeline::layer_manager::LayerManager;
use super::timeline::GetVectoredError;
use super::PageReconstructError;

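/// Returns true if two half-open ranges overlap.
///
/// A minimal illustration of the intended semantics (an `ignore`d doctest
/// sketch, not a guaranteed-compiling example):
///
/// ```ignore
/// assert!(range_overlaps(&(1..5), &(4..8))); // partial overlap
/// assert!(!range_overlaps(&(1..4), &(4..8))); // ranges that merely touch do not overlap
/// ```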
pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
where
    T: PartialOrd<T>,
{
    if a.start < b.start {
        a.end > b.start
    } else {
        b.end > a.start
    }
}

/// Struct used to communicate across calls to 'get_value_reconstruct_data'.
///
/// Before the first call, you can fill in 'img' if you have an older cached
/// version of the page available. That can save work in
/// 'get_value_reconstruct_data', as it can stop searching for page versions
/// when all the WAL records going back to the cached image have been collected.
///
/// When 'get_value_reconstruct_data' returns Complete, 'img' is set to an image
/// of the page, or the oldest WAL record in 'records' is a will_init-type
/// record that initializes the page without requiring a previous image.
///
/// If 'get_value_reconstruct_data' returns Continue, some 'records' may have
/// been collected, but there are more records outside the current layer. Pass
/// the same ValueReconstructState struct in the next 'get_value_reconstruct_data'
/// call, to collect more records.
///
#[derive(Debug)]
pub struct ValueReconstructState {
    pub records: Vec<(Lsn, NeonWalRecord)>,
    pub img: Option<(Lsn, Bytes)>,
}
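
// A sketch of the calling convention described above. This is illustrative
// pseudocode, not a real caller: `layer`, `key`, `lsn_range`, `ctx`,
// `predecessor` and `missing_key_error` are assumed names.
//
//     let mut state = ValueReconstructState { records: Vec::new(), img: cached_page };
//     loop {
//         match layer.get_value_reconstruct_data(key, lsn_range.clone(), &mut state, ctx)? {
//             ValueReconstructResult::Complete => break,
//             // Not all data was found in this layer: descend to its
//             // predecessor and keep accumulating into the same `state`.
//             ValueReconstructResult::Continue => layer = predecessor(&layer),
//             ValueReconstructResult::Missing => return Err(missing_key_error()),
//         }
//     }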

#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub(crate) enum ValueReconstructSituation {
    Complete,
    #[default]
    Continue,
}

/// Reconstruct data accumulated for a single key during a vectored get
#[derive(Debug, Default, Clone)]
pub(crate) struct VectoredValueReconstructState {
    pub(crate) records: Vec<(Lsn, NeonWalRecord)>,
    pub(crate) img: Option<(Lsn, Bytes)>,

    situation: ValueReconstructSituation,
}

impl VectoredValueReconstructState {
    fn get_cached_lsn(&self) -> Option<Lsn> {
        self.img.as_ref().map(|img| img.0)
    }
}

impl From<VectoredValueReconstructState> for ValueReconstructState {
    fn from(mut state: VectoredValueReconstructState) -> Self {
        // walredo expects the records to be descending in terms of Lsn
        state.records.sort_by_key(|(lsn, _)| Reverse(*lsn));

        ValueReconstructState {
            records: state.records,
            img: state.img,
        }
    }
}

/// Bag of data accumulated during a vectored get
pub(crate) struct ValuesReconstructState {
    pub(crate) keys: HashMap<Key, Result<VectoredValueReconstructState, PageReconstructError>>,

    keys_done: KeySpaceRandomAccum,
}

impl ValuesReconstructState {
    pub(crate) fn new() -> Self {
        Self {
            keys: HashMap::new(),
            keys_done: KeySpaceRandomAccum::new(),
        }
    }

    /// Associate a key with the error which it encountered and mark it as done
    pub(crate) fn on_key_error(&mut self, key: Key, err: PageReconstructError) {
        let previous = self.keys.insert(key, Err(err));
        if let Some(Ok(state)) = previous {
            if state.situation == ValueReconstructSituation::Continue {
                self.keys_done.add_key(key);
            }
        }
    }

    /// Update the state collected for a given key.
    /// Returns the new reconstruct situation for the key: `Complete` if this
    /// was the last value needed, `Continue` otherwise.
    ///
    /// If the key is done after the update, mark it as such.
    pub(crate) fn update_key(
        &mut self,
        key: &Key,
        lsn: Lsn,
        value: Value,
    ) -> ValueReconstructSituation {
        let state = self
            .keys
            .entry(*key)
            .or_insert(Ok(VectoredValueReconstructState::default()));

        if let Ok(state) = state {
            let key_done = match state.situation {
                ValueReconstructSituation::Complete => unreachable!(),
                ValueReconstructSituation::Continue => match value {
                    Value::Image(img) => {
                        state.img = Some((lsn, img));
                        true
                    }
                    Value::WalRecord(rec) => {
                        let reached_cache =
                            state.get_cached_lsn().map(|clsn| clsn + 1) == Some(lsn);
                        let will_init = rec.will_init();
                        state.records.push((lsn, rec));
                        will_init || reached_cache
                    }
                },
            };

            if key_done && state.situation == ValueReconstructSituation::Continue {
                state.situation = ValueReconstructSituation::Complete;
                self.keys_done.add_key(*key);
            }

            state.situation
        } else {
            ValueReconstructSituation::Complete
        }
    }

    /// Returns the Lsn at which this key is cached if one exists.
    /// The read path should go no further than this Lsn for the given key.
    pub(crate) fn get_cached_lsn(&self, key: &Key) -> Option<Lsn> {
        self.keys
            .get(key)
            .and_then(|k| k.as_ref().ok())
            .and_then(|state| state.get_cached_lsn())
    }

    /// Returns the key space describing the keys that have
    /// been marked as completed since the last call to this function.
    pub(crate) fn consume_done_keys(&mut self) -> KeySpace {
        self.keys_done.consume_keyspace()
    }
}
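
// Sketch of how a layer feeds values into this state during a vectored get
// (illustrative only; `key`, `lsn` and `value` are assumed to come from a
// layer scan, and the real driver lives in the timeline's vectored read path):
//
//     match state.update_key(&key, lsn, value) {
//         ValueReconstructSituation::Complete => { /* stop scanning this key */ }
//         ValueReconstructSituation::Continue => { /* older values still needed */ }
//     }
//     // Once per layer, collect the keys that just finished so they no
//     // longer descend to older layers:
//     let done: KeySpace = state.consume_done_keys();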

impl Default for ValuesReconstructState {
    fn default() -> Self {
        Self::new()
    }
}

/// Description of layer to be read - the layer map can turn
/// this description into the actual layer.
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
pub(crate) enum ReadableLayerDesc {
    Persistent {
        desc: PersistentLayerDesc,
        lsn_range: Range<Lsn>,
    },
    InMemory {
        handle: InMemoryLayerHandle,
        lsn_ceil: Lsn,
    },
}

/// Wrapper for 'ReadableLayerDesc' ordered by Lsn. Since 'BinaryHeap' is a
/// max-heap, the layer with the highest Lsn ceiling is popped first, so the
/// read path visits newer layers before older ones.
#[derive(Debug)]
struct ReadableLayerDescOrdered(ReadableLayerDesc);

/// Data structure which maintains a fringe of layers for the
/// read path. The fringe is the set of layers which intersects
/// the current keyspace that the search is descending on.
/// Each layer tracks the keyspace that intersects it.
///
/// Layers must be visited in Lsn order. Hence, the fringe uses a
/// two-level indexing scheme: a heap ordered by Lsn, plus a map from
/// each layer to its accumulated keyspace.
#[derive(Debug)]
pub(crate) struct LayerFringe {
    layers_by_lsn: BinaryHeap<ReadableLayerDescOrdered>,
    layers: HashMap<ReadableLayerDesc, KeySpace>,
}

impl LayerFringe {
    pub(crate) fn new() -> Self {
        LayerFringe {
            layers_by_lsn: BinaryHeap::new(),
            layers: HashMap::new(),
        }
    }

    pub(crate) fn next_layer(&mut self) -> Option<(ReadableLayerDesc, KeySpace)> {
        let handle = self.layers_by_lsn.pop()?;

        let removed = self.layers.remove_entry(&handle.0);
        match removed {
            Some((layer, keyspace)) => Some((layer, keyspace)),
            None => unreachable!("fringe internals are always consistent"),
        }
    }

    pub(crate) fn update(&mut self, layer: ReadableLayerDesc, keyspace: KeySpace) {
        let entry = self.layers.entry(layer.clone());
        match entry {
            Entry::Occupied(mut entry) => {
                entry.get_mut().merge(&keyspace);
            }
            Entry::Vacant(entry) => {
                self.layers_by_lsn
                    .push(ReadableLayerDescOrdered(entry.key().clone()));
                entry.insert(keyspace);
            }
        }
    }
}
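
// Usage sketch for the fringe (names are illustrative): repeated `update`
// calls for the same layer merge their keyspaces without duplicating the
// heap entry, and `next_layer` yields layers in descending Lsn-ceiling order.
//
//     let mut fringe = LayerFringe::new();
//     fringe.update(layer_a.clone(), keyspace_1);
//     fringe.update(layer_a, keyspace_2); // merged with keyspace_1, no second heap entry
//     fringe.update(layer_b, keyspace_3);
//     while let Some((layer, keyspace)) = fringe.next_layer() {
//         // read `keyspace` from `layer`, then extend the fringe with
//         // whatever older layers the remaining keys descend into...
//     }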

impl Default for LayerFringe {
    fn default() -> Self {
        Self::new()
    }
}

impl Ord for ReadableLayerDescOrdered {
    fn cmp(&self, other: &Self) -> Ordering {
        let ord = self.0.get_lsn_ceil().cmp(&other.0.get_lsn_ceil());
        if ord == Ordering::Equal {
            self.0
                .get_lsn_floor()
                .cmp(&other.0.get_lsn_floor())
                .reverse()
        } else {
            ord
        }
    }
}

impl PartialOrd for ReadableLayerDescOrdered {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for ReadableLayerDescOrdered {
    fn eq(&self, other: &Self) -> bool {
        self.0.get_lsn_floor() == other.0.get_lsn_floor()
            && self.0.get_lsn_ceil() == other.0.get_lsn_ceil()
    }
}

impl Eq for ReadableLayerDescOrdered {}

impl ReadableLayerDesc {
    pub(crate) fn get_lsn_floor(&self) -> Lsn {
        match self {
            ReadableLayerDesc::Persistent { lsn_range, .. } => lsn_range.start,
            ReadableLayerDesc::InMemory { handle, .. } => handle.get_lsn_floor(),
        }
    }

    pub(crate) fn get_lsn_ceil(&self) -> Lsn {
        match self {
            ReadableLayerDesc::Persistent { lsn_range, .. } => lsn_range.end,
            ReadableLayerDesc::InMemory { lsn_ceil, .. } => *lsn_ceil,
        }
    }

    pub(crate) async fn get_values_reconstruct_data(
        &self,
        layer_manager: &LayerManager,
        keyspace: KeySpace,
        reconstruct_state: &mut ValuesReconstructState,
        ctx: &RequestContext,
    ) -> Result<(), GetVectoredError> {
        match self {
            ReadableLayerDesc::Persistent { desc, lsn_range } => {
                let layer = layer_manager.get_from_desc(desc);
                layer
                    .get_values_reconstruct_data(
                        keyspace,
                        lsn_range.clone(),
                        reconstruct_state,
                        ctx,
                    )
                    .await
            }
            ReadableLayerDesc::InMemory { handle, lsn_ceil } => {
                let layer = layer_manager
                    .layer_map()
                    .get_in_memory_layer(handle)
                    .unwrap();

                layer
                    .get_values_reconstruct_data(keyspace, *lsn_ceil, reconstruct_state, ctx)
                    .await
            }
        }
    }
}

/// Return value from [`Layer::get_value_reconstruct_data`]
#[derive(Clone, Copy, Debug)]
pub enum ValueReconstructResult {
    /// Got all the data needed to reconstruct the requested page
    Complete,
    /// This layer didn't contain all the required data; the caller should look up
    /// the predecessor layer at the returned LSN and collect more data from there.
    Continue,

    /// This layer didn't contain data needed to reconstruct the page version at
    /// the returned LSN. This is usually considered an error, but might be OK
    /// in some circumstances.
    Missing,
}

#[derive(Debug)]
pub struct LayerAccessStats(Mutex<LayerAccessStatsLocked>);

/// This struct holds two instances of [`LayerAccessStatsInner`].
/// Accesses are recorded to both instances.
/// The `for_scraping_api` instance can be reset from the management API via [`LayerAccessStatsReset`].
/// The `for_eviction_policy` instance is never reset.
#[derive(Debug, Default, Clone)]
struct LayerAccessStatsLocked {
    for_scraping_api: LayerAccessStatsInner,
    for_eviction_policy: LayerAccessStatsInner,
}

impl LayerAccessStatsLocked {
    fn iter_mut(&mut self) -> impl Iterator<Item = &mut LayerAccessStatsInner> {
        [&mut self.for_scraping_api, &mut self.for_eviction_policy].into_iter()
    }
}

#[derive(Debug, Default, Clone)]
struct LayerAccessStatsInner {
    first_access: Option<LayerAccessStatFullDetails>,
    count_by_access_kind: EnumMap<LayerAccessKind, u64>,
    task_kind_flag: EnumSet<TaskKind>,
    last_accesses: HistoryBufferWithDropCounter<LayerAccessStatFullDetails, 16>,
    last_residence_changes: HistoryBufferWithDropCounter<LayerResidenceEvent, 16>,
}

#[derive(Debug, Clone, Copy)]
pub(crate) struct LayerAccessStatFullDetails {
    pub(crate) when: SystemTime,
    pub(crate) task_kind: TaskKind,
    pub(crate) access_kind: LayerAccessKind,
}

#[derive(Clone, Copy, strum_macros::EnumString)]
pub enum LayerAccessStatsReset {
    NoReset,
    JustTaskKindFlags,
    AllStats,
}

fn system_time_to_millis_since_epoch(ts: &SystemTime) -> u64 {
    ts.duration_since(UNIX_EPOCH)
        .expect("better to die in this unlikely case than report false stats")
        .as_millis()
        .try_into()
        .expect("64 bits is enough for a few more years")
}

impl LayerAccessStatFullDetails {
    fn as_api_model(&self) -> pageserver_api::models::LayerAccessStatFullDetails {
        let Self {
            when,
            task_kind,
            access_kind,
        } = self;
        pageserver_api::models::LayerAccessStatFullDetails {
            when_millis_since_epoch: system_time_to_millis_since_epoch(when),
            task_kind: task_kind.into(), // into static str, powered by strum_macros
            access_kind: *access_kind,
        }
    }
}

impl LayerAccessStats {
    /// Create an empty stats object.
    ///
    /// The caller is responsible for recording a residence event
    /// using [`record_residence_event`] before calling `latest_activity`.
    /// If they don't, [`latest_activity`] will return `None`.
    ///
    /// [`record_residence_event`]: Self::record_residence_event
    /// [`latest_activity`]: Self::latest_activity
    pub(crate) fn empty_will_record_residence_event_later() -> Self {
        LayerAccessStats(Mutex::default())
    }

    /// Create an empty stats object and record a [`LayerLoad`] event with the given residence status.
    ///
    /// See [`record_residence_event`] for why you need to do this while holding the layer map lock.
    ///
    /// [`LayerLoad`]: LayerResidenceEventReason::LayerLoad
    /// [`record_residence_event`]: Self::record_residence_event
    pub(crate) fn for_loading_layer(status: LayerResidenceStatus) -> Self {
        let new = LayerAccessStats(Mutex::new(LayerAccessStatsLocked::default()));
        new.record_residence_event(status, LayerResidenceEventReason::LayerLoad);
        new
    }

    /// Record a change in layer residency.
    ///
    /// Recording the event must happen while holding the layer map lock to
    /// ensure that latest-activity-threshold-based layer eviction (eviction_task.rs)
    /// can do an "imitate access" to this layer, before it observes `now-latest_activity() > threshold`.
    ///
    /// If we instead recorded the residence event with a timestamp from before grabbing the layer map lock,
    /// the following race could happen:
    ///
    /// - Compact: Write out an L1 layer from several L0 layers. This records residence event LayerCreate with the current timestamp.
    /// - Eviction: imitate access logical size calculation. This accesses the L0 layers because the L1 layer is not yet in the layer map.
    /// - Compact: Grab layer map lock, add the new L1 to layer map and remove the L0s, release layer map lock.
    /// - Eviction: observes the new L1 layer whose only activity timestamp is the LayerCreate event.
    ///
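    /// The safe ordering, as a sketch (illustrative pseudocode; `layer_map_lock`,
    /// `access_stats`, `new_l1` and `l0s` are assumed names, not this crate's
    /// actual API):
    ///
    /// ```ignore
    /// let mut layers = layer_map_lock.write().unwrap(); // grab the layer map lock first
    /// new_l1
    ///     .access_stats()
    ///     .record_residence_event(LayerResidenceStatus::Resident, LayerResidenceEventReason::LayerCreate);
    /// layers.insert(new_l1); // eviction can only ever see the L1 with a fresh timestamp
    /// layers.remove(l0s);
    /// ```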
    pub(crate) fn record_residence_event(
        &self,
        status: LayerResidenceStatus,
        reason: LayerResidenceEventReason,
    ) {
        let mut locked = self.0.lock().unwrap();
        locked.iter_mut().for_each(|inner| {
            inner
                .last_residence_changes
                .write(LayerResidenceEvent::new(status, reason))
        });
    }

    fn record_access(&self, access_kind: LayerAccessKind, ctx: &RequestContext) {
        if ctx.access_stats_behavior() == AccessStatsBehavior::Skip {
            return;
        }

        let this_access = LayerAccessStatFullDetails {
            when: SystemTime::now(),
            task_kind: ctx.task_kind(),
            access_kind,
        };

        let mut locked = self.0.lock().unwrap();
        locked.iter_mut().for_each(|inner| {
            inner.first_access.get_or_insert(this_access);
            inner.count_by_access_kind[access_kind] += 1;
            inner.task_kind_flag |= ctx.task_kind();
            inner.last_accesses.write(this_access);
        })
    }

    fn as_api_model(
        &self,
        reset: LayerAccessStatsReset,
    ) -> pageserver_api::models::LayerAccessStats {
        let mut locked = self.0.lock().unwrap();
        let inner = &mut locked.for_scraping_api;
        let LayerAccessStatsInner {
            first_access,
            count_by_access_kind,
            task_kind_flag,
            last_accesses,
            last_residence_changes,
        } = inner;
        let ret = pageserver_api::models::LayerAccessStats {
            access_count_by_access_kind: count_by_access_kind
                .iter()
                .map(|(kind, count)| (kind, *count))
                .collect(),
            task_kind_access_flag: task_kind_flag
                .iter()
                .map(|task_kind| task_kind.into()) // into static str, powered by strum_macros
                .collect(),
            first: first_access.as_ref().map(|a| a.as_api_model()),
            accesses_history: last_accesses.map(|m| m.as_api_model()),
            residence_events_history: last_residence_changes.clone(),
        };
        match reset {
            LayerAccessStatsReset::NoReset => (),
            LayerAccessStatsReset::JustTaskKindFlags => {
                inner.task_kind_flag.clear();
            }
            LayerAccessStatsReset::AllStats => {
                *inner = LayerAccessStatsInner::default();
            }
        }
        ret
    }

    /// Get the latest access timestamp, falling back to the latest residence event, and further
    /// falling back to `SystemTime::now`, so eviction always gets a usable timestamp.
    pub(crate) fn latest_activity_or_now(&self) -> SystemTime {
        self.latest_activity().unwrap_or_else(SystemTime::now)
    }

    /// Get the latest access timestamp, falling back to the latest residence event.
    ///
    /// This function can only return `None` if there has not yet been a call to the
    /// [`record_residence_event`] method. That would generally be considered an
    /// implementation error. This function logs a rate-limited warning in that case.
    ///
    /// TODO: use the type system to avoid the need for a fallback.
    /// The approach in <https://github.com/neondatabase/neon/pull/3775>
    /// could be used to enforce that a residence event is recorded
    /// before a layer is added to the layer map. We could also have
    /// a layer wrapper type that holds the LayerAccessStats, and ensure
    /// that that type can only be produced by inserting into the layer map.
    ///
    /// [`record_residence_event`]: Self::record_residence_event
    fn latest_activity(&self) -> Option<SystemTime> {
        let locked = self.0.lock().unwrap();
        let inner = &locked.for_eviction_policy;
        match inner.last_accesses.recent() {
            Some(a) => Some(a.when),
            None => match inner.last_residence_changes.recent() {
                Some(e) => Some(e.timestamp),
                None => {
                    static WARN_RATE_LIMIT: Lazy<Mutex<(usize, RateLimit)>> =
                        Lazy::new(|| Mutex::new((0, RateLimit::new(Duration::from_secs(10)))));
                    let mut guard = WARN_RATE_LIMIT.lock().unwrap();
                    guard.0 += 1;
                    let occurrences = guard.0;
                    guard.1.call(move || {
                        warn!(parent: None, occurrences, "latest_activity not available, this is an implementation bug, using fallback value");
                    });
                    None
                }
            },
        }
    }
}
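
// A sketch of the intended lifecycle (illustrative only; `ctx` construction
// and the surrounding layer-map locking are elided, and `record_access` is
// called from the layer read paths rather than directly like this):
//
//     let stats = LayerAccessStats::for_loading_layer(LayerResidenceStatus::Resident);
//     stats.record_access(LayerAccessKind::GetValueReconstructData, &ctx);
//     // Eviction consults the never-reset copy:
//     let last_used = stats.latest_activity_or_now();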

/// Get a layer descriptor from a layer.
pub trait AsLayerDesc {
    /// Get the layer descriptor.
    fn layer_desc(&self) -> &PersistentLayerDesc;
}

pub mod tests {
    use pageserver_api::shard::TenantShardId;

    use super::*;

    impl From<DeltaFileName> for PersistentLayerDesc {
        fn from(value: DeltaFileName) -> Self {
            PersistentLayerDesc::new_delta(
                TenantShardId::from([0; 18]),
                TimelineId::from_array([0; 16]),
                value.key_range,
                value.lsn_range,
                233,
            )
        }
    }

    impl From<ImageFileName> for PersistentLayerDesc {
        fn from(value: ImageFileName) -> Self {
            PersistentLayerDesc::new_img(
                TenantShardId::from([0; 18]),
                TimelineId::from_array([0; 16]),
                value.key_range,
                value.lsn,
                233,
            )
        }
    }

    impl From<LayerFileName> for PersistentLayerDesc {
        fn from(value: LayerFileName) -> Self {
            match value {
                LayerFileName::Delta(d) => Self::from(d),
                LayerFileName::Image(i) => Self::from(i),
            }
        }
    }
}

/// Range-wrapping newtype which uses Display to render Debug.
///
/// Useful with `Key`, whose `{:?}` output is too verbose when printing multiple layers.
struct RangeDisplayDebug<'a, T: std::fmt::Display>(&'a Range<T>);

impl<'a, T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}..{}", self.0.start, self.0.end)
    }
}
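
// Usage sketch (illustrative; `desc` is an assumed `PersistentLayerDesc`):
//
//     tracing::debug!("layer key range: {:?}", RangeDisplayDebug(&desc.key_range));
//     // renders via Display as `start..end` instead of Key's verbose Debug output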