//! Common traits and structs for layers

pub mod delta_layer;
mod filename;
pub mod image_layer;
mod inmemory_layer;
pub(crate) mod layer;
mod layer_desc;

use crate::context::{AccessStatsBehavior, RequestContext};
use crate::repository::Value;
use crate::task_mgr::TaskKind;
use crate::walrecord::NeonWalRecord;
use bytes::Bytes;
use enum_map::EnumMap;
use enumset::EnumSet;
use once_cell::sync::Lazy;
use pageserver_api::key::Key;
use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum};
use pageserver_api::models::{
    LayerAccessKind, LayerResidenceEvent, LayerResidenceEventReason, LayerResidenceStatus,
};
use std::cmp::{Ordering, Reverse};
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap};
use std::ops::Range;
use std::sync::Mutex;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tracing::warn;
use utils::history_buffer::HistoryBufferWithDropCounter;
use utils::rate_limit::RateLimit;

use utils::{id::TimelineId, lsn::Lsn};

pub use delta_layer::{DeltaLayer, DeltaLayerWriter, ValueRef};
pub use filename::{DeltaFileName, ImageFileName, LayerFileName};
pub use image_layer::{ImageLayer, ImageLayerWriter};
pub use inmemory_layer::InMemoryLayer;
pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey};

pub(crate) use layer::{EvictionError, Layer, ResidentLayer};

use super::layer_map::InMemoryLayerHandle;
use super::timeline::layer_manager::LayerManager;
use super::timeline::GetVectoredError;
use super::PageReconstructError;

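/// Returns true if two half-open ranges overlap.
///
/// Assumes non-empty ranges; for example, `0..5` overlaps `3..8`,
/// while `0..3` and `3..8` do not overlap.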
pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
where
    T: PartialOrd<T>,
{
    if a.start < b.start {
        a.end > b.start
    } else {
        b.end > a.start
    }
}

/// Struct used to communicate across calls to 'get_value_reconstruct_data'.
///
/// Before the first call, you can fill in 'page_img' if you have an older cached
/// version of the page available. That can save work in
/// 'get_value_reconstruct_data', as it can stop searching for page versions
/// when all the WAL records going back to the cached image have been collected.
///
/// When 'get_value_reconstruct_data' returns Complete, 'img' is set to an image
/// of the page, or the oldest WAL record in 'records' is a will_init-type
/// record that initializes the page without requiring a previous image.
///
/// If 'get_value_reconstruct_data' returns Continue, some 'records' may have
/// been collected, but there are more records outside the current layer. Pass
/// the same ValueReconstructState struct in the next 'get_value_reconstruct_data'
/// call, to collect more records.
///
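/// A minimal sketch of the calling loop. This is illustrative only:
/// `layer_for_lsn` and `missing_key_error` are hypothetical helpers, and the
/// argument list of `get_value_reconstruct_data` is abbreviated.
///
/// ```ignore
/// let mut state = ValueReconstructState {
///     records: Vec::new(),
///     img: cached_page_image, // optional older cached image of the page
/// };
/// let mut lsn = request_lsn;
/// loop {
///     let layer = layer_for_lsn(key, lsn)?; // hypothetical layer map lookup
///     match layer.get_value_reconstruct_data(key, lsn, &mut state, ctx)? {
///         ValueReconstructResult::Complete => break,
///         // Continue: resume the search below this layer's LSN range.
///         ValueReconstructResult::Continue => lsn = layer.lsn_floor(), // hypothetical accessor
///         ValueReconstructResult::Missing => return Err(missing_key_error()),
///     }
/// }
/// ```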
#[derive(Debug)]
pub struct ValueReconstructState {
    pub records: Vec<(Lsn, NeonWalRecord)>,
    pub img: Option<(Lsn, Bytes)>,
}

#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub(crate) enum ValueReconstructSituation {
    Complete,
    #[default]
    Continue,
}

/// Reconstruct data accumulated for a single key during a vectored get
#[derive(Debug, Default, Clone)]
pub(crate) struct VectoredValueReconstructState {
    pub(crate) records: Vec<(Lsn, NeonWalRecord)>,
    pub(crate) img: Option<(Lsn, Bytes)>,

    situation: ValueReconstructSituation,
}

impl VectoredValueReconstructState {
    fn get_cached_lsn(&self) -> Option<Lsn> {
        self.img.as_ref().map(|img| img.0)
    }
}

impl From<VectoredValueReconstructState> for ValueReconstructState {
    fn from(mut state: VectoredValueReconstructState) -> Self {
        // walredo expects the records to be descending in terms of Lsn
        state.records.sort_by_key(|(lsn, _)| Reverse(*lsn));

        ValueReconstructState {
            records: state.records,
            img: state.img,
        }
    }
}

/// Bag of data accumulated during a vectored get
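///
/// A rough usage sketch; `key`, `lsn`, and `value` are assumed to come from
/// the surrounding layer traversal, not from this module:
///
/// ```ignore
/// let mut state = ValuesReconstructState::new();
/// match state.update_key(&key, lsn, value) {
///     ValueReconstructSituation::Complete => {
///         // No further data is needed for `key`; it is now part of
///         // the done keyspace.
///         let done: KeySpace = state.consume_done_keys();
///     }
///     ValueReconstructSituation::Continue => {
///         // Keep descending to older layers for this key.
///     }
/// }
/// ```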
pub(crate) struct ValuesReconstructState {
    pub(crate) keys: HashMap<Key, Result<VectoredValueReconstructState, PageReconstructError>>,

    keys_done: KeySpaceRandomAccum,
}

impl ValuesReconstructState {
    pub(crate) fn new() -> Self {
        Self {
            keys: HashMap::new(),
            keys_done: KeySpaceRandomAccum::new(),
        }
    }

    /// Associate a key with the error which it encountered and mark it as done
    pub(crate) fn on_key_error(&mut self, key: Key, err: PageReconstructError) {
        let previous = self.keys.insert(key, Err(err));
        if let Some(Ok(state)) = previous {
            if state.situation == ValueReconstructSituation::Continue {
                self.keys_done.add_key(key);
            }
        }
    }

    /// Update the state collected for a given key.
    /// Returns `Complete` if this was the last value needed for the key,
    /// and `Continue` otherwise.
    ///
    /// If the key is done after the update, mark it as such.
    pub(crate) fn update_key(
        &mut self,
        key: &Key,
        lsn: Lsn,
        value: Value,
    ) -> ValueReconstructSituation {
        let state = self
            .keys
            .entry(*key)
            .or_insert(Ok(VectoredValueReconstructState::default()));

        if let Ok(state) = state {
            let key_done = match state.situation {
                ValueReconstructSituation::Complete => unreachable!(),
                ValueReconstructSituation::Continue => match value {
                    Value::Image(img) => {
                        state.img = Some((lsn, img));
                        true
                    }
                    Value::WalRecord(rec) => {
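                        // The key becomes reconstructable once we hold a
                        // will_init record, or once the record chain reaches
                        // the LSN right above the cached image.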
                        let reached_cache =
                            state.get_cached_lsn().map(|clsn| clsn + 1) == Some(lsn);
                        let will_init = rec.will_init();
                        state.records.push((lsn, rec));
                        will_init || reached_cache
                    }
                },
            };

            if key_done && state.situation == ValueReconstructSituation::Continue {
                state.situation = ValueReconstructSituation::Complete;
                self.keys_done.add_key(*key);
            }

            state.situation
        } else {
            ValueReconstructSituation::Complete
        }
    }

    /// Returns the Lsn at which this key is cached if one exists.
    /// The read path should go no further than this Lsn for the given key.
    pub(crate) fn get_cached_lsn(&self, key: &Key) -> Option<Lsn> {
        self.keys
            .get(key)
            .and_then(|k| k.as_ref().ok())
            .and_then(|state| state.get_cached_lsn())
    }

    /// Returns the key space describing the keys that have
    /// been marked as completed since the last call to this function.
    pub(crate) fn consume_done_keys(&mut self) -> KeySpace {
        self.keys_done.consume_keyspace()
    }
}

impl Default for ValuesReconstructState {
    fn default() -> Self {
        Self::new()
    }
}

/// Description of a layer to be read - the layer map can turn
/// this description into the actual layer.
#[derive(PartialEq, Eq, Hash, Debug, Clone)]
pub(crate) enum ReadableLayerDesc {
    Persistent {
        desc: PersistentLayerDesc,
        lsn_floor: Lsn,
        lsn_ceil: Lsn,
    },
    InMemory {
        handle: InMemoryLayerHandle,
        lsn_ceil: Lsn,
    },
}

/// Wrapper for 'ReadableLayerDesc' sorted by Lsn
#[derive(Debug)]
struct ReadableLayerDescOrdered(ReadableLayerDesc);

/// Data structure which maintains a fringe of layers for the
/// read path. The fringe is the set of layers which intersect
/// the keyspace that the search is currently descending through.
/// Each layer tracks the keyspace that intersects it.
///
/// The fringe must be consumed in descending Lsn order. Hence, it uses
/// a two-level indexing scheme: a heap ordered by Lsn, plus a map from
/// each layer to the keyspace accumulated for it.
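///
/// An illustrative traversal; `layer` and `keyspace` are assumed to come
/// from the surrounding search:
///
/// ```ignore
/// let mut fringe = LayerFringe::new();
/// fringe.update(layer, keyspace);
/// // Layers pop in descending Lsn order, each paired with the keyspace
/// // accumulated for it across `update` calls.
/// while let Some((layer, keyspace)) = fringe.next_layer() {
///     // Read `keyspace` from `layer`, then `update` the fringe with
///     // any deeper layers the read uncovers.
/// }
/// ```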
#[derive(Debug)]
pub(crate) struct LayerFringe {
    layers_by_lsn: BinaryHeap<ReadableLayerDescOrdered>,
    layers: HashMap<ReadableLayerDesc, KeySpace>,
}

impl LayerFringe {
    pub(crate) fn new() -> Self {
        LayerFringe {
            layers_by_lsn: BinaryHeap::new(),
            layers: HashMap::new(),
        }
    }

    pub(crate) fn next_layer(&mut self) -> Option<(ReadableLayerDesc, KeySpace)> {
        let handle = match self.layers_by_lsn.pop() {
            Some(h) => h,
            None => return None,
        };

        let removed = self.layers.remove_entry(&handle.0);
        match removed {
            Some((layer, keyspace)) => Some((layer, keyspace)),
            None => unreachable!("fringe internals are always consistent"),
        }
    }

    pub(crate) fn update(&mut self, layer: ReadableLayerDesc, keyspace: KeySpace) {
        let entry = self.layers.entry(layer.clone());
        match entry {
            Entry::Occupied(mut entry) => {
                entry.get_mut().merge(&keyspace);
            }
            Entry::Vacant(entry) => {
                self.layers_by_lsn
                    .push(ReadableLayerDescOrdered(entry.key().clone()));
                entry.insert(keyspace);
            }
        }
    }
}

impl Default for LayerFringe {
    fn default() -> Self {
        Self::new()
    }
}

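// `BinaryHeap` is a max-heap, so with the ordering below the fringe yields
// the layer with the highest `lsn_ceil` first; on ties, the reversed
// `lsn_floor` comparison makes the layer with the smaller `lsn_floor` pop first.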
impl Ord for ReadableLayerDescOrdered {
    fn cmp(&self, other: &Self) -> Ordering {
        let ord = self.0.get_lsn_ceil().cmp(&other.0.get_lsn_ceil());
        if ord == std::cmp::Ordering::Equal {
            self.0
                .get_lsn_floor()
                .cmp(&other.0.get_lsn_floor())
                .reverse()
        } else {
            ord
        }
    }
}

impl PartialOrd for ReadableLayerDescOrdered {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for ReadableLayerDescOrdered {
    fn eq(&self, other: &Self) -> bool {
        self.0.get_lsn_floor() == other.0.get_lsn_floor()
            && self.0.get_lsn_ceil() == other.0.get_lsn_ceil()
    }
}

impl Eq for ReadableLayerDescOrdered {}

impl ReadableLayerDesc {
    pub(crate) fn get_lsn_floor(&self) -> Lsn {
        match self {
            ReadableLayerDesc::Persistent { lsn_floor, .. } => *lsn_floor,
            ReadableLayerDesc::InMemory { handle, .. } => handle.get_lsn_floor(),
        }
    }

    pub(crate) fn get_lsn_ceil(&self) -> Lsn {
        match self {
            ReadableLayerDesc::Persistent { lsn_ceil, .. } => *lsn_ceil,
            ReadableLayerDesc::InMemory { lsn_ceil, .. } => *lsn_ceil,
        }
    }

    pub(crate) async fn get_values_reconstruct_data(
        &self,
        layer_manager: &LayerManager,
        keyspace: KeySpace,
        reconstruct_state: &mut ValuesReconstructState,
        ctx: &RequestContext,
    ) -> Result<(), GetVectoredError> {
        match self {
            ReadableLayerDesc::Persistent { desc, lsn_ceil, .. } => {
                let layer = layer_manager.get_from_desc(desc);
                layer
                    .get_values_reconstruct_data(keyspace, *lsn_ceil, reconstruct_state, ctx)
                    .await
            }
            ReadableLayerDesc::InMemory { handle, lsn_ceil } => {
                let layer = layer_manager
                    .layer_map()
                    .get_in_memory_layer(handle)
                    .unwrap();

                layer
                    .get_values_reconstruct_data(keyspace, *lsn_ceil, reconstruct_state, ctx)
                    .await
            }
        }
    }
}

/// Return value from [`Layer::get_value_reconstruct_data`]
#[derive(Clone, Copy, Debug)]
pub enum ValueReconstructResult {
    /// Got all the data needed to reconstruct the requested page
    Complete,
    /// This layer didn't contain all the required data; the caller should look up
    /// the predecessor layer at the returned LSN and collect more data from there.
    Continue,

    /// This layer didn't contain data needed to reconstruct the page version at
    /// the returned LSN. This is usually considered an error, but might be OK
    /// in some circumstances.
    Missing,
}

#[derive(Debug)]
pub struct LayerAccessStats(Mutex<LayerAccessStatsLocked>);

/// This struct holds two instances of [`LayerAccessStatsInner`].
/// Accesses are recorded to both instances.
/// The `for_scraping_api` instance can be reset from the management API via [`LayerAccessStatsReset`].
/// The `for_eviction_policy` instance is never reset.
#[derive(Debug, Default, Clone)]
struct LayerAccessStatsLocked {
    for_scraping_api: LayerAccessStatsInner,
    for_eviction_policy: LayerAccessStatsInner,
}

impl LayerAccessStatsLocked {
    fn iter_mut(&mut self) -> impl Iterator<Item = &mut LayerAccessStatsInner> {
        [&mut self.for_scraping_api, &mut self.for_eviction_policy].into_iter()
    }
}

#[derive(Debug, Default, Clone)]
struct LayerAccessStatsInner {
    first_access: Option<LayerAccessStatFullDetails>,
    count_by_access_kind: EnumMap<LayerAccessKind, u64>,
    task_kind_flag: EnumSet<TaskKind>,
    last_accesses: HistoryBufferWithDropCounter<LayerAccessStatFullDetails, 16>,
    last_residence_changes: HistoryBufferWithDropCounter<LayerResidenceEvent, 16>,
}

#[derive(Debug, Clone, Copy)]
pub(crate) struct LayerAccessStatFullDetails {
    pub(crate) when: SystemTime,
    pub(crate) task_kind: TaskKind,
    pub(crate) access_kind: LayerAccessKind,
}

#[derive(Clone, Copy, strum_macros::EnumString)]
pub enum LayerAccessStatsReset {
    NoReset,
    JustTaskKindFlags,
    AllStats,
}

fn system_time_to_millis_since_epoch(ts: &SystemTime) -> u64 {
    ts.duration_since(UNIX_EPOCH)
        .expect("better to die in this unlikely case than report false stats")
        .as_millis()
        .try_into()
        .expect("64 bits is enough for a few more years")
}

impl LayerAccessStatFullDetails {
    fn as_api_model(&self) -> pageserver_api::models::LayerAccessStatFullDetails {
        let Self {
            when,
            task_kind,
            access_kind,
        } = self;
        pageserver_api::models::LayerAccessStatFullDetails {
            when_millis_since_epoch: system_time_to_millis_since_epoch(when),
            task_kind: task_kind.into(), // into static str, powered by strum_macros
            access_kind: *access_kind,
        }
    }
}

impl LayerAccessStats {
    /// Create an empty stats object.
    ///
    /// The caller is responsible for recording a residence event
    /// using [`record_residence_event`] before calling `latest_activity`.
    /// If they don't, [`latest_activity`] will return `None`.
    ///
    /// [`record_residence_event`]: Self::record_residence_event
    /// [`latest_activity`]: Self::latest_activity
    pub(crate) fn empty_will_record_residence_event_later() -> Self {
        LayerAccessStats(Mutex::default())
    }

    /// Create an empty stats object and record a [`LayerLoad`] event with the given residence status.
    ///
    /// See [`record_residence_event`] for why you need to do this while holding the layer map lock.
    ///
    /// [`LayerLoad`]: LayerResidenceEventReason::LayerLoad
    /// [`record_residence_event`]: Self::record_residence_event
    pub(crate) fn for_loading_layer(status: LayerResidenceStatus) -> Self {
        let new = LayerAccessStats(Mutex::new(LayerAccessStatsLocked::default()));
        new.record_residence_event(status, LayerResidenceEventReason::LayerLoad);
        new
    }

    /// Record a change in layer residency.
    ///
    /// Recording the event must happen while holding the layer map lock to
    /// ensure that latest-activity-threshold-based layer eviction (eviction_task.rs)
    /// can do an "imitate access" to this layer, before it observes `now-latest_activity() > threshold`.
    ///
    /// If we instead recorded the residence event with a timestamp from before grabbing the layer map lock,
    /// the following race could happen:
    ///
    /// - Compact: Write out an L1 layer from several L0 layers. This records residence event LayerCreate with the current timestamp.
    /// - Eviction: imitate access logical size calculation. This accesses the L0 layers because the L1 layer is not yet in the layer map.
    /// - Compact: Grab layer map lock, add the new L1 to layer map and remove the L0s, release layer map lock.
    /// - Eviction: observes the new L1 layer whose only activity timestamp is the LayerCreate event.
    pub(crate) fn record_residence_event(
        &self,
        status: LayerResidenceStatus,
        reason: LayerResidenceEventReason,
    ) {
        let mut locked = self.0.lock().unwrap();
        locked.iter_mut().for_each(|inner| {
            inner
                .last_residence_changes
                .write(LayerResidenceEvent::new(status, reason))
        });
    }

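    // An illustrative call pattern for the locking rule above. The `layers`
    // lock and the `insert` call are hypothetical stand-ins for the layer map
    // APIs, not functions defined in this module:
    //
    //     let mut guard = timeline.layers.write().await;
    //     stats.record_residence_event(
    //         LayerResidenceStatus::Resident,
    //         LayerResidenceEventReason::LayerCreate,
    //     );
    //     guard.insert(new_layer);
    //     drop(guard);
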
    fn record_access(&self, access_kind: LayerAccessKind, ctx: &RequestContext) {
        if ctx.access_stats_behavior() == AccessStatsBehavior::Skip {
            return;
        }

        let this_access = LayerAccessStatFullDetails {
            when: SystemTime::now(),
            task_kind: ctx.task_kind(),
            access_kind,
        };

        let mut locked = self.0.lock().unwrap();
        locked.iter_mut().for_each(|inner| {
            inner.first_access.get_or_insert(this_access);
            inner.count_by_access_kind[access_kind] += 1;
            inner.task_kind_flag |= ctx.task_kind();
            inner.last_accesses.write(this_access);
        })
    }

    fn as_api_model(
        &self,
        reset: LayerAccessStatsReset,
    ) -> pageserver_api::models::LayerAccessStats {
        let mut locked = self.0.lock().unwrap();
        let inner = &mut locked.for_scraping_api;
        let LayerAccessStatsInner {
            first_access,
            count_by_access_kind,
            task_kind_flag,
            last_accesses,
            last_residence_changes,
        } = inner;
        let ret = pageserver_api::models::LayerAccessStats {
            access_count_by_access_kind: count_by_access_kind
                .iter()
                .map(|(kind, count)| (kind, *count))
                .collect(),
            task_kind_access_flag: task_kind_flag
                .iter()
                .map(|task_kind| task_kind.into()) // into static str, powered by strum_macros
                .collect(),
            first: first_access.as_ref().map(|a| a.as_api_model()),
            accesses_history: last_accesses.map(|m| m.as_api_model()),
            residence_events_history: last_residence_changes.clone(),
        };
        match reset {
            LayerAccessStatsReset::NoReset => (),
            LayerAccessStatsReset::JustTaskKindFlags => {
                inner.task_kind_flag.clear();
            }
            LayerAccessStatsReset::AllStats => {
                *inner = LayerAccessStatsInner::default();
            }
        }
        ret
    }

    /// Get the latest access timestamp, falling back to the latest residence event, further
    /// falling back to `SystemTime::now` for a usable timestamp for eviction.
    pub(crate) fn latest_activity_or_now(&self) -> SystemTime {
        self.latest_activity().unwrap_or_else(SystemTime::now)
    }

    /// Get the latest access timestamp, falling back to the latest residence event.
    ///
    /// This function can only return `None` if there has not yet been a call to the
    /// [`record_residence_event`] method. That would generally be considered an
    /// implementation error. This function logs a rate-limited warning in that case.
    ///
    /// TODO: use the type system to avoid the need for a fallback.
    /// The approach in <https://github.com/neondatabase/neon/pull/3775>
    /// could be used to enforce that a residence event is recorded
    /// before a layer is added to the layer map. We could also have
    /// a layer wrapper type that holds the LayerAccessStats, and ensure
    /// that that type can only be produced by inserting into the layer map.
    ///
    /// [`record_residence_event`]: Self::record_residence_event
    fn latest_activity(&self) -> Option<SystemTime> {
        let locked = self.0.lock().unwrap();
        let inner = &locked.for_eviction_policy;
        match inner.last_accesses.recent() {
            Some(a) => Some(a.when),
            None => match inner.last_residence_changes.recent() {
                Some(e) => Some(e.timestamp),
                None => {
                    static WARN_RATE_LIMIT: Lazy<Mutex<(usize, RateLimit)>> =
                        Lazy::new(|| Mutex::new((0, RateLimit::new(Duration::from_secs(10)))));
                    let mut guard = WARN_RATE_LIMIT.lock().unwrap();
                    guard.0 += 1;
                    let occurrences = guard.0;
                    guard.1.call(move || {
                        warn!(parent: None, occurrences, "latest_activity not available, this is an implementation bug, using fallback value");
                    });
                    None
                }
            },
        }
    }
}

/// Get a layer descriptor from a layer.
pub trait AsLayerDesc {
    /// Get the layer descriptor.
    fn layer_desc(&self) -> &PersistentLayerDesc;
}

pub mod tests {
    use pageserver_api::shard::TenantShardId;

    use super::*;

    impl From<DeltaFileName> for PersistentLayerDesc {
        fn from(value: DeltaFileName) -> Self {
            PersistentLayerDesc::new_delta(
                TenantShardId::from([0; 18]),
                TimelineId::from_array([0; 16]),
                value.key_range,
                value.lsn_range,
                233,
            )
        }
    }

    impl From<ImageFileName> for PersistentLayerDesc {
        fn from(value: ImageFileName) -> Self {
            PersistentLayerDesc::new_img(
                TenantShardId::from([0; 18]),
                TimelineId::from_array([0; 16]),
                value.key_range,
                value.lsn,
                233,
            )
        }
    }

    impl From<LayerFileName> for PersistentLayerDesc {
        fn from(value: LayerFileName) -> Self {
            match value {
                LayerFileName::Delta(d) => Self::from(d),
                LayerFileName::Image(i) => Self::from(i),
            }
        }
    }
}

/// Range wrapping newtype, which uses display to render Debug.
///
/// Useful with `Key`, whose `{:?}` output is too verbose when printing multiple layers.
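///
/// For example:
///
/// ```ignore
/// let r = 10..20;
/// assert_eq!(format!("{:?}", RangeDisplayDebug(&r)), "10..20");
/// ```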
struct RangeDisplayDebug<'a, T: std::fmt::Display>(&'a Range<T>);

impl<'a, T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}..{}", self.0.start, self.0.end)
    }
}