//! Common traits and structs for layers

pub mod delta_layer;
mod filename;
pub mod image_layer;
pub(crate) mod inmemory_layer;
pub(crate) mod layer;
mod layer_desc;

use crate::context::{AccessStatsBehavior, RequestContext};
use crate::repository::Value;
use crate::task_mgr::TaskKind;
use crate::walrecord::NeonWalRecord;
use bytes::Bytes;
use enum_map::EnumMap;
use enumset::EnumSet;
use once_cell::sync::Lazy;
use pageserver_api::key::Key;
use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum};
use pageserver_api::models::{
    LayerAccessKind, LayerResidenceEvent, LayerResidenceEventReason, LayerResidenceStatus,
};
use std::borrow::Cow;
use std::cmp::{Ordering, Reverse};
use std::collections::hash_map::Entry;
use std::collections::{BinaryHeap, HashMap};
use std::ops::Range;
use std::sync::{Arc, Mutex};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tracing::warn;
use utils::history_buffer::HistoryBufferWithDropCounter;
use utils::rate_limit::RateLimit;

use utils::{id::TimelineId, lsn::Lsn};

pub use delta_layer::{DeltaLayer, DeltaLayerWriter, ValueRef};
pub use filename::{DeltaFileName, ImageFileName, LayerFileName};
pub use image_layer::{ImageLayer, ImageLayerWriter};
pub use inmemory_layer::InMemoryLayer;
pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey};

pub(crate) use layer::{EvictionError, Layer, ResidentLayer};

use self::inmemory_layer::InMemoryLayerFileId;

use super::timeline::GetVectoredError;
use super::PageReconstructError;

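/// Returns true if the two half-open ranges overlap, i.e. share at least one
/// point (assuming both ranges are non-empty).
///
/// A hedged illustration (added for this write-up; shown as an untested doc
/// example):
///
/// ```ignore
/// assert!(range_overlaps(&(1..5), &(4..8))); // both contain 4
/// assert!(!range_overlaps(&(1..4), &(4..8))); // adjacent, but disjoint
/// ```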
pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
where
    T: PartialOrd<T>,
{
    if a.start < b.start {
        a.end > b.start
    } else {
        b.end > a.start
    }
}

/// Struct used to communicate across calls to 'get_value_reconstruct_data'.
///
/// Before the first call, you can fill in 'img' if you have an older cached
/// version of the page available. That can save work in
/// 'get_value_reconstruct_data', as it can stop searching for page versions
/// when all the WAL records going back to the cached image have been collected.
///
/// When 'get_value_reconstruct_data' returns Complete, either 'img' is set to
/// an image of the page, or the oldest WAL record in 'records' is a
/// will_init-type record that initializes the page without requiring a
/// previous image.
///
/// If 'get_value_reconstruct_data' returns Continue, some 'records' may have
/// been collected, but there are more records outside the current layer. Pass
/// the same ValueReconstructState struct in the next 'get_value_reconstruct_data'
/// call, to collect more records.
///
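/// A hedged sketch of the intended calling convention (the caller-side names
/// `cached_page_image`, `predecessor_layer`, and `missing_key_error` are
/// hypothetical; the real loop lives in the timeline read path):
///
/// ```ignore
/// let mut state = ValueReconstructState::default();
/// state.img = cached_page_image; // optional older cached image
/// loop {
///     match layer.get_value_reconstruct_data(key, lsn_range.clone(), &mut state, ctx)? {
///         ValueReconstructResult::Complete => break,
///         ValueReconstructResult::Continue => layer = predecessor_layer(&layer)?,
///         ValueReconstructResult::Missing => return Err(missing_key_error(key)),
///     }
/// }
/// ```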
#[derive(Debug, Default)]
pub struct ValueReconstructState {
    pub records: Vec<(Lsn, NeonWalRecord)>,
    pub img: Option<(Lsn, Bytes)>,
}

#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub(crate) enum ValueReconstructSituation {
    Complete,
    #[default]
    Continue,
}

/// Reconstruct data accumulated for a single key during a vectored get
#[derive(Debug, Default, Clone)]
pub(crate) struct VectoredValueReconstructState {
    pub(crate) records: Vec<(Lsn, NeonWalRecord)>,
    pub(crate) img: Option<(Lsn, Bytes)>,

    situation: ValueReconstructSituation,
}

impl VectoredValueReconstructState {
    fn get_cached_lsn(&self) -> Option<Lsn> {
        self.img.as_ref().map(|img| img.0)
    }
}

impl From<VectoredValueReconstructState> for ValueReconstructState {
    fn from(mut state: VectoredValueReconstructState) -> Self {
        // walredo expects the records to be descending in terms of Lsn
        state.records.sort_by_key(|(lsn, _)| Reverse(*lsn));

        ValueReconstructState {
            records: state.records,
            img: state.img,
        }
    }
}

/// Bag of data accumulated during a vectored get
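///
/// A hedged sketch of how the read path is assumed to drive this type
/// (`fringe` and `ctx` are stand-ins; shown as an untested doc example):
///
/// ```ignore
/// let mut reconstruct_state = ValuesReconstructState::new();
/// while let Some((layer, keyspace, lsn_range)) = fringe.next_layer() {
///     layer
///         .get_values_reconstruct_data(keyspace, lsn_range, &mut reconstruct_state, ctx)
///         .await?;
///     // Keys completed by this layer no longer need to be searched below it.
///     let done = reconstruct_state.consume_done_keys();
/// }
/// ```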
pub(crate) struct ValuesReconstructState {
    pub(crate) keys: HashMap<Key, Result<VectoredValueReconstructState, PageReconstructError>>,

    keys_done: KeySpaceRandomAccum,
}

impl ValuesReconstructState {
    pub(crate) fn new() -> Self {
        Self {
            keys: HashMap::new(),
            keys_done: KeySpaceRandomAccum::new(),
        }
    }

    /// Associate a key with the error which it encountered and mark it as done
    pub(crate) fn on_key_error(&mut self, key: Key, err: PageReconstructError) {
        let previous = self.keys.insert(key, Err(err));
        if let Some(Ok(state)) = previous {
            if state.situation == ValueReconstructSituation::Continue {
                self.keys_done.add_key(key);
            }
        }
    }

    /// Update the state collected for a given key.
    /// Returns `Complete` if this was the last value needed for the key,
    /// and `Continue` otherwise.
    ///
    /// If the key is done after the update, mark it as such.
    pub(crate) fn update_key(
        &mut self,
        key: &Key,
        lsn: Lsn,
        value: Value,
    ) -> ValueReconstructSituation {
        let state = self
            .keys
            .entry(*key)
            .or_insert(Ok(VectoredValueReconstructState::default()));

        if let Ok(state) = state {
            let key_done = match state.situation {
                ValueReconstructSituation::Complete => unreachable!(),
                ValueReconstructSituation::Continue => match value {
                    Value::Image(img) => {
                        state.img = Some((lsn, img));
                        true
                    }
                    Value::WalRecord(rec) => {
                        // The key is done once we have collected the record that
                        // applies directly on top of the cached image, or a record
                        // that initializes the page from scratch.
                        let reached_cache =
                            state.get_cached_lsn().map(|clsn| clsn + 1) == Some(lsn);
                        let will_init = rec.will_init();
                        state.records.push((lsn, rec));
                        will_init || reached_cache
                    }
                },
            };

            if key_done && state.situation == ValueReconstructSituation::Continue {
                state.situation = ValueReconstructSituation::Complete;
                self.keys_done.add_key(*key);
            }

            state.situation
        } else {
            ValueReconstructSituation::Complete
        }
    }

    /// Returns the Lsn at which this key is cached if one exists.
    /// The read path should go no further than this Lsn for the given key.
    pub(crate) fn get_cached_lsn(&self, key: &Key) -> Option<Lsn> {
        self.keys
            .get(key)
            .and_then(|k| k.as_ref().ok())
            .and_then(|state| state.get_cached_lsn())
    }

    /// Returns the key space describing the keys that have
    /// been marked as completed since the last call to this function.
    pub(crate) fn consume_done_keys(&mut self) -> KeySpace {
        self.keys_done.consume_keyspace()
    }
}

impl Default for ValuesReconstructState {
    fn default() -> Self {
        Self::new()
    }
}

/// A key that uniquely identifies a layer in a timeline
#[derive(Debug, PartialEq, Eq, Clone, Hash)]
pub(crate) enum LayerId {
    PersistentLayerId(PersistentLayerKey),
    InMemoryLayerId(InMemoryLayerFileId),
}

/// Layer wrapper for the read path. Note that it is valid
/// to use these layers even after external operations have
/// been performed on them (compaction, freeze, etc.).
#[derive(Debug)]
pub(crate) enum ReadableLayer {
    PersistentLayer(Layer),
    InMemoryLayer(Arc<InMemoryLayer>),
}

/// A partial description of a read to be done.
#[derive(Debug, Clone)]
struct ReadDesc {
    /// An id used to resolve the readable layer within the fringe
    layer_id: LayerId,
    /// Lsn range for the read, used for selecting the next read
    lsn_range: Range<Lsn>,
}

/// Data structure which maintains a fringe of layers for the
/// read path. The fringe is the set of layers which intersect
/// the current keyspace that the search is descending on.
/// Each layer tracks the keyspace that intersects it.
///
/// Reads must be serviced in descending Lsn order, so the fringe
/// uses a two-layer indexing scheme: a max-heap of planned reads
/// ordered by Lsn, plus a map from layer id to the keyspace
/// accumulated for that layer.
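///
/// Illustrative usage (a hedged sketch; the actual driver loop lives in the
/// vectored read path):
///
/// ```ignore
/// let mut fringe = LayerFringe::new();
/// fringe.update(layer, keyspace, lsn_range); // plan a read
/// while let Some((layer, keyspace, lsn_range)) = fringe.next_layer() {
///     // Descend into `layer`, restricted to `keyspace` and `lsn_range`,
///     // possibly calling `fringe.update(..)` for the layers below it.
/// }
/// ```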
#[derive(Debug)]
pub(crate) struct LayerFringe {
    planned_reads_by_lsn: BinaryHeap<ReadDesc>,
    layers: HashMap<LayerId, LayerKeyspace>,
}

#[derive(Debug)]
struct LayerKeyspace {
    layer: ReadableLayer,
    target_keyspace: KeySpace,
}

impl LayerFringe {
    pub(crate) fn new() -> Self {
        LayerFringe {
            planned_reads_by_lsn: BinaryHeap::new(),
            layers: HashMap::new(),
        }
    }

    pub(crate) fn next_layer(&mut self) -> Option<(ReadableLayer, KeySpace, Range<Lsn>)> {
        let read_desc = match self.planned_reads_by_lsn.pop() {
            Some(desc) => desc,
            None => return None,
        };

        let removed = self.layers.remove_entry(&read_desc.layer_id);
        match removed {
            Some((
                _,
                LayerKeyspace {
                    layer,
                    target_keyspace,
                },
            )) => Some((layer, target_keyspace, read_desc.lsn_range)),
            None => unreachable!("fringe internals are always consistent"),
        }
    }

    pub(crate) fn update(
        &mut self,
        layer: ReadableLayer,
        keyspace: KeySpace,
        lsn_range: Range<Lsn>,
    ) {
        let layer_id = layer.id();
        let entry = self.layers.entry(layer_id.clone());
        match entry {
            Entry::Occupied(mut entry) => {
                entry.get_mut().target_keyspace.merge(&keyspace);
            }
            Entry::Vacant(entry) => {
                self.planned_reads_by_lsn.push(ReadDesc {
                    lsn_range,
                    layer_id: layer_id.clone(),
                });
                entry.insert(LayerKeyspace {
                    layer,
                    target_keyspace: keyspace,
                });
            }
        }
    }
}

impl Default for LayerFringe {
    fn default() -> Self {
        Self::new()
    }
}

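// `BinaryHeap` is a max-heap, so ordering `ReadDesc` by the end of its Lsn
// range makes `next_layer` return the read with the highest end Lsn first;
// ties are broken in favor of the smaller start Lsn (note the `reverse()`).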
impl Ord for ReadDesc {
    fn cmp(&self, other: &Self) -> Ordering {
        let ord = self.lsn_range.end.cmp(&other.lsn_range.end);
        if ord == std::cmp::Ordering::Equal {
            self.lsn_range.start.cmp(&other.lsn_range.start).reverse()
        } else {
            ord
        }
    }
}

impl PartialOrd for ReadDesc {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl PartialEq for ReadDesc {
    fn eq(&self, other: &Self) -> bool {
        self.lsn_range == other.lsn_range
    }
}

impl Eq for ReadDesc {}

impl ReadableLayer {
    pub(crate) fn id(&self) -> LayerId {
        match self {
            Self::PersistentLayer(layer) => LayerId::PersistentLayerId(layer.layer_desc().key()),
            Self::InMemoryLayer(layer) => LayerId::InMemoryLayerId(layer.file_id()),
        }
    }

    pub(crate) async fn get_values_reconstruct_data(
        &self,
        keyspace: KeySpace,
        lsn_range: Range<Lsn>,
        reconstruct_state: &mut ValuesReconstructState,
        ctx: &RequestContext,
    ) -> Result<(), GetVectoredError> {
        match self {
            ReadableLayer::PersistentLayer(layer) => {
                layer
                    .get_values_reconstruct_data(keyspace, lsn_range, reconstruct_state, ctx)
                    .await
            }
            ReadableLayer::InMemoryLayer(layer) => {
                layer
                    .get_values_reconstruct_data(keyspace, lsn_range.end, reconstruct_state, ctx)
                    .await
            }
        }
    }
}

/// Return value from [`Layer::get_value_reconstruct_data`]
#[derive(Clone, Copy, Debug)]
pub enum ValueReconstructResult {
    /// Got all the data needed to reconstruct the requested page
    Complete,
    /// This layer didn't contain all the required data, the caller should look up
    /// the predecessor layer at the returned LSN and collect more data from there.
    Continue,

    /// This layer didn't contain data needed to reconstruct the page version at
    /// the returned LSN. This is usually considered an error, but might be OK
    /// in some circumstances.
    Missing,
}

#[derive(Debug)]
pub struct LayerAccessStats(Mutex<LayerAccessStatsLocked>);

/// This struct holds two instances of [`LayerAccessStatsInner`].
/// Accesses are recorded to both instances.
/// The `for_scraping_api` instance can be reset from the management API via [`LayerAccessStatsReset`].
/// The `for_eviction_policy` instance is never reset.
#[derive(Debug, Default, Clone)]
struct LayerAccessStatsLocked {
    for_scraping_api: LayerAccessStatsInner,
    for_eviction_policy: LayerAccessStatsInner,
}

impl LayerAccessStatsLocked {
    fn iter_mut(&mut self) -> impl Iterator<Item = &mut LayerAccessStatsInner> {
        [&mut self.for_scraping_api, &mut self.for_eviction_policy].into_iter()
    }
}

#[derive(Debug, Default, Clone)]
struct LayerAccessStatsInner {
    first_access: Option<LayerAccessStatFullDetails>,
    count_by_access_kind: EnumMap<LayerAccessKind, u64>,
    task_kind_flag: EnumSet<TaskKind>,
    last_accesses: HistoryBufferWithDropCounter<LayerAccessStatFullDetails, 16>,
    last_residence_changes: HistoryBufferWithDropCounter<LayerResidenceEvent, 16>,
}

#[derive(Debug, Clone, Copy)]
pub(crate) struct LayerAccessStatFullDetails {
    pub(crate) when: SystemTime,
    pub(crate) task_kind: TaskKind,
    pub(crate) access_kind: LayerAccessKind,
}

#[derive(Clone, Copy, strum_macros::EnumString)]
pub enum LayerAccessStatsReset {
    NoReset,
    JustTaskKindFlags,
    AllStats,
}

fn system_time_to_millis_since_epoch(ts: &SystemTime) -> u64 {
    ts.duration_since(UNIX_EPOCH)
        .expect("better to die in this unlikely case than report false stats")
        .as_millis()
        .try_into()
        .expect("64 bits is enough for a few more years")
}

impl LayerAccessStatFullDetails {
    fn as_api_model(&self) -> pageserver_api::models::LayerAccessStatFullDetails {
        let Self {
            when,
            task_kind,
            access_kind,
        } = self;
        pageserver_api::models::LayerAccessStatFullDetails {
            when_millis_since_epoch: system_time_to_millis_since_epoch(when),
            task_kind: Cow::Borrowed(task_kind.into()), // into static str, powered by strum_macros
            access_kind: *access_kind,
        }
    }
}

impl LayerAccessStats {
    /// Create an empty stats object.
    ///
    /// The caller is responsible for recording a residence event
    /// using [`record_residence_event`] before calling `latest_activity`.
    /// If they don't, [`latest_activity`] will return `None`.
    ///
    /// [`record_residence_event`]: Self::record_residence_event
    /// [`latest_activity`]: Self::latest_activity
    pub(crate) fn empty_will_record_residence_event_later() -> Self {
        LayerAccessStats(Mutex::default())
    }

    /// Create an empty stats object and record a [`LayerLoad`] event with the given residence status.
    ///
    /// See [`record_residence_event`] for why you need to do this while holding the layer map lock.
    ///
    /// [`LayerLoad`]: LayerResidenceEventReason::LayerLoad
    /// [`record_residence_event`]: Self::record_residence_event
    pub(crate) fn for_loading_layer(status: LayerResidenceStatus) -> Self {
        let new = LayerAccessStats(Mutex::new(LayerAccessStatsLocked::default()));
        new.record_residence_event(status, LayerResidenceEventReason::LayerLoad);
        new
    }

    /// Record a change in layer residency.
    ///
    /// Recording the event must happen while holding the layer map lock to
    /// ensure that latest-activity-threshold-based layer eviction (eviction_task.rs)
    /// can do an "imitate access" to this layer, before it observes `now-latest_activity() > threshold`.
    ///
    /// If we instead recorded the residence event with a timestamp from before grabbing the layer map lock,
    /// the following race could happen:
    ///
    /// - Compact: Write out an L1 layer from several L0 layers. This records residence event LayerCreate with the current timestamp.
    /// - Eviction: imitate access logical size calculation. This accesses the L0 layers because the L1 layer is not yet in the layer map.
    /// - Compact: Grab layer map lock, add the new L1 to layer map and remove the L0s, release layer map lock.
    /// - Eviction: observes the new L1 layer whose only activity timestamp is the LayerCreate event.
    pub(crate) fn record_residence_event(
        &self,
        status: LayerResidenceStatus,
        reason: LayerResidenceEventReason,
    ) {
        let mut locked = self.0.lock().unwrap();
        locked.iter_mut().for_each(|inner| {
            inner
                .last_residence_changes
                .write(LayerResidenceEvent::new(status, reason))
        });
    }

    fn record_access(&self, access_kind: LayerAccessKind, ctx: &RequestContext) {
        if ctx.access_stats_behavior() == AccessStatsBehavior::Skip {
            return;
        }

        let this_access = LayerAccessStatFullDetails {
            when: SystemTime::now(),
            task_kind: ctx.task_kind(),
            access_kind,
        };

        let mut locked = self.0.lock().unwrap();
        locked.iter_mut().for_each(|inner| {
            inner.first_access.get_or_insert(this_access);
            inner.count_by_access_kind[access_kind] += 1;
            inner.task_kind_flag |= ctx.task_kind();
            inner.last_accesses.write(this_access);
        })
    }

    fn as_api_model(
        &self,
        reset: LayerAccessStatsReset,
    ) -> pageserver_api::models::LayerAccessStats {
        let mut locked = self.0.lock().unwrap();
        let inner = &mut locked.for_scraping_api;
        let LayerAccessStatsInner {
            first_access,
            count_by_access_kind,
            task_kind_flag,
            last_accesses,
            last_residence_changes,
        } = inner;
        let ret = pageserver_api::models::LayerAccessStats {
            access_count_by_access_kind: count_by_access_kind
                .iter()
                .map(|(kind, count)| (kind, *count))
                .collect(),
            task_kind_access_flag: task_kind_flag
                .iter()
                .map(|task_kind| Cow::Borrowed(task_kind.into())) // into static str, powered by strum_macros
                .collect(),
            first: first_access.as_ref().map(|a| a.as_api_model()),
            accesses_history: last_accesses.map(|m| m.as_api_model()),
            residence_events_history: last_residence_changes.clone(),
        };
        match reset {
            LayerAccessStatsReset::NoReset => (),
            LayerAccessStatsReset::JustTaskKindFlags => {
                inner.task_kind_flag.clear();
            }
            LayerAccessStatsReset::AllStats => {
                *inner = LayerAccessStatsInner::default();
            }
        }
        ret
    }

    /// Get the latest access timestamp, falling back to the latest residence event, and further
    /// falling back to `SystemTime::now`, so that eviction always has a usable timestamp.
    pub(crate) fn latest_activity_or_now(&self) -> SystemTime {
        self.latest_activity().unwrap_or_else(SystemTime::now)
    }

    /// Get the latest access timestamp, falling back to the latest residence event.
    ///
    /// This function can only return `None` if there has not yet been a call to the
    /// [`record_residence_event`] method. That would generally be considered an
    /// implementation error. This function logs a rate-limited warning in that case.
    ///
    /// TODO: use the type system to avoid the need for a fallback.
    /// The approach in <https://github.com/neondatabase/neon/pull/3775>
    /// could be used to enforce that a residence event is recorded
    /// before a layer is added to the layer map. We could also have
    /// a layer wrapper type that holds the LayerAccessStats, and ensure
    /// that that type can only be produced by inserting into the layer map.
    ///
    /// [`record_residence_event`]: Self::record_residence_event
576 0 : let locked = self.0.lock().unwrap();
577 0 : let inner = &locked.for_eviction_policy;
578 0 : match inner.last_accesses.recent() {
579 0 : Some(a) => Some(a.when),
580 0 : None => match inner.last_residence_changes.recent() {
581 0 : Some(e) => Some(e.timestamp),
582 : None => {
583 : static WARN_RATE_LIMIT: Lazy<Mutex<(usize, RateLimit)>> =
584 0 : Lazy::new(|| Mutex::new((0, RateLimit::new(Duration::from_secs(10)))));
585 0 : let mut guard = WARN_RATE_LIMIT.lock().unwrap();
586 0 : guard.0 += 1;
587 0 : let occurences = guard.0;
588 0 : guard.1.call(move || {
589 0 : warn!(parent: None, occurences, "latest_activity not available, this is an implementation bug, using fallback value");
590 0 : });
591 0 : None
592 : }
593 : },
594 : }
595 0 : }
596 : }

/// Get a layer descriptor from a layer.
pub trait AsLayerDesc {
    /// Get the layer descriptor.
    fn layer_desc(&self) -> &PersistentLayerDesc;
}

pub mod tests {
    use pageserver_api::shard::TenantShardId;

    use super::*;

    impl From<DeltaFileName> for PersistentLayerDesc {
        fn from(value: DeltaFileName) -> Self {
            PersistentLayerDesc::new_delta(
                TenantShardId::from([0; 18]),
                TimelineId::from_array([0; 16]),
                value.key_range,
                value.lsn_range,
                233,
            )
        }
    }

    impl From<ImageFileName> for PersistentLayerDesc {
        fn from(value: ImageFileName) -> Self {
            PersistentLayerDesc::new_img(
                TenantShardId::from([0; 18]),
                TimelineId::from_array([0; 16]),
                value.key_range,
                value.lsn,
                233,
            )
        }
    }

    impl From<LayerFileName> for PersistentLayerDesc {
        fn from(value: LayerFileName) -> Self {
            match value {
                LayerFileName::Delta(d) => Self::from(d),
                LayerFileName::Image(i) => Self::from(i),
            }
        }
    }
}

/// Range wrapping newtype, which uses display to render Debug.
///
/// Useful with `Key`, whose `{:?}` output is too verbose for printing multiple layers.
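///
/// A hedged illustration (untested doc example):
///
/// ```ignore
/// // Debug-formats via Display as `0..10`, rather than the Debug output of `T`.
/// println!("{:?}", RangeDisplayDebug(&(0..10)));
/// ```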
struct RangeDisplayDebug<'a, T: std::fmt::Display>(&'a Range<T>);

impl<'a, T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}..{}", self.0.start, self.0.end)
    }
}