LCOV - code coverage report
Current view: top level - pageserver/src/tenant - storage_layer.rs (source / functions)
Test:      ae32e90d936a6b0438b35d9ab8babc961b1ac107.info
Test Date: 2025-01-29 17:08:55
             Coverage    Total    Hit
Lines:         79.1 %      546    432
Functions:     80.2 %       81     65

            Line data    Source code
       1              : //! Common traits and structs for layers
       2              : 
       3              : pub mod batch_split_writer;
       4              : pub mod delta_layer;
       5              : pub mod filter_iterator;
       6              : pub mod image_layer;
       7              : pub mod inmemory_layer;
       8              : pub(crate) mod layer;
       9              : mod layer_desc;
      10              : mod layer_name;
      11              : pub mod merge_iterator;
      12              : 
      13              : use crate::config::PageServerConf;
      14              : use crate::context::{AccessStatsBehavior, RequestContext};
      15              : use bytes::Bytes;
      16              : use futures::stream::FuturesUnordered;
      17              : use futures::StreamExt;
      18              : use pageserver_api::key::Key;
      19              : use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum};
      20              : use pageserver_api::record::NeonWalRecord;
      21              : use pageserver_api::value::Value;
      22              : use std::cmp::Ordering;
      23              : use std::collections::hash_map::Entry;
      24              : use std::collections::{BinaryHeap, HashMap};
      25              : use std::future::Future;
      26              : use std::ops::Range;
      27              : use std::pin::Pin;
      28              : use std::sync::atomic::AtomicUsize;
      29              : use std::sync::Arc;
      30              : use std::time::{Duration, SystemTime, UNIX_EPOCH};
      31              : use tracing::{trace, Instrument};
      32              : use utils::sync::gate::GateGuard;
      33              : 
      34              : use utils::lsn::Lsn;
      35              : 
      36              : pub use batch_split_writer::{BatchLayerWriter, SplitDeltaLayerWriter, SplitImageLayerWriter};
      37              : pub use delta_layer::{DeltaLayer, DeltaLayerWriter, ValueRef};
      38              : pub use image_layer::{ImageLayer, ImageLayerWriter};
      39              : pub use inmemory_layer::InMemoryLayer;
      40              : pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey};
      41              : pub use layer_name::{DeltaLayerName, ImageLayerName, LayerName};
      42              : 
      43              : pub(crate) use layer::{EvictionError, Layer, ResidentLayer};
      44              : 
      45              : use self::inmemory_layer::InMemoryLayerFileId;
      46              : 
      47              : use super::timeline::GetVectoredError;
      48              : use super::PageReconstructError;
      49              : 
      50            0 : pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
      51            0 : where
      52            0 :     T: PartialOrd<T>,
      53            0 : {
      54            0 :     if a.start < b.start {
      55            0 :         a.end > b.start
      56              :     } else {
      57            0 :         b.end > a.start
      58              :     }
      59            0 : }
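// Illustrative sketch (not part of the original source): the half-open `Range`
// semantics assumed by `range_overlaps`.
#[cfg(test)]
#[allow(dead_code)]
fn range_overlaps_examples() {
    assert!(range_overlaps(&(1..5), &(4..8))); // both ranges contain 4
    assert!(!range_overlaps(&(1..5), &(5..8))); // end == start: no overlap
    assert!(range_overlaps(&(0..10), &(3..4))); // containment counts as overlap
}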
      60              : 
      61              : /// Struct used to communicate across calls to 'get_value_reconstruct_data'.
      62              : ///
       63              : /// Before the first call, you can fill in 'img' if you have an older cached
      64              : /// version of the page available. That can save work in
      65              : /// 'get_value_reconstruct_data', as it can stop searching for page versions
      66              : /// when all the WAL records going back to the cached image have been collected.
      67              : ///
      68              : /// When get_value_reconstruct_data returns Complete, 'img' is set to an image
      69              : /// of the page, or the oldest WAL record in 'records' is a will_init-type
      70              : /// record that initializes the page without requiring a previous image.
      71              : ///
       72              : /// If 'get_value_reconstruct_data' returns Continue, some 'records' may have
      73              : /// been collected, but there are more records outside the current layer. Pass
      74              : /// the same ValueReconstructState struct in the next 'get_value_reconstruct_data'
      75              : /// call, to collect more records.
      76              : ///
      77              : #[derive(Debug, Default)]
      78              : pub(crate) struct ValueReconstructState {
      79              :     pub(crate) records: Vec<(Lsn, NeonWalRecord)>,
      80              :     pub(crate) img: Option<(Lsn, Bytes)>,
      81              : }
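// Illustrative sketch (not part of the original source): the "Complete" condition
// described above, assuming `NeonWalRecord::will_init()` reports records that
// initialize the page without needing a previous image, and that traversal pushes
// records from newest to oldest.
#[cfg(test)]
#[allow(dead_code)]
fn is_reconstructable(state: &ValueReconstructState) -> bool {
    // Either we hold a (cached or on-disk) page image ...
    state.img.is_some()
        // ... or the oldest collected record initializes the page from scratch.
        || state
            .records
            .last()
            .is_some_and(|(_lsn, rec)| rec.will_init())
}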
      82              : 
      83              : #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
      84              : pub(crate) enum ValueReconstructSituation {
      85              :     Complete,
      86              :     #[default]
      87              :     Continue,
      88              : }
      89              : 
      90              : /// On disk representation of a value loaded in a buffer
      91              : #[derive(Debug)]
      92              : pub(crate) enum OnDiskValue {
      93              :     /// Unencoded [`Value::Image`]
      94              :     RawImage(Bytes),
      95              :     /// Encoded [`Value`]. Can deserialize into an image or a WAL record
      96              :     WalRecordOrImage(Bytes),
      97              : }
      98              : 
      99              : /// Reconstruct data accumulated for a single key during a vectored get
     100              : #[derive(Debug, Default)]
     101              : pub(crate) struct VectoredValueReconstructState {
     102              :     pub(crate) on_disk_values: Vec<(Lsn, OnDiskValueIoWaiter)>,
     103              : 
     104              :     pub(crate) situation: ValueReconstructSituation,
     105              : }
     106              : 
     107              : #[derive(Debug)]
     108              : pub(crate) struct OnDiskValueIoWaiter {
     109              :     rx: tokio::sync::oneshot::Receiver<OnDiskValueIoResult>,
     110              : }
     111              : 
     112              : #[derive(Debug)]
     113              : #[must_use]
     114              : pub(crate) enum OnDiskValueIo {
     115              :     /// Traversal identified this IO as required to complete the vectored get.
     116              :     Required {
     117              :         num_active_ios: Arc<AtomicUsize>,
     118              :         tx: tokio::sync::oneshot::Sender<OnDiskValueIoResult>,
     119              :     },
     120              :     /// Sparse keyspace reads always read all the values for a given key,
     121              :     /// even though only the first value is needed.
     122              :     ///
     123              :     /// This variant represents the unnecessary IOs for those values at lower LSNs
     124              :     /// that aren't needed, but are currently still being done.
     125              :     ///
     126              :     /// The execution of unnecessary IOs was a pre-existing behavior before concurrent IO.
     127              :     /// We added this explicit representation here so that we can drop
     128              :     /// unnecessary IO results immediately, instead of buffering them in
     129              :     /// `oneshot` channels inside [`VectoredValueReconstructState`] until
     130              :     /// [`VectoredValueReconstructState::collect_pending_ios`] gets called.
     131              :     Unnecessary,
     132              : }
     133              : 
     134              : type OnDiskValueIoResult = Result<OnDiskValue, std::io::Error>;
     135              : 
     136              : impl OnDiskValueIo {
     137      1482755 :     pub(crate) fn complete(self, res: OnDiskValueIoResult) {
     138      1482755 :         match self {
     139      1337380 :             OnDiskValueIo::Required { num_active_ios, tx } => {
     140      1337380 :                 num_active_ios.fetch_sub(1, std::sync::atomic::Ordering::Release);
     141      1337380 :                 let _ = tx.send(res);
     142      1337380 :             }
     143       145375 :             OnDiskValueIo::Unnecessary => {
     144       145375 :                 // Nobody cared, see variant doc comment.
     145       145375 :             }
     146              :         }
     147      1482755 :     }
     148              : }
     149              : 
     150              : #[derive(Debug, thiserror::Error)]
     151              : pub(crate) enum WaitCompletionError {
     152              :     #[error("OnDiskValueIo was dropped without completing, likely the sidecar task panicked")]
     153              :     IoDropped,
     154              : }
     155              : 
     156              : impl OnDiskValueIoWaiter {
     157      1337378 :     pub(crate) async fn wait_completion(self) -> Result<OnDiskValueIoResult, WaitCompletionError> {
     158      1337378 :         // NB: for Unnecessary IOs, this method never gets called because we don't add them to `on_disk_values`.
     159      1337378 :         self.rx.await.map_err(|_| WaitCompletionError::IoDropped)
     160      1337378 :     }
     161              : }
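// Illustrative sketch (not part of the original source): how a `Required` IO and
// its waiter are wired together through a oneshot channel. The payload is a
// placeholder; real callers go through `ValuesReconstructState::update_key`.
#[cfg(test)]
#[allow(dead_code)]
async fn required_io_roundtrip() {
    let num_active_ios = Arc::new(AtomicUsize::new(1));
    let (tx, rx) = tokio::sync::oneshot::channel();
    let io = OnDiskValueIo::Required {
        num_active_ios: Arc::clone(&num_active_ios),
        tx,
    };
    let waiter = OnDiskValueIoWaiter { rx };

    // IO side: decrements the active-IO counter and sends the result.
    io.complete(Ok(OnDiskValue::RawImage(Bytes::from_static(b"page image"))));

    // Reader side: `collect_pending_ios` awaits this for every (lsn, waiter) pair.
    let _res = waiter.wait_completion().await;
}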
     162              : 
     163              : impl VectoredValueReconstructState {
     164              :     /// # Cancel-Safety
     165              :     ///
      166              :     /// Technically it is fine to stop polling this future, but the IOs will still
      167              :     /// be executed to completion by the sidecar task and hold on to / consume resources.
      168              :     /// Better not to do it, to keep reasoning about the system easier.
     169      1336066 :     pub(crate) async fn collect_pending_ios(
     170      1336066 :         self,
     171      1336066 :     ) -> Result<ValueReconstructState, PageReconstructError> {
     172              :         use utils::bin_ser::BeSer;
     173              : 
     174      1336066 :         let mut res = Ok(ValueReconstructState::default());
     175              : 
     176              :         // We should try hard not to bail early, so that by the time we return from this
     177              :         // function, all IO for this value is done. It's not required -- we could totally
     178              :         // stop polling the IO futures in the sidecar task, they need to support that,
     179              :         // but just stopping to poll doesn't reduce the IO load on the disk. It's easier
     180              :         // to reason about the system if we just wait for all IO to complete, even if
     181              :         // we're no longer interested in the result.
     182              :         //
     183              :         // Revisit this when IO futures are replaced with a more sophisticated IO system
     184              :         // and an IO scheduler, where we know which IOs were submitted and which ones
     185              :         // just queued. Cf the comment on IoConcurrency::spawn_io.
     186      2673444 :         for (lsn, waiter) in self.on_disk_values {
     187      1337378 :             let value_recv_res = waiter
     188      1337378 :                 .wait_completion()
     189      1337378 :                 // we rely on the caller to poll us to completion, so this is not a bail point
     190      1337378 :                 .await;
     191              :             // Force not bailing early by wrapping the code into a closure.
     192              :             #[allow(clippy::redundant_closure_call)]
     193      1337378 :             let _: () = (|| {
     194      1337378 :                 match (&mut res, value_recv_res) {
     195            0 :                     (Err(_), _) => {
     196            0 :                         // We've already failed, no need to process more.
     197            0 :                     }
     198            0 :                     (Ok(_), Err(wait_err)) => {
     199            0 :                         // This shouldn't happen - likely the sidecar task panicked.
     200            0 :                         res = Err(PageReconstructError::Other(wait_err.into()));
     201            0 :                     }
     202            0 :                     (Ok(_), Ok(Err(err))) => {
     203            0 :                         let err: std::io::Error = err;
     204            0 :                         // TODO: returning IO error here will fail a compute query.
     205            0 :                         // Probably not what we want, we're not doing `maybe_fatal_err`
     206            0 :                         // in the IO futures.
     207            0 :                         // But it's been like that for a long time, not changing it
     208            0 :                         // as part of concurrent IO.
     209            0 :                         // => https://github.com/neondatabase/neon/issues/10454
     210            0 :                         res = Err(PageReconstructError::Other(err.into()));
     211            0 :                     }
     212        38198 :                     (Ok(ok), Ok(Ok(OnDiskValue::RawImage(img)))) => {
     213        38198 :                         assert!(ok.img.is_none());
     214        38198 :                         ok.img = Some((lsn, img));
     215              :                     }
     216      1299180 :                     (Ok(ok), Ok(Ok(OnDiskValue::WalRecordOrImage(buf)))) => {
     217      1299180 :                         match Value::des(&buf) {
     218         1400 :                             Ok(Value::WalRecord(rec)) => {
     219         1400 :                                 ok.records.push((lsn, rec));
     220         1400 :                             }
     221      1297780 :                             Ok(Value::Image(img)) => {
     222      1297780 :                                 assert!(ok.img.is_none());
     223      1297780 :                                 ok.img = Some((lsn, img));
     224              :                             }
     225            0 :                             Err(err) => {
     226            0 :                                 res = Err(PageReconstructError::Other(err.into()));
     227            0 :                             }
     228              :                         }
     229              :                     }
     230              :                 }
     231      1337378 :             })();
     232      1337378 :         }
     233              : 
     234      1336066 :         res
     235      1336066 :     }
     236              : }
     237              : 
      238              : /// Bag of data accumulated during a vectored get.
     239              : pub(crate) struct ValuesReconstructState {
     240              :     /// The keys will be removed after `get_vectored` completes. The caller outside `Timeline`
     241              :     /// should not expect to get anything from this hashmap.
     242              :     pub(crate) keys: HashMap<Key, VectoredValueReconstructState>,
     243              :     /// The keys which are already retrieved
     244              :     keys_done: KeySpaceRandomAccum,
     245              : 
     246              :     /// The keys covered by the image layers
     247              :     keys_with_image_coverage: Option<Range<Key>>,
     248              : 
     249              :     // Statistics that are still accessible as a caller of `get_vectored_impl`.
     250              :     layers_visited: u32,
     251              :     delta_layers_visited: u32,
     252              : 
     253              :     pub(crate) io_concurrency: IoConcurrency,
     254              :     num_active_ios: Arc<AtomicUsize>,
     255              : }
     256              : 
     257              : /// The level of IO concurrency to be used on the read path
     258              : ///
     259              : /// The desired end state is that we always do parallel IO.
     260              : /// This struct and the dispatching in the impl will be removed once
     261              : /// we've built enough confidence.
     262              : pub(crate) enum IoConcurrency {
     263              :     Sequential,
     264              :     SidecarTask {
     265              :         task_id: usize,
     266              :         ios_tx: tokio::sync::mpsc::UnboundedSender<IoFuture>,
     267              :     },
     268              : }
     269              : 
     270              : type IoFuture = Pin<Box<dyn Send + Future<Output = ()>>>;
     271              : 
     272              : pub(crate) enum SelectedIoConcurrency {
     273              :     Sequential,
     274              :     SidecarTask(GateGuard),
     275              : }
     276              : 
     277              : impl std::fmt::Debug for IoConcurrency {
     278            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     279            0 :         match self {
     280            0 :             IoConcurrency::Sequential => write!(f, "Sequential"),
     281            0 :             IoConcurrency::SidecarTask { .. } => write!(f, "SidecarTask"),
     282              :         }
     283            0 :     }
     284              : }
     285              : 
     286              : impl std::fmt::Debug for SelectedIoConcurrency {
     287           64 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     288           64 :         match self {
     289           32 :             SelectedIoConcurrency::Sequential => write!(f, "Sequential"),
     290           32 :             SelectedIoConcurrency::SidecarTask(_) => write!(f, "SidecarTask"),
     291              :         }
     292           64 :     }
     293              : }
     294              : 
     295              : impl IoConcurrency {
      296              :     /// Force sequential IO. This is a temporary workaround until we have
      297              :     /// finished plumbing `IoConcurrency` through the call stack
      298              :     /// into `RequestContext`.
     299              :     ///
     300              :     /// DO NOT USE for new code.
     301              :     ///
     302              :     /// Tracking issue: <https://github.com/neondatabase/neon/issues/10460>.
     303      1215206 :     pub(crate) fn sequential() -> Self {
     304      1215206 :         Self::spawn(SelectedIoConcurrency::Sequential)
     305      1215206 :     }
     306              : 
     307         1012 :     pub(crate) fn spawn_from_conf(
     308         1012 :         conf: &'static PageServerConf,
     309         1012 :         gate_guard: GateGuard,
     310         1012 :     ) -> IoConcurrency {
     311              :         use pageserver_api::config::GetVectoredConcurrentIo;
     312         1012 :         let selected = match conf.get_vectored_concurrent_io {
     313         1012 :             GetVectoredConcurrentIo::Sequential => SelectedIoConcurrency::Sequential,
     314            0 :             GetVectoredConcurrentIo::SidecarTask => SelectedIoConcurrency::SidecarTask(gate_guard),
     315              :         };
     316         1012 :         Self::spawn(selected)
     317         1012 :     }
     318              : 
     319      1216282 :     pub(crate) fn spawn(io_concurrency: SelectedIoConcurrency) -> Self {
     320      1216282 :         match io_concurrency {
     321      1216250 :             SelectedIoConcurrency::Sequential => IoConcurrency::Sequential,
     322           32 :             SelectedIoConcurrency::SidecarTask(gate_guard) => {
     323           32 :                 let (ios_tx, ios_rx) = tokio::sync::mpsc::unbounded_channel();
     324              :                 static TASK_ID: AtomicUsize = AtomicUsize::new(0);
     325           32 :                 let task_id = TASK_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
     326              :                 // TODO: enrich the span with more context (tenant,shard,timeline) + (basebackup|pagestream|...)
     327           32 :                 let span =
     328           32 :                     tracing::info_span!(parent: None, "IoConcurrency_sidecar", task_id = task_id);
     329           32 :                 trace!(task_id, "spawning sidecar task");
     330           32 :                 tokio::spawn(async move {
     331           32 :                     trace!("start");
     332           32 :                     scopeguard::defer!{ trace!("end") };
     333              :                     type IosRx = tokio::sync::mpsc::UnboundedReceiver<IoFuture>;
     334              :                     enum State {
     335              :                         Waiting {
     336              :                             // invariant: is_empty(), but we recycle the allocation
     337              :                             empty_futures: FuturesUnordered<IoFuture>,
     338              :                             ios_rx: IosRx,
     339              :                         },
     340              :                         Executing {
     341              :                             futures: FuturesUnordered<IoFuture>,
     342              :                             ios_rx: IosRx,
     343              :                         },
     344              :                         ShuttingDown {
     345              :                             futures: FuturesUnordered<IoFuture>,
     346              :                         },
     347              :                     }
     348           32 :                     let mut state = State::Waiting {
     349           32 :                         empty_futures: FuturesUnordered::new(),
     350           32 :                         ios_rx,
     351           32 :                     };
     352              :                     loop {
     353        39084 :                         match state {
     354              :                             State::Waiting {
     355        18531 :                                 empty_futures,
     356        18531 :                                 mut ios_rx,
     357        18531 :                             } => {
     358        18531 :                                 assert!(empty_futures.is_empty());
     359        18531 :                                 tokio::select! {
     360        18531 :                                     fut = ios_rx.recv() => {
     361        18499 :                                         if let Some(fut) = fut {
     362        18499 :                                             trace!("received new io future");
     363        18499 :                                             empty_futures.push(fut);
     364        18499 :                                             state = State::Executing { futures: empty_futures, ios_rx };
     365              :                                         } else {
     366            0 :                                             state = State::ShuttingDown { futures: empty_futures }
     367              :                                         }
     368              :                                     }
     369              :                                 }
     370              :                             }
     371              :                             State::Executing {
     372        20553 :                                 mut futures,
     373        20553 :                                 mut ios_rx,
     374        20553 :                             } => {
     375        20553 :                                 tokio::select! {
     376        20553 :                                     res = futures.next() => {
     377        19526 :                                         trace!("io future completed");
     378        19526 :                                         assert!(res.is_some());
     379        19526 :                                         if futures.is_empty() {
     380        18499 :                                             state = State::Waiting { empty_futures: futures, ios_rx};
     381        18499 :                                         } else {
     382         1027 :                                             state = State::Executing { futures, ios_rx };
     383         1027 :                                         }
     384              :                                     }
     385        20553 :                                     fut = ios_rx.recv() => {
     386         1027 :                                         if let Some(fut) = fut {
     387         1027 :                                             trace!("received new io future");
     388         1027 :                                             futures.push(fut);
     389         1027 :                                             state =  State::Executing { futures, ios_rx};
     390            0 :                                         } else {
     391            0 :                                             state = State::ShuttingDown { futures };
     392            0 :                                         }
     393              :                                     }
     394              :                                 }
     395              :                             }
     396              :                             State::ShuttingDown {
     397            0 :                                 mut futures,
     398            0 :                             } => {
     399            0 :                                 trace!("shutting down");
     400            0 :                                 while let Some(()) = futures.next().await {
     401            0 :                                     trace!("io future completed (shutdown)");
     402              :                                     // drain
     403              :                                 }
     404            0 :                                 trace!("shutdown complete");
     405            0 :                                 break;
     406            0 :                             }
     407            0 :                         }
     408            0 :                     }
     409            0 :                     drop(gate_guard); // drop it right before we exit
     410           32 :                 }.instrument(span));
     411           32 :                 IoConcurrency::SidecarTask { task_id, ios_tx }
     412              :             }
     413              :         }
     414      1216282 :     }
     415              : 
     416        76338 :     pub(crate) fn clone(&self) -> Self {
     417        76338 :         match self {
     418        39460 :             IoConcurrency::Sequential => IoConcurrency::Sequential,
     419        36878 :             IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask {
     420        36878 :                 task_id: *task_id,
     421        36878 :                 ios_tx: ios_tx.clone(),
     422        36878 :             },
     423              :         }
     424        76338 :     }
     425              : 
     426              :     /// Submit an IO to be executed in the background. DEADLOCK RISK, read the full doc string.
     427              :     ///
     428              :     /// The IO is represented as an opaque future.
     429              :     /// IO completion must be handled inside the future, e.g., through a oneshot channel.
     430              :     ///
     431              :     /// The API seems simple but there are multiple **pitfalls** involving
     432              :     /// DEADLOCK RISK.
     433              :     ///
      434              :     /// First, there are no guarantees about the execution of the IO.
     435              :     /// It may be `await`ed in-place before this function returns.
     436              :     /// It may be polled partially by this task and handed off to another task to be finished.
     437              :     /// It may be polled and then dropped before returning ready.
     438              :     ///
      439              :     /// This means that submitted IOs must not be interdependent.
     440              :     /// Interdependence may be through shared limited resources, e.g.,
     441              :     /// - VirtualFile file descriptor cache slot acquisition
     442              :     /// - tokio-epoll-uring slot
     443              :     ///
     444              :     /// # Why current usage is safe from deadlocks
     445              :     ///
     446              :     /// Textbook condition for a deadlock is that _all_ of the following be given
     447              :     /// - Mutual exclusion
     448              :     /// - Hold and wait
     449              :     /// - No preemption
     450              :     /// - Circular wait
     451              :     ///
     452              :     /// The current usage is safe because:
     453              :     /// - Mutual exclusion: IO futures definitely use mutexes, no way around that for now
     454              :     /// - Hold and wait: IO futures currently hold two kinds of locks/resources while waiting
     455              :     ///   for acquisition of other resources:
     456              :     ///    - VirtualFile file descriptor cache slot tokio mutex
     457              :     ///    - tokio-epoll-uring slot (uses tokio notify => wait queue, much like mutex)
     458              :     /// - No preemption: there's no taking-away of acquired locks/resources => given
     459              :     /// - Circular wait: this is the part of the condition that isn't met: all IO futures
     460              :     ///   first acquire VirtualFile mutex, then tokio-epoll-uring slot.
     461              :     ///   There is no IO future that acquires slot before VirtualFile.
     462              :     ///   Hence there can be no circular waiting.
     463              :     ///   Hence there cannot be a deadlock.
     464              :     ///
      465              :     /// This is a very fragile situation and must be revisited whenever any code called from
     466              :     /// inside the IO futures is changed.
     467              :     ///
     468              :     /// We will move away from opaque IO futures towards well-defined IOs at some point in
     469              :     /// the future when we have shipped this first version of concurrent IO to production
     470              :     /// and are ready to retire the Sequential mode which runs the futures in place.
     471              :     /// Right now, while brittle, the opaque IO approach allows us to ship the feature
     472              :     /// with minimal changes to the code and minimal changes to existing behavior in Sequential mode.
     473              :     ///
     474              :     /// Also read the comment in `collect_pending_ios`.
     475      1526361 :     pub(crate) async fn spawn_io<F>(&mut self, fut: F)
     476      1526361 :     where
     477      1526361 :         F: std::future::Future<Output = ()> + Send + 'static,
     478      1526361 :     {
     479      1526361 :         match self {
     480      1506835 :             IoConcurrency::Sequential => fut.await,
     481        19526 :             IoConcurrency::SidecarTask { ios_tx, .. } => {
     482        19526 :                 let fut = Box::pin(fut);
     483        19526 :                 // NB: experiments showed that doing an opportunistic poll of `fut` here was bad for throughput
     484        19526 :                 // while insignificant for latency.
     485        19526 :                 // It would make sense to revisit the tokio-epoll-uring API in the future such that we can try
      486        19526 :                 // a submission here, but never poll the future. That way, io_uring can make progress while
     487        19526 :                 // the future sits in the ios_tx queue.
     488        19526 :                 match ios_tx.send(fut) {
     489        19526 :                     Ok(()) => {}
     490              :                     Err(_) => {
     491            0 :                         unreachable!("the io task must have exited, likely it panicked")
     492              :                     }
     493              :                 }
     494              :             }
     495              :         }
     496      1526361 :     }
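    /// Illustrative sketch (not part of the original source): a correct submission
    /// owns everything it needs and reports completion through a channel, because
    /// there are no guarantees about where or when the future is polled.
    #[cfg(test)]
    #[allow(dead_code)]
    async fn spawn_io_example(
        &mut self,
        read_one_value: impl Future<Output = OnDiskValueIoResult> + Send + 'static,
    ) -> tokio::sync::oneshot::Receiver<OnDiskValueIoResult> {
        let (tx, rx) = tokio::sync::oneshot::channel();
        self.spawn_io(async move {
            // Completion is signalled from inside the future; the submitter may
            // never poll this future again.
            let _ = tx.send(read_one_value.await);
        })
        .await;
        // The caller awaits `rx` later, typically via `collect_pending_ios`.
        rx
    }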
     497              : 
     498              :     #[cfg(test)]
     499           64 :     pub(crate) fn spawn_for_test() -> impl std::ops::DerefMut<Target = Self> {
     500              :         use std::ops::{Deref, DerefMut};
     501              :         use tracing::info;
     502              :         use utils::sync::gate::Gate;
     503              : 
     504              :         // Spawn needs a Gate, give it one.
     505              :         struct Wrapper {
     506              :             inner: IoConcurrency,
     507              :             #[allow(dead_code)]
     508              :             gate: Box<Gate>,
     509              :         }
     510              :         impl Deref for Wrapper {
     511              :             type Target = IoConcurrency;
     512              : 
     513        36974 :             fn deref(&self) -> &Self::Target {
     514        36974 :                 &self.inner
     515        36974 :             }
     516              :         }
     517              :         impl DerefMut for Wrapper {
     518            0 :             fn deref_mut(&mut self) -> &mut Self::Target {
     519            0 :                 &mut self.inner
     520            0 :             }
     521              :         }
     522           64 :         let gate = Box::new(Gate::default());
     523              : 
     524              :         // The default behavior when running Rust unit tests without any further
     525              :         // flags is to use the new behavior.
     526              :         // The CI uses the following environment variable to unit test both old
     527              :         // and new behavior.
     528              :         // NB: the Python regression & perf tests take the `else` branch
     529              :         // below and have their own defaults management.
     530           64 :         let selected = {
     531              :             // The pageserver_api::config type is unsuitable because it's internally tagged.
     532           64 :             #[derive(serde::Deserialize)]
     533              :             #[serde(rename_all = "kebab-case")]
     534              :             enum TestOverride {
     535              :                 Sequential,
     536              :                 SidecarTask,
     537              :             }
     538              :             use once_cell::sync::Lazy;
     539           64 :             static TEST_OVERRIDE: Lazy<TestOverride> = Lazy::new(|| {
     540           64 :                 utils::env::var_serde_json_string(
     541           64 :                     "NEON_PAGESERVER_UNIT_TEST_GET_VECTORED_CONCURRENT_IO",
     542           64 :                 )
     543           64 :                 .unwrap_or(TestOverride::SidecarTask)
     544           64 :             });
     545              : 
     546           64 :             match *TEST_OVERRIDE {
     547           32 :                 TestOverride::Sequential => SelectedIoConcurrency::Sequential,
     548              :                 TestOverride::SidecarTask => {
     549           32 :                     SelectedIoConcurrency::SidecarTask(gate.enter().expect("just created it"))
     550              :                 }
     551              :             }
     552              :         };
     553              : 
     554           64 :         info!(?selected, "get_vectored_concurrent_io test");
     555              : 
     556           64 :         Wrapper {
     557           64 :             inner: Self::spawn(selected),
     558           64 :             gate,
     559           64 :         }
     560           64 :     }
     561              : }
     562              : 
     563              : /// Make noise in case the [`ValuesReconstructState`] gets dropped while
     564              : /// there are still IOs in flight.
     565              : /// Refer to `collect_pending_ios` for why we prefer not to do that.
     566              : //
     567              : /// We log from here instead of from the sidecar task because the [`ValuesReconstructState`]
     568              : /// gets dropped in a tracing span with more context.
      569              : /// We repeat the sidecar task's `task_id` so we can correlate what we emit here with
     570              : /// the logs / panic handler logs from the sidecar task, which also logs the `task_id`.
     571              : impl Drop for ValuesReconstructState {
     572      1255316 :     fn drop(&mut self) {
     573      1255316 :         let num_active_ios = self
     574      1255316 :             .num_active_ios
     575      1255316 :             .load(std::sync::atomic::Ordering::Acquire);
     576      1255316 :         if num_active_ios == 0 {
     577      1255314 :             return;
     578            2 :         }
     579            2 :         let sidecar_task_id = match &self.io_concurrency {
     580            0 :             IoConcurrency::Sequential => None,
     581            2 :             IoConcurrency::SidecarTask { task_id, .. } => Some(*task_id),
     582              :         };
     583            2 :         tracing::warn!(
     584              :             num_active_ios,
     585              :             ?sidecar_task_id,
     586            0 :             backtrace=%std::backtrace::Backtrace::force_capture(),
     587            0 :             "dropping ValuesReconstructState while some IOs have not been completed",
     588              :         );
     589      1255316 :     }
     590              : }
     591              : 
     592              : impl ValuesReconstructState {
     593      1255316 :     pub(crate) fn new(io_concurrency: IoConcurrency) -> Self {
     594      1255316 :         Self {
     595      1255316 :             keys: HashMap::new(),
     596      1255316 :             keys_done: KeySpaceRandomAccum::new(),
     597      1255316 :             keys_with_image_coverage: None,
     598      1255316 :             layers_visited: 0,
     599      1255316 :             delta_layers_visited: 0,
     600      1255316 :             io_concurrency,
     601      1255316 :             num_active_ios: Arc::new(AtomicUsize::new(0)),
     602      1255316 :         }
     603      1255316 :     }
     604              : 
     605              :     /// Absolutely read [`IoConcurrency::spawn_io`] to learn about assumptions & pitfalls.
     606      1526361 :     pub(crate) async fn spawn_io<F>(&mut self, fut: F)
     607      1526361 :     where
     608      1526361 :         F: std::future::Future<Output = ()> + Send + 'static,
     609      1526361 :     {
     610      1526361 :         self.io_concurrency.spawn_io(fut).await;
     611      1526361 :     }
     612              : 
     613      1696103 :     pub(crate) fn on_layer_visited(&mut self, layer: &ReadableLayer) {
     614      1696103 :         self.layers_visited += 1;
     615      1696103 :         if let ReadableLayer::PersistentLayer(layer) = layer {
     616       482851 :             if layer.layer_desc().is_delta() {
     617       437759 :                 self.delta_layers_visited += 1;
     618       437759 :             }
     619      1213252 :         }
     620      1696103 :     }
     621              : 
     622          456 :     pub(crate) fn get_delta_layers_visited(&self) -> u32 {
     623          456 :         self.delta_layers_visited
     624          456 :     }
     625              : 
     626      1255258 :     pub(crate) fn get_layers_visited(&self) -> u32 {
     627      1255258 :         self.layers_visited
     628      1255258 :     }
     629              : 
      630              :     /// On hitting an image layer, we can mark all keys in this range as done, because
     631              :     /// if the image layer does not contain a key, it is deleted/never added.
     632        45116 :     pub(crate) fn on_image_layer_visited(&mut self, key_range: &Range<Key>) {
     633        45116 :         let prev_val = self.keys_with_image_coverage.replace(key_range.clone());
     634        45116 :         assert_eq!(
     635              :             prev_val, None,
     636            0 :             "should consume the keyspace before the next iteration"
     637              :         );
     638        45116 :     }
     639              : 
     640              :     /// Update the state collected for a given key.
     641              :     /// Returns true if this was the last value needed for the key and false otherwise.
      642              :     /// Returns the [`OnDiskValueIo`] that the caller must complete with the on-disk value for this key at `lsn`.
     643              :     /// If the key is done after the update, mark it as such.
     644              :     ///
     645              :     /// If the key is in the sparse keyspace (i.e., aux files), we do not track them in
      646              :     /// `keys_done`.
     647              :     // TODO: rename this method & update description.
     648      1482755 :     pub(crate) fn update_key(&mut self, key: &Key, lsn: Lsn, completes: bool) -> OnDiskValueIo {
     649      1482755 :         let state = self.keys.entry(*key).or_default();
     650      1482755 : 
     651      1482755 :         let is_sparse_key = key.is_sparse();
     652              : 
     653      1482755 :         let required_io = match state.situation {
     654              :             ValueReconstructSituation::Complete => {
     655       145375 :                 if is_sparse_key {
     656              :                     // Sparse keyspace might be visited multiple times because
     657              :                     // we don't track unmapped keyspaces.
     658       145375 :                     return OnDiskValueIo::Unnecessary;
     659              :                 } else {
     660            0 :                     unreachable!()
     661              :                 }
     662              :             }
     663              :             ValueReconstructSituation::Continue => {
     664      1337380 :                 self.num_active_ios
     665      1337380 :                     .fetch_add(1, std::sync::atomic::Ordering::Release);
     666      1337380 :                 let (tx, rx) = tokio::sync::oneshot::channel();
     667      1337380 :                 state.on_disk_values.push((lsn, OnDiskValueIoWaiter { rx }));
     668      1337380 :                 OnDiskValueIo::Required {
     669      1337380 :                     tx,
     670      1337380 :                     num_active_ios: Arc::clone(&self.num_active_ios),
     671      1337380 :                 }
     672      1337380 :             }
     673      1337380 :         };
     674      1337380 : 
     675      1337380 :         if completes && state.situation == ValueReconstructSituation::Continue {
     676      1336068 :             state.situation = ValueReconstructSituation::Complete;
     677      1336068 :             if !is_sparse_key {
     678      1208552 :                 self.keys_done.add_key(*key);
     679      1208552 :             }
     680         1312 :         }
     681              : 
     682      1337380 :         required_io
     683      1482755 :     }
     684              : 
     685              :     /// Returns the key space describing the keys that have
     686              :     /// been marked as completed since the last call to this function.
     687              :     /// Returns individual keys done, and the image layer coverage.
     688      3403623 :     pub(crate) fn consume_done_keys(&mut self) -> (KeySpace, Option<Range<Key>>) {
     689      3403623 :         (
     690      3403623 :             self.keys_done.consume_keyspace(),
     691      3403623 :             self.keys_with_image_coverage.take(),
     692      3403623 :         )
     693      3403623 :     }
     694              : }
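// Illustrative sketch (not part of the original source): the per-value flow of a
// vectored get, tying `update_key`, `spawn_io`, and `collect_pending_ios` together.
// `read_value_at` is a hypothetical stand-in for the actual layer read.
#[cfg(test)]
#[allow(dead_code)]
async fn vectored_value_flow(
    state: &mut ValuesReconstructState,
    key: Key,
    lsn: Lsn,
    read_value_at: impl Future<Output = OnDiskValueIoResult> + Send + 'static,
) -> Result<ValueReconstructState, PageReconstructError> {
    // Traversal decided this is the last value needed for `key`.
    let io = state.update_key(&key, lsn, /* completes */ true);
    // The layer submits the read as an opaque IO future and completes the IO inside it.
    state
        .spawn_io(async move {
            io.complete(read_value_at.await);
        })
        .await;
    // After traversal finishes, the caller gathers the per-key results.
    let key_state = state.keys.remove(&key).expect("key was just updated");
    key_state.collect_pending_ios().await
}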
     695              : 
     696              : /// A key that uniquely identifies a layer in a timeline
     697              : #[derive(Debug, PartialEq, Eq, Clone, Hash)]
     698              : pub(crate) enum LayerId {
     699              :     PersitentLayerId(PersistentLayerKey),
     700              :     InMemoryLayerId(InMemoryLayerFileId),
     701              : }
     702              : 
     703              : /// Uniquely identify a layer visit by the layer
     704              : /// and LSN floor (or start LSN) of the reads.
     705              : /// The layer itself is not enough since we may
     706              : /// have different LSN lower bounds for delta layer reads.
     707              : #[derive(Debug, PartialEq, Eq, Clone, Hash)]
     708              : struct LayerToVisitId {
     709              :     layer_id: LayerId,
     710              :     lsn_floor: Lsn,
     711              : }
     712              : 
     713              : /// Layer wrapper for the read path. Note that it is valid
     714              : /// to use these layers even after external operations have
     715              : /// been performed on them (compaction, freeze, etc.).
     716              : #[derive(Debug)]
     717              : pub(crate) enum ReadableLayer {
     718              :     PersistentLayer(Layer),
     719              :     InMemoryLayer(Arc<InMemoryLayer>),
     720              : }
     721              : 
     722              : /// A partial description of a read to be done.
     723              : #[derive(Debug, Clone)]
     724              : struct LayerVisit {
     725              :     /// An id used to resolve the readable layer within the fringe
     726              :     layer_to_visit_id: LayerToVisitId,
     727              :     /// Lsn range for the read, used for selecting the next read
     728              :     lsn_range: Range<Lsn>,
     729              : }
     730              : 
     731              : /// Data structure which maintains a fringe of layers for the
     732              : /// read path. The fringe is the set of layers which intersects
     733              : /// the current keyspace that the search is descending on.
     734              : /// Each layer tracks the keyspace that intersects it.
     735              : ///
     736              : /// The fringe must appear sorted by Lsn. Hence, it uses
      737              : /// a two-layer indexing scheme.
     738              : #[derive(Debug)]
     739              : pub(crate) struct LayerFringe {
     740              :     planned_visits_by_lsn: BinaryHeap<LayerVisit>,
     741              :     visit_reads: HashMap<LayerToVisitId, LayerVisitReads>,
     742              : }
     743              : 
     744              : #[derive(Debug)]
     745              : struct LayerVisitReads {
     746              :     layer: ReadableLayer,
     747              :     target_keyspace: KeySpaceRandomAccum,
     748              : }
     749              : 
     750              : impl LayerFringe {
     751      1707520 :     pub(crate) fn new() -> Self {
     752      1707520 :         LayerFringe {
     753      1707520 :             planned_visits_by_lsn: BinaryHeap::new(),
     754      1707520 :             visit_reads: HashMap::new(),
     755      1707520 :         }
     756      1707520 :     }
     757              : 
     758      3403623 :     pub(crate) fn next_layer(&mut self) -> Option<(ReadableLayer, KeySpace, Range<Lsn>)> {
     759      3403623 :         let read_desc = self.planned_visits_by_lsn.pop()?;
     760              : 
     761      1696103 :         let removed = self.visit_reads.remove_entry(&read_desc.layer_to_visit_id);
     762      1696103 : 
     763      1696103 :         match removed {
     764              :             Some((
     765              :                 _,
     766              :                 LayerVisitReads {
     767      1696103 :                     layer,
     768      1696103 :                     mut target_keyspace,
     769      1696103 :                 },
     770      1696103 :             )) => Some((
     771      1696103 :                 layer,
     772      1696103 :                 target_keyspace.consume_keyspace(),
     773      1696103 :                 read_desc.lsn_range,
     774      1696103 :             )),
     775            0 :             None => unreachable!("fringe internals are always consistent"),
     776              :         }
     777      3403623 :     }
     778              : 
     779      1696131 :     pub(crate) fn update(
     780      1696131 :         &mut self,
     781      1696131 :         layer: ReadableLayer,
     782      1696131 :         keyspace: KeySpace,
     783      1696131 :         lsn_range: Range<Lsn>,
     784      1696131 :     ) {
     785      1696131 :         let layer_to_visit_id = LayerToVisitId {
     786      1696131 :             layer_id: layer.id(),
     787      1696131 :             lsn_floor: lsn_range.start,
     788      1696131 :         };
     789      1696131 : 
     790      1696131 :         let entry = self.visit_reads.entry(layer_to_visit_id.clone());
     791      1696131 :         match entry {
     792           28 :             Entry::Occupied(mut entry) => {
     793           28 :                 entry.get_mut().target_keyspace.add_keyspace(keyspace);
     794           28 :             }
     795      1696103 :             Entry::Vacant(entry) => {
     796      1696103 :                 self.planned_visits_by_lsn.push(LayerVisit {
     797      1696103 :                     lsn_range,
     798      1696103 :                     layer_to_visit_id: layer_to_visit_id.clone(),
     799      1696103 :                 });
     800      1696103 :                 let mut accum = KeySpaceRandomAccum::new();
     801      1696103 :                 accum.add_keyspace(keyspace);
     802      1696103 :                 entry.insert(LayerVisitReads {
     803      1696103 :                     layer,
     804      1696103 :                     target_keyspace: accum,
     805      1696103 :                 });
     806      1696103 :             }
     807              :         }
     808      1696131 :     }
     809              : }
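// Illustrative sketch (not part of the original source): the read path's descent
// loop over the fringe; layer discovery and fringe re-population are elided.
#[cfg(test)]
#[allow(dead_code)]
async fn drain_fringe(
    mut fringe: LayerFringe,
    reconstruct_state: &mut ValuesReconstructState,
    ctx: &RequestContext,
) -> Result<(), GetVectoredError> {
    // Callers first seed the fringe via `fringe.update(layer, keyspace, lsn_range)`.
    while let Some((layer, keyspace, lsn_range)) = fringe.next_layer() {
        layer
            .get_values_reconstruct_data(keyspace, lsn_range, reconstruct_state, ctx)
            .await?;
        // The real read path re-populates the fringe here with older layers that
        // still intersect the keys not yet marked as done.
    }
    Ok(())
}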
     810              : 
     811              : impl Default for LayerFringe {
     812            0 :     fn default() -> Self {
     813            0 :         Self::new()
     814            0 :     }
     815              : }
     816              : 
     817              : impl Ord for LayerVisit {
     818           60 :     fn cmp(&self, other: &Self) -> Ordering {
     819           60 :         let ord = self.lsn_range.end.cmp(&other.lsn_range.end);
     820           60 :         if ord == std::cmp::Ordering::Equal {
     821           44 :             self.lsn_range.start.cmp(&other.lsn_range.start).reverse()
     822              :         } else {
     823           16 :             ord
     824              :         }
     825           60 :     }
     826              : }
     827              : 
     828              : impl PartialOrd for LayerVisit {
     829           60 :     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
     830           60 :         Some(self.cmp(other))
     831           60 :     }
     832              : }
     833              : 
     834              : impl PartialEq for LayerVisit {
     835            0 :     fn eq(&self, other: &Self) -> bool {
     836            0 :         self.lsn_range == other.lsn_range
     837            0 :     }
     838              : }
     839              : 
     840              : impl Eq for LayerVisit {}
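// Illustrative sketch (not part of the original source): `BinaryHeap` is a max-heap,
// so the visit with the highest `lsn_range.end` pops first; among equal ends, the
// visit with the smaller `lsn_range.start` compares greater and is popped first.
#[cfg(test)]
#[allow(dead_code)]
fn layer_visit_ordering_example(id: LayerToVisitId) {
    let deep = LayerVisit {
        layer_to_visit_id: id.clone(),
        lsn_range: Lsn(10)..Lsn(30),
    };
    let shallow = LayerVisit {
        layer_to_visit_id: id,
        lsn_range: Lsn(20)..Lsn(30),
    };
    assert!(deep > shallow); // same end: the visit starting lower wins the tie
}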
     841              : 
     842              : impl ReadableLayer {
     843      1696131 :     pub(crate) fn id(&self) -> LayerId {
     844      1696131 :         match self {
     845       482879 :             Self::PersistentLayer(layer) => LayerId::PersitentLayerId(layer.layer_desc().key()),
     846      1213252 :             Self::InMemoryLayer(layer) => LayerId::InMemoryLayerId(layer.file_id()),
     847              :         }
     848      1696131 :     }
     849              : 
     850      1696103 :     pub(crate) async fn get_values_reconstruct_data(
     851      1696103 :         &self,
     852      1696103 :         keyspace: KeySpace,
     853      1696103 :         lsn_range: Range<Lsn>,
     854      1696103 :         reconstruct_state: &mut ValuesReconstructState,
     855      1696103 :         ctx: &RequestContext,
     856      1696103 :     ) -> Result<(), GetVectoredError> {
     857      1696103 :         match self {
     858       482851 :             ReadableLayer::PersistentLayer(layer) => {
     859       482851 :                 layer
     860       482851 :                     .get_values_reconstruct_data(keyspace, lsn_range, reconstruct_state, ctx)
     861       482851 :                     .await
     862              :             }
     863      1213252 :             ReadableLayer::InMemoryLayer(layer) => {
     864      1213252 :                 layer
     865      1213252 :                     .get_values_reconstruct_data(keyspace, lsn_range.end, reconstruct_state, ctx)
     866      1213252 :                     .await
     867              :             }
     868              :         }
     869      1696103 :     }
     870              : }
     871              : 
     872              : /// Layers contain a hint indicating whether they are likely to be used for reads.
     873              : ///
     874              : /// This is a hint rather than an authoritative value, so that we do not have to update it synchronously
     875              : /// when changing the visibility of layers (for example when creating a branch that makes some previously
     876              : /// covered layers visible).  It should be used for cache management but not for correctness-critical checks.
     877              : #[derive(Debug, Clone, PartialEq, Eq)]
     878              : pub enum LayerVisibilityHint {
     879              :     /// A Visible layer might be read while serving a read, because there is not an image layer between it
     880              :     /// and a readable LSN (the tip of the branch or a child's branch point)
     881              :     Visible,
     882              :     /// A Covered layer probably won't be read right now, but _can_ be read in future if someone creates
     883              :     /// a branch or ephemeral endpoint at an LSN below the layer that covers this.
     884              :     Covered,
     885              : }
     886              : 
     887              : pub(crate) struct LayerAccessStats(std::sync::atomic::AtomicU64);
     888              : 
     889            0 : #[derive(Clone, Copy, strum_macros::EnumString)]
     890              : pub(crate) enum LayerAccessStatsReset {
     891              :     NoReset,
     892              :     AllStats,
     893              : }
     894              : 
     895              : impl Default for LayerAccessStats {
     896         3836 :     fn default() -> Self {
     897         3836 :         // Default value is to assume resident since creation time, and visible.
     898         3836 :         let (_mask, mut value) = Self::to_low_res_timestamp(Self::RTIME_SHIFT, SystemTime::now());
     899         3836 :         value |= 0x1 << Self::VISIBILITY_SHIFT;
     900         3836 : 
     901         3836 :         Self(std::sync::atomic::AtomicU64::new(value))
     902         3836 :     }
     903              : }
     904              : 
     905              : // Efficient store of two very-low-resolution timestamps and some bits.  Used for storing last access time and
     906              : // last residence change time.
     907              : impl LayerAccessStats {
     908              :     // How many high bits to drop from a u32 timestamp?
     909              :     // - Only storing up to a u32 timestamp will work fine until 2038 (if this code is still in use
     910              :     //   after that, this software has been very successful!)
     911              :     // - Dropping the top bit is implicitly safe because unix timestamps are meant to be
     912              :     //   stored in an i32, so they never used it.
     913              :     // - Dropping the next two bits is safe because this code only runs on systems in
     914              :     //   years >= 2024, and those bits have been 1 since 2021.
     915              :     //
     916              :     // Therefore we store only 29 bits per timestamp, with one-second resolution (see TS_MASK).  We do
     917              :     // this truncation to make space for some flags in the high bits of our u64.
     918              :     const TS_DROP_HIGH_BITS: u32 = u32::count_ones(Self::TS_ONES) + 1;
     919              :     const TS_MASK: u32 = 0x1f_ff_ff_ff;
     920              :     const TS_ONES: u32 = 0x60_00_00_00;
     921              : 
     922              :     const ATIME_SHIFT: u32 = 0;
     923              :     const RTIME_SHIFT: u32 = 32 - Self::TS_DROP_HIGH_BITS;
     924              :     const VISIBILITY_SHIFT: u32 = 64 - 2 * Self::TS_DROP_HIGH_BITS;
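    // Editorial sketch of the resulting packed u64 layout, derived from the
    // constants above (not present in the original source):
    //
    //   bits  0..=28  last-access timestamp    (ATIME_SHIFT = 0,  29 bits)
    //   bits 29..=57  residence-change time    (RTIME_SHIFT = 29, 29 bits)
    //   bit  58       visibility flag          (VISIBILITY_SHIFT = 58, 1 = Visible)
    //   bits 59..=63  currently unused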
     925              : 
     926       483091 :     fn write_bits(&self, mask: u64, value: u64) -> u64 {
     927       483091 :         self.0
     928       483091 :             .fetch_update(
     929       483091 :                 // TODO: decide what orderings are correct
     930       483091 :                 std::sync::atomic::Ordering::Relaxed,
     931       483091 :                 std::sync::atomic::Ordering::Relaxed,
     932       483091 :                 |v| Some((v & !mask) | (value & mask)),
     933       483091 :             )
     934       483091 :             .expect("Inner function is infallible")
     935       483091 :     }
     936              : 
     937       486207 :     fn to_low_res_timestamp(shift: u32, time: SystemTime) -> (u64, u64) {
     938       486207 :         // Drop the low three bits of the timestamp, for an ~8s accuracy
     939       486207 :         let timestamp = time.duration_since(UNIX_EPOCH).unwrap().as_secs() & (Self::TS_MASK as u64);
     940       486207 : 
     941       486207 :         ((Self::TS_MASK as u64) << shift, timestamp << shift)
     942       486207 :     }
     943              : 
     944          124 :     fn read_low_res_timestamp(&self, shift: u32) -> Option<SystemTime> {
     945          124 :         let read = self.0.load(std::sync::atomic::Ordering::Relaxed);
     946          124 : 
     947          124 :         let ts_bits = (read & ((Self::TS_MASK as u64) << shift)) >> shift;
     948          124 :         if ts_bits == 0 {
     949           48 :             None
     950              :         } else {
     951           76 :             Some(UNIX_EPOCH + Duration::from_secs(ts_bits | (Self::TS_ONES as u64)))
     952              :         }
     953          124 :     }
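    // Editorial worked example (not in the original source): for unix seconds
    // 1_706_000_000 (late January 2024), bit 31 is zero and bits 29..=30 are set, so
    //   to_low_res_timestamp(ATIME_SHIFT, t) stores 1_706_000_000 & TS_MASK = 95_387_264,
    // and read_low_res_timestamp(ATIME_SHIFT) restores it via
    //   95_387_264 | TS_ONES = 1_706_000_000,
    // i.e. the round trip is lossless as long as the 2021..2038 assumption holds.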
     954              : 
     955              :     /// Record a change in layer residency.
     956              :     ///
     957              :     /// Recording the event must happen while holding the layer map lock to
     958              :     /// ensure that latest-activity-threshold-based layer eviction (eviction_task.rs)
     959              :     /// can do an "imitate access" to this layer, before it observes `now-latest_activity() > threshold`.
     960              :     ///
     961              :     /// If we instead recorded the residence event with a timestamp from before grabbing the layer map lock,
     962              :     /// the following race could happen:
     963              :     ///
     964              :     /// - Compact: Write out an L1 layer from several L0 layers. This records residence event LayerCreate with the current timestamp.
     965              :     /// - Eviction: imitate access logical size calculation. This accesses the L0 layers because the L1 layer is not yet in the layer map.
     966              :     /// - Compact: Grab layer map lock, add the new L1 to layer map and remove the L0s, release layer map lock.
     967              :     /// - Eviction: observes the new L1 layer whose only activity timestamp is the LayerCreate event; since that timestamp predates its imitate-access pass, it may conclude `now - latest_activity() > threshold` and evict the freshly written L1 right away.
     968           52 :     pub(crate) fn record_residence_event_at(&self, now: SystemTime) {
     969           52 :         let (mask, value) = Self::to_low_res_timestamp(Self::RTIME_SHIFT, now);
     970           52 :         self.write_bits(mask, value);
     971           52 :     }
     972              : 
     973           48 :     pub(crate) fn record_residence_event(&self) {
     974           48 :         self.record_residence_event_at(SystemTime::now())
     975           48 :     }
     976              : 
     977       482319 :     fn record_access_at(&self, now: SystemTime) -> bool {
     978       482319 :         let (mut mask, mut value) = Self::to_low_res_timestamp(Self::ATIME_SHIFT, now);
     979       482319 : 
     980       482319 :         // A layer which is accessed must be visible.
     981       482319 :         mask |= 0x1 << Self::VISIBILITY_SHIFT;
     982       482319 :         value |= 0x1 << Self::VISIBILITY_SHIFT;
     983       482319 : 
     984       482319 :         let old_bits = self.write_bits(mask, value);
     985            4 :         !matches!(
     986       482319 :             self.decode_visibility(old_bits),
     987              :             LayerVisibilityHint::Visible
     988              :         )
     989       482319 :     }
     990              : 
     991              :     /// Returns true if we modified the layer's visibility to set it to Visible implicitly
     992              :     /// as a result of this access
     993       482875 :     pub(crate) fn record_access(&self, ctx: &RequestContext) -> bool {
     994       482875 :         if ctx.access_stats_behavior() == AccessStatsBehavior::Skip {
     995          568 :             return false;
     996       482307 :         }
     997       482307 : 
     998       482307 :         self.record_access_at(SystemTime::now())
     999       482875 :     }
    1000              : 
    1001            0 :     fn as_api_model(
    1002            0 :         &self,
    1003            0 :         reset: LayerAccessStatsReset,
    1004            0 :     ) -> pageserver_api::models::LayerAccessStats {
    1005            0 :         let ret = pageserver_api::models::LayerAccessStats {
    1006            0 :             access_time: self
    1007            0 :                 .read_low_res_timestamp(Self::ATIME_SHIFT)
    1008            0 :                 .unwrap_or(UNIX_EPOCH),
    1009            0 :             residence_time: self
    1010            0 :                 .read_low_res_timestamp(Self::RTIME_SHIFT)
    1011            0 :                 .unwrap_or(UNIX_EPOCH),
    1012            0 :             visible: matches!(self.visibility(), LayerVisibilityHint::Visible),
    1013              :         };
    1014            0 :         match reset {
    1015            0 :             LayerAccessStatsReset::NoReset => {}
    1016            0 :             LayerAccessStatsReset::AllStats => {
    1017            0 :                 self.write_bits((Self::TS_MASK as u64) << Self::ATIME_SHIFT, 0x0);
    1018            0 :                 self.write_bits((Self::TS_MASK as u64) << Self::RTIME_SHIFT, 0x0);
    1019            0 :             }
    1020              :         }
    1021            0 :         ret
    1022            0 :     }
    1023              : 
    1024              :     /// Get the latest access timestamp, falling back to latest residence event.  The latest residence event
    1025              :     /// will be this Layer's construction time, if its residence hasn't changed since then.
    1026           32 :     pub(crate) fn latest_activity(&self) -> SystemTime {
    1027           32 :         if let Some(t) = self.read_low_res_timestamp(Self::ATIME_SHIFT) {
    1028           12 :             t
    1029              :         } else {
    1030           20 :             self.read_low_res_timestamp(Self::RTIME_SHIFT)
    1031           20 :                 .expect("Residence time is set on construction")
    1032              :         }
    1033           32 :     }
    1034              : 
     1035              :     /// Whether this layer has been accessed (not counting accesses made with [`AccessStatsBehavior::Skip`]).
    1036              :     ///
    1037              :     /// This indicates whether the layer has been used for some purpose that would motivate
    1038              :     /// us to keep it on disk, such as for serving a getpage request.
    1039           36 :     fn accessed(&self) -> bool {
    1040           36 :         // Consider it accessed if the most recent access is more recent than
    1041           36 :         // the most recent change in residence status.
    1042           36 :         match (
    1043           36 :             self.read_low_res_timestamp(Self::ATIME_SHIFT),
    1044           36 :             self.read_low_res_timestamp(Self::RTIME_SHIFT),
    1045              :         ) {
    1046           28 :             (None, _) => false,
    1047            0 :             (Some(_), None) => true,
    1048            8 :             (Some(a), Some(r)) => a >= r,
    1049              :         }
    1050           36 :     }
    1051              : 
    1052              :     /// Helper for extracting the visibility hint from the literal value of our inner u64
    1053       484464 :     fn decode_visibility(&self, bits: u64) -> LayerVisibilityHint {
    1054       484464 :         match (bits >> Self::VISIBILITY_SHIFT) & 0x1 {
    1055       484420 :             1 => LayerVisibilityHint::Visible,
    1056           44 :             0 => LayerVisibilityHint::Covered,
    1057            0 :             _ => unreachable!(),
    1058              :         }
    1059       484464 :     }
    1060              : 
    1061              :     /// Returns the old value which has been replaced
    1062          720 :     pub(crate) fn set_visibility(&self, visibility: LayerVisibilityHint) -> LayerVisibilityHint {
    1063          720 :         let value = match visibility {
    1064          616 :             LayerVisibilityHint::Visible => 0x1 << Self::VISIBILITY_SHIFT,
    1065          104 :             LayerVisibilityHint::Covered => 0x0,
    1066              :         };
    1067              : 
    1068          720 :         let old_bits = self.write_bits(0x1 << Self::VISIBILITY_SHIFT, value);
    1069          720 :         self.decode_visibility(old_bits)
    1070          720 :     }
    1071              : 
    1072         1425 :     pub(crate) fn visibility(&self) -> LayerVisibilityHint {
    1073         1425 :         let read = self.0.load(std::sync::atomic::Ordering::Relaxed);
    1074         1425 :         self.decode_visibility(read)
    1075         1425 :     }
    1076              : }
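
Taken together, the methods above can be exercised like this; a minimal sketch, assuming it lives inside this module (e.g. in a unit test), since several of the methods are private:

```rust
let stats = LayerAccessStats::default();

// A freshly constructed layer is assumed visible and resident since "now".
assert!(matches!(stats.visibility(), LayerVisibilityHint::Visible));

// Covering the layer returns the hint it replaces.
let old = stats.set_visibility(LayerVisibilityHint::Covered);
assert!(matches!(old, LayerVisibilityHint::Visible));

// An access implicitly makes the layer visible again; the return value
// reports that the visibility actually changed.
assert!(stats.record_access_at(SystemTime::now()));
assert!(stats.accessed());
```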
    1077              : 
    1078              : /// Get a layer descriptor from a layer.
    1079              : pub(crate) trait AsLayerDesc {
    1080              :     /// Get the layer descriptor.
    1081              :     fn layer_desc(&self) -> &PersistentLayerDesc;
    1082              : }
    1083              : 
    1084              : pub mod tests {
    1085              :     use pageserver_api::shard::TenantShardId;
    1086              :     use utils::id::TimelineId;
    1087              : 
    1088              :     use super::*;
    1089              : 
    1090              :     impl From<DeltaLayerName> for PersistentLayerDesc {
    1091            0 :         fn from(value: DeltaLayerName) -> Self {
    1092            0 :             PersistentLayerDesc::new_delta(
    1093            0 :                 TenantShardId::from([0; 18]),
    1094            0 :                 TimelineId::from_array([0; 16]),
    1095            0 :                 value.key_range,
    1096            0 :                 value.lsn_range,
    1097            0 :                 233,
    1098            0 :             )
    1099            0 :         }
    1100              :     }
    1101              : 
    1102              :     impl From<ImageLayerName> for PersistentLayerDesc {
    1103            0 :         fn from(value: ImageLayerName) -> Self {
    1104            0 :             PersistentLayerDesc::new_img(
    1105            0 :                 TenantShardId::from([0; 18]),
    1106            0 :                 TimelineId::from_array([0; 16]),
    1107            0 :                 value.key_range,
    1108            0 :                 value.lsn,
    1109            0 :                 233,
    1110            0 :             )
    1111            0 :         }
    1112              :     }
    1113              : 
    1114              :     impl From<LayerName> for PersistentLayerDesc {
    1115            0 :         fn from(value: LayerName) -> Self {
    1116            0 :             match value {
    1117            0 :                 LayerName::Delta(d) => Self::from(d),
    1118            0 :                 LayerName::Image(i) => Self::from(i),
    1119              :             }
    1120            0 :         }
    1121              :     }
    1122              : }
    1123              : 
     1124              : /// Range-wrapping newtype which uses `Display` to render `Debug`.
     1125              : ///
     1126              : /// Useful with `Key`, whose `{:?}` output is too verbose when printing multiple layers.
    1127              : struct RangeDisplayDebug<'a, T: std::fmt::Display>(&'a Range<T>);
    1128              : 
    1129              : impl<T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'_, T> {
    1130            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    1131            0 :         write!(f, "{}..{}", self.0.start, self.0.end)
    1132            0 :     }
    1133              : }
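
For illustration (a sketch, assuming any `T: Display` such as `u32`), the wrapper renders the compact `start..end` form:

```rust
let range = 10u32..20u32;
// Debug formatting of the wrapper delegates to Display of the endpoints.
assert_eq!(format!("{:?}", RangeDisplayDebug(&range)), "10..20");
```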
    1134              : 
    1135              : #[cfg(test)]
    1136              : mod tests2 {
    1137              :     use pageserver_api::key::DBDIR_KEY;
    1138              :     use tracing::info;
    1139              : 
    1140              :     use super::*;
    1141              :     use crate::tenant::storage_layer::IoConcurrency;
    1142              : 
    1143              :     /// TODO: currently this test relies on manual visual inspection of the --no-capture output.
    1144              :     /// Should look like so:
    1145              :     /// ```text
    1146              :     /// RUST_LOG=trace cargo nextest run  --features testing  --no-capture test_io_concurrency_noise
    1147              :     /// running 1 test
    1148              :     /// 2025-01-21T17:42:01.335679Z  INFO get_vectored_concurrent_io test selected=SidecarTask
    1149              :     /// 2025-01-21T17:42:01.335680Z TRACE spawning sidecar task task_id=0
    1150              :     /// 2025-01-21T17:42:01.335937Z TRACE IoConcurrency_sidecar{task_id=0}: start
    1151              :     /// 2025-01-21T17:42:01.335972Z TRACE IoConcurrency_sidecar{task_id=0}: received new io future
    1152              :     /// 2025-01-21T17:42:01.335999Z  INFO IoConcurrency_sidecar{task_id=0}: waiting for signal to complete IO
    1153              :     /// 2025-01-21T17:42:01.336229Z  WARN dropping ValuesReconstructState while some IOs have not been completed num_active_ios=1 sidecar_task_id=Some(0) backtrace=   0: <pageserver::tenant::storage_layer::ValuesReconstructState as core::ops::drop::Drop>::drop
    1154              :     ///              at ./src/tenant/storage_layer.rs:553:24
    1155              :     ///    1: core::ptr::drop_in_place<pageserver::tenant::storage_layer::ValuesReconstructState>
    1156              :     ///              at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/ptr/mod.rs:521:1
    1157              :     ///    2: core::mem::drop
    1158              :     ///              at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/mem/mod.rs:942:24
    1159              :     ///    3: pageserver::tenant::storage_layer::tests2::test_io_concurrency_noise::{{closure}}
    1160              :     ///              at ./src/tenant/storage_layer.rs:1159:9
    1161              :     ///   ...
    1162              :     ///   49: <unknown>
    1163              :     /// 2025-01-21T17:42:01.452293Z  INFO IoConcurrency_sidecar{task_id=0}: completing IO
    1164              :     /// 2025-01-21T17:42:01.452357Z TRACE IoConcurrency_sidecar{task_id=0}: io future completed
    1165              :     /// 2025-01-21T17:42:01.452473Z TRACE IoConcurrency_sidecar{task_id=0}: end
    1166              :     /// test tenant::storage_layer::tests2::test_io_concurrency_noise ... ok
    1167              :     ///
    1168              :     /// ```
    1169              :     #[tokio::test]
    1170            4 :     async fn test_io_concurrency_noise() {
    1171            4 :         crate::tenant::harness::setup_logging();
    1172            4 : 
    1173            4 :         let io_concurrency = IoConcurrency::spawn_for_test();
    1174            4 :         match *io_concurrency {
    1175            4 :             IoConcurrency::Sequential => {
    1176            4 :                 // This test asserts behavior in sidecar mode, doesn't make sense in sequential mode.
    1177            4 :                 return;
    1178            4 :             }
    1179            4 :             IoConcurrency::SidecarTask { .. } => {}
    1180            2 :         }
    1181            2 :         let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
    1182            2 : 
    1183            2 :         let (io_fut_is_waiting_tx, io_fut_is_waiting) = tokio::sync::oneshot::channel();
    1184            2 :         let (do_complete_io, should_complete_io) = tokio::sync::oneshot::channel();
    1185            2 :         let (io_fut_exiting_tx, io_fut_exiting) = tokio::sync::oneshot::channel();
    1186            2 : 
    1187            2 :         let io = reconstruct_state.update_key(&DBDIR_KEY, Lsn(8), true);
    1188            2 :         reconstruct_state
    1189            2 :             .spawn_io(async move {
    1190            2 :                 info!("waiting for signal to complete IO");
    1191            4 :                 io_fut_is_waiting_tx.send(()).unwrap();
    1192            2 :                 should_complete_io.await.unwrap();
    1193            2 :                 info!("completing IO");
    1194            4 :                 io.complete(Ok(OnDiskValue::RawImage(Bytes::new())));
    1195            2 :                 io_fut_exiting_tx.send(()).unwrap();
    1196            2 :             })
    1197            2 :             .await;
    1198            4 : 
    1199            4 :         io_fut_is_waiting.await.unwrap();
    1200            2 : 
    1201            2 :         // this is what makes the noise
    1202            2 :         drop(reconstruct_state);
    1203            2 : 
    1204            2 :         do_complete_io.send(()).unwrap();
    1205            2 : 
    1206            2 :         io_fut_exiting.await.unwrap();
    1207            4 :     }
    1208              : }
        

Generated by: LCOV version 2.1-beta