LCOV - code coverage report
Current view: top level - pageserver/src/tenant/storage_layer - inmemory_layer.rs (source / functions)
Test: 07bee600374ccd486c69370d0972d9035964fe68.info
Test Date: 2025-02-20 13:11:02
Coverage:  Lines: 89.8 % (473 of 527)    Functions: 83.7 % (41 of 49)

            Line data    Source code
       1              : //! An in-memory layer stores recently received key-value pairs.
       2              : //!
       3              : //! The "in-memory" part of the name is a bit misleading: the actual page versions are
       4              : //! held in an ephemeral file, not in memory. The metadata for each page version, i.e.
       5              : //! its position in the file, is kept in memory, though.
       6              : //!
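
The split described above (index metadata in memory, value bytes on disk) can be pictured with a minimal sketch. Everything below is an illustrative stand-in for exposition, not the pageserver's actual types; a Vec<u8> plays the role of the ephemeral file.

    use std::collections::BTreeMap;

    /// Toy model: an in-memory index mapping (key, lsn) to the position and
    /// length of the serialized value inside a flat, append-only byte store.
    struct ToyLayer {
        index: BTreeMap<(u64, u64), (u64, usize)>, // (key, lsn) -> (pos, len)
        file: Vec<u8>,                             // stands in for the EphemeralFile
    }

    impl ToyLayer {
        fn put(&mut self, key: u64, lsn: u64, value: &[u8]) {
            let pos = self.file.len() as u64; // values are only ever appended
            self.file.extend_from_slice(value);
            self.index.insert((key, lsn), (pos, value.len()));
        }

        fn get(&self, key: u64, lsn: u64) -> Option<&[u8]> {
            let (pos, len) = *self.index.get(&(key, lsn))?;
            Some(&self.file[pos as usize..pos as usize + len])
        }
    }
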
       7              : use crate::assert_u64_eq_usize::{u64_to_usize, U64IsUsize, UsizeIsU64};
       8              : use crate::config::PageServerConf;
       9              : use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
      10              : use crate::tenant::ephemeral_file::EphemeralFile;
      11              : use crate::tenant::storage_layer::{OnDiskValue, OnDiskValueIo};
      12              : use crate::tenant::timeline::GetVectoredError;
      13              : use crate::virtual_file::owned_buffers_io::io_buf_ext::IoBufExt;
      14              : use crate::{l0_flush, page_cache};
      15              : use anyhow::Result;
      16              : use camino::Utf8PathBuf;
      17              : use pageserver_api::key::CompactKey;
      18              : use pageserver_api::key::Key;
      19              : use pageserver_api::keyspace::KeySpace;
      20              : use pageserver_api::models::InMemoryLayerInfo;
      21              : use pageserver_api::shard::TenantShardId;
      22              : use std::collections::{BTreeMap, HashMap};
      23              : use std::sync::{Arc, OnceLock};
      24              : use std::time::Instant;
      25              : use tracing::*;
      26              : use utils::{id::TimelineId, lsn::Lsn, vec_map::VecMap};
      27              : use wal_decoder::serialized_batch::{SerializedValueBatch, SerializedValueMeta, ValueMeta};
      28              : // avoid binding to Write (conflicts with std::io::Write)
      29              : // while being able to use std::fmt::Write's methods
      30              : use crate::metrics::TIMELINE_EPHEMERAL_BYTES;
      31              : use std::cmp::Ordering;
      32              : use std::fmt::Write;
      33              : use std::ops::Range;
      34              : use std::sync::atomic::Ordering as AtomicOrdering;
      35              : use std::sync::atomic::{AtomicU64, AtomicUsize};
      36              : use tokio::sync::RwLock;
      37              : 
      38              : use super::{DeltaLayerWriter, PersistentLayerDesc, ValuesReconstructState};
      39              : 
      40              : pub(crate) mod vectored_dio_read;
      41              : 
      42              : #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
      43              : pub(crate) struct InMemoryLayerFileId(page_cache::FileId);
      44              : 
      45              : pub struct InMemoryLayer {
      46              :     conf: &'static PageServerConf,
      47              :     tenant_shard_id: TenantShardId,
      48              :     timeline_id: TimelineId,
      49              :     file_id: InMemoryLayerFileId,
      50              : 
      51              :     /// This layer contains all the changes from 'start_lsn'. The
      52              :     /// start is inclusive.
      53              :     start_lsn: Lsn,
      54              : 
      55              :     /// Frozen layers have an exclusive end LSN.
      56              :     /// Writes are only allowed when this is `None`.
      57              :     pub(crate) end_lsn: OnceLock<Lsn>,
      58              : 
       59              :     /// Used for the traversal path. Cached representation of the in-memory layer once it is frozen.
      60              :     frozen_local_path_str: OnceLock<Arc<str>>,
      61              : 
      62              :     opened_at: Instant,
      63              : 
       64              :     /// The above fields never change, except for `end_lsn` and `frozen_local_path_str`, which are each set only once.
      65              :     /// All other changing parts are in `inner`, and protected by a mutex.
      66              :     inner: RwLock<InMemoryLayerInner>,
      67              : 
      68              :     estimated_in_mem_size: AtomicU64,
      69              : }
      70              : 
      71              : impl std::fmt::Debug for InMemoryLayer {
      72            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      73            0 :         f.debug_struct("InMemoryLayer")
      74            0 :             .field("start_lsn", &self.start_lsn)
      75            0 :             .field("end_lsn", &self.end_lsn)
      76            0 :             .field("inner", &self.inner)
      77            0 :             .finish()
      78            0 :     }
      79              : }
      80              : 
      81              : pub struct InMemoryLayerInner {
      82              :     /// All versions of all pages in the layer are kept here. Indexed
       83              :     /// by key and LSN. The [`IndexEntry`] encodes the position in the
      84              :     /// ephemeral file where the page version is stored.
      85              :     index: BTreeMap<CompactKey, VecMap<Lsn, IndexEntry>>,
      86              : 
      87              :     /// The values are stored in a serialized format in this file.
      88              :     /// Each serialized Value is preceded by a 'u32' length field.
       89              :     /// The `index` field above stores the offsets into this file.
      90              :     file: EphemeralFile,
      91              : 
      92              :     resource_units: GlobalResourceUnits,
      93              : }
      94              : 
      95              : /// Support the same max blob length as blob_io, because ultimately
      96              : /// all the InMemoryLayer contents end up being written into a delta layer,
       97              : /// using [`crate::tenant::blob_io`].
      98              : const MAX_SUPPORTED_BLOB_LEN: usize = crate::tenant::blob_io::MAX_SUPPORTED_BLOB_LEN;
      99              : const MAX_SUPPORTED_BLOB_LEN_BITS: usize = {
     100              :     let trailing_ones = MAX_SUPPORTED_BLOB_LEN.trailing_ones() as usize;
     101              :     let leading_zeroes = MAX_SUPPORTED_BLOB_LEN.leading_zeros() as usize;
     102              :     assert!(trailing_ones + leading_zeroes == std::mem::size_of::<usize>() * 8);
     103              :     trailing_ones
     104              : };
     105              : 
     106              : /// See [`InMemoryLayerInner::index`].
     107              : ///
     108              : /// For memory efficiency, the data is packed into a u64.
     109              : ///
     110              : /// Layout:
     111              : /// - 1 bit: `will_init`
     112              : /// - [`MAX_SUPPORTED_BLOB_LEN_BITS`][]: `len`
     113              : /// - [`MAX_SUPPORTED_POS_BITS`](IndexEntry::MAX_SUPPORTED_POS_BITS): `pos`
     114              : #[derive(Debug, Clone, Copy, PartialEq, Eq)]
     115              : pub struct IndexEntry(u64);
     116              : 
     117              : impl IndexEntry {
     118              :     /// See [`Self::MAX_SUPPORTED_POS`].
     119              :     const MAX_SUPPORTED_POS_BITS: usize = {
     120              :         let remainder = 64 - 1 - MAX_SUPPORTED_BLOB_LEN_BITS;
     121              :         if remainder < 32 {
     122              :             panic!("pos can be u32 as per type system, support that");
     123              :         }
     124              :         remainder
     125              :     };
     126              :     /// The maximum supported blob offset that can be represented by [`Self`].
     127              :     /// See also [`Self::validate_checkpoint_distance`].
     128              :     const MAX_SUPPORTED_POS: usize = (1 << Self::MAX_SUPPORTED_POS_BITS) - 1;
     129              : 
     130              :     // Layout
     131              :     const WILL_INIT_RANGE: Range<usize> = 0..1;
     132              :     const LEN_RANGE: Range<usize> =
     133              :         Self::WILL_INIT_RANGE.end..Self::WILL_INIT_RANGE.end + MAX_SUPPORTED_BLOB_LEN_BITS;
     134              :     const POS_RANGE: Range<usize> =
     135              :         Self::LEN_RANGE.end..Self::LEN_RANGE.end + Self::MAX_SUPPORTED_POS_BITS;
     136              :     const _ASSERT: () = {
     137              :         if Self::POS_RANGE.end != 64 {
     138              :             panic!("we don't want undefined bits for our own sanity")
     139              :         }
     140              :     };
     141              : 
     142              :     /// Fails if and only if the offset or length encoded in `arg` is too large to be represented by [`Self`].
     143              :     ///
      144              :     /// The only reason why that can happen in the system is if the [`InMemoryLayer`] grows too large.
     145              :     /// The [`InMemoryLayer`] size is determined by the checkpoint distance, enforced by [`crate::tenant::Timeline::should_roll`].
     146              :     ///
     147              :     /// Thus, to avoid failure of this function, whenever we start up and/or change checkpoint distance,
     148              :     /// call [`Self::validate_checkpoint_distance`] with the new checkpoint distance value.
     149              :     ///
     150              :     /// TODO: this check should happen ideally at config parsing time (and in the request handler when a change to checkpoint distance is requested)
     151              :     /// When cleaning this up, also look into the s3 max file size check that is performed in delta layer writer.
     152              :     #[inline(always)]
     153     10181460 :     fn new(arg: IndexEntryNewArgs) -> anyhow::Result<Self> {
     154     10181460 :         let IndexEntryNewArgs {
     155     10181460 :             base_offset,
     156     10181460 :             batch_offset,
     157     10181460 :             len,
     158     10181460 :             will_init,
     159     10181460 :         } = arg;
     160              : 
     161     10181460 :         let pos = base_offset
     162     10181460 :             .checked_add(batch_offset)
     163     10181460 :             .ok_or_else(|| anyhow::anyhow!("base_offset + batch_offset overflows u64: base_offset={base_offset} batch_offset={batch_offset}"))?;
     164              : 
     165     10181460 :         if pos.into_usize() > Self::MAX_SUPPORTED_POS {
     166           16 :             anyhow::bail!(
     167           16 :                 "base_offset+batch_offset exceeds the maximum supported value: base_offset={base_offset} batch_offset={batch_offset} (+)={pos} max={max}",
     168           16 :                 max = Self::MAX_SUPPORTED_POS
     169           16 :             );
     170     10181444 :         }
     171     10181444 : 
     172     10181444 :         if len > MAX_SUPPORTED_BLOB_LEN {
     173            4 :             anyhow::bail!(
     174            4 :                 "len exceeds the maximum supported length: len={len} max={MAX_SUPPORTED_BLOB_LEN}",
     175            4 :             );
     176     10181440 :         }
     177     10181440 : 
     178     10181440 :         let mut data: u64 = 0;
     179              :         use bit_field::BitField;
     180     10181440 :         data.set_bits(Self::WILL_INIT_RANGE, if will_init { 1 } else { 0 });
     181     10181440 :         data.set_bits(Self::LEN_RANGE, len.into_u64());
     182     10181440 :         data.set_bits(Self::POS_RANGE, pos);
     183     10181440 : 
     184     10181440 :         Ok(Self(data))
     185     10181460 :     }
     186              : 
     187              :     #[inline(always)]
     188      9769451 :     fn unpack(&self) -> IndexEntryUnpacked {
     189              :         use bit_field::BitField;
     190      9769451 :         IndexEntryUnpacked {
     191      9769451 :             will_init: self.0.get_bits(Self::WILL_INIT_RANGE) != 0,
     192      9769451 :             len: self.0.get_bits(Self::LEN_RANGE),
     193      9769451 :             pos: self.0.get_bits(Self::POS_RANGE),
     194      9769451 :         }
     195      9769451 :     }
     196              : 
     197              :     /// See [`Self::new`].
     198          480 :     pub(crate) const fn validate_checkpoint_distance(
     199          480 :         checkpoint_distance: u64,
     200          480 :     ) -> Result<(), &'static str> {
     201          480 :         if checkpoint_distance > Self::MAX_SUPPORTED_POS as u64 {
     202            0 :             return Err("exceeds the maximum supported value");
     203          480 :         }
     204          480 :         let res = u64_to_usize(checkpoint_distance).checked_add(MAX_SUPPORTED_BLOB_LEN);
     205          480 :         if res.is_none() {
     206            0 :             return Err(
     207            0 :                 "checkpoint distance + max supported blob len overflows in-memory addition",
     208            0 :             );
     209          480 :         }
     210          480 : 
     211          480 :         // NB: it is ok for the result of the addition to be larger than MAX_SUPPORTED_POS
     212          480 : 
     213          480 :         Ok(())
     214          480 :     }
     215              : 
     216              :     const _ASSERT_DEFAULT_CHECKPOINT_DISTANCE_IS_VALID: () = {
     217              :         let res = Self::validate_checkpoint_distance(
     218              :             pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE,
     219              :         );
     220              :         if res.is_err() {
     221              :             panic!("default checkpoint distance is valid")
     222              :         }
     223              :     };
     224              : }
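
A minimal standalone sketch of the same will_init/len/pos packing, written with plain shifts instead of the bit_field crate. LEN_BITS = 28 is an assumption made for the sketch; in the real code the width is MAX_SUPPORTED_BLOB_LEN_BITS, derived from blob_io's length limit.

    const LEN_BITS: u32 = 28; // assumed width of the len field, for illustration
    const POS_BITS: u32 = 64 - 1 - LEN_BITS;

    fn pack(will_init: bool, len: u64, pos: u64) -> u64 {
        assert!(len < (1u64 << LEN_BITS) && pos < (1u64 << POS_BITS));
        (will_init as u64)            // bit 0
            | (len << 1)              // next LEN_BITS bits
            | (pos << (1 + LEN_BITS)) // remaining high bits
    }

    fn unpack(v: u64) -> (bool, u64, u64) {
        let will_init = v & 1 != 0;
        let len = (v >> 1) & ((1u64 << LEN_BITS) - 1);
        let pos = v >> (1 + LEN_BITS);
        (will_init, len, pos)
    }

Round-tripping any in-range (will_init, len, pos) through pack and unpack returns the inputs, which is what the test_index_entry test at the bottom of this file verifies for the real constants.
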
     225              : 
     226              : /// Args to [`IndexEntry::new`].
     227              : #[derive(Clone, Copy)]
     228              : struct IndexEntryNewArgs {
     229              :     base_offset: u64,
     230              :     batch_offset: u64,
     231              :     len: usize,
     232              :     will_init: bool,
     233              : }
     234              : 
     235              : /// Unpacked representation of the bitfielded [`IndexEntry`].
     236              : #[derive(Clone, Copy, PartialEq, Eq, Debug)]
     237              : struct IndexEntryUnpacked {
     238              :     will_init: bool,
     239              :     len: u64,
     240              :     pos: u64,
     241              : }
     242              : 
     243              : impl std::fmt::Debug for InMemoryLayerInner {
     244            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     245            0 :         f.debug_struct("InMemoryLayerInner").finish()
     246            0 :     }
     247              : }
     248              : 
     249              : /// State shared by all in-memory (ephemeral) layers.  Updated infrequently during background ticks in Timeline,
     250              : /// to minimize contention.
     251              : ///
     252              : /// This global state is used to implement behaviors that require a global view of the system, e.g.
     253              : /// rolling layers proactively to limit the total amount of dirty data.
     254              : pub(crate) struct GlobalResources {
     255              :     // Limit on how high dirty_bytes may grow before we start freezing layers to reduce it.
     256              :     // Zero means unlimited.
     257              :     pub(crate) max_dirty_bytes: AtomicU64,
     258              :     // How many bytes are in all EphemeralFile objects
     259              :     dirty_bytes: AtomicU64,
     260              :     // How many layers are contributing to dirty_bytes
     261              :     dirty_layers: AtomicUsize,
     262              : }
     263              : 
     264              : // Per-timeline RAII struct for its contribution to [`GlobalResources`]
     265              : struct GlobalResourceUnits {
     266              :     // How many dirty bytes have I added to the global dirty_bytes: this guard object is responsible
     267              :     // for decrementing the global counter by this many bytes when dropped.
     268              :     dirty_bytes: u64,
     269              : }
     270              : 
     271              : impl GlobalResourceUnits {
     272              :     // Hint for the layer append path to update us when the layer size differs from the last
      273              :     // call to publish_size by this much.  If we don't reach this threshold, we'll still get
     274              :     // updated when the Timeline "ticks" in the background.
     275              :     const MAX_SIZE_DRIFT: u64 = 10 * 1024 * 1024;
     276              : 
     277         2596 :     fn new() -> Self {
     278         2596 :         GLOBAL_RESOURCES
     279         2596 :             .dirty_layers
     280         2596 :             .fetch_add(1, AtomicOrdering::Relaxed);
     281         2596 :         Self { dirty_bytes: 0 }
     282         2596 :     }
     283              : 
     284              :     /// Do not call this frequently: all timelines will write to these same global atomics,
     285              :     /// so this is a relatively expensive operation.  Wait at least a few seconds between calls.
     286              :     ///
     287              :     /// Returns the effective layer size limit that should be applied, if any, to keep
     288              :     /// the total number of dirty bytes below the configured maximum.
     289         2368 :     fn publish_size(&mut self, size: u64) -> Option<u64> {
     290         2368 :         let new_global_dirty_bytes = match size.cmp(&self.dirty_bytes) {
     291         2348 :             Ordering::Equal => GLOBAL_RESOURCES.dirty_bytes.load(AtomicOrdering::Relaxed),
     292              :             Ordering::Greater => {
     293           16 :                 let delta = size - self.dirty_bytes;
     294           16 :                 let old = GLOBAL_RESOURCES
     295           16 :                     .dirty_bytes
     296           16 :                     .fetch_add(delta, AtomicOrdering::Relaxed);
     297           16 :                 old + delta
     298              :             }
     299              :             Ordering::Less => {
     300            4 :                 let delta = self.dirty_bytes - size;
     301            4 :                 let old = GLOBAL_RESOURCES
     302            4 :                     .dirty_bytes
     303            4 :                     .fetch_sub(delta, AtomicOrdering::Relaxed);
     304            4 :                 old - delta
     305              :             }
     306              :         };
     307              : 
     308              :         // This is a sloppy update: concurrent updates to the counter will race, and the exact
      309              :         // value of the metric might not be the exact latest value of GLOBAL_RESOURCES.dirty_bytes.
     310              :         // That's okay: as long as the metric contains some recent value, it doesn't have to always
     311              :         // be literally the last update.
     312         2368 :         TIMELINE_EPHEMERAL_BYTES.set(new_global_dirty_bytes);
     313         2368 : 
     314         2368 :         self.dirty_bytes = size;
     315         2368 : 
     316         2368 :         let max_dirty_bytes = GLOBAL_RESOURCES
     317         2368 :             .max_dirty_bytes
     318         2368 :             .load(AtomicOrdering::Relaxed);
     319         2368 :         if max_dirty_bytes > 0 && new_global_dirty_bytes > max_dirty_bytes {
     320              :             // Set the layer file limit to the average layer size: this implies that all above-average
      321              :             // sized layers will be eligible for freezing.  They will be frozen in the order they
     322              :             // next enter publish_size.
     323            0 :             Some(
     324            0 :                 new_global_dirty_bytes
     325            0 :                     / GLOBAL_RESOURCES.dirty_layers.load(AtomicOrdering::Relaxed) as u64,
     326            0 :             )
     327              :         } else {
     328         2368 :             None
     329              :         }
     330         2368 :     }
     331              : 
     332              :     // Call publish_size if the input size differs from last published size by more than
     333              :     // the drift limit
     334      9608464 :     fn maybe_publish_size(&mut self, size: u64) {
     335      9608464 :         let publish = match size.cmp(&self.dirty_bytes) {
     336            0 :             Ordering::Equal => false,
     337      9608464 :             Ordering::Greater => size - self.dirty_bytes > Self::MAX_SIZE_DRIFT,
     338            0 :             Ordering::Less => self.dirty_bytes - size > Self::MAX_SIZE_DRIFT,
     339              :         };
     340              : 
     341      9608464 :         if publish {
     342           16 :             self.publish_size(size);
     343      9608448 :         }
     344      9608464 :     }
     345              : }
     346              : 
     347              : impl Drop for GlobalResourceUnits {
     348         2352 :     fn drop(&mut self) {
     349         2352 :         GLOBAL_RESOURCES
     350         2352 :             .dirty_layers
     351         2352 :             .fetch_sub(1, AtomicOrdering::Relaxed);
     352         2352 : 
     353         2352 :         // Subtract our contribution to the global total dirty bytes
     354         2352 :         self.publish_size(0);
     355         2352 :     }
     356              : }
     357              : 
     358              : pub(crate) static GLOBAL_RESOURCES: GlobalResources = GlobalResources {
     359              :     max_dirty_bytes: AtomicU64::new(0),
     360              :     dirty_bytes: AtomicU64::new(0),
     361              :     dirty_layers: AtomicUsize::new(0),
     362              : };
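
The drift-throttled publishing above boils down to a reusable pattern: each writer keeps a private count and reconciles it with a shared atomic only when the difference exceeds a threshold, so hot paths rarely touch the contended global. A self-contained sketch, with illustrative names:

    use std::sync::atomic::{AtomicU64, Ordering};

    static GLOBAL_DIRTY: AtomicU64 = AtomicU64::new(0);
    const MAX_DRIFT: u64 = 10 * 1024 * 1024;

    struct LocalUnits {
        published: u64, // our last contribution to GLOBAL_DIRTY
    }

    impl LocalUnits {
        /// Unconditionally reconcile the global counter with `size`.
        fn publish(&mut self, size: u64) {
            if size > self.published {
                GLOBAL_DIRTY.fetch_add(size - self.published, Ordering::Relaxed);
            } else {
                GLOBAL_DIRTY.fetch_sub(self.published - size, Ordering::Relaxed);
            }
            self.published = size;
        }

        /// Touch the shared atomic only once the local value drifts far enough.
        fn maybe_publish(&mut self, size: u64) {
            if size.abs_diff(self.published) > MAX_DRIFT {
                self.publish(size);
            }
        }
    }
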
     363              : 
     364              : impl InMemoryLayer {
     365      1213347 :     pub(crate) fn file_id(&self) -> InMemoryLayerFileId {
     366      1213347 :         self.file_id
     367      1213347 :     }
     368              : 
     369         2348 :     pub(crate) fn get_timeline_id(&self) -> TimelineId {
     370         2348 :         self.timeline_id
     371         2348 :     }
     372              : 
     373         4696 :     pub(crate) fn info(&self) -> InMemoryLayerInfo {
     374         4696 :         let lsn_start = self.start_lsn;
     375              : 
     376         4696 :         if let Some(&lsn_end) = self.end_lsn.get() {
     377         4696 :             InMemoryLayerInfo::Frozen { lsn_start, lsn_end }
     378              :         } else {
     379            0 :             InMemoryLayerInfo::Open { lsn_start }
     380              :         }
     381         4696 :     }
     382              : 
     383         4696 :     pub(crate) fn try_len(&self) -> Option<u64> {
     384         4696 :         self.inner.try_read().map(|i| i.file.len()).ok()
     385         4696 :     }
     386              : 
     387      9608464 :     pub(crate) fn assert_writable(&self) {
     388      9608464 :         assert!(self.end_lsn.get().is_none());
     389      9608464 :     }
     390              : 
     391      4259800 :     pub(crate) fn end_lsn_or_max(&self) -> Lsn {
     392      4259800 :         self.end_lsn.get().copied().unwrap_or(Lsn::MAX)
     393      4259800 :     }
     394              : 
     395      4257452 :     pub(crate) fn get_lsn_range(&self) -> Range<Lsn> {
     396      4257452 :         self.start_lsn..self.end_lsn_or_max()
     397      4257452 :     }
     398              : 
     399              :     /// debugging function to print out the contents of the layer
     400              :     ///
      401              :     /// this is likely completely unused
     402            0 :     pub async fn dump(&self, _verbose: bool, _ctx: &RequestContext) -> Result<()> {
     403            0 :         let end_str = self.end_lsn_or_max();
     404            0 : 
     405            0 :         println!(
     406            0 :             "----- in-memory layer for tli {} LSNs {}-{} ----",
     407            0 :             self.timeline_id, self.start_lsn, end_str,
     408            0 :         );
     409            0 : 
     410            0 :         Ok(())
     411            0 :     }
     412              : 
     413              :     // Look up the keys in the provided keyspace and update
     414              :     // the reconstruct state with whatever is found.
     415      1213347 :     pub(crate) async fn get_values_reconstruct_data(
     416      1213347 :         self: &Arc<InMemoryLayer>,
     417      1213347 :         keyspace: KeySpace,
     418      1213347 :         end_lsn: Lsn,
     419      1213347 :         reconstruct_state: &mut ValuesReconstructState,
     420      1213347 :         ctx: &RequestContext,
     421      1213347 :     ) -> Result<(), GetVectoredError> {
     422      1213347 :         let ctx = RequestContextBuilder::extend(ctx)
     423      1213347 :             .page_content_kind(PageContentKind::InMemoryLayer)
     424      1213347 :             .build();
     425              : 
     426      1213347 :         let inner = self.inner.read().await;
     427              : 
     428              :         struct ValueRead {
     429              :             entry_lsn: Lsn,
     430              :             read: vectored_dio_read::LogicalRead<Vec<u8>>,
     431              :         }
     432      1213347 :         let mut reads: HashMap<Key, Vec<ValueRead>> = HashMap::new();
     433      1213347 :         let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default();
     434      1213347 : 
     435      1213347 :         let lsn_range = self.start_lsn..end_lsn;
     436              : 
     437      1213759 :         for range in keyspace.ranges.iter() {
     438      1213759 :             for (key, vec_map) in inner
     439      1213759 :                 .index
     440      1213759 :                 .range(range.start.to_compact()..range.end.to_compact())
     441              :             {
     442       998347 :                 let key = Key::from_compact(*key);
     443       998347 :                 let slice = vec_map.slice_range(lsn_range.clone());
     444              : 
     445       998347 :                 for (entry_lsn, index_entry) in slice.iter().rev() {
     446              :                     let IndexEntryUnpacked {
     447       998339 :                         pos,
     448       998339 :                         len,
     449       998339 :                         will_init,
     450       998339 :                     } = index_entry.unpack();
     451       998339 : 
     452       998339 :                     reads.entry(key).or_default().push(ValueRead {
     453       998339 :                         entry_lsn: *entry_lsn,
     454       998339 :                         read: vectored_dio_read::LogicalRead::new(
     455       998339 :                             pos,
     456       998339 :                             Vec::with_capacity(len as usize),
     457       998339 :                         ),
     458       998339 :                     });
     459       998339 : 
     460       998339 :                     let io = reconstruct_state.update_key(&key, *entry_lsn, will_init);
     461       998339 :                     ios.insert((key, *entry_lsn), io);
     462       998339 : 
     463       998339 :                     if will_init {
     464       998339 :                         break;
     465            0 :                     }
     466              :                 }
     467              :             }
     468              :         }
     469      1213347 :         drop(inner); // release the lock before we spawn the IO; if it's serial-mode IO we will deadlock on the read().await below
     470      1213347 :         let read_from = Arc::clone(self);
     471      1213347 :         let read_ctx = ctx.attached_child();
     472      1213347 :         reconstruct_state
     473      1213347 :             .spawn_io(async move {
     474      1213347 :                 let inner = read_from.inner.read().await;
     475      1213347 :                 let f = vectored_dio_read::execute(
     476      1213347 :                     &inner.file,
     477      1213347 :                     reads
     478      1213347 :                         .iter()
     479      1213347 :                         .flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)),
     480      1213347 :                     &read_ctx,
     481      1213347 :                 );
     482      1213347 :                 send_future::SendFuture::send(f) // https://github.com/rust-lang/rust/issues/96865
     483      1213347 :                     .await;
     484              : 
     485      2211686 :                 for (key, value_reads) in reads {
     486      1996678 :                     for ValueRead { entry_lsn, read } in value_reads {
     487       998339 :                         let io = ios.remove(&(key, entry_lsn)).expect("sender must exist");
     488       998339 :                         match read.into_result().expect("we run execute() above") {
     489            0 :                             Err(e) => {
     490            0 :                                 io.complete(Err(std::io::Error::new(
     491            0 :                                     e.kind(),
     492            0 :                                     "dio vec read failed",
     493            0 :                                 )));
     494            0 :                             }
     495       998339 :                             Ok(value_buf) => {
     496       998339 :                                 io.complete(Ok(OnDiskValue::WalRecordOrImage(value_buf.into())));
     497       998339 :                             }
     498              :                         }
     499              :                     }
     500              :                 }
     501              : 
     502      1213347 :                 assert!(ios.is_empty());
     503              : 
     504              :                 // Keep layer existent until this IO is done;
     505              :                 // This is kinda forced for InMemoryLayer because we need to inner.read() anyway,
     506              :                 // but it's less obvious for DeltaLayer and ImageLayer. So, keep this explicit
     507              :                 // drop for consistency among all three layer types.
     508      1213347 :                 drop(inner);
     509      1213347 :                 drop(read_from);
     510      1213347 :             })
     511      1213347 :             .await;
     512              : 
     513      1213347 :         Ok(())
     514      1213347 :     }
     515              : }
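
The method above follows a plan-then-execute shape: read descriptors are gathered while holding the lock, and the actual IO happens afterwards, so the lock is not held across a potentially slow read path. A simplified synchronous sketch of that shape, with illustrative types in place of the dio-read machinery:

    use std::sync::RwLock;

    /// Plan (pos, len) reads under the lock, execute them after releasing it.
    fn read_all(index: &RwLock<Vec<(u64, usize)>>, file: &[u8]) -> Vec<Vec<u8>> {
        // Phase 1: plan while holding the read lock.
        let plans: Vec<(u64, usize)> = index.read().unwrap().clone();
        // The guard is dropped here, so the IO below runs without the lock.

        // Phase 2: execute the planned reads.
        plans
            .into_iter()
            .map(|(pos, len)| file[pos as usize..pos as usize + len].to_vec())
            .collect()
    }
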
     516              : 
     517         4696 : fn inmem_layer_display(mut f: impl Write, start_lsn: Lsn, end_lsn: Lsn) -> std::fmt::Result {
     518         4696 :     write!(f, "inmem-{:016X}-{:016X}", start_lsn.0, end_lsn.0)
     519         4696 : }
     520              : 
     521         2348 : fn inmem_layer_log_display(
     522         2348 :     mut f: impl Write,
     523         2348 :     timeline: TimelineId,
     524         2348 :     start_lsn: Lsn,
     525         2348 :     end_lsn: Lsn,
     526         2348 : ) -> std::fmt::Result {
     527         2348 :     write!(f, "timeline {} in-memory ", timeline)?;
     528         2348 :     inmem_layer_display(f, start_lsn, end_lsn)
     529         2348 : }
     530              : 
     531              : impl std::fmt::Display for InMemoryLayer {
     532         2348 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     533         2348 :         let end_lsn = self.end_lsn_or_max();
     534         2348 :         inmem_layer_display(f, self.start_lsn, end_lsn)
     535         2348 :     }
     536              : }
     537              : 
     538              : impl InMemoryLayer {
     539              :     /// Get layer size.
     540         2596 :     pub async fn size(&self) -> Result<u64> {
     541         2596 :         let inner = self.inner.read().await;
     542         2596 :         Ok(inner.file.len())
     543         2596 :     }
     544              : 
     545         2426 :     pub fn estimated_in_mem_size(&self) -> u64 {
     546         2426 :         self.estimated_in_mem_size.load(AtomicOrdering::Relaxed)
     547         2426 :     }
     548              : 
     549              :     /// Create a new, empty, in-memory layer
     550         2596 :     pub async fn create(
     551         2596 :         conf: &'static PageServerConf,
     552         2596 :         timeline_id: TimelineId,
     553         2596 :         tenant_shard_id: TenantShardId,
     554         2596 :         start_lsn: Lsn,
     555         2596 :         gate: &utils::sync::gate::Gate,
     556         2596 :         ctx: &RequestContext,
     557         2596 :     ) -> Result<InMemoryLayer> {
     558         2596 :         trace!("initializing new empty InMemoryLayer for writing on timeline {timeline_id} at {start_lsn}");
     559              : 
     560         2596 :         let file = EphemeralFile::create(conf, tenant_shard_id, timeline_id, gate, ctx).await?;
     561         2596 :         let key = InMemoryLayerFileId(file.page_cache_file_id());
     562         2596 : 
     563         2596 :         Ok(InMemoryLayer {
     564         2596 :             file_id: key,
     565         2596 :             frozen_local_path_str: OnceLock::new(),
     566         2596 :             conf,
     567         2596 :             timeline_id,
     568         2596 :             tenant_shard_id,
     569         2596 :             start_lsn,
     570         2596 :             end_lsn: OnceLock::new(),
     571         2596 :             opened_at: Instant::now(),
     572         2596 :             inner: RwLock::new(InMemoryLayerInner {
     573         2596 :                 index: BTreeMap::new(),
     574         2596 :                 file,
     575         2596 :                 resource_units: GlobalResourceUnits::new(),
     576         2596 :             }),
     577         2596 :             estimated_in_mem_size: AtomicU64::new(0),
     578         2596 :         })
     579         2596 :     }
     580              : 
     581              :     /// Write path.
     582              :     ///
      583              :     /// Errors are not retryable: the [`InMemoryLayer`] must be discarded and not be read from.
     584              :     /// The reason why it's not retryable is that the [`EphemeralFile`] writes are not retryable.
     585              :     /// TODO: it can be made retryable if we aborted the process on EphemeralFile write errors.
     586      9608464 :     pub async fn put_batch(
     587      9608464 :         &self,
     588      9608464 :         serialized_batch: SerializedValueBatch,
     589      9608464 :         ctx: &RequestContext,
     590      9608464 :     ) -> anyhow::Result<()> {
     591      9608464 :         let mut inner = self.inner.write().await;
     592      9608464 :         self.assert_writable();
     593      9608464 : 
     594      9608464 :         let base_offset = inner.file.len();
     595      9608464 : 
     596      9608464 :         let SerializedValueBatch {
     597      9608464 :             raw,
     598      9608464 :             metadata,
     599      9608464 :             max_lsn: _,
     600      9608464 :             len: _,
     601      9608464 :         } = serialized_batch;
     602      9608464 : 
     603      9608464 :         // Write the batch to the file
     604      9608464 :         inner.file.write_raw(&raw, ctx).await?;
     605      9608464 :         let new_size = inner.file.len();
     606      9608464 : 
     607      9608464 :         let expected_new_len = base_offset
     608      9608464 :             .checked_add(raw.len().into_u64())
     609      9608464 :             // write_raw would error if we were to overflow u64.
      610      9608464 :             // Also, IndexEntry and higher levels in the code
      611      9608464 :             // don't allow the file to grow that large.
     612      9608464 :             .unwrap();
     613      9608464 :         assert_eq!(new_size, expected_new_len);
     614              : 
     615              :         // Update the index with the new entries
     616     19789824 :         for meta in metadata {
     617              :             let SerializedValueMeta {
     618     10181360 :                 key,
     619     10181360 :                 lsn,
     620     10181360 :                 batch_offset,
     621     10181360 :                 len,
     622     10181360 :                 will_init,
     623     10181360 :             } = match meta {
     624     10181360 :                 ValueMeta::Serialized(ser) => ser,
     625              :                 ValueMeta::Observed(_) => {
     626            0 :                     continue;
     627              :                 }
     628              :             };
     629              : 
     630              :             // Add the base_offset to the batch's index entries which are relative to the batch start.
     631     10181360 :             let index_entry = IndexEntry::new(IndexEntryNewArgs {
     632     10181360 :                 base_offset,
     633     10181360 :                 batch_offset,
     634     10181360 :                 len,
     635     10181360 :                 will_init,
     636     10181360 :             })?;
     637              : 
     638     10181360 :             let vec_map = inner.index.entry(key).or_default();
     639     10181360 :             let old = vec_map.append_or_update_last(lsn, index_entry).unwrap().0;
     640     10181360 :             if old.is_some() {
     641              :                 // This should not break anything, but is unexpected: ingestion code aims to filter out
     642              :                 // multiple writes to the same key at the same LSN.  This happens in cases where our
      643              :                 // ingestion code generates some write like an empty page, and we see a write from postgres
     644              :                 // to the same key in the same wal record.  If one such write makes it through, we
     645              :                 // index the most recent write, implicitly ignoring the earlier write.  We log a warning
     646              :                 // because this case is unexpected, and we would like tests to fail if this happens.
     647            0 :                 warn!("Key {} at {} written twice at same LSN", key, lsn);
     648     10181360 :             }
     649     10181360 :             self.estimated_in_mem_size.fetch_add(
     650     10181360 :                 (std::mem::size_of::<CompactKey>()
     651     10181360 :                     + std::mem::size_of::<Lsn>()
     652     10181360 :                     + std::mem::size_of::<IndexEntry>()) as u64,
     653     10181360 :                 AtomicOrdering::Relaxed,
     654     10181360 :             );
     655              :         }
     656              : 
     657      9608464 :         inner.resource_units.maybe_publish_size(new_size);
     658      9608464 : 
     659      9608464 :         Ok(())
     660      9608464 :     }
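
A value's position in the file is base_offset (the file length before the batch append) plus its offset within the batch; IndexEntry::new performs this addition with overflow checks. A small sketch of the arithmetic, with a hypothetical signature:

    /// Append `batch` to `file` and return the absolute file position of each
    /// value, given its offset relative to the start of the batch.
    fn append_batch(file: &mut Vec<u8>, batch: &[u8], batch_offsets: &[u64]) -> Vec<u64> {
        let base_offset = file.len() as u64; // file length before the append
        file.extend_from_slice(batch);
        batch_offsets.iter().map(|off| base_offset + off).collect()
    }
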
     661              : 
     662      9606020 :     pub(crate) fn get_opened_at(&self) -> Instant {
     663      9606020 :         self.opened_at
     664      9606020 :     }
     665              : 
     666            0 :     pub(crate) async fn tick(&self) -> Option<u64> {
     667            0 :         let mut inner = self.inner.write().await;
     668            0 :         let size = inner.file.len();
     669            0 :         inner.resource_units.publish_size(size)
     670            0 :     }
     671              : 
     672            4 :     pub(crate) async fn put_tombstones(&self, _key_ranges: &[(Range<Key>, Lsn)]) -> Result<()> {
     673            4 :         // TODO: Currently, we just leak the storage for any deleted keys
     674            4 :         Ok(())
     675            4 :     }
     676              : 
     677              :     /// Records the end_lsn for non-dropped layers.
     678              :     /// `end_lsn` is exclusive
     679         2348 :     pub async fn freeze(&self, end_lsn: Lsn) {
     680         2348 :         assert!(
     681         2348 :             self.start_lsn < end_lsn,
     682            0 :             "{} >= {}",
     683              :             self.start_lsn,
     684              :             end_lsn
     685              :         );
     686         2348 :         self.end_lsn.set(end_lsn).expect("end_lsn set only once");
     687         2348 : 
     688         2348 :         self.frozen_local_path_str
     689         2348 :             .set({
     690         2348 :                 let mut buf = String::new();
     691         2348 :                 inmem_layer_log_display(&mut buf, self.get_timeline_id(), self.start_lsn, end_lsn)
     692         2348 :                     .unwrap();
     693         2348 :                 buf.into()
     694         2348 :             })
     695         2348 :             .expect("frozen_local_path_str set only once");
     696              : 
     697              :         #[cfg(debug_assertions)]
     698              :         {
     699         2348 :             let inner = self.inner.write().await;
     700      8511946 :             for vec_map in inner.index.values() {
     701      8773880 :                 for (lsn, _) in vec_map.as_slice() {
     702      8773880 :                     assert!(*lsn < end_lsn);
     703              :                 }
     704              :             }
     705              :         }
     706         2348 :     }
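
Note how `end_lsn` being a OnceLock doubles as the frozen flag: writers assert it is unset, and freeze sets it exactly once. A minimal sketch of the pattern, with illustrative names:

    use std::sync::OnceLock;

    struct SealOnce {
        end: OnceLock<u64>,
    }

    impl SealOnce {
        /// Writes are allowed only while the end has not been set.
        fn assert_writable(&self) {
            assert!(self.end.get().is_none());
        }

        /// Seal the object; panics if called twice, mirroring "end_lsn set only once".
        fn freeze(&self, end: u64) {
            self.end.set(end).expect("set only once");
        }
    }
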
     707              : 
     708              :     /// Write this frozen in-memory layer to disk. If `key_range` is set, the delta
      709              :     /// layer will only contain the key range the user specifies, and this function may return `None`
     710              :     /// if there are no matching keys.
     711              :     ///
     712              :     /// Returns a new delta layer with all the same data as this in-memory layer
     713         1936 :     pub async fn write_to_disk(
     714         1936 :         &self,
     715         1936 :         ctx: &RequestContext,
     716         1936 :         key_range: Option<Range<Key>>,
     717         1936 :         l0_flush_global_state: &l0_flush::Inner,
     718         1936 :     ) -> Result<Option<(PersistentLayerDesc, Utf8PathBuf)>> {
     719              :         // Grab the lock in read-mode. We hold it over the I/O, but because this
     720              :         // layer is not writeable anymore, no one should be trying to acquire the
     721              :         // write lock on it, so we shouldn't block anyone. There's one exception
     722              :         // though: another thread might have grabbed a reference to this layer
      723              :         // in `get_layer_for_write` just before the checkpointer called
     724              :         // `freeze`, and then `write_to_disk` on it. When the thread gets the
     725              :         // lock, it will see that it's not writeable anymore and retry, but it
     726              :         // would have to wait until we release it. That race condition is very
     727              :         // rare though, so we just accept the potential latency hit for now.
     728         1936 :         let inner = self.inner.read().await;
     729              : 
     730              :         use l0_flush::Inner;
     731         1936 :         let _concurrency_permit = match l0_flush_global_state {
     732         1936 :             Inner::Direct { semaphore, .. } => Some(semaphore.acquire().await),
     733              :         };
     734              : 
     735         1936 :         let end_lsn = *self.end_lsn.get().unwrap();
     736              : 
     737         1936 :         let key_count = if let Some(key_range) = key_range {
     738            0 :             let key_range = key_range.start.to_compact()..key_range.end.to_compact();
     739            0 : 
     740            0 :             inner
     741            0 :                 .index
     742            0 :                 .iter()
     743            0 :                 .filter(|(k, _)| key_range.contains(k))
     744            0 :                 .count()
     745              :         } else {
     746         1936 :             inner.index.len()
     747              :         };
     748         1936 :         if key_count == 0 {
     749            0 :             return Ok(None);
     750         1936 :         }
     751              : 
     752         1936 :         let mut delta_layer_writer = DeltaLayerWriter::new(
     753         1936 :             self.conf,
     754         1936 :             self.timeline_id,
     755         1936 :             self.tenant_shard_id,
     756         1936 :             Key::MIN,
     757         1936 :             self.start_lsn..end_lsn,
     758         1936 :             ctx,
     759         1936 :         )
     760         1936 :         .await?;
     761              : 
     762         1936 :         match l0_flush_global_state {
     763              :             l0_flush::Inner::Direct { .. } => {
     764         1936 :                 let file_contents = inner.file.load_to_io_buf(ctx).await?;
     765         1936 :                 let file_contents = file_contents.freeze();
     766              : 
     767      8509098 :                 for (key, vec_map) in inner.index.iter() {
     768              :                     // Write all page versions
     769      8771032 :                     for (lsn, entry) in vec_map
     770      8509098 :                         .as_slice()
     771      8509098 :                         .iter()
     772      8771032 :                         .map(|(lsn, entry)| (lsn, entry.unpack()))
     773              :                     {
     774              :                         let IndexEntryUnpacked {
     775      8771032 :                             pos,
     776      8771032 :                             len,
     777      8771032 :                             will_init,
     778      8771032 :                         } = entry;
     779      8771032 :                         let buf = file_contents.slice(pos as usize..(pos + len) as usize);
     780      8771032 :                         let (_buf, res) = delta_layer_writer
     781      8771032 :                             .put_value_bytes(
     782      8771032 :                                 Key::from_compact(*key),
     783      8771032 :                                 *lsn,
     784      8771032 :                                 buf.slice_len(),
     785      8771032 :                                 will_init,
     786      8771032 :                                 ctx,
     787      8771032 :                             )
     788      8771032 :                             .await;
     789      8771032 :                         res?;
     790              :                     }
     791              :                 }
     792              :             }
     793              :         }
     794              : 
     795              :         // MAX is used here because we identify L0 layers by full key range
     796         1936 :         let (desc, path) = delta_layer_writer.finish(Key::MAX, ctx).await?;
     797              : 
      798              :         // Hold the permit until all the IO is done, including the fsync in `delta_layer_writer.finish()`.
     799              :         //
     800              :         // If we didn't and our caller drops this future, tokio-epoll-uring would extend the lifetime of
     801              :         // the `file_contents: Vec<u8>` until the IO is done, but not the permit's lifetime.
      802              :         // Thus, we'd have more concurrent `Vec<u8>` in existence than the semaphore allows.
     803              :         //
     804              :         // We hold across the fsync so that on ext4 mounted with data=ordered, all the kernel page cache pages
     805              :         // we dirtied when writing to the filesystem have been flushed and marked !dirty.
     806         1936 :         drop(_concurrency_permit);
     807         1936 : 
     808         1936 :         Ok(Some((desc, path)))
     809         1936 :     }
     810              : }
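
write_to_disk loads the ephemeral file into memory once and then walks the index; because the index is a BTreeMap, the walk naturally yields keys in ascending order, allowing a single ordered pass over the loaded buffer. A condensed sketch with toy types:

    use std::collections::BTreeMap;

    /// Emit (key, lsn, value) triples in key order by slicing one loaded buffer.
    fn flush_in_key_order(
        index: &BTreeMap<u64, Vec<(u64, (u64, usize))>>, // key -> [(lsn, (pos, len))]
        file: &[u8],
    ) -> Vec<(u64, u64, Vec<u8>)> {
        let mut out = Vec::new();
        for (&key, versions) in index {
            // BTreeMap iteration is sorted by key.
            for &(lsn, (pos, len)) in versions {
                out.push((key, lsn, file[pos as usize..pos as usize + len].to_vec()));
            }
        }
        out
    }
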
     811              : 
     812              : #[cfg(test)]
     813              : mod tests {
     814              :     use super::*;
     815              : 
     816              :     #[test]
     817            4 :     fn test_index_entry() {
     818              :         const MAX_SUPPORTED_POS: usize = IndexEntry::MAX_SUPPORTED_POS;
     819              :         use IndexEntryNewArgs as Args;
     820              :         use IndexEntryUnpacked as Unpacked;
     821              : 
     822           80 :         let roundtrip = |args, expect: Unpacked| {
     823           80 :             let res = IndexEntry::new(args).expect("this tests expects no errors");
     824           80 :             let IndexEntryUnpacked {
     825           80 :                 will_init,
     826           80 :                 len,
     827           80 :                 pos,
     828           80 :             } = res.unpack();
     829           80 :             assert_eq!(will_init, expect.will_init);
     830           80 :             assert_eq!(len, expect.len);
     831           80 :             assert_eq!(pos, expect.pos);
     832           80 :         };
     833              : 
     834              :         // basic roundtrip
     835           12 :         for pos in [0, MAX_SUPPORTED_POS] {
     836           24 :             for len in [0, MAX_SUPPORTED_BLOB_LEN] {
     837           48 :                 for will_init in [true, false] {
     838           32 :                     let expect = Unpacked {
     839           32 :                         will_init,
     840           32 :                         len: len.into_u64(),
     841           32 :                         pos: pos.into_u64(),
     842           32 :                     };
     843           32 :                     roundtrip(
     844           32 :                         Args {
     845           32 :                             will_init,
     846           32 :                             base_offset: pos.into_u64(),
     847           32 :                             batch_offset: 0,
     848           32 :                             len,
     849           32 :                         },
     850           32 :                         expect,
     851           32 :                     );
     852           32 :                     roundtrip(
     853           32 :                         Args {
     854           32 :                             will_init,
     855           32 :                             base_offset: 0,
     856           32 :                             batch_offset: pos.into_u64(),
     857           32 :                             len,
     858           32 :                         },
     859           32 :                         expect,
     860           32 :                     );
     861           32 :                 }
     862              :             }
     863              :         }
     864              : 
     865              :         // too-large len
     866            4 :         let too_large = Args {
     867            4 :             will_init: false,
     868            4 :             len: MAX_SUPPORTED_BLOB_LEN + 1,
     869            4 :             base_offset: 0,
     870            4 :             batch_offset: 0,
     871            4 :         };
     872            4 :         assert!(IndexEntry::new(too_large).is_err());
     873              : 
     874              :         // too-large pos
     875              :         {
     876            4 :             let too_large = Args {
     877            4 :                 will_init: false,
     878            4 :                 len: 0,
     879            4 :                 base_offset: MAX_SUPPORTED_POS.into_u64() + 1,
     880            4 :                 batch_offset: 0,
     881            4 :             };
     882            4 :             assert!(IndexEntry::new(too_large).is_err());
     883            4 :             let too_large = Args {
     884            4 :                 will_init: false,
     885            4 :                 len: 0,
     886            4 :                 base_offset: 0,
     887            4 :                 batch_offset: MAX_SUPPORTED_POS.into_u64() + 1,
     888            4 :             };
     889            4 :             assert!(IndexEntry::new(too_large).is_err());
     890              :         }
     891              : 
     892              :         // too large (base_offset + batch_offset)
     893              :         {
     894            4 :             let too_large = Args {
     895            4 :                 will_init: false,
     896            4 :                 len: 0,
     897            4 :                 base_offset: MAX_SUPPORTED_POS.into_u64(),
     898            4 :                 batch_offset: 1,
     899            4 :             };
     900            4 :             assert!(IndexEntry::new(too_large).is_err());
     901            4 :             let too_large = Args {
     902            4 :                 will_init: false,
     903            4 :                 len: 0,
     904            4 :                 base_offset: MAX_SUPPORTED_POS.into_u64() - 1,
     905            4 :                 batch_offset: MAX_SUPPORTED_POS.into_u64() - 1,
     906            4 :             };
     907            4 :             assert!(IndexEntry::new(too_large).is_err());
     908              :         }
     909              : 
     910              :         // valid special cases
     911              :         // - area past the max supported pos that is accessible by len
     912           12 :         for len in [1, MAX_SUPPORTED_BLOB_LEN] {
     913            8 :             roundtrip(
     914            8 :                 Args {
     915            8 :                     will_init: false,
     916            8 :                     len,
     917            8 :                     base_offset: MAX_SUPPORTED_POS.into_u64(),
     918            8 :                     batch_offset: 0,
     919            8 :                 },
     920            8 :                 Unpacked {
     921            8 :                     will_init: false,
     922            8 :                     len: len as u64,
     923            8 :                     pos: MAX_SUPPORTED_POS.into_u64(),
     924            8 :                 },
     925            8 :             );
     926            8 :             roundtrip(
     927            8 :                 Args {
     928            8 :                     will_init: false,
     929            8 :                     len,
     930            8 :                     base_offset: 0,
     931            8 :                     batch_offset: MAX_SUPPORTED_POS.into_u64(),
     932            8 :                 },
     933            8 :                 Unpacked {
     934            8 :                     will_init: false,
     935            8 :                     len: len as u64,
     936            8 :                     pos: MAX_SUPPORTED_POS.into_u64(),
     937            8 :                 },
     938            8 :             );
     939            8 :         }
     940            4 :     }
     941              : }
        

Generated by: LCOV version 2.1-beta