LCOV - code coverage report
Current view: top level - pageserver/src/tenant/storage_layer - delta_layer.rs (source / functions) Coverage Total Hit
Test: cdfd4a1429c778e692c18a840bea0cc32d3c8fc8.info Lines: 86.8 % 1642 1426
Test Date: 2025-04-18 12:38:32 Functions: 71.8 % 163 117

            Line data    Source code
       1              : //! A DeltaLayer represents a collection of WAL records or page images in a range of
       2              : //! LSNs, and in a range of Keys. It is stored on a file on disk.
       3              : //!
       4              : //! Usually a delta layer only contains differences, in the form of WAL records
       5              : //! against a base LSN. However, if a relation extended or a whole new relation
       6              : //! is created, there would be no base for the new pages. The entries for them
       7              : //! must be page images or WAL records with the 'will_init' flag set, so that
       8              : //! they can be replayed without referring to an older page version.
       9              : //!
      10              : //! The delta files are stored in `timelines/<timeline_id>` directory.  Currently,
      11              : //! there are no subdirectories, and each delta file is named like this:
      12              : //!
      13              : //! ```text
      14              : //!    <key start>-<key end>__<start LSN>-<end LSN>
      15              : //! ```
      16              : //!
      17              : //! For example:
      18              : //!
      19              : //! ```text
      20              : //!    000000067F000032BE0000400000000020B6-000000067F000032BE0000400000000030B6__000000578C6B29-0000000057A50051
      21              : //! ```
      22              : //!
      23              : //! Every delta file consists of three parts: "summary", "values", and
      24              : //! "index". The summary is a fixed size header at the beginning of the file,
      25              : //! and it contains basic information about the layer, and offsets to the other
      26              : //! parts. The "index" is a B-tree, mapping from Key and LSN to an offset in the
      27              : //! "values" part.  The actual page images and WAL records are stored in the
      28              : //! "values" part.
      29              : //!
      30              : use std::collections::{HashMap, VecDeque};
      31              : use std::fs::File;
      32              : use std::io::SeekFrom;
      33              : use std::ops::Range;
      34              : use std::os::unix::fs::FileExt;
      35              : use std::str::FromStr;
      36              : use std::sync::Arc;
      37              : 
      38              : use anyhow::{Context, Result, bail, ensure};
      39              : use camino::{Utf8Path, Utf8PathBuf};
      40              : use futures::StreamExt;
      41              : use itertools::Itertools;
      42              : use pageserver_api::config::MaxVectoredReadBytes;
      43              : use pageserver_api::key::{DBDIR_KEY, KEY_SIZE, Key};
      44              : use pageserver_api::keyspace::KeySpace;
      45              : use pageserver_api::models::ImageCompressionAlgorithm;
      46              : use pageserver_api::shard::TenantShardId;
      47              : use pageserver_api::value::Value;
      48              : use rand::Rng;
      49              : use rand::distributions::Alphanumeric;
      50              : use serde::{Deserialize, Serialize};
      51              : use tokio::sync::OnceCell;
      52              : use tokio_epoll_uring::IoBuf;
      53              : use tokio_util::sync::CancellationToken;
      54              : use tracing::*;
      55              : use utils::bin_ser::BeSer;
      56              : use utils::id::{TenantId, TimelineId};
      57              : use utils::lsn::Lsn;
      58              : 
      59              : use super::{
      60              :     AsLayerDesc, LayerName, OnDiskValue, OnDiskValueIo, PersistentLayerDesc, ResidentLayer,
      61              :     ValuesReconstructState,
      62              : };
      63              : use crate::config::PageServerConf;
      64              : use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
      65              : use crate::page_cache::{self, FileId, PAGE_SZ};
      66              : use crate::tenant::blob_io::BlobWriter;
      67              : use crate::tenant::block_io::{BlockBuf, BlockCursor, BlockLease, BlockReader, FileBlockReader};
      68              : use crate::tenant::disk_btree::{
      69              :     DiskBtreeBuilder, DiskBtreeIterator, DiskBtreeReader, VisitDirection,
      70              : };
      71              : use crate::tenant::storage_layer::layer::S3_UPLOAD_LIMIT;
      72              : use crate::tenant::timeline::GetVectoredError;
      73              : use crate::tenant::vectored_blob_io::{
      74              :     BlobFlag, BufView, StreamingVectoredReadPlanner, VectoredBlobReader, VectoredRead,
      75              :     VectoredReadPlanner,
      76              : };
      77              : use crate::virtual_file::owned_buffers_io::io_buf_ext::{FullSlice, IoBufExt};
      78              : use crate::virtual_file::{self, IoBufferMut, MaybeFatalIo, VirtualFile};
      79              : use crate::{DELTA_FILE_MAGIC, STORAGE_FORMAT_VERSION, TEMP_FILE_SUFFIX};
      80              : 
      81              : ///
      82              : /// Header stored in the beginning of the file
      83              : ///
      84              : /// After this comes the 'values' part, starting on block 1. After that,
      85              : /// the 'index' starts at the block indicated by 'index_start_blk'
      86              : ///
      87            0 : #[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
      88              : pub struct Summary {
      89              :     /// Magic value to identify this as a neon delta file. Always DELTA_FILE_MAGIC.
      90              :     pub magic: u16,
      91              :     pub format_version: u16,
      92              : 
      93              :     pub tenant_id: TenantId,
      94              :     pub timeline_id: TimelineId,
      95              :     pub key_range: Range<Key>,
      96              :     pub lsn_range: Range<Lsn>,
      97              : 
      98              :     /// Block number where the 'index' part of the file begins.
      99              :     pub index_start_blk: u32,
     100              :     /// Block within the 'index', where the B-tree root page is stored
     101              :     pub index_root_blk: u32,
     102              : }
     103              : 
     104              : impl From<&DeltaLayer> for Summary {
     105            0 :     fn from(layer: &DeltaLayer) -> Self {
     106            0 :         Self::expected(
     107            0 :             layer.desc.tenant_shard_id.tenant_id,
     108            0 :             layer.desc.timeline_id,
     109            0 :             layer.desc.key_range.clone(),
     110            0 :             layer.desc.lsn_range.clone(),
     111            0 :         )
     112            0 :     }
     113              : }
     114              : 
     115              : impl Summary {
     116         6612 :     pub(super) fn expected(
     117         6612 :         tenant_id: TenantId,
     118         6612 :         timeline_id: TimelineId,
     119         6612 :         keys: Range<Key>,
     120         6612 :         lsns: Range<Lsn>,
     121         6612 :     ) -> Self {
     122         6612 :         Self {
     123         6612 :             magic: DELTA_FILE_MAGIC,
     124         6612 :             format_version: STORAGE_FORMAT_VERSION,
     125         6612 : 
     126         6612 :             tenant_id,
     127         6612 :             timeline_id,
     128         6612 :             key_range: keys,
     129         6612 :             lsn_range: lsns,
     130         6612 : 
     131         6612 :             index_start_blk: 0,
     132         6612 :             index_root_blk: 0,
     133         6612 :         }
     134         6612 :     }
     135              : }
     136              : 
     137              : // Flag indicating that this version initialize the page
     138              : const WILL_INIT: u64 = 1;
     139              : 
     140              : /// Struct representing reference to BLOB in layers.
     141              : ///
     142              : /// Reference contains BLOB offset, and for WAL records it also contains
     143              : /// `will_init` flag. The flag helps to determine the range of records
     144              : /// that needs to be applied, without reading/deserializing records themselves.
     145            0 : #[derive(Debug, Serialize, Deserialize, Copy, Clone)]
     146              : pub struct BlobRef(pub u64);
     147              : 
     148              : impl BlobRef {
     149     28828666 :     pub fn will_init(&self) -> bool {
     150     28828666 :         (self.0 & WILL_INIT) != 0
     151     28828666 :     }
     152              : 
     153     46434963 :     pub fn pos(&self) -> u64 {
     154     46434963 :         self.0 >> 1
     155     46434963 :     }
     156              : 
     157     38952492 :     pub fn new(pos: u64, will_init: bool) -> BlobRef {
     158     38952492 :         let mut blob_ref = pos << 1;
     159     38952492 :         if will_init {
     160     38820996 :             blob_ref |= WILL_INIT;
     161     38820996 :         }
     162     38952492 :         BlobRef(blob_ref)
     163     38952492 :     }
     164              : }
     165              : 
     166              : pub const DELTA_KEY_SIZE: usize = KEY_SIZE + 8;
     167              : struct DeltaKey([u8; DELTA_KEY_SIZE]);
     168              : 
     169              : /// This is the key of the B-tree index stored in the delta layer. It consists
     170              : /// of the serialized representation of a Key and LSN.
     171              : impl DeltaKey {
     172     12385188 :     fn from_slice(buf: &[u8]) -> Self {
     173     12385188 :         let mut bytes: [u8; DELTA_KEY_SIZE] = [0u8; DELTA_KEY_SIZE];
     174     12385188 :         bytes.copy_from_slice(buf);
     175     12385188 :         DeltaKey(bytes)
     176     12385188 :     }
     177              : 
     178     40400324 :     fn from_key_lsn(key: &Key, lsn: Lsn) -> Self {
     179     40400324 :         let mut bytes: [u8; DELTA_KEY_SIZE] = [0u8; DELTA_KEY_SIZE];
     180     40400324 :         key.write_to_byte_slice(&mut bytes[0..KEY_SIZE]);
     181     40400324 :         bytes[KEY_SIZE..].copy_from_slice(&u64::to_be_bytes(lsn.0));
     182     40400324 :         DeltaKey(bytes)
     183     40400324 :     }
     184              : 
     185     12385188 :     fn key(&self) -> Key {
     186     12385188 :         Key::from_slice(&self.0)
     187     12385188 :     }
     188              : 
     189     12385188 :     fn lsn(&self) -> Lsn {
     190     12385188 :         Lsn(u64::from_be_bytes(self.0[KEY_SIZE..].try_into().unwrap()))
     191     12385188 :     }
     192              : 
     193     34049415 :     fn extract_lsn_from_buf(buf: &[u8]) -> Lsn {
     194     34049415 :         let mut lsn_buf = [0u8; 8];
     195     34049415 :         lsn_buf.copy_from_slice(&buf[KEY_SIZE..]);
     196     34049415 :         Lsn(u64::from_be_bytes(lsn_buf))
     197     34049415 :     }
     198              : }
     199              : 
     200              : /// This is used only from `pagectl`. Within pageserver, all layers are
     201              : /// [`crate::tenant::storage_layer::Layer`], which can hold a [`DeltaLayerInner`].
     202              : pub struct DeltaLayer {
     203              :     path: Utf8PathBuf,
     204              :     pub desc: PersistentLayerDesc,
     205              :     inner: OnceCell<Arc<DeltaLayerInner>>,
     206              : }
     207              : 
     208              : impl std::fmt::Debug for DeltaLayer {
     209            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     210              :         use super::RangeDisplayDebug;
     211              : 
     212            0 :         f.debug_struct("DeltaLayer")
     213            0 :             .field("key_range", &RangeDisplayDebug(&self.desc.key_range))
     214            0 :             .field("lsn_range", &self.desc.lsn_range)
     215            0 :             .field("file_size", &self.desc.file_size)
     216            0 :             .field("inner", &self.inner)
     217            0 :             .finish()
     218            0 :     }
     219              : }
     220              : 
     221              : /// `DeltaLayerInner` is the in-memory data structure associated with an on-disk delta
     222              : /// file.
     223              : pub struct DeltaLayerInner {
     224              :     // values copied from summary
     225              :     index_start_blk: u32,
     226              :     index_root_blk: u32,
     227              : 
     228              :     file: Arc<VirtualFile>,
     229              :     file_id: FileId,
     230              : 
     231              :     layer_key_range: Range<Key>,
     232              :     layer_lsn_range: Range<Lsn>,
     233              : 
     234              :     max_vectored_read_bytes: Option<MaxVectoredReadBytes>,
     235              : }
     236              : 
     237              : impl DeltaLayerInner {
     238            0 :     pub(crate) fn layer_dbg_info(&self) -> String {
     239            0 :         format!(
     240            0 :             "delta {}..{} {}..{}",
     241            0 :             self.key_range().start,
     242            0 :             self.key_range().end,
     243            0 :             self.lsn_range().start,
     244            0 :             self.lsn_range().end
     245            0 :         )
     246            0 :     }
     247              : }
     248              : 
     249              : impl std::fmt::Debug for DeltaLayerInner {
     250            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     251            0 :         f.debug_struct("DeltaLayerInner")
     252            0 :             .field("index_start_blk", &self.index_start_blk)
     253            0 :             .field("index_root_blk", &self.index_root_blk)
     254            0 :             .finish()
     255            0 :     }
     256              : }
     257              : 
     258              : /// Boilerplate to implement the Layer trait, always use layer_desc for persistent layers.
     259              : impl std::fmt::Display for DeltaLayer {
     260            0 :     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
     261            0 :         write!(f, "{}", self.layer_desc().short_id())
     262            0 :     }
     263              : }
     264              : 
     265              : impl AsLayerDesc for DeltaLayer {
     266            0 :     fn layer_desc(&self) -> &PersistentLayerDesc {
     267            0 :         &self.desc
     268            0 :     }
     269              : }
     270              : 
     271              : impl DeltaLayer {
     272            0 :     pub async fn dump(&self, verbose: bool, ctx: &RequestContext) -> Result<()> {
     273            0 :         self.desc.dump();
     274            0 : 
     275            0 :         if !verbose {
     276            0 :             return Ok(());
     277            0 :         }
     278              : 
     279            0 :         let inner = self.load(ctx).await?;
     280              : 
     281            0 :         inner.dump(ctx).await
     282            0 :     }
     283              : 
     284         8856 :     fn temp_path_for(
     285         8856 :         conf: &PageServerConf,
     286         8856 :         tenant_shard_id: &TenantShardId,
     287         8856 :         timeline_id: &TimelineId,
     288         8856 :         key_start: Key,
     289         8856 :         lsn_range: &Range<Lsn>,
     290         8856 :     ) -> Utf8PathBuf {
     291         8856 :         let rand_string: String = rand::thread_rng()
     292         8856 :             .sample_iter(&Alphanumeric)
     293         8856 :             .take(8)
     294         8856 :             .map(char::from)
     295         8856 :             .collect();
     296         8856 : 
     297         8856 :         conf.timeline_path(tenant_shard_id, timeline_id)
     298         8856 :             .join(format!(
     299         8856 :                 "{}-XXX__{:016X}-{:016X}.{}.{}",
     300         8856 :                 key_start,
     301         8856 :                 u64::from(lsn_range.start),
     302         8856 :                 u64::from(lsn_range.end),
     303         8856 :                 rand_string,
     304         8856 :                 TEMP_FILE_SUFFIX,
     305         8856 :             ))
     306         8856 :     }
     307              : 
     308              :     ///
     309              :     /// Open the underlying file and read the metadata into memory, if it's
     310              :     /// not loaded already.
     311              :     ///
     312            0 :     async fn load(&self, ctx: &RequestContext) -> Result<&Arc<DeltaLayerInner>> {
     313            0 :         // Quick exit if already loaded
     314            0 :         self.inner
     315            0 :             .get_or_try_init(|| self.load_inner(ctx))
     316            0 :             .await
     317            0 :             .with_context(|| format!("Failed to load delta layer {}", self.path()))
     318            0 :     }
     319              : 
     320            0 :     async fn load_inner(&self, ctx: &RequestContext) -> anyhow::Result<Arc<DeltaLayerInner>> {
     321            0 :         let path = self.path();
     322              : 
     323            0 :         let loaded = DeltaLayerInner::load(&path, None, None, ctx).await?;
     324              : 
     325              :         // not production code
     326            0 :         let actual_layer_name = LayerName::from_str(path.file_name().unwrap()).unwrap();
     327            0 :         let expected_layer_name = self.layer_desc().layer_name();
     328            0 : 
     329            0 :         if actual_layer_name != expected_layer_name {
     330            0 :             println!("warning: filename does not match what is expected from in-file summary");
     331            0 :             println!("actual: {:?}", actual_layer_name.to_string());
     332            0 :             println!("expected: {:?}", expected_layer_name.to_string());
     333            0 :         }
     334              : 
     335            0 :         Ok(Arc::new(loaded))
     336            0 :     }
     337              : 
     338              :     /// Create a DeltaLayer struct representing an existing file on disk.
     339              :     ///
     340              :     /// This variant is only used for debugging purposes, by the 'pagectl' binary.
     341            0 :     pub fn new_for_path(path: &Utf8Path, file: File) -> Result<Self> {
     342            0 :         let mut summary_buf = vec![0; PAGE_SZ];
     343            0 :         file.read_exact_at(&mut summary_buf, 0)?;
     344            0 :         let summary = Summary::des_prefix(&summary_buf)?;
     345              : 
     346            0 :         let metadata = file
     347            0 :             .metadata()
     348            0 :             .context("get file metadata to determine size")?;
     349              : 
     350              :         // This function is never used for constructing layers in a running pageserver,
     351              :         // so it does not need an accurate TenantShardId.
     352            0 :         let tenant_shard_id = TenantShardId::unsharded(summary.tenant_id);
     353            0 : 
     354            0 :         Ok(DeltaLayer {
     355            0 :             path: path.to_path_buf(),
     356            0 :             desc: PersistentLayerDesc::new_delta(
     357            0 :                 tenant_shard_id,
     358            0 :                 summary.timeline_id,
     359            0 :                 summary.key_range,
     360            0 :                 summary.lsn_range,
     361            0 :                 metadata.len(),
     362            0 :             ),
     363            0 :             inner: OnceCell::new(),
     364            0 :         })
     365            0 :     }
     366              : 
     367              :     /// Path to the layer file in pageserver workdir.
     368            0 :     fn path(&self) -> Utf8PathBuf {
     369            0 :         self.path.clone()
     370            0 :     }
     371              : }
     372              : 
     373              : /// A builder object for constructing a new delta layer.
     374              : ///
     375              : /// Usage:
     376              : ///
     377              : /// 1. Create the DeltaLayerWriter by calling DeltaLayerWriter::new(...)
     378              : ///
     379              : /// 2. Write the contents by calling `put_value` for every page
     380              : ///    version to store in the layer.
     381              : ///
     382              : /// 3. Call `finish`.
     383              : ///
     384              : struct DeltaLayerWriterInner {
     385              :     pub path: Utf8PathBuf,
     386              :     timeline_id: TimelineId,
     387              :     tenant_shard_id: TenantShardId,
     388              : 
     389              :     key_start: Key,
     390              :     lsn_range: Range<Lsn>,
     391              : 
     392              :     tree: DiskBtreeBuilder<BlockBuf, DELTA_KEY_SIZE>,
     393              : 
     394              :     blob_writer: BlobWriter<true>,
     395              : 
     396              :     // Number of key-lsns in the layer.
     397              :     num_keys: usize,
     398              : }
     399              : 
     400              : impl DeltaLayerWriterInner {
     401              :     ///
     402              :     /// Start building a new delta layer.
     403              :     ///
     404              :     #[allow(clippy::too_many_arguments)]
     405         8856 :     async fn new(
     406         8856 :         conf: &'static PageServerConf,
     407         8856 :         timeline_id: TimelineId,
     408         8856 :         tenant_shard_id: TenantShardId,
     409         8856 :         key_start: Key,
     410         8856 :         lsn_range: Range<Lsn>,
     411         8856 :         gate: &utils::sync::gate::Gate,
     412         8856 :         cancel: CancellationToken,
     413         8856 :         ctx: &RequestContext,
     414         8856 :     ) -> anyhow::Result<Self> {
     415         8856 :         // Create the file initially with a temporary filename. We don't know
     416         8856 :         // the end key yet, so we cannot form the final filename yet. We will
     417         8856 :         // rename it when we're done.
     418         8856 :         //
     419         8856 :         // Note: This overwrites any existing file. There shouldn't be any.
     420         8856 :         // FIXME: throw an error instead?
     421         8856 :         let path =
     422         8856 :             DeltaLayer::temp_path_for(conf, &tenant_shard_id, &timeline_id, key_start, &lsn_range);
     423              : 
     424         8856 :         let mut file = VirtualFile::create(&path, ctx).await?;
     425              :         // make room for the header block
     426         8856 :         file.seek(SeekFrom::Start(PAGE_SZ as u64)).await?;
     427         8856 :         let blob_writer = BlobWriter::new(file, PAGE_SZ as u64, gate, cancel, ctx);
     428         8856 : 
     429         8856 :         // Initialize the b-tree index builder
     430         8856 :         let block_buf = BlockBuf::new();
     431         8856 :         let tree_builder = DiskBtreeBuilder::new(block_buf);
     432         8856 : 
     433         8856 :         Ok(Self {
     434         8856 :             path,
     435         8856 :             timeline_id,
     436         8856 :             tenant_shard_id,
     437         8856 :             key_start,
     438         8856 :             lsn_range,
     439         8856 :             tree: tree_builder,
     440         8856 :             blob_writer,
     441         8856 :             num_keys: 0,
     442         8856 :         })
     443         8856 :     }
     444              : 
     445              :     ///
     446              :     /// Append a key-value pair to the file.
     447              :     ///
     448              :     /// The values must be appended in key, lsn order.
     449              :     ///
     450     12631908 :     async fn put_value(
     451     12631908 :         &mut self,
     452     12631908 :         key: Key,
     453     12631908 :         lsn: Lsn,
     454     12631908 :         val: Value,
     455     12631908 :         ctx: &RequestContext,
     456     12631908 :     ) -> anyhow::Result<()> {
     457     12631908 :         let (_, res) = self
     458     12631908 :             .put_value_bytes(
     459     12631908 :                 key,
     460     12631908 :                 lsn,
     461     12631908 :                 Value::ser(&val)?.slice_len(),
     462     12631908 :                 val.will_init(),
     463     12631908 :                 ctx,
     464     12631908 :             )
     465     12631908 :             .await;
     466     12631908 :         res
     467     12631908 :     }
     468              : 
     469     38951868 :     async fn put_value_bytes<Buf>(
     470     38951868 :         &mut self,
     471     38951868 :         key: Key,
     472     38951868 :         lsn: Lsn,
     473     38951868 :         val: FullSlice<Buf>,
     474     38951868 :         will_init: bool,
     475     38951868 :         ctx: &RequestContext,
     476     38951868 :     ) -> (FullSlice<Buf>, anyhow::Result<()>)
     477     38951868 :     where
     478     38951868 :         Buf: IoBuf + Send,
     479     38951868 :     {
     480     38951868 :         assert!(
     481     38951868 :             self.lsn_range.start <= lsn,
     482            0 :             "lsn_start={}, lsn={}",
     483              :             self.lsn_range.start,
     484              :             lsn
     485              :         );
     486              :         // We don't want to use compression in delta layer creation
     487     38951868 :         let compression = ImageCompressionAlgorithm::Disabled;
     488     38951868 :         let (val, res) = self
     489     38951868 :             .blob_writer
     490     38951868 :             .write_blob_maybe_compressed(val, ctx, compression)
     491     38951868 :             .await;
     492     38951868 :         let off = match res {
     493     38951868 :             Ok((off, _)) => off,
     494            0 :             Err(e) => return (val, Err(anyhow::anyhow!(e))),
     495              :         };
     496              : 
     497     38951868 :         let blob_ref = BlobRef::new(off, will_init);
     498     38951868 : 
     499     38951868 :         let delta_key = DeltaKey::from_key_lsn(&key, lsn);
     500     38951868 :         let res = self.tree.append(&delta_key.0, blob_ref.0);
     501     38951868 : 
     502     38951868 :         self.num_keys += 1;
     503     38951868 : 
     504     38951868 :         (val, res.map_err(|e| anyhow::anyhow!(e)))
     505     38951868 :     }
     506              : 
     507     12143832 :     fn size(&self) -> u64 {
     508     12143832 :         self.blob_writer.size() + self.tree.borrow_writer().size()
     509     12143832 :     }
     510              : 
     511              :     ///
     512              :     /// Finish writing the delta layer.
     513              :     ///
     514         8700 :     async fn finish(
     515         8700 :         self,
     516         8700 :         key_end: Key,
     517         8700 :         ctx: &RequestContext,
     518         8700 :     ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> {
     519         8700 :         let temp_path = self.path.clone();
     520         8700 :         let result = self.finish0(key_end, ctx).await;
     521         8700 :         if let Err(ref e) = result {
     522            0 :             tracing::info!(%temp_path, "cleaning up temporary file after error during writing: {e}");
     523            0 :             if let Err(e) = std::fs::remove_file(&temp_path) {
     524            0 :                 tracing::warn!(error=%e, %temp_path, "error cleaning up temporary layer file after error during writing");
     525            0 :             }
     526         8700 :         }
     527         8700 :         result
     528         8700 :     }
     529              : 
     530         8700 :     async fn finish0(
     531         8700 :         self,
     532         8700 :         key_end: Key,
     533         8700 :         ctx: &RequestContext,
     534         8700 :     ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> {
     535         8700 :         let index_start_blk = self.blob_writer.size().div_ceil(PAGE_SZ as u64) as u32;
     536              : 
     537         8700 :         let mut file = self.blob_writer.into_inner(ctx).await?;
     538              : 
     539              :         // Write out the index
     540         8700 :         let (index_root_blk, block_buf) = self.tree.finish()?;
     541         8700 :         file.seek(SeekFrom::Start(index_start_blk as u64 * PAGE_SZ as u64))
     542         8700 :             .await?;
     543        90971 :         for buf in block_buf.blocks {
     544        82271 :             let (_buf, res) = file.write_all(buf.slice_len(), ctx).await;
     545        82271 :             res?;
     546              :         }
     547         8700 :         assert!(self.lsn_range.start < self.lsn_range.end);
     548              :         // Fill in the summary on blk 0
     549         8700 :         let summary = Summary {
     550         8700 :             magic: DELTA_FILE_MAGIC,
     551         8700 :             format_version: STORAGE_FORMAT_VERSION,
     552         8700 :             tenant_id: self.tenant_shard_id.tenant_id,
     553         8700 :             timeline_id: self.timeline_id,
     554         8700 :             key_range: self.key_start..key_end,
     555         8700 :             lsn_range: self.lsn_range.clone(),
     556         8700 :             index_start_blk,
     557         8700 :             index_root_blk,
     558         8700 :         };
     559         8700 : 
     560         8700 :         let mut buf = Vec::with_capacity(PAGE_SZ);
     561         8700 :         // TODO: could use smallvec here but it's a pain with Slice<T>
     562         8700 :         Summary::ser_into(&summary, &mut buf)?;
     563         8700 :         file.seek(SeekFrom::Start(0)).await?;
     564         8700 :         let (_buf, res) = file.write_all(buf.slice_len(), ctx).await;
     565         8700 :         res?;
     566              : 
     567         8700 :         let metadata = file
     568         8700 :             .metadata()
     569         8700 :             .await
     570         8700 :             .context("get file metadata to determine size")?;
     571              : 
     572              :         // 5GB limit for objects without multipart upload (which we don't want to use)
     573              :         // Make it a little bit below to account for differing GB units
     574              :         // https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html
     575         8700 :         ensure!(
     576         8700 :             metadata.len() <= S3_UPLOAD_LIMIT,
     577            0 :             "Created delta layer file at {} of size {} above limit {S3_UPLOAD_LIMIT}!",
     578            0 :             file.path(),
     579            0 :             metadata.len()
     580              :         );
     581              : 
     582              :         // Note: Because we opened the file in write-only mode, we cannot
     583              :         // reuse the same VirtualFile for reading later. That's why we don't
     584              :         // set inner.file here. The first read will have to re-open it.
     585              : 
     586         8700 :         let desc = PersistentLayerDesc::new_delta(
     587         8700 :             self.tenant_shard_id,
     588         8700 :             self.timeline_id,
     589         8700 :             self.key_start..key_end,
     590         8700 :             self.lsn_range.clone(),
     591         8700 :             metadata.len(),
     592         8700 :         );
     593         8700 : 
     594         8700 :         // fsync the file
     595         8700 :         file.sync_all()
     596         8700 :             .await
     597         8700 :             .maybe_fatal_err("delta_layer sync_all")?;
     598              : 
     599         8700 :         trace!("created delta layer {}", self.path);
     600              : 
     601         8700 :         Ok((desc, self.path))
     602         8700 :     }
     603              : }
     604              : 
     605              : /// A builder object for constructing a new delta layer.
     606              : ///
     607              : /// Usage:
     608              : ///
     609              : /// 1. Create the DeltaLayerWriter by calling DeltaLayerWriter::new(...)
     610              : ///
     611              : /// 2. Write the contents by calling `put_value` for every page
     612              : ///    version to store in the layer.
     613              : ///
     614              : /// 3. Call `finish`.
     615              : ///
     616              : /// # Note
     617              : ///
     618              : /// As described in <https://github.com/neondatabase/neon/issues/2650>, it's
     619              : /// possible for the writer to drop before `finish` is actually called. So this
     620              : /// could lead to odd temporary files in the directory, exhausting file system.
     621              : /// This structure wraps `DeltaLayerWriterInner` and also contains `Drop`
     622              : /// implementation that cleans up the temporary file in failure. It's not
     623              : /// possible to do this directly in `DeltaLayerWriterInner` since `finish` moves
     624              : /// out some fields, making it impossible to implement `Drop`.
     625              : ///
     626              : #[must_use]
     627              : pub struct DeltaLayerWriter {
     628              :     inner: Option<DeltaLayerWriterInner>,
     629              : }
     630              : 
     631              : impl DeltaLayerWriter {
     632              :     ///
     633              :     /// Start building a new delta layer.
     634              :     ///
     635              :     #[allow(clippy::too_many_arguments)]
     636         8856 :     pub async fn new(
     637         8856 :         conf: &'static PageServerConf,
     638         8856 :         timeline_id: TimelineId,
     639         8856 :         tenant_shard_id: TenantShardId,
     640         8856 :         key_start: Key,
     641         8856 :         lsn_range: Range<Lsn>,
     642         8856 :         gate: &utils::sync::gate::Gate,
     643         8856 :         cancel: CancellationToken,
     644         8856 :         ctx: &RequestContext,
     645         8856 :     ) -> anyhow::Result<Self> {
     646         8856 :         Ok(Self {
     647         8856 :             inner: Some(
     648         8856 :                 DeltaLayerWriterInner::new(
     649         8856 :                     conf,
     650         8856 :                     timeline_id,
     651         8856 :                     tenant_shard_id,
     652         8856 :                     key_start,
     653         8856 :                     lsn_range,
     654         8856 :                     gate,
     655         8856 :                     cancel,
     656         8856 :                     ctx,
     657         8856 :                 )
     658         8856 :                 .await?,
     659              :             ),
     660              :         })
     661         8856 :     }
     662              : 
     663            0 :     pub fn is_empty(&self) -> bool {
     664            0 :         self.inner.as_ref().unwrap().num_keys == 0
     665            0 :     }
     666              : 
     667              :     ///
     668              :     /// Append a key-value pair to the file.
     669              :     ///
     670              :     /// The values must be appended in key, lsn order.
     671              :     ///
     672     12631908 :     pub async fn put_value(
     673     12631908 :         &mut self,
     674     12631908 :         key: Key,
     675     12631908 :         lsn: Lsn,
     676     12631908 :         val: Value,
     677     12631908 :         ctx: &RequestContext,
     678     12631908 :     ) -> anyhow::Result<()> {
     679     12631908 :         self.inner
     680     12631908 :             .as_mut()
     681     12631908 :             .unwrap()
     682     12631908 :             .put_value(key, lsn, val, ctx)
     683     12631908 :             .await
     684     12631908 :     }
     685              : 
     686     26319960 :     pub async fn put_value_bytes<Buf>(
     687     26319960 :         &mut self,
     688     26319960 :         key: Key,
     689     26319960 :         lsn: Lsn,
     690     26319960 :         val: FullSlice<Buf>,
     691     26319960 :         will_init: bool,
     692     26319960 :         ctx: &RequestContext,
     693     26319960 :     ) -> (FullSlice<Buf>, anyhow::Result<()>)
     694     26319960 :     where
     695     26319960 :         Buf: IoBuf + Send,
     696     26319960 :     {
     697     26319960 :         self.inner
     698     26319960 :             .as_mut()
     699     26319960 :             .unwrap()
     700     26319960 :             .put_value_bytes(key, lsn, val, will_init, ctx)
     701     26319960 :             .await
     702     26319960 :     }
     703              : 
     704     12143832 :     pub fn size(&self) -> u64 {
     705     12143832 :         self.inner.as_ref().unwrap().size()
     706     12143832 :     }
     707              : 
     708              :     ///
     709              :     /// Finish writing the delta layer.
     710              :     ///
     711         8700 :     pub(crate) async fn finish(
     712         8700 :         mut self,
     713         8700 :         key_end: Key,
     714         8700 :         ctx: &RequestContext,
     715         8700 :     ) -> anyhow::Result<(PersistentLayerDesc, Utf8PathBuf)> {
     716         8700 :         self.inner.take().unwrap().finish(key_end, ctx).await
     717         8700 :     }
     718              : 
     719        73524 :     pub(crate) fn num_keys(&self) -> usize {
     720        73524 :         self.inner.as_ref().unwrap().num_keys
     721        73524 :     }
     722              : 
     723        90840 :     pub(crate) fn estimated_size(&self) -> u64 {
     724        90840 :         let inner = self.inner.as_ref().unwrap();
     725        90840 :         inner.blob_writer.size() + inner.tree.borrow_writer().size() + PAGE_SZ as u64
     726        90840 :     }
     727              : }
     728              : 
     729              : impl Drop for DeltaLayerWriter {
     730         8856 :     fn drop(&mut self) {
     731         8856 :         if let Some(inner) = self.inner.take() {
     732          156 :             // We want to remove the virtual file here, so it's fine to not
     733          156 :             // having completely flushed unwritten data.
     734          156 :             let vfile = inner.blob_writer.into_inner_no_flush();
     735          156 :             vfile.remove();
     736         8700 :         }
     737         8856 :     }
     738              : }
     739              : 
     740              : #[derive(thiserror::Error, Debug)]
     741              : pub enum RewriteSummaryError {
     742              :     #[error("magic mismatch")]
     743              :     MagicMismatch,
     744              :     #[error(transparent)]
     745              :     Other(#[from] anyhow::Error),
     746              : }
     747              : 
     748              : impl From<std::io::Error> for RewriteSummaryError {
     749            0 :     fn from(e: std::io::Error) -> Self {
     750            0 :         Self::Other(anyhow::anyhow!(e))
     751            0 :     }
     752              : }
     753              : 
     754              : impl DeltaLayer {
     755            0 :     pub async fn rewrite_summary<F>(
     756            0 :         path: &Utf8Path,
     757            0 :         rewrite: F,
     758            0 :         ctx: &RequestContext,
     759            0 :     ) -> Result<(), RewriteSummaryError>
     760            0 :     where
     761            0 :         F: Fn(Summary) -> Summary,
     762            0 :     {
     763            0 :         let mut file = VirtualFile::open_with_options(
     764            0 :             path,
     765            0 :             virtual_file::OpenOptions::new().read(true).write(true),
     766            0 :             ctx,
     767            0 :         )
     768            0 :         .await
     769            0 :         .with_context(|| format!("Failed to open file '{}'", path))?;
     770            0 :         let file_id = page_cache::next_file_id();
     771            0 :         let block_reader = FileBlockReader::new(&file, file_id);
     772            0 :         let summary_blk = block_reader.read_blk(0, ctx).await?;
     773            0 :         let actual_summary = Summary::des_prefix(summary_blk.as_ref()).context("deserialize")?;
     774            0 :         if actual_summary.magic != DELTA_FILE_MAGIC {
     775            0 :             return Err(RewriteSummaryError::MagicMismatch);
     776            0 :         }
     777            0 : 
     778            0 :         let new_summary = rewrite(actual_summary);
     779            0 : 
     780            0 :         let mut buf = Vec::with_capacity(PAGE_SZ);
     781            0 :         // TODO: could use smallvec here, but it's a pain with Slice<T>
     782            0 :         Summary::ser_into(&new_summary, &mut buf).context("serialize")?;
     783            0 :         file.seek(SeekFrom::Start(0)).await?;
     784            0 :         let (_buf, res) = file.write_all(buf.slice_len(), ctx).await;
     785            0 :         res?;
     786            0 :         Ok(())
     787            0 :     }
     788              : }
     789              : 
     790              : impl DeltaLayerInner {
     791         6240 :     pub(crate) fn key_range(&self) -> &Range<Key> {
     792         6240 :         &self.layer_key_range
     793         6240 :     }
     794              : 
     795         6240 :     pub(crate) fn lsn_range(&self) -> &Range<Lsn> {
     796         6240 :         &self.layer_lsn_range
     797         6240 :     }
     798              : 
     799         6612 :     pub(super) async fn load(
     800         6612 :         path: &Utf8Path,
     801         6612 :         summary: Option<Summary>,
     802         6612 :         max_vectored_read_bytes: Option<MaxVectoredReadBytes>,
     803         6612 :         ctx: &RequestContext,
     804         6612 :     ) -> anyhow::Result<Self> {
     805         6612 :         let file = Arc::new(
     806         6612 :             VirtualFile::open_v2(path, ctx)
     807         6612 :                 .await
     808         6612 :                 .context("open layer file")?,
     809              :         );
     810              : 
     811         6612 :         let file_id = page_cache::next_file_id();
     812         6612 : 
     813         6612 :         let block_reader = FileBlockReader::new(&file, file_id);
     814              : 
     815         6612 :         let summary_blk = block_reader
     816         6612 :             .read_blk(0, ctx)
     817         6612 :             .await
     818         6612 :             .context("read first block")?;
     819              : 
     820              :         // TODO: this should be an assertion instead; see ImageLayerInner::load
     821         6612 :         let actual_summary =
     822         6612 :             Summary::des_prefix(summary_blk.as_ref()).context("deserialize first block")?;
     823              : 
     824         6612 :         if let Some(mut expected_summary) = summary {
     825              :             // production code path
     826         6612 :             expected_summary.index_start_blk = actual_summary.index_start_blk;
     827         6612 :             expected_summary.index_root_blk = actual_summary.index_root_blk;
     828         6612 :             // mask out the timeline_id, but still require the layers to be from the same tenant
     829         6612 :             expected_summary.timeline_id = actual_summary.timeline_id;
     830         6612 : 
     831         6612 :             if actual_summary != expected_summary {
     832            0 :                 bail!(
     833            0 :                     "in-file summary does not match expected summary. actual = {:?} expected = {:?}",
     834            0 :                     actual_summary,
     835            0 :                     expected_summary
     836            0 :                 );
     837         6612 :             }
     838            0 :         }
     839              : 
     840         6612 :         Ok(DeltaLayerInner {
     841         6612 :             file,
     842         6612 :             file_id,
     843         6612 :             index_start_blk: actual_summary.index_start_blk,
     844         6612 :             index_root_blk: actual_summary.index_root_blk,
     845         6612 :             max_vectored_read_bytes,
     846         6612 :             layer_key_range: actual_summary.key_range,
     847         6612 :             layer_lsn_range: actual_summary.lsn_range,
     848         6612 :         })
     849         6612 :     }
     850              : 
     851              :     // Look up the keys in the provided keyspace and update
     852              :     // the reconstruct state with whatever is found.
     853              :     //
     854              :     // Currently, the index is visited for each range, but this
     855              :     // can be further optimised to visit the index only once.
     856      1416320 :     pub(super) async fn get_values_reconstruct_data(
     857      1416320 :         &self,
     858      1416320 :         this: ResidentLayer,
     859      1416320 :         keyspace: KeySpace,
     860      1416320 :         lsn_range: Range<Lsn>,
     861      1416320 :         reconstruct_state: &mut ValuesReconstructState,
     862      1416320 :         ctx: &RequestContext,
     863      1416320 :     ) -> Result<(), GetVectoredError> {
     864      1416320 :         let block_reader = FileBlockReader::new(&self.file, self.file_id);
     865      1416320 :         let index_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
     866      1416320 :             self.index_start_blk,
     867      1416320 :             self.index_root_blk,
     868      1416320 :             block_reader,
     869      1416320 :         );
     870      1416320 : 
     871      1416320 :         let planner = VectoredReadPlanner::new(
     872      1416320 :             self.max_vectored_read_bytes
     873      1416320 :                 .expect("Layer is loaded with max vectored bytes config")
     874      1416320 :                 .0
     875      1416320 :                 .into(),
     876      1416320 :         );
     877      1416320 : 
     878      1416320 :         let data_end_offset = self.index_start_offset();
     879              : 
     880      1416320 :         let reads = Self::plan_reads(
     881      1416320 :             &keyspace,
     882      1416320 :             lsn_range.clone(),
     883      1416320 :             data_end_offset,
     884      1416320 :             index_reader,
     885      1416320 :             planner,
     886      1416320 :             ctx,
     887      1416320 :         )
     888      1416320 :         .await
     889      1416320 :         .map_err(GetVectoredError::Other)?;
     890              : 
     891      1416320 :         self.do_reads_and_update_state(this, reads, reconstruct_state, ctx)
     892      1416320 :             .await;
     893              : 
     894      1416320 :         Ok(())
     895      1416320 :     }
     896              : 
     897      1417532 :     async fn plan_reads<Reader>(
     898      1417532 :         keyspace: &KeySpace,
     899      1417532 :         lsn_range: Range<Lsn>,
     900      1417532 :         data_end_offset: u64,
     901      1417532 :         index_reader: DiskBtreeReader<Reader, DELTA_KEY_SIZE>,
     902      1417532 :         mut planner: VectoredReadPlanner,
     903      1417532 :         ctx: &RequestContext,
     904      1417532 :     ) -> anyhow::Result<Vec<VectoredRead>>
     905      1417532 :     where
     906      1417532 :         Reader: BlockReader + Clone,
     907      1417532 :     {
     908      1417532 :         let ctx = RequestContextBuilder::from(ctx)
     909      1417532 :             .page_content_kind(PageContentKind::DeltaLayerBtreeNode)
     910      1417532 :             .attached_child();
     911              : 
     912      1448252 :         for range in keyspace.ranges.iter() {
     913      1448252 :             let mut range_end_handled = false;
     914      1448252 : 
     915      1448252 :             let start_key = DeltaKey::from_key_lsn(&range.start, lsn_range.start);
     916      1448252 :             let index_stream = index_reader.clone().into_stream(&start_key.0, &ctx);
     917      1448252 :             let mut index_stream = std::pin::pin!(index_stream);
     918              : 
     919     21506875 :             while let Some(index_entry) = index_stream.next().await {
     920     21454923 :                 let (raw_key, value) = index_entry?;
     921     21454923 :                 let key = Key::from_slice(&raw_key[..KEY_SIZE]);
     922     21454923 :                 let lsn = DeltaKey::extract_lsn_from_buf(&raw_key);
     923     21454923 :                 let blob_ref = BlobRef(value);
     924     21454923 : 
     925     21454923 :                 // Lsns are not monotonically increasing across keys, so we don't assert on them.
     926     21454923 :                 assert!(key >= range.start);
     927              : 
     928     21454923 :                 let outside_lsn_range = !lsn_range.contains(&lsn);
     929              : 
     930     21454923 :                 let flag = {
     931     21454923 :                     if outside_lsn_range {
     932      5220749 :                         BlobFlag::Ignore
     933     16234174 :                     } else if blob_ref.will_init() {
     934      3290458 :                         BlobFlag::ReplaceAll
     935              :                     } else {
     936              :                         // Usual path: add blob to the read
     937     12943716 :                         BlobFlag::None
     938              :                     }
     939              :                 };
     940              : 
     941     21454923 :                 if key >= range.end || (key.next() == range.end && lsn >= lsn_range.end) {
     942      1396300 :                     planner.handle_range_end(blob_ref.pos());
     943      1396300 :                     range_end_handled = true;
     944      1396300 :                     break;
     945     20058623 :                 } else {
     946     20058623 :                     planner.handle(key, lsn, blob_ref.pos(), flag);
     947     20058623 :                 }
     948              :             }
     949              : 
     950      1448252 :             if !range_end_handled {
     951        51952 :                 tracing::debug!("Handling range end fallback at {}", data_end_offset);
     952        51952 :                 planner.handle_range_end(data_end_offset);
     953      1396300 :             }
     954              :         }
     955              : 
     956      1417532 :         Ok(planner.finish())
     957      1417532 :     }
     958              : 
     959      1417520 :     fn get_min_read_buffer_size(
     960      1417520 :         planned_reads: &[VectoredRead],
     961      1417520 :         read_size_soft_max: usize,
     962      1417520 :     ) -> usize {
     963      1417520 :         let Some(largest_read) = planned_reads.iter().max_by_key(|read| read.size()) else {
     964       519091 :             return read_size_soft_max;
     965              :         };
     966              : 
     967       898429 :         let largest_read_size = largest_read.size();
     968       898429 :         if largest_read_size > read_size_soft_max {
     969              :             // If the read is oversized, it should only contain one key.
     970         1200 :             let offenders = largest_read
     971         1200 :                 .blobs_at
     972         1200 :                 .as_slice()
     973         1200 :                 .iter()
     974         1200 :                 .filter_map(|(_, blob_meta)| {
     975         1200 :                     if blob_meta.key.is_rel_dir_key()
     976         1200 :                         || blob_meta.key == DBDIR_KEY
     977         1200 :                         || blob_meta.key.is_aux_file_key()
     978              :                     {
     979              :                         // The size of values for these keys is unbounded and can
     980              :                         // grow very large in pathological cases.
     981            0 :                         None
     982              :                     } else {
     983         1200 :                         Some(format!("{}@{}", blob_meta.key, blob_meta.lsn))
     984              :                     }
     985         1200 :                 })
     986         1200 :                 .join(", ");
     987         1200 : 
     988         1200 :             if !offenders.is_empty() {
     989         1200 :                 tracing::warn!(
     990            0 :                     "Oversized vectored read ({} > {}) for keys {}",
     991              :                     largest_read_size,
     992              :                     read_size_soft_max,
     993              :                     offenders
     994              :                 );
     995            0 :             }
     996       897229 :         }
     997              : 
     998       898429 :         largest_read_size
     999      1417520 :     }
    1000              : 
    1001      1416320 :     async fn do_reads_and_update_state(
    1002      1416320 :         &self,
    1003      1416320 :         this: ResidentLayer,
    1004      1416320 :         reads: Vec<VectoredRead>,
    1005      1416320 :         reconstruct_state: &mut ValuesReconstructState,
    1006      1416320 :         ctx: &RequestContext,
    1007      1416320 :     ) {
    1008      1416320 :         let max_vectored_read_bytes = self
    1009      1416320 :             .max_vectored_read_bytes
    1010      1416320 :             .expect("Layer is loaded with max vectored bytes config")
    1011      1416320 :             .0
    1012      1416320 :             .into();
    1013      1416320 :         let buf_size = Self::get_min_read_buffer_size(&reads, max_vectored_read_bytes);
    1014              : 
    1015              :         // Note that reads are processed in reverse order (from highest key+lsn).
    1016              :         // This is the order that `ReconstructState` requires such that it can
    1017              :         // track when a key is done.
    1018      1416320 :         for read in reads.into_iter().rev() {
    1019      1022790 :             let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default();
    1020      9444102 :             for (_, blob_meta) in read.blobs_at.as_slice().iter().rev() {
    1021      9444102 :                 let io = reconstruct_state.update_key(
    1022      9444102 :                     &blob_meta.key,
    1023      9444102 :                     blob_meta.lsn,
    1024      9444102 :                     blob_meta.will_init,
    1025      9444102 :                 );
    1026      9444102 :                 ios.insert((blob_meta.key, blob_meta.lsn), io);
    1027      9444102 :             }
    1028              : 
    1029      1022790 :             let read_extend_residency = this.clone();
    1030      1022790 :             let read_from = self.file.clone();
    1031      1022790 :             let read_ctx = ctx.attached_child();
    1032      1022790 :             reconstruct_state
    1033      1022790 :                 .spawn_io(async move {
    1034      1022790 :                     let vectored_blob_reader = VectoredBlobReader::new(&read_from);
    1035      1022790 :                     let buf = IoBufferMut::with_capacity(buf_size);
    1036              : 
    1037      1022790 :                     let res = vectored_blob_reader.read_blobs(&read, buf, &read_ctx).await;
    1038      1022790 :                     match res {
    1039      1022790 :                         Ok(blobs_buf) => {
    1040      1022790 :                             let view = BufView::new_slice(&blobs_buf.buf);
    1041      9444102 :                             for meta in blobs_buf.blobs.iter().rev() {
    1042      9444102 :                                 let io = ios.remove(&(meta.meta.key, meta.meta.lsn)).unwrap();
    1043              : 
    1044      9444102 :                                 let blob_read = meta.read(&view).await;
    1045      9444102 :                                 let blob_read = match blob_read {
    1046      9444102 :                                     Ok(buf) => buf,
    1047            0 :                                     Err(e) => {
    1048            0 :                                         io.complete(Err(e));
    1049            0 :                                         continue;
    1050              :                                     }
    1051              :                                 };
    1052              : 
    1053      9444102 :                                 io.complete(Ok(OnDiskValue::WalRecordOrImage(
    1054      9444102 :                                     blob_read.into_bytes(),
    1055      9444102 :                                 )));
    1056              :                             }
    1057              : 
    1058      1022790 :                             assert!(ios.is_empty());
    1059              :                         }
    1060            0 :                         Err(err) => {
    1061            0 :                             for (_, sender) in ios {
    1062            0 :                                 sender.complete(Err(std::io::Error::new(
    1063            0 :                                     err.kind(),
    1064            0 :                                     "vec read failed",
    1065            0 :                                 )));
    1066            0 :                             }
    1067              :                         }
    1068              :                     }
    1069              : 
    1070              :                     // keep layer resident until this IO is done; this spawned IO future generally outlives the
    1071              :                     // call to `self` / the `Arc<DownloadedLayer>` / the `ResidentLayer` that guarantees residency
    1072      1022790 :                     drop(read_extend_residency);
    1073      1022790 :                 })
    1074      1022790 :                 .await;
    1075              :         }
    1076      1416320 :     }
    1077              : 
    1078         2436 :     pub(crate) async fn index_entries<'a>(
    1079         2436 :         &'a self,
    1080         2436 :         ctx: &RequestContext,
    1081         2436 :     ) -> Result<Vec<DeltaEntry<'a>>> {
    1082         2436 :         let block_reader = FileBlockReader::new(&self.file, self.file_id);
    1083         2436 :         let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
    1084         2436 :             self.index_start_blk,
    1085         2436 :             self.index_root_blk,
    1086         2436 :             block_reader,
    1087         2436 :         );
    1088         2436 : 
    1089         2436 :         let mut all_keys: Vec<DeltaEntry<'_>> = Vec::new();
    1090         2436 : 
    1091         2436 :         tree_reader
    1092         2436 :             .visit(
    1093         2436 :                 &[0u8; DELTA_KEY_SIZE],
    1094         2436 :                 VisitDirection::Forwards,
    1095     12384276 :                 |key, value| {
    1096     12384276 :                     let delta_key = DeltaKey::from_slice(key);
    1097     12384276 :                     let val_ref = ValueRef {
    1098     12384276 :                         blob_ref: BlobRef(value),
    1099     12384276 :                         layer: self,
    1100     12384276 :                     };
    1101     12384276 :                     let pos = BlobRef(value).pos();
    1102     12384276 :                     if let Some(last) = all_keys.last_mut() {
    1103     12381840 :                         // subtract offset of the current and last entries to get the size
    1104     12381840 :                         // of the value associated with this (key, lsn) tuple
    1105     12381840 :                         let first_pos = last.size;
    1106     12381840 :                         last.size = pos - first_pos;
    1107     12381840 :                     }
    1108     12384276 :                     let entry = DeltaEntry {
    1109     12384276 :                         key: delta_key.key(),
    1110     12384276 :                         lsn: delta_key.lsn(),
    1111     12384276 :                         size: pos,
    1112     12384276 :                         val: val_ref,
    1113     12384276 :                     };
    1114     12384276 :                     all_keys.push(entry);
    1115     12384276 :                     true
    1116     12384276 :                 },
    1117         2436 :                 &RequestContextBuilder::from(ctx)
    1118         2436 :                     .page_content_kind(PageContentKind::DeltaLayerBtreeNode)
    1119         2436 :                     .attached_child(),
    1120         2436 :             )
    1121         2436 :             .await?;
    1122         2436 :         if let Some(last) = all_keys.last_mut() {
    1123         2436 :             // Last key occupies all space till end of value storage,
    1124         2436 :             // which corresponds to beginning of the index
    1125         2436 :             last.size = self.index_start_offset() - last.size;
    1126         2436 :         }
    1127         2436 :         Ok(all_keys)
    1128         2436 :     }
    1129              : 
    1130              :     /// Using the given writer, write out a version which has the earlier Lsns than `until`.
    1131              :     ///
    1132              :     /// Return the amount of key value records pushed to the writer.
    1133           60 :     pub(super) async fn copy_prefix(
    1134           60 :         &self,
    1135           60 :         writer: &mut DeltaLayerWriter,
    1136           60 :         until: Lsn,
    1137           60 :         ctx: &RequestContext,
    1138           60 :     ) -> anyhow::Result<usize> {
    1139              :         use futures::stream::TryStreamExt;
    1140              : 
    1141              :         use crate::tenant::vectored_blob_io::{
    1142              :             BlobMeta, ChunkedVectoredReadBuilder, VectoredReadExtended,
    1143              :         };
    1144              : 
    1145              :         #[derive(Debug)]
    1146              :         enum Item {
    1147              :             Actual(Key, Lsn, BlobRef),
    1148              :             Sentinel,
    1149              :         }
    1150              : 
    1151              :         impl From<Item> for Option<(Key, Lsn, BlobRef)> {
    1152          420 :             fn from(value: Item) -> Self {
    1153          420 :                 match value {
    1154          360 :                     Item::Actual(key, lsn, blob) => Some((key, lsn, blob)),
    1155           60 :                     Item::Sentinel => None,
    1156              :                 }
    1157          420 :             }
    1158              :         }
    1159              : 
    1160              :         impl Item {
    1161          420 :             fn offset(&self) -> Option<BlobRef> {
    1162          420 :                 match self {
    1163          360 :                     Item::Actual(_, _, blob) => Some(*blob),
    1164           60 :                     Item::Sentinel => None,
    1165              :                 }
    1166          420 :             }
    1167              : 
    1168          420 :             fn is_last(&self) -> bool {
    1169          420 :                 matches!(self, Item::Sentinel)
    1170          420 :             }
    1171              :         }
    1172              : 
    1173           60 :         let block_reader = FileBlockReader::new(&self.file, self.file_id);
    1174           60 :         let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
    1175           60 :             self.index_start_blk,
    1176           60 :             self.index_root_blk,
    1177           60 :             block_reader,
    1178           60 :         );
    1179           60 : 
    1180           60 :         let stream = self.stream_index_forwards(tree_reader, &[0u8; DELTA_KEY_SIZE], ctx);
    1181          360 :         let stream = stream.map_ok(|(key, lsn, pos)| Item::Actual(key, lsn, pos));
    1182           60 :         // put in a sentinel value for getting the end offset for last item, and not having to
    1183           60 :         // repeat the whole read part
    1184           60 :         let stream = stream.chain(futures::stream::once(futures::future::ready(Ok(
    1185           60 :             Item::Sentinel,
    1186           60 :         ))));
    1187           60 :         let mut stream = std::pin::pin!(stream);
    1188           60 : 
    1189           60 :         let mut prev: Option<(Key, Lsn, BlobRef)> = None;
    1190           60 : 
    1191           60 :         let mut read_builder: Option<ChunkedVectoredReadBuilder> = None;
    1192           60 : 
    1193           60 :         let max_read_size = self
    1194           60 :             .max_vectored_read_bytes
    1195           60 :             .map(|x| x.0.get())
    1196           60 :             .unwrap_or(8192);
    1197           60 : 
    1198           60 :         let mut buffer = Some(IoBufferMut::with_capacity(max_read_size));
    1199           60 : 
    1200           60 :         // FIXME: buffering of DeltaLayerWriter
    1201           60 :         let mut per_blob_copy = Vec::new();
    1202           60 : 
    1203           60 :         let mut records = 0;
    1204              : 
    1205          480 :         while let Some(item) = stream.try_next().await? {
    1206          420 :             tracing::debug!(?item, "popped");
    1207          420 :             let offset = item
    1208          420 :                 .offset()
    1209          420 :                 .unwrap_or(BlobRef::new(self.index_start_offset(), false));
    1210              : 
    1211          420 :             let actionable = if let Some((key, lsn, start_offset)) = prev.take() {
    1212          360 :                 let end_offset = offset;
    1213          360 : 
    1214          360 :                 Some((
    1215          360 :                     BlobMeta {
    1216          360 :                         key,
    1217          360 :                         lsn,
    1218          360 :                         will_init: false,
    1219          360 :                     },
    1220          360 :                     start_offset..end_offset,
    1221          360 :                 ))
    1222              :             } else {
    1223           60 :                 None
    1224              :             };
    1225              : 
    1226          420 :             let is_last = item.is_last();
    1227          420 : 
    1228          420 :             prev = Option::from(item);
    1229          420 : 
    1230          420 :             let actionable = actionable.filter(|x| x.0.lsn < until);
    1231              : 
    1232          420 :             let builder = if let Some((meta, offsets)) = actionable {
    1233              :                 // extend or create a new builder
    1234          192 :                 if read_builder
    1235          192 :                     .as_mut()
    1236          192 :                     .map(|x| x.extend(offsets.start.pos(), offsets.end.pos(), meta))
    1237          192 :                     .unwrap_or(VectoredReadExtended::No)
    1238          192 :                     == VectoredReadExtended::Yes
    1239              :                 {
    1240           96 :                     None
    1241              :                 } else {
    1242           96 :                     read_builder.replace(ChunkedVectoredReadBuilder::new(
    1243           96 :                         offsets.start.pos(),
    1244           96 :                         offsets.end.pos(),
    1245           96 :                         meta,
    1246           96 :                         max_read_size,
    1247           96 :                     ))
    1248              :                 }
    1249              :             } else {
    1250              :                 // nothing to do, except perhaps flush any existing for the last element
    1251          228 :                 None
    1252              :             };
    1253              : 
    1254              :             // flush the possible older builder and also the new one if the item was the last one
    1255          420 :             let builders = builder.into_iter();
    1256          420 :             let builders = if is_last {
    1257           60 :                 builders.chain(read_builder.take())
    1258              :             } else {
    1259          360 :                 builders.chain(None)
    1260              :             };
    1261              : 
    1262          516 :             for builder in builders {
    1263           96 :                 let read = builder.build();
    1264           96 : 
    1265           96 :                 let reader = VectoredBlobReader::new(&self.file);
    1266           96 : 
    1267           96 :                 let mut buf = buffer.take().unwrap();
    1268           96 : 
    1269           96 :                 buf.clear();
    1270           96 :                 buf.reserve(read.size());
    1271           96 :                 let res = reader.read_blobs(&read, buf, ctx).await?;
    1272              : 
    1273           96 :                 let view = BufView::new_slice(&res.buf);
    1274              : 
    1275          288 :                 for blob in res.blobs {
    1276          192 :                     let key = blob.meta.key;
    1277          192 :                     let lsn = blob.meta.lsn;
    1278              : 
    1279          192 :                     let data = blob.read(&view).await?;
    1280              : 
    1281              :                     #[cfg(debug_assertions)]
    1282          192 :                     Value::des(&data)
    1283          192 :                         .with_context(|| {
    1284            0 :                             format!(
    1285            0 :                                 "blob failed to deserialize for {}: {:?}",
    1286            0 :                                 blob,
    1287            0 :                                 utils::Hex(&data)
    1288            0 :                             )
    1289          192 :                         })
    1290          192 :                         .unwrap();
    1291          192 : 
    1292          192 :                     // is it an image or will_init walrecord?
    1293          192 :                     // FIXME: this could be handled by threading the BlobRef to the
    1294          192 :                     // VectoredReadBuilder
    1295          192 :                     let will_init = pageserver_api::value::ValueBytes::will_init(&data)
    1296          192 :                         .inspect_err(|_e| {
    1297            0 :                             #[cfg(feature = "testing")]
    1298            0 :                             tracing::error!(data=?utils::Hex(&data), err=?_e, %key, %lsn, "failed to parse will_init out of serialized value");
    1299          192 :                         })
    1300          192 :                         .unwrap_or(false);
    1301          192 : 
    1302          192 :                     per_blob_copy.clear();
    1303          192 :                     per_blob_copy.extend_from_slice(&data);
    1304              : 
    1305          192 :                     let (tmp, res) = writer
    1306          192 :                         .put_value_bytes(
    1307          192 :                             key,
    1308          192 :                             lsn,
    1309          192 :                             std::mem::take(&mut per_blob_copy).slice_len(),
    1310          192 :                             will_init,
    1311          192 :                             ctx,
    1312          192 :                         )
    1313          192 :                         .await;
    1314          192 :                     per_blob_copy = tmp.into_raw_slice().into_inner();
    1315          192 : 
    1316          192 :                     res?;
    1317              : 
    1318          192 :                     records += 1;
    1319              :                 }
    1320              : 
    1321           96 :                 buffer = Some(res.buf);
    1322              :             }
    1323              :         }
    1324              : 
    1325           60 :         assert!(
    1326           60 :             read_builder.is_none(),
    1327            0 :             "with the sentinel above loop should had handled all"
    1328              :         );
    1329              : 
    1330           60 :         Ok(records)
    1331           60 :     }
    1332              : 
    1333           24 :     pub(super) async fn dump(&self, ctx: &RequestContext) -> anyhow::Result<()> {
    1334           24 :         println!(
    1335           24 :             "index_start_blk: {}, root {}",
    1336           24 :             self.index_start_blk, self.index_root_blk
    1337           24 :         );
    1338           24 : 
    1339           24 :         let block_reader = FileBlockReader::new(&self.file, self.file_id);
    1340           24 :         let tree_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
    1341           24 :             self.index_start_blk,
    1342           24 :             self.index_root_blk,
    1343           24 :             block_reader,
    1344           24 :         );
    1345           24 : 
    1346           24 :         tree_reader.dump(ctx).await?;
    1347              : 
    1348           24 :         let keys = self.index_entries(ctx).await?;
    1349              : 
    1350           48 :         async fn dump_blob(val: &ValueRef<'_>, ctx: &RequestContext) -> anyhow::Result<String> {
    1351           48 :             let buf = val.load_raw(ctx).await?;
    1352           48 :             let val = Value::des(&buf)?;
    1353           48 :             let desc = match val {
    1354           48 :                 Value::Image(img) => {
    1355           48 :                     format!(" img {} bytes", img.len())
    1356              :                 }
    1357            0 :                 Value::WalRecord(rec) => {
    1358            0 :                     let wal_desc = pageserver_api::record::describe_wal_record(&rec)?;
    1359            0 :                     format!(
    1360            0 :                         " rec {} bytes will_init: {} {}",
    1361            0 :                         buf.len(),
    1362            0 :                         rec.will_init(),
    1363            0 :                         wal_desc
    1364            0 :                     )
    1365              :                 }
    1366              :             };
    1367           48 :             Ok(desc)
    1368           48 :         }
    1369              : 
    1370           72 :         for entry in keys {
    1371           48 :             let DeltaEntry { key, lsn, val, .. } = entry;
    1372           48 :             let desc = match dump_blob(&val, ctx).await {
    1373           48 :                 Ok(desc) => desc,
    1374            0 :                 Err(err) => {
    1375            0 :                     format!("ERROR: {err}")
    1376              :                 }
    1377              :             };
    1378           48 :             println!("  key {key} at {lsn}: {desc}");
    1379              : 
    1380              :             // Print more details about CHECKPOINT records. Would be nice to print details
    1381              :             // of many other record types too, but these are particularly interesting, as
    1382              :             // have a lot of special processing for them in walingest.rs.
    1383           24 :             use pageserver_api::key::CHECKPOINT_KEY;
    1384           24 :             use postgres_ffi::CheckPoint;
    1385           48 :             if key == CHECKPOINT_KEY {
    1386            0 :                 let val = val.load(ctx).await?;
    1387            0 :                 match val {
    1388            0 :                     Value::Image(img) => {
    1389            0 :                         let checkpoint = CheckPoint::decode(&img)?;
    1390            0 :                         println!("   CHECKPOINT: {:?}", checkpoint);
    1391              :                     }
    1392            0 :                     Value::WalRecord(_rec) => {
    1393            0 :                         println!("   unexpected walrecord value for checkpoint key");
    1394            0 :                     }
    1395              :                 }
    1396           48 :             }
    1397              :         }
    1398              : 
    1399           24 :         Ok(())
    1400           24 :     }
    1401              : 
    1402          180 :     fn stream_index_forwards<'a, R>(
    1403          180 :         &'a self,
    1404          180 :         reader: DiskBtreeReader<R, DELTA_KEY_SIZE>,
    1405          180 :         start: &'a [u8; DELTA_KEY_SIZE],
    1406          180 :         ctx: &'a RequestContext,
    1407          180 :     ) -> impl futures::stream::Stream<
    1408          180 :         Item = Result<(Key, Lsn, BlobRef), crate::tenant::disk_btree::DiskBtreeError>,
    1409          180 :     > + 'a
    1410          180 :     where
    1411          180 :         R: BlockReader + 'a,
    1412          180 :     {
    1413              :         use futures::stream::TryStreamExt;
    1414          180 :         let stream = reader.into_stream(start, ctx);
    1415          912 :         stream.map_ok(|(key, value)| {
    1416          912 :             let key = DeltaKey::from_slice(&key);
    1417          912 :             let (key, lsn) = (key.key(), key.lsn());
    1418          912 :             let offset = BlobRef(value);
    1419          912 : 
    1420          912 :             (key, lsn, offset)
    1421          912 :         })
    1422          180 :     }
    1423              : 
    1424              :     /// The file offset to the first block of index.
    1425              :     ///
    1426              :     /// The file structure is summary, values, and index. We often need this for the size of last blob.
    1427      1422464 :     fn index_start_offset(&self) -> u64 {
    1428      1422464 :         let offset = self.index_start_blk as u64 * PAGE_SZ as u64;
    1429      1422464 :         let bref = BlobRef(offset);
    1430      1422464 :         tracing::debug!(
    1431              :             index_start_blk = self.index_start_blk,
    1432              :             offset,
    1433            0 :             pos = bref.pos(),
    1434            0 :             "index_start_offset"
    1435              :         );
    1436      1422464 :         offset
    1437      1422464 :     }
    1438              : 
    1439         3456 :     pub fn iter<'a>(&'a self, ctx: &'a RequestContext) -> DeltaLayerIterator<'a> {
    1440         3456 :         let block_reader = FileBlockReader::new(&self.file, self.file_id);
    1441         3456 :         let tree_reader =
    1442         3456 :             DiskBtreeReader::new(self.index_start_blk, self.index_root_blk, block_reader);
    1443         3456 :         DeltaLayerIterator {
    1444         3456 :             delta_layer: self,
    1445         3456 :             ctx,
    1446         3456 :             index_iter: tree_reader.iter(&[0; DELTA_KEY_SIZE], ctx),
    1447         3456 :             key_values_batch: std::collections::VecDeque::new(),
    1448         3456 :             is_end: false,
    1449         3456 :             planner: StreamingVectoredReadPlanner::new(
    1450         3456 :                 1024 * 8192, // The default value. Unit tests might use a different value. 1024 * 8K = 8MB buffer.
    1451         3456 :                 1024,        // The default value. Unit tests might use a different value
    1452         3456 :             ),
    1453         3456 :         }
    1454         3456 :     }
    1455              : 
    1456              :     /// NB: not super efficient, but not terrible either. Should prob be an iterator.
    1457              :     //
    1458              :     // We're reusing the index traversal logical in plan_reads; would be nice to
    1459              :     // factor that out.
    1460            0 :     pub(crate) async fn load_keys(&self, ctx: &RequestContext) -> anyhow::Result<Vec<Key>> {
    1461            0 :         self.index_entries(ctx)
    1462            0 :             .await
    1463            0 :             .map(|entries| entries.into_iter().map(|entry| entry.key).collect())
    1464            0 :     }
    1465              : }
    1466              : 
    1467              : /// A set of data associated with a delta layer key and its value
    1468              : pub struct DeltaEntry<'a> {
    1469              :     pub key: Key,
    1470              :     pub lsn: Lsn,
    1471              :     /// Size of the stored value
    1472              :     pub size: u64,
    1473              :     /// Reference to the on-disk value
    1474              :     pub val: ValueRef<'a>,
    1475              : }
    1476              : 
    1477              : /// Reference to an on-disk value
    1478              : pub struct ValueRef<'a> {
    1479              :     blob_ref: BlobRef,
    1480              :     layer: &'a DeltaLayerInner,
    1481              : }
    1482              : 
    1483              : impl ValueRef<'_> {
    1484              :     /// Loads the value from disk
    1485            0 :     pub async fn load(&self, ctx: &RequestContext) -> Result<Value> {
    1486            0 :         let buf = self.load_raw(ctx).await?;
    1487            0 :         let val = Value::des(&buf)?;
    1488            0 :         Ok(val)
    1489            0 :     }
    1490              : 
    1491           48 :     async fn load_raw(&self, ctx: &RequestContext) -> Result<Vec<u8>> {
    1492           48 :         let reader = BlockCursor::new(crate::tenant::block_io::BlockReaderRef::Adapter(Adapter(
    1493           48 :             self.layer,
    1494           48 :         )));
    1495           48 :         let buf = reader.read_blob(self.blob_ref.pos(), ctx).await?;
    1496           48 :         Ok(buf)
    1497           48 :     }
    1498              : }
    1499              : 
    1500              : pub(crate) struct Adapter<T>(T);
    1501              : 
    1502              : impl<T: AsRef<DeltaLayerInner>> Adapter<T> {
    1503           48 :     pub(crate) async fn read_blk(
    1504           48 :         &self,
    1505           48 :         blknum: u32,
    1506           48 :         ctx: &RequestContext,
    1507           48 :     ) -> Result<BlockLease, std::io::Error> {
    1508           48 :         let block_reader = FileBlockReader::new(&self.0.as_ref().file, self.0.as_ref().file_id);
    1509           48 :         block_reader.read_blk(blknum, ctx).await
    1510           48 :     }
    1511              : }
    1512              : 
    1513              : impl AsRef<DeltaLayerInner> for DeltaLayerInner {
    1514           96 :     fn as_ref(&self) -> &DeltaLayerInner {
    1515           96 :         self
    1516           96 :     }
    1517              : }
    1518              : 
    1519              : impl<'a> pageserver_compaction::interface::CompactionDeltaEntry<'a, Key> for DeltaEntry<'a> {
    1520            0 :     fn key(&self) -> Key {
    1521            0 :         self.key
    1522            0 :     }
    1523            0 :     fn lsn(&self) -> Lsn {
    1524            0 :         self.lsn
    1525            0 :     }
    1526            0 :     fn size(&self) -> u64 {
    1527            0 :         self.size
    1528            0 :     }
    1529              : }
    1530              : 
    1531              : pub struct DeltaLayerIterator<'a> {
    1532              :     delta_layer: &'a DeltaLayerInner,
    1533              :     ctx: &'a RequestContext,
    1534              :     planner: StreamingVectoredReadPlanner,
    1535              :     index_iter: DiskBtreeIterator<'a>,
    1536              :     key_values_batch: VecDeque<(Key, Lsn, Value)>,
    1537              :     is_end: bool,
    1538              : }
    1539              : 
    1540              : impl DeltaLayerIterator<'_> {
    1541            0 :     pub(crate) fn layer_dbg_info(&self) -> String {
    1542            0 :         self.delta_layer.layer_dbg_info()
    1543            0 :     }
    1544              : 
    1545              :     /// Retrieve a batch of key-value pairs into the iterator buffer.
    1546       128436 :     async fn next_batch(&mut self) -> anyhow::Result<()> {
    1547       128436 :         assert!(self.key_values_batch.is_empty());
    1548       128436 :         assert!(!self.is_end);
    1549              : 
    1550       128436 :         let plan = loop {
    1551     12597780 :             if let Some(res) = self.index_iter.next().await {
    1552     12594492 :                 let (raw_key, value) = res?;
    1553     12594492 :                 let key = Key::from_slice(&raw_key[..KEY_SIZE]);
    1554     12594492 :                 let lsn = DeltaKey::extract_lsn_from_buf(&raw_key);
    1555     12594492 :                 let blob_ref = BlobRef(value);
    1556     12594492 :                 let offset = blob_ref.pos();
    1557       125148 :                 if let Some(batch_plan) =
    1558     12594492 :                     self.planner.handle(key, lsn, offset, blob_ref.will_init())
    1559              :                 {
    1560       125148 :                     break batch_plan;
    1561     12469344 :                 }
    1562              :             } else {
    1563         3288 :                 self.is_end = true;
    1564         3288 :                 let data_end_offset = self.delta_layer.index_start_offset();
    1565         3288 :                 if let Some(item) = self.planner.handle_range_end(data_end_offset) {
    1566         3288 :                     break item;
    1567              :                 } else {
    1568            0 :                     return Ok(()); // TODO: test empty iterator
    1569              :                 }
    1570              :             }
    1571              :         };
    1572       128436 :         let vectored_blob_reader = VectoredBlobReader::new(&self.delta_layer.file);
    1573       128436 :         let mut next_batch = std::collections::VecDeque::new();
    1574       128436 :         let buf_size = plan.size();
    1575       128436 :         let buf = IoBufferMut::with_capacity(buf_size);
    1576       128436 :         let blobs_buf = vectored_blob_reader
    1577       128436 :             .read_blobs(&plan, buf, self.ctx)
    1578       128436 :             .await?;
    1579       128436 :         let view = BufView::new_slice(&blobs_buf.buf);
    1580     12594324 :         for meta in blobs_buf.blobs.iter() {
    1581     12594324 :             let blob_read = meta.read(&view).await?;
    1582     12594324 :             let value = Value::des(&blob_read)?;
    1583              : 
    1584     12594324 :             next_batch.push_back((meta.meta.key, meta.meta.lsn, value));
    1585              :         }
    1586       128436 :         self.key_values_batch = next_batch;
    1587       128436 :         Ok(())
    1588       128436 :     }
    1589              : 
    1590     12599052 :     pub async fn next(&mut self) -> anyhow::Result<Option<(Key, Lsn, Value)>> {
    1591     12599052 :         if self.key_values_batch.is_empty() {
    1592       134304 :             if self.is_end {
    1593         6372 :                 return Ok(None);
    1594       127932 :             }
    1595       127932 :             self.next_batch().await?;
    1596     12464748 :         }
    1597     12592680 :         Ok(Some(
    1598     12592680 :             self.key_values_batch
    1599     12592680 :                 .pop_front()
    1600     12592680 :                 .expect("should not be empty"),
    1601     12592680 :         ))
    1602     12599052 :     }
    1603              : }
    1604              : 
    1605              : #[cfg(test)]
    1606              : pub(crate) mod test {
    1607              :     use std::collections::BTreeMap;
    1608              : 
    1609              :     use bytes::Bytes;
    1610              :     use itertools::MinMaxResult;
    1611              :     use pageserver_api::value::Value;
    1612              :     use rand::RngCore;
    1613              :     use rand::prelude::{SeedableRng, SliceRandom, StdRng};
    1614              : 
    1615              :     use super::*;
    1616              :     use crate::DEFAULT_PG_VERSION;
    1617              :     use crate::context::DownloadBehavior;
    1618              :     use crate::task_mgr::TaskKind;
    1619              :     use crate::tenant::disk_btree::tests::TestDisk;
    1620              :     use crate::tenant::harness::{TIMELINE_ID, TenantHarness};
    1621              :     use crate::tenant::storage_layer::{Layer, ResidentLayer};
    1622              :     use crate::tenant::vectored_blob_io::StreamingVectoredReadPlanner;
    1623              :     use crate::tenant::{TenantShard, Timeline};
    1624              : 
    1625              :     /// Construct an index for a fictional delta layer and and then
    1626              :     /// traverse in order to plan vectored reads for a query. Finally,
    1627              :     /// verify that the traversal fed the right index key and value
    1628              :     /// pairs into the planner.
    1629              :     #[tokio::test]
    1630           12 :     async fn test_delta_layer_index_traversal() {
    1631           12 :         let base_key = Key {
    1632           12 :             field1: 0,
    1633           12 :             field2: 1663,
    1634           12 :             field3: 12972,
    1635           12 :             field4: 16396,
    1636           12 :             field5: 0,
    1637           12 :             field6: 246080,
    1638           12 :         };
    1639           12 : 
    1640           12 :         // Populate the index with some entries
    1641           12 :         let entries: BTreeMap<Key, Vec<Lsn>> = BTreeMap::from([
    1642           12 :             (base_key, vec![Lsn(1), Lsn(5), Lsn(25), Lsn(26), Lsn(28)]),
    1643           12 :             (base_key.add(1), vec![Lsn(2), Lsn(5), Lsn(10), Lsn(50)]),
    1644           12 :             (base_key.add(2), vec![Lsn(2), Lsn(5), Lsn(10), Lsn(50)]),
    1645           12 :             (base_key.add(5), vec![Lsn(10), Lsn(15), Lsn(16), Lsn(20)]),
    1646           12 :         ]);
    1647           12 : 
    1648           12 :         let mut disk = TestDisk::default();
    1649           12 :         let mut writer = DiskBtreeBuilder::<_, DELTA_KEY_SIZE>::new(&mut disk);
    1650           12 : 
    1651           12 :         let mut disk_offset = 0;
    1652           60 :         for (key, lsns) in &entries {
    1653          252 :             for lsn in lsns {
    1654          204 :                 let index_key = DeltaKey::from_key_lsn(key, *lsn);
    1655          204 :                 let blob_ref = BlobRef::new(disk_offset, false);
    1656          204 :                 writer
    1657          204 :                     .append(&index_key.0, blob_ref.0)
    1658          204 :                     .expect("In memory disk append should never fail");
    1659          204 : 
    1660          204 :                 disk_offset += 1;
    1661          204 :             }
    1662           12 :         }
    1663           12 : 
    1664           12 :         // Prepare all the arguments for the call into `plan_reads` below
    1665           12 :         let (root_offset, _writer) = writer
    1666           12 :             .finish()
    1667           12 :             .expect("In memory disk finish should never fail");
    1668           12 :         let reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(0, root_offset, disk);
    1669           12 :         let planner = VectoredReadPlanner::new(100);
    1670           12 :         let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
    1671           12 : 
    1672           12 :         let keyspace = KeySpace {
    1673           12 :             ranges: vec![
    1674           12 :                 base_key..base_key.add(3),
    1675           12 :                 base_key.add(3)..base_key.add(100),
    1676           12 :             ],
    1677           12 :         };
    1678           12 :         let lsn_range = Lsn(2)..Lsn(40);
    1679           12 : 
    1680           12 :         // Plan and validate
    1681           12 :         let vectored_reads = DeltaLayerInner::plan_reads(
    1682           12 :             &keyspace,
    1683           12 :             lsn_range.clone(),
    1684           12 :             disk_offset,
    1685           12 :             reader,
    1686           12 :             planner,
    1687           12 :             &ctx,
    1688           12 :         )
    1689           12 :         .await
    1690           12 :         .expect("Read planning should not fail");
    1691           12 : 
    1692           12 :         validate(keyspace, lsn_range, vectored_reads, entries);
    1693           12 :     }
    1694              : 
    1695           12 :     fn validate(
    1696           12 :         keyspace: KeySpace,
    1697           12 :         lsn_range: Range<Lsn>,
    1698           12 :         vectored_reads: Vec<VectoredRead>,
    1699           12 :         index_entries: BTreeMap<Key, Vec<Lsn>>,
    1700           12 :     ) {
    1701              :         #[derive(Debug, PartialEq, Eq)]
    1702              :         struct BlobSpec {
    1703              :             key: Key,
    1704              :             lsn: Lsn,
    1705              :             at: u64,
    1706              :         }
    1707              : 
    1708           12 :         let mut planned_blobs = Vec::new();
    1709          180 :         for read in vectored_reads {
    1710          168 :             for (at, meta) in read.blobs_at.as_slice() {
    1711          168 :                 planned_blobs.push(BlobSpec {
    1712          168 :                     key: meta.key,
    1713          168 :                     lsn: meta.lsn,
    1714          168 :                     at: *at,
    1715          168 :                 });
    1716          168 :             }
    1717              :         }
    1718              : 
    1719           12 :         let mut expected_blobs = Vec::new();
    1720           12 :         let mut disk_offset = 0;
    1721           60 :         for (key, lsns) in index_entries {
    1722          252 :             for lsn in lsns {
    1723          252 :                 let key_included = keyspace.ranges.iter().any(|range| range.contains(&key));
    1724          204 :                 let lsn_included = lsn_range.contains(&lsn);
    1725          204 : 
    1726          204 :                 if key_included && lsn_included {
    1727          168 :                     expected_blobs.push(BlobSpec {
    1728          168 :                         key,
    1729          168 :                         lsn,
    1730          168 :                         at: disk_offset,
    1731          168 :                     });
    1732          168 :                 }
    1733              : 
    1734          204 :                 disk_offset += 1;
    1735              :             }
    1736              :         }
    1737              : 
    1738           12 :         assert_eq!(planned_blobs, expected_blobs);
    1739           12 :     }
    1740              : 
    1741              :     mod constants {
    1742              :         use utils::lsn::Lsn;
    1743              : 
    1744              :         /// Offset used by all lsns in this test
    1745              :         pub(super) const LSN_OFFSET: Lsn = Lsn(0x08);
    1746              :         /// Number of unique keys including in the test data
    1747              :         pub(super) const KEY_COUNT: u8 = 60;
    1748              :         /// Max number of different lsns for each key
    1749              :         pub(super) const MAX_ENTRIES_PER_KEY: u8 = 20;
    1750              :         /// Possible value sizes for each key along with a probability weight
    1751              :         pub(super) const VALUE_SIZES: [(usize, u8); 3] = [(100, 2), (1024, 2), (1024 * 1024, 1)];
    1752              :         /// Probability that there will be a gap between the current key and the next one (33.3%)
    1753              :         pub(super) const KEY_GAP_CHANGES: [(bool, u8); 2] = [(true, 1), (false, 2)];
    1754              :         /// The minimum size of a key range in all the generated reads
    1755              :         pub(super) const MIN_RANGE_SIZE: i128 = 10;
    1756              :         /// The number of ranges included in each vectored read
    1757              :         pub(super) const RANGES_COUNT: u8 = 2;
    1758              :         /// The number of vectored reads performed
    1759              :         pub(super) const READS_COUNT: u8 = 100;
    1760              :         /// Soft max size of a vectored read. Will be violated if we have to read keys
    1761              :         /// with values larger than the limit
    1762              :         pub(super) const MAX_VECTORED_READ_BYTES: usize = 64 * 1024;
    1763              :     }
    1764              : 
    1765              :     struct Entry {
    1766              :         key: Key,
    1767              :         lsn: Lsn,
    1768              :         value: Vec<u8>,
    1769              :     }
    1770              : 
    1771           12 :     fn generate_entries(rng: &mut StdRng) -> Vec<Entry> {
    1772           12 :         let mut current_key = Key::MIN;
    1773           12 : 
    1774           12 :         let mut entries = Vec::new();
    1775          732 :         for _ in 0..constants::KEY_COUNT {
    1776          720 :             let count = rng.gen_range(1..constants::MAX_ENTRIES_PER_KEY);
    1777          720 :             let mut lsns_iter =
    1778        13560 :                 std::iter::successors(Some(Lsn(constants::LSN_OFFSET.0 + 0x08)), |lsn| {
    1779        13560 :                     Some(Lsn(lsn.0 + 0x08))
    1780        13560 :                 });
    1781          720 :             let mut lsns = Vec::new();
    1782        14280 :             while lsns.len() < count as usize {
    1783        13560 :                 let take = rng.gen_bool(0.5);
    1784        13560 :                 let lsn = lsns_iter.next().unwrap();
    1785        13560 :                 if take {
    1786         6672 :                     lsns.push(lsn);
    1787         6888 :                 }
    1788              :             }
    1789              : 
    1790         7392 :             for lsn in lsns {
    1791         6672 :                 let size = constants::VALUE_SIZES
    1792        20016 :                     .choose_weighted(rng, |item| item.1)
    1793         6672 :                     .unwrap()
    1794         6672 :                     .0;
    1795         6672 :                 let mut buf = vec![0; size];
    1796         6672 :                 rng.fill_bytes(&mut buf);
    1797         6672 : 
    1798         6672 :                 entries.push(Entry {
    1799         6672 :                     key: current_key,
    1800         6672 :                     lsn,
    1801         6672 :                     value: buf,
    1802         6672 :                 })
    1803              :             }
    1804              : 
    1805          720 :             let gap = constants::KEY_GAP_CHANGES
    1806         1440 :                 .choose_weighted(rng, |item| item.1)
    1807          720 :                 .unwrap()
    1808          720 :                 .0;
    1809          720 :             if gap {
    1810          228 :                 current_key = current_key.add(2);
    1811          492 :             } else {
    1812          492 :                 current_key = current_key.add(1);
    1813          492 :             }
    1814              :         }
    1815              : 
    1816           12 :         entries
    1817           12 :     }
    1818              : 
    1819              :     struct EntriesMeta {
    1820              :         key_range: Range<Key>,
    1821              :         lsn_range: Range<Lsn>,
    1822              :         index: BTreeMap<(Key, Lsn), Vec<u8>>,
    1823              :     }
    1824              : 
    1825           12 :     fn get_entries_meta(entries: &[Entry]) -> EntriesMeta {
    1826         6672 :         let key_range = match entries.iter().minmax_by_key(|e| e.key) {
    1827           12 :             MinMaxResult::MinMax(min, max) => min.key..max.key.next(),
    1828            0 :             _ => panic!("More than one entry is always expected"),
    1829              :         };
    1830              : 
    1831         6672 :         let lsn_range = match entries.iter().minmax_by_key(|e| e.lsn) {
    1832           12 :             MinMaxResult::MinMax(min, max) => min.lsn..Lsn(max.lsn.0 + 1),
    1833            0 :             _ => panic!("More than one entry is always expected"),
    1834              :         };
    1835              : 
    1836           12 :         let mut index = BTreeMap::new();
    1837         6672 :         for entry in entries.iter() {
    1838         6672 :             index.insert((entry.key, entry.lsn), entry.value.clone());
    1839         6672 :         }
    1840              : 
    1841           12 :         EntriesMeta {
    1842           12 :             key_range,
    1843           12 :             lsn_range,
    1844           12 :             index,
    1845           12 :         }
    1846           12 :     }
    1847              : 
    1848         1200 :     fn pick_random_keyspace(rng: &mut StdRng, key_range: &Range<Key>) -> KeySpace {
    1849         1200 :         let start = key_range.start.to_i128();
    1850         1200 :         let end = key_range.end.to_i128();
    1851         1200 : 
    1852         1200 :         let mut keyspace = KeySpace::default();
    1853              : 
    1854         3600 :         for _ in 0..constants::RANGES_COUNT {
    1855         2400 :             let mut range: Option<Range<Key>> = Option::default();
    1856         7464 :             while range.is_none() || keyspace.overlaps(range.as_ref().unwrap()) {
    1857         5064 :                 let range_start = rng.gen_range(start..end);
    1858         5064 :                 let range_end_offset = range_start + constants::MIN_RANGE_SIZE;
    1859         5064 :                 if range_end_offset >= end {
    1860          600 :                     range = Some(Key::from_i128(range_start)..Key::from_i128(end));
    1861         4464 :                 } else {
    1862         4464 :                     let range_end = rng.gen_range((range_start + constants::MIN_RANGE_SIZE)..end);
    1863         4464 :                     range = Some(Key::from_i128(range_start)..Key::from_i128(range_end));
    1864         4464 :                 }
    1865              :             }
    1866         2400 :             keyspace.ranges.push(range.unwrap());
    1867              :         }
    1868              : 
    1869         1200 :         keyspace
    1870         1200 :     }
    1871              : 
    1872              :     #[tokio::test]
    1873           12 :     async fn test_delta_layer_vectored_read_end_to_end() -> anyhow::Result<()> {
    1874           12 :         let harness = TenantHarness::create("test_delta_layer_oversized_vectored_read").await?;
    1875           12 :         let (tenant, ctx) = harness.load().await;
    1876           12 : 
    1877           12 :         let timeline_id = TimelineId::generate();
    1878           12 :         let timeline = tenant
    1879           12 :             .create_test_timeline(timeline_id, constants::LSN_OFFSET, DEFAULT_PG_VERSION, &ctx)
    1880           12 :             .await?;
    1881           12 : 
    1882           12 :         tracing::info!("Generating test data ...");
    1883           12 : 
    1884           12 :         let rng = &mut StdRng::seed_from_u64(0);
    1885           12 :         let entries = generate_entries(rng);
    1886           12 :         let entries_meta = get_entries_meta(&entries);
    1887           12 : 
    1888           12 :         tracing::info!("Done generating {} entries", entries.len());
    1889           12 : 
    1890           12 :         tracing::info!("Writing test data to delta layer ...");
    1891           12 :         let mut writer = DeltaLayerWriter::new(
    1892           12 :             harness.conf,
    1893           12 :             timeline_id,
    1894           12 :             harness.tenant_shard_id,
    1895           12 :             entries_meta.key_range.start,
    1896           12 :             entries_meta.lsn_range.clone(),
    1897           12 :             &timeline.gate,
    1898           12 :             timeline.cancel.clone(),
    1899           12 :             &ctx,
    1900           12 :         )
    1901           12 :         .await?;
    1902           12 : 
    1903         6684 :         for entry in entries {
    1904         6672 :             let (_, res) = writer
    1905         6672 :                 .put_value_bytes(entry.key, entry.lsn, entry.value.slice_len(), false, &ctx)
    1906         6672 :                 .await;
    1907         6672 :             res?;
    1908           12 :         }
    1909           12 : 
    1910           12 :         let (desc, path) = writer.finish(entries_meta.key_range.end, &ctx).await?;
    1911           12 :         let resident = Layer::finish_creating(harness.conf, &timeline, desc, &path)?;
    1912           12 : 
    1913           12 :         let inner = resident.get_as_delta(&ctx).await?;
    1914           12 : 
    1915           12 :         let file_size = inner.file.metadata().await?.len();
    1916           12 :         tracing::info!(
    1917           12 :             "Done writing test data to delta layer. Resulting file size is: {}",
    1918           12 :             file_size
    1919           12 :         );
    1920           12 : 
    1921         1212 :         for i in 0..constants::READS_COUNT {
    1922         1200 :             tracing::info!("Doing vectored read {}/{}", i + 1, constants::READS_COUNT);
    1923           12 : 
    1924         1200 :             let block_reader = FileBlockReader::new(&inner.file, inner.file_id);
    1925         1200 :             let index_reader = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
    1926         1200 :                 inner.index_start_blk,
    1927         1200 :                 inner.index_root_blk,
    1928         1200 :                 block_reader,
    1929         1200 :             );
    1930         1200 : 
    1931         1200 :             let planner = VectoredReadPlanner::new(constants::MAX_VECTORED_READ_BYTES);
    1932         1200 :             let keyspace = pick_random_keyspace(rng, &entries_meta.key_range);
    1933         1200 :             let data_end_offset = inner.index_start_blk as u64 * PAGE_SZ as u64;
    1934           12 : 
    1935         1200 :             let vectored_reads = DeltaLayerInner::plan_reads(
    1936         1200 :                 &keyspace,
    1937         1200 :                 entries_meta.lsn_range.clone(),
    1938         1200 :                 data_end_offset,
    1939         1200 :                 index_reader,
    1940         1200 :                 planner,
    1941         1200 :                 &ctx,
    1942         1200 :             )
    1943         1200 :             .await?;
    1944           12 : 
    1945         1200 :             let vectored_blob_reader = VectoredBlobReader::new(&inner.file);
    1946         1200 :             let buf_size = DeltaLayerInner::get_min_read_buffer_size(
    1947         1200 :                 &vectored_reads,
    1948         1200 :                 constants::MAX_VECTORED_READ_BYTES,
    1949         1200 :             );
    1950         1200 :             let mut buf = Some(IoBufferMut::with_capacity(buf_size));
    1951           12 : 
    1952       119544 :             for read in vectored_reads {
    1953       118344 :                 let blobs_buf = vectored_blob_reader
    1954       118344 :                     .read_blobs(&read, buf.take().expect("Should have a buffer"), &ctx)
    1955       118344 :                     .await?;
    1956       118344 :                 let view = BufView::new_slice(&blobs_buf.buf);
    1957       343824 :                 for meta in blobs_buf.blobs.iter() {
    1958       343824 :                     let value = meta.read(&view).await?;
    1959       343824 :                     assert_eq!(
    1960       343824 :                         &value[..],
    1961       343824 :                         &entries_meta.index[&(meta.meta.key, meta.meta.lsn)]
    1962       343824 :                     );
    1963           12 :                 }
    1964           12 : 
    1965       118344 :                 buf = Some(blobs_buf.buf);
    1966           12 :             }
    1967           12 :         }
    1968           12 : 
    1969           12 :         Ok(())
    1970           12 :     }
    1971              : 
    1972              :     #[tokio::test]
    1973           12 :     async fn copy_delta_prefix_smoke() {
    1974           12 :         use bytes::Bytes;
    1975           12 :         use pageserver_api::record::NeonWalRecord;
    1976           12 : 
    1977           12 :         let h = crate::tenant::harness::TenantHarness::create("truncate_delta_smoke")
    1978           12 :             .await
    1979           12 :             .unwrap();
    1980           12 :         let (tenant, ctx) = h.load().await;
    1981           12 :         let ctx = &ctx;
    1982           12 :         let timeline = tenant
    1983           12 :             .create_test_timeline(TimelineId::generate(), Lsn(0x10), 14, ctx)
    1984           12 :             .await
    1985           12 :             .unwrap();
    1986           12 :         let ctx = &ctx.with_scope_timeline(&timeline);
    1987           12 : 
    1988           12 :         let initdb_layer = timeline
    1989           12 :             .layers
    1990           12 :             .read()
    1991           12 :             .await
    1992           12 :             .likely_resident_layers()
    1993           12 :             .next()
    1994           12 :             .cloned()
    1995           12 :             .unwrap();
    1996           12 : 
    1997           12 :         {
    1998           12 :             let mut writer = timeline.writer().await;
    1999           12 : 
    2000           12 :             let data = [
    2001           12 :                 (0x20, 12, Value::Image(Bytes::from_static(b"foobar"))),
    2002           12 :                 (
    2003           12 :                     0x30,
    2004           12 :                     12,
    2005           12 :                     Value::WalRecord(NeonWalRecord::Postgres {
    2006           12 :                         will_init: false,
    2007           12 :                         rec: Bytes::from_static(b"1"),
    2008           12 :                     }),
    2009           12 :                 ),
    2010           12 :                 (
    2011           12 :                     0x40,
    2012           12 :                     12,
    2013           12 :                     Value::WalRecord(NeonWalRecord::Postgres {
    2014           12 :                         will_init: true,
    2015           12 :                         rec: Bytes::from_static(b"2"),
    2016           12 :                     }),
    2017           12 :                 ),
    2018           12 :                 // build an oversized value so we cannot extend and existing read over
    2019           12 :                 // this
    2020           12 :                 (
    2021           12 :                     0x50,
    2022           12 :                     12,
    2023           12 :                     Value::WalRecord(NeonWalRecord::Postgres {
    2024           12 :                         will_init: true,
    2025           12 :                         rec: {
    2026           12 :                             let mut buf =
    2027           12 :                                 vec![0u8; tenant.conf.max_vectored_read_bytes.0.get() + 1024];
    2028           12 :                             buf.iter_mut()
    2029           12 :                                 .enumerate()
    2030      1609728 :                                 .for_each(|(i, slot)| *slot = (i % 256) as u8);
    2031           12 :                             Bytes::from(buf)
    2032           12 :                         },
    2033           12 :                     }),
    2034           12 :                 ),
    2035           12 :                 // because the oversized read cannot be extended further, we are sure to exercise the
    2036           12 :                 // builder created on the last round with this:
    2037           12 :                 (
    2038           12 :                     0x60,
    2039           12 :                     12,
    2040           12 :                     Value::WalRecord(NeonWalRecord::Postgres {
    2041           12 :                         will_init: true,
    2042           12 :                         rec: Bytes::from_static(b"3"),
    2043           12 :                     }),
    2044           12 :                 ),
    2045           12 :                 (
    2046           12 :                     0x60,
    2047           12 :                     9,
    2048           12 :                     Value::Image(Bytes::from_static(b"something for a different key")),
    2049           12 :                 ),
    2050           12 :             ];
    2051           12 : 
    2052           12 :             let mut last_lsn = None;
    2053           12 : 
    2054           84 :             for (lsn, key, value) in data {
    2055           72 :                 let key = Key::from_i128(key);
    2056           72 :                 writer.put(key, Lsn(lsn), &value, ctx).await.unwrap();
    2057           72 :                 last_lsn = Some(lsn);
    2058           12 :             }
    2059           12 : 
    2060           12 :             writer.finish_write(Lsn(last_lsn.unwrap()));
    2061           12 :         }
    2062           12 :         timeline.freeze_and_flush().await.unwrap();
    2063           12 : 
    2064           12 :         let new_layer = timeline
    2065           12 :             .layers
    2066           12 :             .read()
    2067           12 :             .await
    2068           12 :             .likely_resident_layers()
    2069           17 :             .find(|&x| x != &initdb_layer)
    2070           12 :             .cloned()
    2071           12 :             .unwrap();
    2072           12 : 
    2073           12 :         // create a copy for the timeline, so we don't overwrite the file
    2074           12 :         let branch = tenant
    2075           12 :             .branch_timeline_test(&timeline, TimelineId::generate(), None, ctx)
    2076           12 :             .await
    2077           12 :             .unwrap();
    2078           12 : 
    2079           12 :         assert_eq!(branch.get_ancestor_lsn(), Lsn(0x60));
    2080           12 : 
    2081           12 :         // truncating at 0x61 gives us a full copy, otherwise just go backwards until there's just
    2082           12 :         // a single key
    2083           12 : 
    2084           72 :         for truncate_at in [0x61, 0x51, 0x41, 0x31, 0x21] {
    2085           60 :             let truncate_at = Lsn(truncate_at);
    2086           12 : 
    2087           60 :             let mut writer = DeltaLayerWriter::new(
    2088           60 :                 tenant.conf,
    2089           60 :                 branch.timeline_id,
    2090           60 :                 tenant.tenant_shard_id,
    2091           60 :                 Key::MIN,
    2092           60 :                 Lsn(0x11)..truncate_at,
    2093           60 :                 &branch.gate,
    2094           60 :                 branch.cancel.clone(),
    2095           60 :                 ctx,
    2096           60 :             )
    2097           60 :             .await
    2098           60 :             .unwrap();
    2099           12 : 
    2100           60 :             let new_layer = new_layer.download_and_keep_resident(ctx).await.unwrap();
    2101           60 : 
    2102           60 :             new_layer
    2103           60 :                 .copy_delta_prefix(&mut writer, truncate_at, ctx)
    2104           60 :                 .await
    2105           60 :                 .unwrap();
    2106           12 : 
    2107           60 :             let (desc, path) = writer.finish(Key::MAX, ctx).await.unwrap();
    2108           60 :             let copied_layer = Layer::finish_creating(tenant.conf, &branch, desc, &path).unwrap();
    2109           60 : 
    2110           60 :             copied_layer.get_as_delta(ctx).await.unwrap();
    2111           60 : 
    2112           60 :             assert_keys_and_values_eq(
    2113           60 :                 new_layer.get_as_delta(ctx).await.unwrap(),
    2114           60 :                 copied_layer.get_as_delta(ctx).await.unwrap(),
    2115           60 :                 truncate_at,
    2116           60 :                 ctx,
    2117           60 :             )
    2118           60 :             .await;
    2119           12 :         }
    2120           12 :     }
    2121              : 
    2122           60 :     async fn assert_keys_and_values_eq(
    2123           60 :         source: &DeltaLayerInner,
    2124           60 :         truncated: &DeltaLayerInner,
    2125           60 :         truncated_at: Lsn,
    2126           60 :         ctx: &RequestContext,
    2127           60 :     ) {
    2128              :         use futures::future::ready;
    2129              :         use futures::stream::TryStreamExt;
    2130              : 
    2131           60 :         let start_key = [0u8; DELTA_KEY_SIZE];
    2132           60 : 
    2133           60 :         let source_reader = FileBlockReader::new(&source.file, source.file_id);
    2134           60 :         let source_tree = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
    2135           60 :             source.index_start_blk,
    2136           60 :             source.index_root_blk,
    2137           60 :             &source_reader,
    2138           60 :         );
    2139           60 :         let source_stream = source.stream_index_forwards(source_tree, &start_key, ctx);
    2140          360 :         let source_stream = source_stream.filter(|res| match res {
    2141          360 :             Ok((_, lsn, _)) => ready(lsn < &truncated_at),
    2142            0 :             _ => ready(true),
    2143          360 :         });
    2144           60 :         let mut source_stream = std::pin::pin!(source_stream);
    2145           60 : 
    2146           60 :         let truncated_reader = FileBlockReader::new(&truncated.file, truncated.file_id);
    2147           60 :         let truncated_tree = DiskBtreeReader::<_, DELTA_KEY_SIZE>::new(
    2148           60 :             truncated.index_start_blk,
    2149           60 :             truncated.index_root_blk,
    2150           60 :             &truncated_reader,
    2151           60 :         );
    2152           60 :         let truncated_stream = truncated.stream_index_forwards(truncated_tree, &start_key, ctx);
    2153           60 :         let mut truncated_stream = std::pin::pin!(truncated_stream);
    2154           60 : 
    2155           60 :         let mut scratch_left = Vec::new();
    2156           60 :         let mut scratch_right = Vec::new();
    2157              : 
    2158              :         loop {
    2159          252 :             let (src, truncated) = (source_stream.try_next(), truncated_stream.try_next());
    2160          252 :             let (src, truncated) = tokio::try_join!(src, truncated).unwrap();
    2161          252 : 
    2162          252 :             if src.is_none() {
    2163           60 :                 assert!(truncated.is_none());
    2164           60 :                 break;
    2165          192 :             }
    2166          192 : 
    2167          192 :             let (src, truncated) = (src.unwrap(), truncated.unwrap());
    2168          192 : 
    2169          192 :             // because we've filtered the source with Lsn, we should always have the same keys from both.
    2170          192 :             assert_eq!(src.0, truncated.0);
    2171          192 :             assert_eq!(src.1, truncated.1);
    2172              : 
    2173              :             // if this is needed for something else, just drop this assert.
    2174          192 :             assert!(
    2175          192 :                 src.2.pos() >= truncated.2.pos(),
    2176            0 :                 "value position should not go backwards {} vs. {}",
    2177            0 :                 src.2.pos(),
    2178            0 :                 truncated.2.pos()
    2179              :             );
    2180              : 
    2181          192 :             scratch_left.clear();
    2182          192 :             let src_cursor = source_reader.block_cursor();
    2183          192 :             let left = src_cursor.read_blob_into_buf(src.2.pos(), &mut scratch_left, ctx);
    2184          192 :             scratch_right.clear();
    2185          192 :             let trunc_cursor = truncated_reader.block_cursor();
    2186          192 :             let right = trunc_cursor.read_blob_into_buf(truncated.2.pos(), &mut scratch_right, ctx);
    2187          192 : 
    2188          192 :             tokio::try_join!(left, right).unwrap();
    2189          192 : 
    2190          192 :             assert_eq!(utils::Hex(&scratch_left), utils::Hex(&scratch_right));
    2191              :         }
    2192           60 :     }
    2193              : 
    2194       109344 :     pub(crate) fn sort_delta(
    2195       109344 :         (k1, l1, _): &(Key, Lsn, Value),
    2196       109344 :         (k2, l2, _): &(Key, Lsn, Value),
    2197       109344 :     ) -> std::cmp::Ordering {
    2198       109344 :         (k1, l1).cmp(&(k2, l2))
    2199       109344 :     }
    2200              : 
    2201              :     #[cfg(feature = "testing")]
    2202          564 :     pub(crate) fn sort_delta_value(
    2203          564 :         (k1, l1, v1): &(Key, Lsn, Value),
    2204          564 :         (k2, l2, v2): &(Key, Lsn, Value),
    2205          564 :     ) -> std::cmp::Ordering {
    2206          564 :         let order_1 = if v1.is_image() { 0 } else { 1 };
    2207          564 :         let order_2 = if v2.is_image() { 0 } else { 1 };
    2208          564 :         (k1, l1, order_1).cmp(&(k2, l2, order_2))
    2209          564 :     }
    2210              : 
    2211          132 :     pub(crate) async fn produce_delta_layer(
    2212          132 :         tenant: &TenantShard,
    2213          132 :         tline: &Arc<Timeline>,
    2214          132 :         mut deltas: Vec<(Key, Lsn, Value)>,
    2215          132 :         ctx: &RequestContext,
    2216          132 :     ) -> anyhow::Result<ResidentLayer> {
    2217          132 :         deltas.sort_by(sort_delta);
    2218          132 :         let (key_start, _, _) = deltas.first().unwrap();
    2219          132 :         let (key_max, _, _) = deltas.last().unwrap();
    2220        49440 :         let lsn_min = deltas.iter().map(|(_, lsn, _)| lsn).min().unwrap();
    2221        49440 :         let lsn_max = deltas.iter().map(|(_, lsn, _)| lsn).max().unwrap();
    2222          132 :         let lsn_end = Lsn(lsn_max.0 + 1);
    2223          132 :         let mut writer = DeltaLayerWriter::new(
    2224          132 :             tenant.conf,
    2225          132 :             tline.timeline_id,
    2226          132 :             tenant.tenant_shard_id,
    2227          132 :             *key_start,
    2228          132 :             (*lsn_min)..lsn_end,
    2229          132 :             &tline.gate,
    2230          132 :             tline.cancel.clone(),
    2231          132 :             ctx,
    2232          132 :         )
    2233          132 :         .await?;
    2234          132 :         let key_end = key_max.next();
    2235              : 
    2236        49572 :         for (key, lsn, value) in deltas {
    2237        49440 :             writer.put_value(key, lsn, value, ctx).await?;
    2238              :         }
    2239              : 
    2240          132 :         let (desc, path) = writer.finish(key_end, ctx).await?;
    2241          132 :         let delta_layer = Layer::finish_creating(tenant.conf, tline, desc, &path)?;
    2242              : 
    2243          132 :         Ok::<_, anyhow::Error>(delta_layer)
    2244          132 :     }
    2245              : 
    2246          168 :     async fn assert_delta_iter_equal(
    2247          168 :         delta_iter: &mut DeltaLayerIterator<'_>,
    2248          168 :         expect: &[(Key, Lsn, Value)],
    2249          168 :     ) {
    2250          168 :         let mut expect_iter = expect.iter();
    2251              :         loop {
    2252       168168 :             let o1 = delta_iter.next().await.unwrap();
    2253       168168 :             let o2 = expect_iter.next();
    2254       168168 :             assert_eq!(o1.is_some(), o2.is_some());
    2255       168168 :             if o1.is_none() && o2.is_none() {
    2256          168 :                 break;
    2257       168000 :             }
    2258       168000 :             let (k1, l1, v1) = o1.unwrap();
    2259       168000 :             let (k2, l2, v2) = o2.unwrap();
    2260       168000 :             assert_eq!(&k1, k2);
    2261       168000 :             assert_eq!(l1, *l2);
    2262       168000 :             assert_eq!(&v1, v2);
    2263              :         }
    2264          168 :     }
    2265              : 
    2266              :     #[tokio::test]
    2267           12 :     async fn delta_layer_iterator() {
    2268           12 :         let harness = TenantHarness::create("delta_layer_iterator").await.unwrap();
    2269           12 :         let (tenant, ctx) = harness.load().await;
    2270           12 : 
    2271           12 :         let tline = tenant
    2272           12 :             .create_test_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
    2273           12 :             .await
    2274           12 :             .unwrap();
    2275           12 : 
    2276        12000 :         fn get_key(id: u32) -> Key {
    2277        12000 :             let mut key = Key::from_hex("000000000033333333444444445500000000").unwrap();
    2278        12000 :             key.field6 = id;
    2279        12000 :             key
    2280        12000 :         }
    2281           12 :         const N: usize = 1000;
    2282           12 :         let test_deltas = (0..N)
    2283        12000 :             .map(|idx| {
    2284        12000 :                 (
    2285        12000 :                     get_key(idx as u32 / 10),
    2286        12000 :                     Lsn(0x10 * ((idx as u64) % 10 + 1)),
    2287        12000 :                     Value::Image(Bytes::from(format!("img{idx:05}"))),
    2288        12000 :                 )
    2289        12000 :             })
    2290           12 :             .collect_vec();
    2291           12 :         let resident_layer = produce_delta_layer(&tenant, &tline, test_deltas.clone(), &ctx)
    2292           12 :             .await
    2293           12 :             .unwrap();
    2294           12 :         let delta_layer = resident_layer.get_as_delta(&ctx).await.unwrap();
    2295           36 :         for max_read_size in [1, 1024] {
    2296          192 :             for batch_size in [1, 2, 4, 8, 3, 7, 13] {
    2297          168 :                 println!("running with batch_size={batch_size} max_read_size={max_read_size}");
    2298          168 :                 // Test if the batch size is correctly determined
    2299          168 :                 let mut iter = delta_layer.iter(&ctx);
    2300          168 :                 iter.planner = StreamingVectoredReadPlanner::new(max_read_size, batch_size);
    2301          168 :                 let mut num_items = 0;
    2302          672 :                 for _ in 0..3 {
    2303          504 :                     iter.next_batch().await.unwrap();
    2304          504 :                     num_items += iter.key_values_batch.len();
    2305          504 :                     if max_read_size == 1 {
    2306           12 :                         // every key should be a batch b/c the value is larger than max_read_size
    2307          252 :                         assert_eq!(iter.key_values_batch.len(), 1);
    2308           12 :                     } else {
    2309          252 :                         assert!(iter.key_values_batch.len() <= batch_size);
    2310           12 :                     }
    2311          504 :                     if num_items >= N {
    2312           12 :                         break;
    2313          504 :                     }
    2314          504 :                     iter.key_values_batch.clear();
    2315           12 :                 }
    2316           12 :                 // Test if the result is correct
    2317          168 :                 let mut iter = delta_layer.iter(&ctx);
    2318          168 :                 iter.planner = StreamingVectoredReadPlanner::new(max_read_size, batch_size);
    2319          168 :                 assert_delta_iter_equal(&mut iter, &test_deltas).await;
    2320           12 :             }
    2321           12 :         }
    2322           12 :     }
    2323              : }
        

Generated by: LCOV version 2.1-beta