LCOV - differential code coverage report
Current view: top level - pageserver/src/tenant - disk_btree.rs (source / functions)
Current:       f6946e90941b557c917ac98cd5a7e9506d180f3e.info
Current Date:  2023-10-19 02:04:12
Baseline:      c8637f37369098875162f194f92736355783b050.info
Baseline Date: 2023-10-18 20:25:20

                 Coverage   Total   Hit   UBC   GIC   CBC   ECB
    Lines:        97.2 %      680   661    19          661
    Functions:    80.5 %      215   173    42     2    171     2

           TLA  Line data    Source code
       1                 : //!
       2                 : //! Simple on-disk B-tree implementation
       3                 : //!
       4                 : //! This is used as the index structure within image and delta layers
       5                 : //!
       6                 : //! Features:
       7                 : //! - Fixed-width keys
       8                 : //! - Fixed-width values (VALUE_SZ)
       9                 : //! - The tree is created in a bulk operation. Insert/deletion after creation
      10                 : //!   is not supported
      11                 : //! - page-oriented
      12                 : //!
      13                 : //! TODO:
      14                 : //! - maybe something like an Adaptive Radix Tree would be more efficient?
      15                 : //! - the values stored by image and delta layers are offsets into the file,
      16                 : //!   and they are in monotonically increasing order. Prefix compression would
      17                 : //!   be very useful for them, too.
      18                 : //! - An Iterator interface would be more convenient for the callers than the
      19                 : //!   'visit' function
      20                 : //!
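
The module doc above describes the bulk-build-then-read lifecycle; as a quick orientation, here is a minimal sketch of that flow. It mirrors the `basic` unit test at the bottom of this file, and assumes the test-only TestDisk block store plus the RequestContext/TaskKind/DownloadBehavior types used there.

    // Sketch only: mirrors the `basic` test below, not additional measured source.
    async fn build_then_read() -> Result<()> {
        let mut disk = TestDisk::default();
        let mut writer = DiskBtreeBuilder::<_, 6>::new(&mut disk);

        // Keys must be appended in strictly increasing order; the tree is
        // immutable once `finish` has been called.
        writer.append(b"xaaaaa", 0)?;
        writer.append(b"xabada", 1)?;
        let (root_blk, _writer) = writer.finish()?; // root block is stored "out-of-band"

        let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
        let reader = DiskBtreeReader::new(0, root_blk, disk);
        assert_eq!(reader.get(b"xaaaaa", &ctx).await?, Some(0));
        assert_eq!(reader.get(b"zzzzzz", &ctx).await?, None);
        Ok(())
    }
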
      21                 : use byteorder::{ReadBytesExt, BE};
      22                 : use bytes::{BufMut, Bytes, BytesMut};
      23                 : use either::Either;
      24                 : use hex;
      25                 : use std::{cmp::Ordering, io, result};
      26                 : use thiserror::Error;
      27                 : use tracing::error;
      28                 : 
      29                 : use crate::{
      30                 :     context::{DownloadBehavior, RequestContext},
      31                 :     task_mgr::TaskKind,
      32                 :     tenant::block_io::{BlockReader, BlockWriter},
      33                 : };
      34                 : 
       35                 : // The size of each value stored in the B-tree, in bytes. 5 bytes is enough currently.
      36                 : pub const VALUE_SZ: usize = 5;
      37                 : pub const MAX_VALUE: u64 = 0x007f_ffff_ffff;
      38                 : 
      39                 : #[allow(dead_code)]
      40                 : pub const PAGE_SZ: usize = 8192;
      41                 : 
      42 UBC           0 : #[derive(Clone, Copy, Debug)]
      43                 : struct Value([u8; VALUE_SZ]);
      44                 : 
      45                 : impl Value {
      46 CBC   184103924 :     fn from_slice(slice: &[u8]) -> Value {
      47       184103924 :         let mut b = [0u8; VALUE_SZ];
      48       184103924 :         b.copy_from_slice(slice);
      49       184103924 :         Value(b)
      50       184103924 :     }
      51                 : 
      52        83054556 :     fn from_u64(x: u64) -> Value {
      53        83054556 :         assert!(x <= 0x007f_ffff_ffff);
      54        83054556 :         Value([
      55        83054556 :             (x >> 32) as u8,
      56        83054556 :             (x >> 24) as u8,
      57        83054556 :             (x >> 16) as u8,
      58        83054556 :             (x >> 8) as u8,
      59        83054556 :             x as u8,
      60        83054556 :         ])
      61        83054556 :     }
      62                 : 
      63          161987 :     fn from_blknum(x: u32) -> Value {
      64          161987 :         Value([
      65          161987 :             0x80,
      66          161987 :             (x >> 24) as u8,
      67          161987 :             (x >> 16) as u8,
      68          161987 :             (x >> 8) as u8,
      69          161987 :             x as u8,
      70          161987 :         ])
      71          161987 :     }
      72                 : 
      73                 :     #[allow(dead_code)]
      74 UBC           0 :     fn is_offset(self) -> bool {
      75               0 :         self.0[0] & 0x80 != 0
      76               0 :     }
      77                 : 
      78 CBC   171719431 :     fn to_u64(self) -> u64 {
      79       171719431 :         let b = &self.0;
      80       171719431 :         (b[0] as u64) << 32
      81       171719431 :             | (b[1] as u64) << 24
      82       171719431 :             | (b[2] as u64) << 16
      83       171719431 :             | (b[3] as u64) << 8
      84       171719431 :             | b[4] as u64
      85       171719431 :     }
      86                 : 
      87        12381481 :     fn to_blknum(self) -> u32 {
      88        12381481 :         let b = &self.0;
      89        12381481 :         assert!(b[0] == 0x80);
      90        12381481 :         (b[1] as u32) << 24 | (b[2] as u32) << 16 | (b[3] as u32) << 8 | b[4] as u32
      91        12381481 :     }
      92                 : }
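
To make the encoding concrete: a value is always 5 bytes. Plain offsets store the low 39 bits of a u64 big-endian, which is why MAX_VALUE is 0x007f_ffff_ffff, while child-block pointers set the top bit (0x80) of the first byte. A standalone worked example (plain Rust, independent of the types above):

    fn main() {
        // Offset encoding: 5 bytes, big-endian, top bit of byte 0 left clear.
        let x: u64 = 0x0012_3456_789a;
        let enc = [(x >> 32) as u8, (x >> 24) as u8, (x >> 16) as u8, (x >> 8) as u8, x as u8];
        assert_eq!(enc, [0x12, 0x34, 0x56, 0x78, 0x9a]);

        // Decoding reverses the shifts, exactly as to_u64() does.
        let dec = (enc[0] as u64) << 32
            | (enc[1] as u64) << 24
            | (enc[2] as u64) << 16
            | (enc[3] as u64) << 8
            | enc[4] as u64;
        assert_eq!(dec, x);

        // Block-number encoding: tag byte 0x80, then the u32 block number big-endian.
        let blk: u32 = 7;
        let enc_blk = [0x80u8, (blk >> 24) as u8, (blk >> 16) as u8, (blk >> 8) as u8, blk as u8];
        assert_eq!(enc_blk, [0x80, 0, 0, 0, 7]);
    }
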
      93                 : 
      94 UBC           0 : #[derive(Error, Debug)]
      95                 : pub enum DiskBtreeError {
      96                 :     #[error("Attempt to append a value that is too large {0} > {}", MAX_VALUE)]
      97                 :     AppendOverflow(u64),
      98                 : 
      99                 :     #[error("Unsorted input: key {key:?} is <= last_key {last_key:?}")]
     100                 :     UnsortedInput { key: Box<[u8]>, last_key: Box<[u8]> },
     101                 : 
     102                 :     #[error("Could not push to new leaf node")]
     103                 :     FailedToPushToNewLeafNode,
     104                 : 
     105                 :     #[error("IoError: {0}")]
     106                 :     Io(#[from] io::Error),
     107                 : }
     108                 : 
     109                 : pub type Result<T> = result::Result<T, DiskBtreeError>;
     110                 : 
     111                 : /// This is the on-disk representation.
     112                 : struct OnDiskNode<'a, const L: usize> {
     113                 :     // Fixed-width fields
     114                 :     num_children: u16,
     115                 :     level: u8,
     116                 :     prefix_len: u8,
     117                 :     suffix_len: u8,
     118                 : 
     119                 :     // Variable-length fields. These are stored on-disk after the fixed-width
     120                 :     // fields, in this order. In the in-memory representation, these point to
     121                 :     // the right parts in the page buffer.
     122                 :     prefix: &'a [u8],
     123                 :     keys: &'a [u8],
     124                 :     values: &'a [u8],
     125                 : }
     126                 : 
     127                 : impl<'a, const L: usize> OnDiskNode<'a, L> {
     128                 :     ///
     129                 :     /// Interpret a PAGE_SZ page as a node.
     130                 :     ///
     131 CBC    36768819 :     fn deparse(buf: &[u8]) -> Result<OnDiskNode<L>> {
     132        36768819 :         let mut cursor = std::io::Cursor::new(buf);
     133        36768819 :         let num_children = cursor.read_u16::<BE>()?;
     134        36768819 :         let level = cursor.read_u8()?;
     135        36768819 :         let prefix_len = cursor.read_u8()?;
     136        36768819 :         let suffix_len = cursor.read_u8()?;
     137                 : 
     138        36768819 :         let mut off = cursor.position();
     139        36768819 :         let prefix_off = off as usize;
     140        36768819 :         off += prefix_len as u64;
     141        36768819 : 
     142        36768819 :         let keys_off = off as usize;
     143        36768819 :         let keys_len = num_children as usize * suffix_len as usize;
     144        36768819 :         off += keys_len as u64;
     145        36768819 : 
     146        36768819 :         let values_off = off as usize;
     147        36768819 :         let values_len = num_children as usize * VALUE_SZ;
     148        36768819 :         //off += values_len as u64;
     149        36768819 : 
     150        36768819 :         let prefix = &buf[prefix_off..prefix_off + prefix_len as usize];
     151        36768819 :         let keys = &buf[keys_off..keys_off + keys_len];
     152        36768819 :         let values = &buf[values_off..values_off + values_len];
     153        36768819 : 
     154        36768819 :         Ok(OnDiskNode {
     155        36768819 :             num_children,
     156        36768819 :             level,
     157        36768819 :             prefix_len,
     158        36768819 :             suffix_len,
     159        36768819 :             prefix,
     160        36768819 :             keys,
     161        36768819 :             values,
     162        36768819 :         })
     163        36768819 :     }
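
For reference, the byte layout that deparse() walks, as a sketch derived from the reads above (not an independent spec): a 5-byte header, then the shared key prefix, then the packed key suffixes, then the packed values. On internal nodes (level > 0) the values are 0x80-tagged child block numbers; on leaves (level 0) they are the callers' u64 payloads.

    // Offsets within one PAGE_SZ node, mirroring deparse():
    //   0..2   num_children (u16, big-endian)
    //   2      level        (0 = leaf, >0 = internal)
    //   3      prefix_len
    //   4      suffix_len
    //   5..    prefix, then keys (num_children * suffix_len), then values
    //          (num_children * VALUE_SZ); the rest of the page is zero padding.
    fn node_offsets(num_children: usize, prefix_len: usize, suffix_len: usize) -> (usize, usize, usize) {
        let hdr = 5; // 2 + 1 + 1 + 1
        let keys_off = hdr + prefix_len;
        let values_off = keys_off + num_children * suffix_len;
        let end = values_off + num_children * 5; // VALUE_SZ
        (keys_off, values_off, end) // e.g. node_offsets(8, 2, 4) == (7, 39, 79)
    }
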
     164                 : 
     165                 :     ///
     166                 :     /// Read a value at 'idx'
     167                 :     ///
     168       184104160 :     fn value(&self, idx: usize) -> Value {
     169       184104160 :         let value_off = idx * VALUE_SZ;
     170       184104160 :         let value_slice = &self.values[value_off..value_off + VALUE_SZ];
     171       184104160 :         Value::from_slice(value_slice)
     172       184104160 :     }
     173                 : 
     174        36441653 :     fn binary_search(
     175        36441653 :         &self,
     176        36441653 :         search_key: &[u8; L],
     177        36441653 :         keybuf: &mut [u8],
     178        36441653 :     ) -> result::Result<usize, usize> {
     179        36441653 :         let mut size = self.num_children as usize;
     180        36441653 :         let mut low = 0;
     181        36441653 :         let mut high = size;
     182       257730829 :         while low < high {
     183       221856959 :             let mid = low + size / 2;
     184       221856959 : 
     185       221856959 :             let key_off = mid * self.suffix_len as usize;
     186       221856959 :             let suffix = &self.keys[key_off..key_off + self.suffix_len as usize];
     187       221856959 :             // Does this match?
     188       221856959 :             keybuf[self.prefix_len as usize..].copy_from_slice(suffix);
     189       221856959 : 
     190       221856959 :             let cmp = keybuf[..].cmp(search_key);
     191       221856959 : 
     192       221856959 :             if cmp == Ordering::Less {
     193        85096911 :                 low = mid + 1;
     194       136760048 :             } else if cmp == Ordering::Greater {
     195       136192265 :                 high = mid;
     196       136192265 :             } else {
     197          567783 :                 return Ok(mid);
     198                 :             }
     199       221289176 :             size = high - low;
     200                 :         }
     201        35873870 :         Err(low)
     202        36441653 :     }
     203                 : }
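
A worked example of the search above, as a standalone sketch with hypothetical keys: with page prefix "xa" and suffixes "aaaa" < "baaa" < "bada", the full keys are "xaaaaa", "xabaaa", "xabada". The prefix is copied into keybuf once; each probe only copies a suffix before comparing the whole buffer with the search key, and a miss returns the insertion point just like slice::binary_search.

    // Standalone sketch of binary search over prefix-compressed keys.
    fn search(prefix: &[u8], suffixes: &[&[u8]], search_key: &[u8]) -> Result<usize, usize> {
        let mut keybuf = prefix.to_vec();
        keybuf.extend_from_slice(suffixes[0]); // size the buffer; assumes at least one key
        let (mut low, mut high) = (0, suffixes.len());
        while low < high {
            let mid = low + (high - low) / 2;
            keybuf[prefix.len()..].copy_from_slice(suffixes[mid]);
            match keybuf.as_slice().cmp(search_key) {
                std::cmp::Ordering::Less => low = mid + 1,
                std::cmp::Ordering::Greater => high = mid,
                std::cmp::Ordering::Equal => return Ok(mid),
            }
        }
        Err(low)
    }

    fn main() {
        let suffixes: [&[u8]; 3] = [b"aaaa", b"baaa", b"bada"];
        assert_eq!(search(b"xa", &suffixes, b"xabaaa"), Ok(1));  // exact match
        assert_eq!(search(b"xa", &suffixes, b"xabaca"), Err(2)); // would sort before "xabada"
    }
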
     204                 : 
     205                 : ///
     206                 : /// Public reader object, to search the tree.
     207                 : ///
     208                 : pub struct DiskBtreeReader<R, const L: usize>
     209                 : where
     210                 :     R: BlockReader,
     211                 : {
     212                 :     start_blk: u32,
     213                 :     root_blk: u32,
     214                 :     reader: R,
     215                 : }
     216                 : 
     217        36441592 : #[derive(Clone, Copy, Debug, PartialEq, Eq)]
     218                 : pub enum VisitDirection {
     219                 :     Forwards,
     220                 :     Backwards,
     221                 : }
     222                 : 
     223                 : impl<R, const L: usize> DiskBtreeReader<R, L>
     224                 : where
     225                 :     R: BlockReader,
     226                 : {
     227        23854581 :     pub fn new(start_blk: u32, root_blk: u32, reader: R) -> Self {
     228        23854581 :         DiskBtreeReader {
     229        23854581 :             start_blk,
     230        23854581 :             root_blk,
     231        23854581 :             reader,
     232        23854581 :         }
     233        23854581 :     }
     234                 : 
     235                 :     ///
     236                 :     /// Read the value for given key. Returns the value, or None if it doesn't exist.
     237                 :     ///
     238          632433 :     pub async fn get(&self, search_key: &[u8; L], ctx: &RequestContext) -> Result<Option<u64>> {
     239          632433 :         let mut result: Option<u64> = None;
     240          632433 :         self.visit(
     241          632433 :             search_key,
     242          632433 :             VisitDirection::Forwards,
     243          632433 :             |key, value| {
     244          532421 :                 if key == search_key {
     245          531416 :                     result = Some(value);
     246          531416 :                 }
     247          532421 :                 false
     248          632433 :             },
     249          632433 :             ctx,
     250          632433 :         )
     251            3710 :         .await?;
     252          632433 :         Ok(result)
     253          632433 :     }
     254                 : 
     255                 :     ///
     256                 :     /// Scan the tree, starting from 'search_key', in the given direction. 'visitor'
     257                 :     /// will be called for every key >= 'search_key' (or <= 'search_key', if scanning
     258                 :     /// backwards)
     259                 :     ///
     260        24060155 :     pub async fn visit<V>(
     261        24060155 :         &self,
     262        24060155 :         search_key: &[u8; L],
     263        24060155 :         dir: VisitDirection,
     264        24060155 :         mut visitor: V,
     265        24060155 :         ctx: &RequestContext,
     266        24060155 :     ) -> Result<bool>
     267        24060155 :     where
     268        24060155 :         V: FnMut(&[u8], u64) -> bool,
     269        24060155 :     {
     270        24060155 :         let mut stack = Vec::new();
     271        24060155 :         stack.push((self.root_blk, None));
     272        24060155 :         let block_cursor = self.reader.block_cursor();
     273        36875880 :         while let Some((node_blknum, opt_iter)) = stack.pop() {
     274                 :             // Locate the node.
     275        36765803 :             let node_buf = block_cursor
     276        36765803 :                 .read_blk(self.start_blk + node_blknum, ctx)
     277          282875 :                 .await?;
     278                 : 
     279        36765803 :             let node = OnDiskNode::deparse(node_buf.as_ref())?;
     280        36765803 :             let prefix_len = node.prefix_len as usize;
     281        36765803 :             let suffix_len = node.suffix_len as usize;
     282        36765803 : 
     283        36765803 :             assert!(node.num_children > 0);
     284                 : 
     285        36765803 :             let mut keybuf = Vec::new();
     286        36765803 :             keybuf.extend(node.prefix);
     287        36765803 :             keybuf.resize(prefix_len + suffix_len, 0);
     288                 : 
     289        36765803 :             let mut iter = if let Some(iter) = opt_iter {
     290          324150 :                 iter
     291        36441653 :             } else if dir == VisitDirection::Forwards {
     292                 :                 // Locate the first match
     293         1051946 :                 let idx = match node.binary_search(search_key, keybuf.as_mut_slice()) {
     294          532742 :                     Ok(idx) => idx,
     295          519204 :                     Err(idx) => {
     296          519204 :                         if node.level == 0 {
     297                 :                             // Imagine that the node contains the following keys:
     298                 :                             //
     299                 :                             // 1
     300                 :                             // 3  <-- idx
     301                 :                             // 5
     302                 :                             //
      303                 :                             // If the search key is '2' and there is no exact match,
     304                 :                             // the binary search would return the index of key
     305                 :                             // '3'. That's cool, '3' is the first key to return.
     306          160333 :                             idx
     307                 :                         } else {
     308                 :                             // This is an internal page, so each key represents a lower
     309                 :                             // bound for what's in the child page. If there is no exact
     310                 :                             // match, we have to return the *previous* entry.
     311                 :                             //
     312                 :                             // 1  <-- return this
     313                 :                             // 3  <-- idx
     314                 :                             // 5
     315          358871 :                             idx.saturating_sub(1)
     316                 :                         }
     317                 :                     }
     318                 :                 };
     319         1051946 :                 Either::Left(idx..node.num_children.into())
     320                 :             } else {
     321        35389707 :                 let idx = match node.binary_search(search_key, keybuf.as_mut_slice()) {
     322           35041 :                     Ok(idx) => {
     323           35041 :                         // Exact match. That's the first entry to return, and walk
     324           35041 :                         // backwards from there.
     325           35041 :                         idx
     326                 :                     }
     327        35354666 :                     Err(idx) => {
     328                 :                         // No exact match. The binary search returned the index of the
     329                 :                         // first key that's > search_key. Back off by one, and walk
     330                 :                         // backwards from there.
     331        35354666 :                         if let Some(idx) = idx.checked_sub(1) {
     332        24131718 :                             idx
     333                 :                         } else {
     334        11222948 :                             return Ok(false);
     335                 :                         }
     336                 :                     }
     337                 :                 };
     338        24166759 :                 Either::Right((0..=idx).rev())
     339                 :             };
     340                 : 
     341                 :             // idx points to the first match now. Keep going from there
     342       184535371 :             while let Some(idx) = iter.next() {
     343       184101144 :                 let key_off = idx * suffix_len;
     344       184101144 :                 let suffix = &node.keys[key_off..key_off + suffix_len];
     345       184101144 :                 keybuf[prefix_len..].copy_from_slice(suffix);
     346       184101144 :                 let value = node.value(idx);
     347       184101144 :                 #[allow(clippy::collapsible_if)]
     348       184101144 :                 if node.level == 0 {
     349                 :                     // leaf
     350       171719646 :                     if !visitor(&keybuf, value.to_u64()) {
     351        12727130 :                         return Ok(false);
     352       158992516 :                     }
     353                 :                 } else {
     354        12381498 :                     stack.push((node_blknum, Some(iter)));
     355        12381498 :                     stack.push((value.to_blknum(), None));
     356        12381498 :                     break;
     357                 :                 }
     358                 :             }
     359                 :         }
     360          110077 :         Ok(true)
     361        24060155 :     }
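
The TODO at the top notes that an Iterator interface would be more convenient than 'visit'; until then, callers drive scans with a closure and stop by returning false. A sketch of a bounded forward range scan built this way (range_scan is a hypothetical helper written as if it lived in this module, not part of the measured source):

    /// Collect all (key, value) pairs with start <= key < end.
    async fn range_scan<R: BlockReader, const L: usize>(
        reader: &DiskBtreeReader<R, L>,
        start: &[u8; L],
        end: &[u8; L],
        ctx: &RequestContext,
    ) -> Result<Vec<(Vec<u8>, u64)>> {
        let mut out = Vec::new();
        reader
            .visit(
                start,
                VisitDirection::Forwards,
                |key, value| {
                    if key >= &end[..] {
                        return false; // stop once we reach the end bound
                    }
                    out.push((key.to_vec(), value));
                    true
                },
                ctx,
            )
            .await?;
        Ok(out)
    }
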
     362                 : 
     363                 :     #[allow(dead_code)]
     364               5 :     pub async fn dump(&self) -> Result<()> {
     365               5 :         let mut stack = Vec::new();
     366               5 :         let ctx = RequestContext::new(TaskKind::DebugTool, DownloadBehavior::Error);
     367               5 : 
     368               5 :         stack.push((self.root_blk, String::new(), 0, 0, 0));
     369               5 : 
     370               5 :         let block_cursor = self.reader.block_cursor();
     371                 : 
     372            3021 :         while let Some((blknum, path, depth, child_idx, key_off)) = stack.pop() {
     373            3016 :             let blk = block_cursor.read_blk(self.start_blk + blknum, &ctx).await?;
     374            3016 :             let buf: &[u8] = blk.as_ref();
     375            3016 :             let node = OnDiskNode::<L>::deparse(buf)?;
     376                 : 
     377            3016 :             if child_idx == 0 {
     378               9 :                 print!("{:indent$}", "", indent = depth * 2);
     379               9 :                 let path_prefix = stack
     380               9 :                     .iter()
     381               9 :                     .map(|(_blknum, path, ..)| path.as_str())
     382               9 :                     .collect::<String>();
     383               9 :                 println!(
     384               9 :                     "blk #{blknum}: path {path_prefix}{path}: prefix {}, suffix_len {}",
     385               9 :                     hex::encode(node.prefix),
     386               9 :                     node.suffix_len
     387               9 :                 );
     388            3007 :             }
     389                 : 
     390            3016 :             if child_idx + 1 < node.num_children {
     391            3007 :                 let key_off = key_off + node.suffix_len as usize;
     392            3007 :                 stack.push((blknum, path.clone(), depth, child_idx + 1, key_off));
     393            3007 :             }
     394            3016 :             let key = &node.keys[key_off..key_off + node.suffix_len as usize];
     395            3016 :             let val = node.value(child_idx as usize);
     396            3016 : 
     397            3016 :             print!("{:indent$}", "", indent = depth * 2 + 2);
     398            3016 :             println!("{}: {}", hex::encode(key), hex::encode(val.0));
     399            3016 : 
     400            3016 :             if node.level > 0 {
     401               4 :                 stack.push((val.to_blknum(), hex::encode(node.prefix), depth + 1, 0, 0));
     402            3012 :             }
     403                 :         }
     404               5 :         Ok(())
     405               5 :     }
     406                 : }
     407                 : 
     408                 : ///
     409                 : /// Public builder object, for creating a new tree.
     410                 : ///
     411                 : /// Usage: Create a builder object by calling 'new', load all the data into the
     412                 : /// tree by calling 'append' for each key-value pair, and then call 'finish'
     413                 : ///
     414                 : /// 'L' is the key length in bytes
     415                 : pub struct DiskBtreeBuilder<W, const L: usize>
     416                 : where
     417                 :     W: BlockWriter,
     418                 : {
     419                 :     writer: W,
     420                 : 
     421                 :     ///
     422                 :     /// `stack[0]` is the current root page, `stack.last()` is the leaf.
     423                 :     ///
      424                 :     /// We maintain the invariant that the stack always contains at least one node.
      425                 :     /// There are two exceptions:
      426                 :     /// 1. `Self::flush_node`. The method pushes a new node if it popped the last one,
      427                 :     ///    so other methods never observe the intermediate state and the invariant holds.
      428                 :     /// 2. `Self::finish`. It consumes `self` and does not return it,
      429                 :     ///    which means that this is where the structure is destroyed;
      430                 :     ///    a zero-length stack thus cannot be observed by other methods.
     431                 :     stack: Vec<BuildNode<L>>,
     432                 : 
     433                 :     /// Last key that was appended to the tree. Used to sanity check that append
     434                 :     /// is called in increasing key order.
     435                 :     last_key: Option<[u8; L]>,
     436                 : }
     437                 : 
     438                 : impl<W, const L: usize> DiskBtreeBuilder<W, L>
     439                 : where
     440                 :     W: BlockWriter,
     441                 : {
     442           19139 :     pub fn new(writer: W) -> Self {
     443           19139 :         DiskBtreeBuilder {
     444           19139 :             writer,
     445           19139 :             last_key: None,
     446           19139 :             stack: vec![BuildNode::new(0)],
     447           19139 :         }
     448           19139 :     }
     449                 : 
     450        83054809 :     pub fn append(&mut self, key: &[u8; L], value: u64) -> Result<()> {
     451        83054809 :         if value > MAX_VALUE {
     452 UBC           0 :             return Err(DiskBtreeError::AppendOverflow(value));
     453 CBC    83054809 :         }
     454        83054809 :         if let Some(last_key) = &self.last_key {
     455        83035670 :             if key <= last_key {
     456               1 :                 return Err(DiskBtreeError::UnsortedInput {
     457               1 :                     key: key.as_slice().into(),
     458               1 :                     last_key: last_key.as_slice().into(),
     459               1 :                 });
     460        83035669 :             }
     461           19139 :         }
     462        83054808 :         self.last_key = Some(*key);
     463        83054808 : 
     464        83054808 :         self.append_internal(key, Value::from_u64(value))
     465        83054809 :     }
     466                 : 
     467        83216796 :     fn append_internal(&mut self, key: &[u8; L], value: Value) -> Result<()> {
     468        83216796 :         // Try to append to the current leaf buffer
     469        83216796 :         let last = self
     470        83216796 :             .stack
     471        83216796 :             .last_mut()
     472        83216796 :             .expect("should always have at least one item");
     473        83216796 :         let level = last.level;
     474        83216796 :         if last.push(key, value) {
     475        82908371 :             return Ok(());
     476          308425 :         }
     477          308425 : 
     478          308425 :         // It did not fit. Try to compress, and if it succeeds to make
     479          308425 :         // some room on the node, try appending to it again.
     480          308425 :         #[allow(clippy::collapsible_if)]
     481          308425 :         if last.compress() {
     482          155609 :             if last.push(key, value) {
     483          155542 :                 return Ok(());
     484              67 :             }
     485          152816 :         }
     486                 : 
     487                 :         // Could not append to the current leaf. Flush it and create a new one.
     488          152883 :         self.flush_node()?;
     489                 : 
     490                 :         // Replace the node we flushed with an empty one and append the new
     491                 :         // key to it.
     492          152883 :         let mut last = BuildNode::new(level);
     493          152883 :         if !last.push(key, value) {
     494 UBC           0 :             return Err(DiskBtreeError::FailedToPushToNewLeafNode);
     495 CBC      152883 :         }
     496          152883 : 
     497          152883 :         self.stack.push(last);
     498          152883 : 
     499          152883 :         Ok(())
     500        83216796 :     }
     501                 : 
     502                 :     /// Flush the bottommost node in the stack to disk. Appends a downlink to its parent,
     503                 :     /// and recursively flushes the parent too, if it becomes full. If the root page becomes full,
     504                 :     /// creates a new root page, increasing the height of the tree.
     505          161988 :     fn flush_node(&mut self) -> Result<()> {
     506          161988 :         // Get the current bottommost node in the stack and flush it to disk.
     507          161988 :         let last = self
     508          161988 :             .stack
     509          161988 :             .pop()
     510          161988 :             .expect("should always have at least one item");
     511          161988 :         let buf = last.pack();
     512          161988 :         let downlink_key = last.first_key();
     513          161988 :         let downlink_ptr = self.writer.write_blk(buf)?;
     514                 : 
     515                 :         // Append the downlink to the parent. If there is no parent, ie. this was the root page,
     516                 :         // create a new root page, increasing the height of the tree.
     517          161988 :         if self.stack.is_empty() {
     518            9114 :             self.stack.push(BuildNode::new(last.level + 1));
     519          152874 :         }
     520          161988 :         self.append_internal(&downlink_key, Value::from_blknum(downlink_ptr))
     521          161988 :     }
     522                 : 
     523                 :     ///
     524                 :     /// Flushes everything to disk, and returns the block number of the root page.
     525                 :     /// The caller must store the root block number "out-of-band", and pass it
      526                 :     /// to DiskBtreeReader::new() when the tree is opened for reading again.
     527                 :     /// (In the image and delta layers, it is stored in the beginning of the file,
     528                 :     /// in the summary header)
     529                 :     ///
     530           19128 :     pub fn finish(mut self) -> Result<(u32, W)> {
     531                 :         // flush all levels, except the root.
     532           28233 :         while self.stack.len() > 1 {
     533            9105 :             self.flush_node()?;
     534                 :         }
     535                 : 
     536           19128 :         let root = self
     537           19128 :             .stack
     538           19128 :             .first()
     539           19128 :             .expect("by the check above we left one item there");
     540           19128 :         let buf = root.pack();
     541           19128 :         let root_blknum = self.writer.write_blk(buf)?;
     542                 : 
     543           19128 :         Ok((root_blknum, self.writer))
     544           19128 :     }
     545                 : 
     546         1870701 :     pub fn borrow_writer(&self) -> &W {
     547         1870701 :         &self.writer
     548         1870701 :     }
     549                 : }
     550                 : 
     551                 : ///
      552                 : /// BuildNode represents an incomplete page that we are appending to.
     553                 : ///
     554 UBC           0 : #[derive(Clone, Debug)]
     555                 : struct BuildNode<const L: usize> {
     556                 :     num_children: u16,
     557                 :     level: u8,
     558                 :     prefix: Vec<u8>,
     559                 :     suffix_len: usize,
     560                 : 
     561                 :     keys: Vec<u8>,
     562                 :     values: Vec<u8>,
     563                 : 
     564                 :     size: usize, // physical size of this node, if it was written to disk like this
     565                 : }
     566                 : 
     567                 : const NODE_SIZE: usize = PAGE_SZ;
     568                 : 
     569                 : const NODE_HDR_SIZE: usize = 2 + 1 + 1 + 1;
     570                 : 
     571                 : impl<const L: usize> BuildNode<L> {
     572 CBC      181136 :     fn new(level: u8) -> Self {
     573          181136 :         BuildNode {
     574          181136 :             num_children: 0,
     575          181136 :             level,
     576          181136 :             prefix: Vec::new(),
     577          181136 :             suffix_len: 0,
     578          181136 :             keys: Vec::new(),
     579          181136 :             values: Vec::new(),
     580          181136 :             size: NODE_HDR_SIZE,
     581          181136 :         }
     582          181136 :     }
     583                 : 
     584                 :     /// Try to append a key-value pair to this node. Returns 'true' on
     585                 :     /// success, 'false' if the page was full or the key was
     586                 :     /// incompatible with the prefix of the existing keys.
     587        83525292 :     fn push(&mut self, key: &[u8; L], value: Value) -> bool {
     588        83525292 :         // If we have already performed prefix-compression on the page,
     589        83525292 :         // check that the incoming key has the same prefix.
     590        83525292 :         if self.num_children > 0 {
     591                 :             // does the prefix allow it?
     592        83344156 :             if !key.starts_with(&self.prefix) {
     593           43753 :                 return false;
     594        83300403 :             }
     595          181136 :         } else {
     596          181136 :             self.suffix_len = key.len();
     597          181136 :         }
     598                 : 
     599                 :         // Is the node too full?
     600        83481539 :         if self.size + self.suffix_len + VALUE_SZ >= NODE_SIZE {
     601          264739 :             return false;
     602        83216800 :         }
     603        83216800 : 
     604        83216800 :         // All clear
     605        83216800 :         self.num_children += 1;
     606        83216800 :         self.keys.extend(&key[self.prefix.len()..]);
     607        83216800 :         self.values.extend(value.0);
     608        83216800 : 
     609        83216800 :         assert!(self.keys.len() == self.num_children as usize * self.suffix_len);
     610        83216800 :         assert!(self.values.len() == self.num_children as usize * VALUE_SZ);
     611                 : 
     612        83216800 :         self.size += self.suffix_len + VALUE_SZ;
     613        83216800 : 
     614        83216800 :         true
     615        83525292 :     }
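
To make the size check concrete, here is the fan-out it implies, as a standalone arithmetic sketch (assuming no prefix compression, so the whole key is the suffix): a key is accepted while size + suffix_len + VALUE_SZ stays strictly below NODE_SIZE. The constants repeat the module's values so the snippet is self-contained.

    const PAGE_SZ: usize = 8192;
    const NODE_HDR_SIZE: usize = 5;
    const VALUE_SZ: usize = 5;

    fn max_entries(key_len: usize) -> usize {
        // largest n with NODE_HDR_SIZE + n * (key_len + VALUE_SZ) < PAGE_SZ
        (PAGE_SZ - 1 - NODE_HDR_SIZE) / (key_len + VALUE_SZ)
    }

    fn main() {
        assert_eq!(max_entries(26), 264); // 26-byte keys: 5 + 264 * 31 = 8189 < 8192
        assert_eq!(max_entries(8), 629);  // 8-byte keys:  5 + 629 * 13 = 8182 < 8192
    }
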
     616                 : 
     617                 :     ///
     618                 :     /// Perform prefix-compression.
     619                 :     ///
     620                 :     /// Returns 'true' on success, 'false' if no compression was possible.
     621                 :     ///
     622          308425 :     fn compress(&mut self) -> bool {
     623          308425 :         let first_suffix = self.first_suffix();
     624          308425 :         let last_suffix = self.last_suffix();
     625          308425 : 
     626          308425 :         // Find the common prefix among all keys
     627          308425 :         let mut prefix_len = 0;
     628         3105630 :         while prefix_len < self.suffix_len {
     629         3105630 :             if first_suffix[prefix_len] != last_suffix[prefix_len] {
     630          308425 :                 break;
     631         2797205 :             }
     632         2797205 :             prefix_len += 1;
     633                 :         }
     634          308425 :         if prefix_len == 0 {
     635          152816 :             return false;
     636          155609 :         }
     637          155609 : 
     638          155609 :         // Can compress. Rewrite the keys without the common prefix.
     639          155609 :         self.prefix.extend(&self.keys[..prefix_len]);
     640          155609 : 
     641          155609 :         let mut new_keys = Vec::new();
     642          155609 :         let mut key_off = 0;
     643        41265414 :         while key_off < self.keys.len() {
     644        41109805 :             let next_key_off = key_off + self.suffix_len;
     645        41109805 :             new_keys.extend(&self.keys[key_off + prefix_len..next_key_off]);
     646        41109805 :             key_off = next_key_off;
     647        41109805 :         }
     648          155609 :         self.keys = new_keys;
     649          155609 :         self.suffix_len -= prefix_len;
     650          155609 : 
     651          155609 :         self.size -= prefix_len * self.num_children as usize;
     652          155609 :         self.size += prefix_len;
     653          155609 : 
     654          155609 :         assert!(self.keys.len() == self.num_children as usize * self.suffix_len);
     655          155609 :         assert!(self.values.len() == self.num_children as usize * VALUE_SZ);
     656                 : 
     657          155609 :         true
     658          308425 :     }
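
Two notes on the code above. First, because keys are appended in sorted order, the common prefix of the first and last suffix on the page is also the common prefix of every suffix in between, so comparing just those two is enough. Second, a standalone worked example of the rewrite, with hypothetical keys:

    fn main() {
        // Three sorted 6-byte keys; the first and last share the 4-byte prefix "xaba",
        // and therefore so does the middle one.
        let keys: [&[u8; 6]; 3] = [b"xabaaa", b"xababa", b"xabadb"];

        let prefix_len = keys[0]
            .iter()
            .zip(keys[2].iter())
            .take_while(|(a, b)| a == b)
            .count();
        assert_eq!(prefix_len, 4);

        // After compression only the 2-byte suffixes remain in the keys array, and
        // the node's size shrinks by prefix_len * (num_children - 1) bytes.
        let suffixes: Vec<&[u8]> = keys.iter().map(|k| &k[prefix_len..]).collect();
        let expected: [&[u8]; 3] = [b"aa", b"ba", b"db"];
        assert_eq!(suffixes, expected);
    }
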
     659                 : 
     660                 :     ///
     661                 :     /// Serialize the node to on-disk format.
     662                 :     ///
     663          181116 :     fn pack(&self) -> Bytes {
     664          181116 :         assert!(self.keys.len() == self.num_children as usize * self.suffix_len);
     665          181116 :         assert!(self.values.len() == self.num_children as usize * VALUE_SZ);
     666          181116 :         assert!(self.num_children > 0);
     667                 : 
     668          181116 :         let mut buf = BytesMut::new();
     669          181116 : 
     670          181116 :         buf.put_u16(self.num_children);
     671          181116 :         buf.put_u8(self.level);
     672          181116 :         buf.put_u8(self.prefix.len() as u8);
     673          181116 :         buf.put_u8(self.suffix_len as u8);
     674          181116 :         buf.put(&self.prefix[..]);
     675          181116 :         buf.put(&self.keys[..]);
     676          181116 :         buf.put(&self.values[..]);
     677          181116 : 
     678          181116 :         assert!(buf.len() == self.size);
     679                 : 
     680          181116 :         assert!(buf.len() <= PAGE_SZ);
     681          181116 :         buf.resize(PAGE_SZ, 0);
     682          181116 :         buf.freeze()
     683          181116 :     }
     684                 : 
     685          470413 :     fn first_suffix(&self) -> &[u8] {
     686          470413 :         &self.keys[..self.suffix_len]
     687          470413 :     }
     688          308425 :     fn last_suffix(&self) -> &[u8] {
     689          308425 :         &self.keys[self.keys.len() - self.suffix_len..]
     690          308425 :     }
     691                 : 
     692                 :     /// Return the full first key of the page, including the prefix
     693          161988 :     fn first_key(&self) -> [u8; L] {
     694          161988 :         let mut key = [0u8; L];
     695          161988 :         key[..self.prefix.len()].copy_from_slice(&self.prefix);
     696          161988 :         key[self.prefix.len()..].copy_from_slice(self.first_suffix());
     697          161988 :         key
     698          161988 :     }
     699                 : }
     700                 : 
     701                 : #[cfg(test)]
     702                 : pub(crate) mod tests {
     703                 :     use super::*;
     704                 :     use crate::context::DownloadBehavior;
     705                 :     use crate::task_mgr::TaskKind;
     706                 :     use crate::tenant::block_io::{BlockCursor, BlockLease, BlockReaderRef};
     707                 :     use rand::Rng;
     708                 :     use std::collections::BTreeMap;
     709                 :     use std::sync::atomic::{AtomicUsize, Ordering};
     710                 : 
     711               5 :     #[derive(Clone, Default)]
     712                 :     pub(crate) struct TestDisk {
     713                 :         blocks: Vec<Bytes>,
     714                 :     }
     715                 :     impl TestDisk {
     716               5 :         fn new() -> Self {
     717               5 :             Self::default()
     718               5 :         }
     719          508133 :         pub(crate) fn read_blk(&self, blknum: u32) -> io::Result<BlockLease> {
     720          508133 :             let mut buf = [0u8; PAGE_SZ];
     721          508133 :             buf.copy_from_slice(&self.blocks[blknum as usize]);
     722          508133 :             Ok(std::sync::Arc::new(buf).into())
     723          508133 :         }
     724                 :     }
     725                 :     impl BlockReader for TestDisk {
     726          205583 :         fn block_cursor(&self) -> BlockCursor<'_> {
     727          205583 :             BlockCursor::new(BlockReaderRef::TestDisk(self))
     728          205583 :         }
     729                 :     }
     730                 :     impl BlockWriter for &mut TestDisk {
     731             107 :         fn write_blk(&mut self, buf: Bytes) -> io::Result<u32> {
     732             107 :             let blknum = self.blocks.len();
     733             107 :             self.blocks.push(buf);
     734             107 :             Ok(blknum as u32)
     735             107 :         }
     736                 :     }
     737                 : 
     738               1 :     #[tokio::test]
     739               1 :     async fn basic() -> Result<()> {
     740               1 :         let mut disk = TestDisk::new();
     741               1 :         let mut writer = DiskBtreeBuilder::<_, 6>::new(&mut disk);
     742               1 : 
     743               1 :         let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
     744               1 : 
     745               1 :         let all_keys: Vec<&[u8; 6]> = vec![
     746               1 :             b"xaaaaa", b"xaaaba", b"xaaaca", b"xabaaa", b"xababa", b"xabaca", b"xabada", b"xabadb",
     747               1 :         ];
     748               1 :         let all_data: Vec<(&[u8; 6], u64)> = all_keys
     749               1 :             .iter()
     750               1 :             .enumerate()
     751               8 :             .map(|(idx, key)| (*key, idx as u64))
     752               1 :             .collect();
     753               8 :         for (key, val) in all_data.iter() {
     754               8 :             writer.append(key, *val)?;
     755                 :         }
     756                 : 
     757               1 :         let (root_offset, _writer) = writer.finish()?;
     758                 : 
     759               1 :         let reader = DiskBtreeReader::new(0, root_offset, disk);
     760               1 : 
     761               1 :         reader.dump().await?;
     762                 : 
     763                 :         // Test the `get` function on all the keys.
     764               8 :         for (key, val) in all_data.iter() {
     765               8 :             assert_eq!(reader.get(key, &ctx).await?, Some(*val));
     766                 :         }
     767                 :         // And on some keys that don't exist
     768               1 :         assert_eq!(reader.get(b"aaaaaa", &ctx).await?, None);
     769               1 :         assert_eq!(reader.get(b"zzzzzz", &ctx).await?, None);
     770               1 :         assert_eq!(reader.get(b"xaaabx", &ctx).await?, None);
     771                 : 
     772                 :         // Test search with `visit` function
     773               1 :         let search_key = b"xabaaa";
     774               1 :         let expected: Vec<(Vec<u8>, u64)> = all_data
     775               1 :             .iter()
     776               8 :             .filter(|(key, _value)| key[..] >= search_key[..])
     777               5 :             .map(|(key, value)| (key.to_vec(), *value))
     778               1 :             .collect();
     779               1 : 
     780               1 :         let mut data = Vec::new();
     781               1 :         reader
     782               1 :             .visit(
     783               1 :                 search_key,
     784               1 :                 VisitDirection::Forwards,
     785               5 :                 |key, value| {
     786               5 :                     data.push((key.to_vec(), value));
     787               5 :                     true
     788               5 :                 },
     789               1 :                 &ctx,
     790               1 :             )
     791 UBC           0 :             .await?;
     792 CBC           1 :         assert_eq!(data, expected);
     793                 : 
     794                 :         // Test a backwards scan
     795               1 :         let mut expected: Vec<(Vec<u8>, u64)> = all_data
     796               1 :             .iter()
     797               8 :             .filter(|(key, _value)| key[..] <= search_key[..])
     798               4 :             .map(|(key, value)| (key.to_vec(), *value))
     799               1 :             .collect();
     800               1 :         expected.reverse();
     801               1 :         let mut data = Vec::new();
     802               1 :         reader
     803               1 :             .visit(
     804               1 :                 search_key,
     805               1 :                 VisitDirection::Backwards,
     806               4 :                 |key, value| {
     807               4 :                     data.push((key.to_vec(), value));
     808               4 :                     true
     809               4 :                 },
     810               1 :                 &ctx,
     811               1 :             )
     812 UBC           0 :             .await?;
     813 CBC           1 :         assert_eq!(data, expected);
     814                 : 
     815                 :         // Backward scan where nothing matches
     816               1 :         reader
     817               1 :             .visit(
     818               1 :                 b"aaaaaa",
     819               1 :                 VisitDirection::Backwards,
     820               1 :                 |key, value| {
     821 UBC           0 :                     panic!("found unexpected key {}: {}", hex::encode(key), value);
     822 CBC           1 :                 },
     823               1 :                 &ctx,
     824               1 :             )
     825 UBC           0 :             .await?;
     826                 : 
     827                 :         // Full scan
     828 CBC           1 :         let expected: Vec<(Vec<u8>, u64)> = all_data
     829               1 :             .iter()
     830               8 :             .map(|(key, value)| (key.to_vec(), *value))
     831               1 :             .collect();
     832               1 :         let mut data = Vec::new();
     833               1 :         reader
     834               1 :             .visit(
     835               1 :                 &[0u8; 6],
     836               1 :                 VisitDirection::Forwards,
     837               8 :                 |key, value| {
     838               8 :                     data.push((key.to_vec(), value));
     839               8 :                     true
     840               8 :                 },
     841               1 :                 &ctx,
     842               1 :             )
     843 UBC           0 :             .await?;
     844 CBC           1 :         assert_eq!(data, expected);
     845                 : 
     846               1 :         Ok(())
     847                 :     }
     848                 : 
     849               1 :     #[tokio::test]
     850               1 :     async fn lots_of_keys() -> Result<()> {
     851               1 :         let mut disk = TestDisk::new();
     852               1 :         let mut writer = DiskBtreeBuilder::<_, 8>::new(&mut disk);
     853               1 :         let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
     854               1 : 
     855               1 :         const NUM_KEYS: u64 = 1000;
     856               1 : 
     857               1 :         let mut all_data: BTreeMap<u64, u64> = BTreeMap::new();
     858                 : 
     859            1001 :         for idx in 0..NUM_KEYS {
     860            1000 :             let key_int: u64 = 1 + idx * 2;
     861            1000 :             let key = u64::to_be_bytes(key_int);
     862            1000 :             writer.append(&key, idx)?;
     863                 : 
     864            1000 :             all_data.insert(key_int, idx);
     865                 :         }
     866                 : 
     867               1 :         let (root_offset, _writer) = writer.finish()?;
     868                 : 
     869               1 :         let reader = DiskBtreeReader::new(0, root_offset, disk);
     870               1 : 
     871               1 :         reader.dump().await?;
     872                 : 
     873                 :         use std::sync::Mutex;
     874                 : 
     875               1 :         let result = Mutex::new(Vec::new());
     876               1 :         let limit: AtomicUsize = AtomicUsize::new(10);
     877           41910 :         let take_ten = |key: &[u8], value: u64| {
     878           41910 :             let mut keybuf = [0u8; 8];
     879           41910 :             keybuf.copy_from_slice(key);
     880           41910 :             let key_int = u64::from_be_bytes(keybuf);
     881           41910 : 
     882           41910 :             let mut result = result.lock().unwrap();
     883           41910 :             result.push((key_int, value));
     884           41910 : 
     885           41910 :             // keep going until we have 10 matches
     886           41910 :             result.len() < limit.load(Ordering::Relaxed)
     887           41910 :         };
     888                 : 
     889            2010 :         for search_key_int in 0..(NUM_KEYS * 2 + 10) {
     890            2010 :             let search_key = u64::to_be_bytes(search_key_int);
     891            2010 :             assert_eq!(
     892            2010 :                 reader.get(&search_key, &ctx).await?,
     893            2010 :                 all_data.get(&search_key_int).cloned()
     894                 :             );
     895                 : 
     896                 :             // Test a forward scan starting with this key
     897            2010 :             result.lock().unwrap().clear();
     898            2010 :             reader
     899            2010 :                 .visit(&search_key, VisitDirection::Forwards, take_ten, &ctx)
     900 UBC           0 :                 .await?;
     901 CBC        2010 :             let expected = all_data
     902            2010 :                 .range(search_key_int..)
     903            2010 :                 .take(10)
     904           19910 :                 .map(|(&key, &val)| (key, val))
     905            2010 :                 .collect::<Vec<(u64, u64)>>();
     906            2010 :             assert_eq!(*result.lock().unwrap(), expected);
     907                 : 
     908                 :             // And a backwards scan
     909            2010 :             result.lock().unwrap().clear();
     910            2010 :             reader
     911            2010 :                 .visit(&search_key, VisitDirection::Backwards, take_ten, &ctx)
     912 UBC           0 :                 .await?;
     913 CBC        2010 :             let expected = all_data
     914            2010 :                 .range(..=search_key_int)
     915            2010 :                 .rev()
     916            2010 :                 .take(10)
     917           20000 :                 .map(|(&key, &val)| (key, val))
     918            2010 :                 .collect::<Vec<(u64, u64)>>();
     919            2010 :             assert_eq!(*result.lock().unwrap(), expected);
     920                 :         }
     921                 : 
     922                 :         // full scan
     923               1 :         let search_key = u64::to_be_bytes(0);
     924               1 :         limit.store(usize::MAX, Ordering::Relaxed);
     925               1 :         result.lock().unwrap().clear();
     926               1 :         reader
     927               1 :             .visit(&search_key, VisitDirection::Forwards, take_ten, &ctx)
     928 UBC           0 :             .await?;
     929 CBC           1 :         let expected = all_data
     930               1 :             .iter()
     931            1000 :             .map(|(&key, &val)| (key, val))
     932               1 :             .collect::<Vec<(u64, u64)>>();
     933               1 :         assert_eq!(*result.lock().unwrap(), expected);
     934                 : 
     935                 :         // full scan
     936               1 :         let search_key = u64::to_be_bytes(u64::MAX);
     937               1 :         limit.store(usize::MAX, Ordering::Relaxed);
     938               1 :         result.lock().unwrap().clear();
     939               1 :         reader
     940               1 :             .visit(&search_key, VisitDirection::Backwards, take_ten, &ctx)
     941 UBC           0 :             .await?;
     942 CBC           1 :         let expected = all_data
     943               1 :             .iter()
     944               1 :             .rev()
     945            1000 :             .map(|(&key, &val)| (key, val))
     946               1 :             .collect::<Vec<(u64, u64)>>();
     947               1 :         assert_eq!(*result.lock().unwrap(), expected);
     948                 : 
     949               1 :         Ok(())
     950                 :     }
     951                 : 
     952               1 :     #[tokio::test]
     953               1 :     async fn random_data() -> Result<()> {
     954               1 :         let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
     955               1 : 
     956               1 :         // Generate random keys with exponential distribution, to
     957               1 :         // exercise the prefix compression
     958               1 :         const NUM_KEYS: usize = 100000;
     959               1 :         let mut all_data: BTreeMap<u128, u64> = BTreeMap::new();
     960          100001 :         for idx in 0..NUM_KEYS {
     961          100000 :             let u: f64 = rand::thread_rng().gen_range(0.0..1.0);
     962          100000 :             let t = -(f64::ln(u));
     963          100000 :             let key_int = (t * 1000000.0) as u128;
     964          100000 : 
     965          100000 :             all_data.insert(key_int, idx as u64);
     966          100000 :         }
     967                 : 
     968                 :         // Build a tree from it
     969               1 :         let mut disk = TestDisk::new();
     970               1 :         let mut writer = DiskBtreeBuilder::<_, 16>::new(&mut disk);
     971                 : 
     972           97530 :         for (&key, &val) in all_data.iter() {
     973           97530 :             writer.append(&u128::to_be_bytes(key), val)?;
     974                 :         }
     975               1 :         let (root_offset, _writer) = writer.finish()?;
     976                 : 
     977               1 :         let reader = DiskBtreeReader::new(0, root_offset, disk);
     978                 : 
     979                 :         // Test get() operation on all the keys
     980           97530 :         for (&key, &val) in all_data.iter() {
     981           97530 :             let search_key = u128::to_be_bytes(key);
     982           97530 :             assert_eq!(reader.get(&search_key, &ctx).await?, Some(val));
     983                 :         }
     984                 : 
     985                 :         // Test get() operations on random keys, most of which will not exist
     986          100001 :         for _ in 0..100000 {
     987          100000 :             let key_int = rand::thread_rng().gen::<u128>();
     988          100000 :             let search_key = u128::to_be_bytes(key_int);
     989          100000 :             assert!(reader.get(&search_key, &ctx).await? == all_data.get(&key_int).cloned());
     990                 :         }
     991                 : 
     992                 :         // Test boundary cases
     993               1 :         assert!(
     994               1 :             reader.get(&u128::to_be_bytes(u128::MIN), &ctx).await?
     995               1 :                 == all_data.get(&u128::MIN).cloned()
     996                 :         );
     997               1 :         assert!(
     998               1 :             reader.get(&u128::to_be_bytes(u128::MAX), &ctx).await?
     999               1 :                 == all_data.get(&u128::MAX).cloned()
    1000                 :         );
    1001                 : 
    1002               1 :         Ok(())
    1003                 :     }
    1004                 : 
    1005               1 :     #[test]
    1006               1 :     fn unsorted_input() {
    1007               1 :         let mut disk = TestDisk::new();
    1008               1 :         let mut writer = DiskBtreeBuilder::<_, 2>::new(&mut disk);
    1009               1 : 
    1010               1 :         let _ = writer.append(b"ba", 1);
    1011               1 :         let _ = writer.append(b"bb", 2);
    1012               1 :         let err = writer.append(b"aa", 3).expect_err("should've failed");
    1013               1 :         match err {
    1014               1 :             DiskBtreeError::UnsortedInput { key, last_key } => {
    1015               1 :                 assert_eq!(key.as_ref(), b"aa".as_slice());
    1016               1 :                 assert_eq!(last_key.as_ref(), b"bb".as_slice());
    1017                 :             }
    1018 UBC           0 :             _ => panic!("unexpected error variant, expected DiskBtreeError::UnsortedInput"),
    1019                 :         }
    1020 CBC           1 :     }
    1021                 : 
    1022                 :     ///
    1023                 :     /// This test contains a particular data set, see disk_btree_test_data.rs
    1024                 :     ///
    1025               1 :     #[tokio::test]
    1026               1 :     async fn particular_data() -> Result<()> {
    1027               1 :         // Build a tree from it
    1028               1 :         let mut disk = TestDisk::new();
    1029               1 :         let mut writer = DiskBtreeBuilder::<_, 26>::new(&mut disk);
    1030               1 :         let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
    1031                 : 
    1032            2001 :         for (key, val) in disk_btree_test_data::TEST_DATA {
    1033            2000 :             writer.append(&key, val)?;
    1034                 :         }
    1035               1 :         let (root_offset, writer) = writer.finish()?;
    1036                 : 
    1037               1 :         println!("SIZE: {} blocks", writer.blocks.len());
    1038               1 : 
    1039               1 :         let reader = DiskBtreeReader::new(0, root_offset, disk);
    1040                 : 
    1041                 :         // Test get() operation on all the keys
    1042            2001 :         for (key, val) in disk_btree_test_data::TEST_DATA {
    1043            2000 :             assert_eq!(reader.get(&key, &ctx).await?, Some(val));
    1044                 :         }
    1045                 : 
    1046                 :         // Test full scan
    1047               1 :         let mut count = 0;
    1048               1 :         reader
    1049               1 :             .visit(
    1050               1 :                 &[0u8; 26],
    1051               1 :                 VisitDirection::Forwards,
    1052            2000 :                 |_key, _value| {
    1053            2000 :                     count += 1;
    1054            2000 :                     true
    1055            2000 :                 },
    1056               1 :                 &ctx,
    1057               1 :             )
    1058 UBC           0 :             .await?;
    1059 CBC           1 :         assert_eq!(count, disk_btree_test_data::TEST_DATA.len());
    1060                 : 
    1061               1 :         reader.dump().await?;
    1062                 : 
    1063               1 :         Ok(())
    1064                 :     }
    1065                 : }
    1066                 : 
    1067                 : #[cfg(test)]
    1068                 : #[path = "disk_btree_test_data.rs"]
    1069                 : mod disk_btree_test_data;
        

Generated by: LCOV version 2.1-beta