LCOV - code coverage report
Current view: top level - pageserver/src/tenant - blob_io.rs (source / functions)
Test: 8ff8efadb0253cf618c612650348666c0c564111.info
Test Date: 2024-11-20 17:53:50
Coverage:   Lines: 92.7 % (357 of 385)   Functions: 91.3 % (94 of 103)

            Line data    Source code
       1              : //!
       2              : //! Functions for reading and writing variable-sized "blobs".
       3              : //!
       4              : //! Each blob begins with a 1- or 4-byte length field, followed by the
       5              : //! actual data. If the length is less than 128, it is written as a
       6              : //! single byte. If it is 128 or larger, the length is written as a
       7              : //! four-byte big-endian integer with the high bit set. This way, we
       8              : //! can detect whether it is a 1- or 4-byte header by peeking at the
       9              : //! first byte. For blobs with a 4-byte header, the next three bits
      10              : //! are reserved; only one of the eight possible bit patterns is
      11              : //! currently in use (0b001, see BYTE_ZSTD below), and it signifies
      12              : //! compression with zstd.
      13              : //!
      14              : //! len <  128: 0XXXXXXX
      15              : //! len >= 128: 1CCCXXXX XXXXXXXX XXXXXXXX XXXXXXXX
      16              : //!
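(Editorial aside, not part of the covered source: a minimal standalone sketch of the header scheme described above. The 0x80, 0x90, and 0x0fff_ffff literals mirror BYTE_UNCOMPRESSED, BYTE_ZSTD, and MAX_SUPPORTED_BLOB_LEN defined further down in this file.)

/// Encode `len` into a length header; `zstd` marks the payload as zstd-compressed.
fn encode_len_header(len: usize, zstd: bool) -> Vec<u8> {
    if len < 128 {
        // Short blobs always get the 1-byte header (and are never compressed).
        vec![len as u8]
    } else {
        assert!(len <= 0x0fff_ffff);
        let mut buf = (len as u32).to_be_bytes();
        // Set the high bit, plus the zstd pattern in the reserved bits if requested.
        buf[0] |= if zstd { 0x90 } else { 0x80 };
        buf.to_vec()
    }
}

/// Decode a length header from `buf`, returning (blob_len, is_zstd, header_len).
fn decode_len_header(buf: &[u8]) -> (usize, bool, usize) {
    if buf[0] < 0x80 {
        // 1-byte header: 0XXXXXXX
        (buf[0] as usize, false, 1)
    } else {
        // 4-byte header: 1CCCXXXX XXXXXXXX XXXXXXXX XXXXXXXX (big-endian)
        let is_zstd = (buf[0] & 0xf0) == 0x90;
        let len = u32::from_be_bytes([buf[0] & 0x0f, buf[1], buf[2], buf[3]]);
        (len as usize, is_zstd, 4)
    }
}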
      17              : use async_compression::Level;
      18              : use bytes::{BufMut, BytesMut};
      19              : use pageserver_api::models::ImageCompressionAlgorithm;
      20              : use tokio::io::AsyncWriteExt;
      21              : use tokio_epoll_uring::{BoundedBuf, IoBuf, Slice};
      22              : use tracing::warn;
      23              : 
      24              : use crate::context::RequestContext;
      25              : use crate::page_cache::PAGE_SZ;
      26              : use crate::tenant::block_io::BlockCursor;
      27              : use crate::virtual_file::owned_buffers_io::io_buf_ext::{FullSlice, IoBufExt};
      28              : use crate::virtual_file::VirtualFile;
      29              : use std::cmp::min;
      30              : use std::io::{Error, ErrorKind};
      31              : 
      32              : #[derive(Copy, Clone, Debug)]
      33              : pub struct CompressionInfo {
      34              :     pub written_compressed: bool,
      35              :     pub compressed_size: Option<usize>,
      36              : }
      37              : 
      38              : impl<'a> BlockCursor<'a> {
      39              :     /// Read a blob into a new buffer.
      40         8296 :     pub async fn read_blob(
      41         8296 :         &self,
      42         8296 :         offset: u64,
      43         8296 :         ctx: &RequestContext,
      44         8296 :     ) -> Result<Vec<u8>, std::io::Error> {
      45         8296 :         let mut buf = Vec::new();
      46        10621 :         self.read_blob_into_buf(offset, &mut buf, ctx).await?;
      47         8296 :         Ok(buf)
      48         8296 :     }
      49              :     /// Read blob into the given buffer. Any previous contents in the buffer
      50              :     /// are overwritten.
      51         8360 :     pub async fn read_blob_into_buf(
      52         8360 :         &self,
      53         8360 :         offset: u64,
      54         8360 :         dstbuf: &mut Vec<u8>,
      55         8360 :         ctx: &RequestContext,
      56         8360 :     ) -> Result<(), std::io::Error> {
      57         8360 :         let mut blknum = (offset / PAGE_SZ as u64) as u32;
      58         8360 :         let mut off = (offset % PAGE_SZ as u64) as usize;
      59              : 
      60         8360 :         let mut buf = self.read_blk(blknum, ctx).await?;
      61              : 
      62              :         // peek at the first byte, to determine if it's a 1- or 4-byte length
      63         8360 :         let first_len_byte = buf[off];
      64         8360 :         let len: usize = if first_len_byte < 0x80 {
      65              :             // 1-byte length header
      66         2224 :             off += 1;
      67         2224 :             first_len_byte as usize
      68              :         } else {
      69              :             // 4-byte length header
      70         6136 :             let mut len_buf = [0u8; 4];
      71         6136 :             let thislen = PAGE_SZ - off;
      72         6136 :             if thislen < 4 {
      73              :                 // it is split across two pages
      74            0 :                 len_buf[..thislen].copy_from_slice(&buf[off..PAGE_SZ]);
      75            0 :                 blknum += 1;
      76            0 :                 buf = self.read_blk(blknum, ctx).await?;
      77            0 :                 len_buf[thislen..].copy_from_slice(&buf[0..4 - thislen]);
      78            0 :                 off = 4 - thislen;
      79         6136 :             } else {
      80         6136 :                 len_buf.copy_from_slice(&buf[off..off + 4]);
      81         6136 :                 off += 4;
      82         6136 :             }
      83         6136 :             let bit_mask = if self.read_compressed {
      84           20 :                 !LEN_COMPRESSION_BIT_MASK
      85              :             } else {
      86         6116 :                 0x7f
      87              :             };
      88         6136 :             len_buf[0] &= bit_mask;
      89         6136 :             u32::from_be_bytes(len_buf) as usize
      90              :         };
      91         8360 :         let compression_bits = first_len_byte & LEN_COMPRESSION_BIT_MASK;
      92         8360 : 
      93         8360 :         let mut tmp_buf = Vec::new();
      94              :         let buf_to_write;
      95         8360 :         let compression = if compression_bits <= BYTE_UNCOMPRESSED || !self.read_compressed {
      96         8356 :             if compression_bits > BYTE_UNCOMPRESSED {
      97            0 :                 warn!("reading key above future limit ({len} bytes)");
      98         8356 :             }
      99         8356 :             buf_to_write = dstbuf;
     100         8356 :             None
     101            4 :         } else if compression_bits == BYTE_ZSTD {
     102            4 :             buf_to_write = &mut tmp_buf;
     103            4 :             Some(dstbuf)
     104              :         } else {
     105            0 :             let error = std::io::Error::new(
     106            0 :                 std::io::ErrorKind::InvalidData,
     107            0 :                 format!("invalid compression byte {compression_bits:x}"),
     108            0 :             );
     109            0 :             return Err(error);
     110              :         };
     111              : 
     112         8360 :         buf_to_write.clear();
     113         8360 :         buf_to_write.reserve(len);
     114         8360 : 
     115         8360 :         // Read the payload
     116         8360 :         let mut remain = len;
     117        29436 :         while remain > 0 {
     118        21076 :             let mut page_remain = PAGE_SZ - off;
     119        21076 :             if page_remain == 0 {
     120              :                 // continue on next page
     121        12756 :                 blknum += 1;
     122        12756 :                 buf = self.read_blk(blknum, ctx).await?;
     123        12756 :                 off = 0;
     124        12756 :                 page_remain = PAGE_SZ;
     125         8320 :             }
     126        21076 :             let this_blk_len = min(remain, page_remain);
     127        21076 :             buf_to_write.extend_from_slice(&buf[off..off + this_blk_len]);
     128        21076 :             remain -= this_blk_len;
     129        21076 :             off += this_blk_len;
     130              :         }
     131              : 
     132         8360 :         if let Some(dstbuf) = compression {
     133            4 :             if compression_bits == BYTE_ZSTD {
     134            4 :                 let mut decoder = async_compression::tokio::write::ZstdDecoder::new(dstbuf);
     135            4 :                 decoder.write_all(buf_to_write).await?;
     136            4 :                 decoder.flush().await?;
     137              :             } else {
     138            0 :                 unreachable!("already checked above")
     139              :             }
     140         8356 :         }
     141              : 
     142         8360 :         Ok(())
     143         8360 :     }
     144              : }
     145              : 
     146              : /// Reserved bits for length and compression
     147              : pub(super) const LEN_COMPRESSION_BIT_MASK: u8 = 0xf0;
     148              : 
      149              : /// The maximum size of blobs we support. The highest four bits
      150              : /// are reserved for compression and other future uses.
     151              : pub(crate) const MAX_SUPPORTED_BLOB_LEN: usize = 0x0fff_ffff;
     152              : 
     153              : pub(super) const BYTE_UNCOMPRESSED: u8 = 0x80;
     154              : pub(super) const BYTE_ZSTD: u8 = BYTE_UNCOMPRESSED | 0x10;
     155              : 
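(Editorial aside, not part of the covered source: a quick illustration of how these constants compose the first byte of a 4-byte header.)

#[test]
fn header_byte_composition_sketch() {
    // A zstd-compressed blob of 0x0123_4567 bytes: the header's first byte is
    // 0x91 = 0b1001_0001 (high bit set, reserved bits 001 for zstd, top length nibble 1).
    let len: u32 = 0x0123_4567; // must not exceed MAX_SUPPORTED_BLOB_LEN
    let mut len_buf = len.to_be_bytes();
    assert_eq!(len_buf[0] & LEN_COMPRESSION_BIT_MASK, 0); // top nibble is free for flags
    len_buf[0] |= BYTE_ZSTD;
    assert_eq!(len_buf, [0x91, 0x23, 0x45, 0x67]);
    assert_eq!(len_buf[0] & LEN_COMPRESSION_BIT_MASK, BYTE_ZSTD);
}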
     156              : /// A wrapper of `VirtualFile` that allows users to write blobs.
     157              : ///
     158              : /// If a `BlobWriter` is dropped, the internal buffer will be
     159              : /// discarded. You need to call [`flush_buffer`](Self::flush_buffer)
     160              : /// manually before dropping.
     161              : pub struct BlobWriter<const BUFFERED: bool> {
     162              :     inner: VirtualFile,
     163              :     offset: u64,
     164              :     /// A buffer to save on write calls, only used if BUFFERED=true
     165              :     buf: Vec<u8>,
      166              :     /// We do tiny writes for the length headers; they need to be in an owned buffer.
     167              :     io_buf: Option<BytesMut>,
     168              : }
     169              : 
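(Editorial aside, not part of the covered source: a hypothetical usage sketch with an invented helper name, mirroring the write-then-flush pattern exercised by the tests at the bottom of this file.)

async fn write_one_blob_sketch(
    file: VirtualFile,
    ctx: &RequestContext,
) -> Result<(VirtualFile, u64), Error> {
    // BUFFERED = true: writes go through the 64 KiB internal buffer.
    let mut wtr = BlobWriter::<true>::new(file, 0);
    let (_buf, res) = wtr.write_blob(b"hello".to_vec().slice_len(), ctx).await;
    let offset = res?; // later usable with BlockCursor::read_blob
    // Dropping `wtr` here would silently discard buffered bytes;
    // into_inner() flushes the buffer and hands the VirtualFile back.
    let file = wtr.into_inner(ctx).await?;
    Ok((file, offset))
}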
     170              : impl<const BUFFERED: bool> BlobWriter<BUFFERED> {
     171         1996 :     pub fn new(inner: VirtualFile, start_offset: u64) -> Self {
     172         1996 :         Self {
     173         1996 :             inner,
     174         1996 :             offset: start_offset,
     175         1996 :             buf: Vec::with_capacity(Self::CAPACITY),
     176         1996 :             io_buf: Some(BytesMut::new()),
     177         1996 :         }
     178         1996 :     }
     179              : 
     180      2049582 :     pub fn size(&self) -> u64 {
     181      2049582 :         self.offset
     182      2049582 :     }
     183              : 
     184              :     const CAPACITY: usize = if BUFFERED { 64 * 1024 } else { 0 };
     185              : 
     186              :     /// Writes the given buffer directly to the underlying `VirtualFile`.
     187              :     /// You need to make sure that the internal buffer is empty, otherwise
      188              :     /// data will be written in the wrong order.
     189              :     #[inline(always)]
     190      1101194 :     async fn write_all_unbuffered<Buf: IoBuf + Send>(
     191      1101194 :         &mut self,
     192      1101194 :         src_buf: FullSlice<Buf>,
     193      1101194 :         ctx: &RequestContext,
     194      1101194 :     ) -> (FullSlice<Buf>, Result<(), Error>) {
     195      1101194 :         let (src_buf, res) = self.inner.write_all(src_buf, ctx).await;
     196      1101194 :         let nbytes = match res {
     197      1101194 :             Ok(nbytes) => nbytes,
     198            0 :             Err(e) => return (src_buf, Err(e)),
     199              :         };
     200      1101194 :         self.offset += nbytes as u64;
     201      1101194 :         (src_buf, Ok(()))
     202      1101194 :     }
     203              : 
     204              :     #[inline(always)]
     205              :     /// Flushes the internal buffer to the underlying `VirtualFile`.
     206        12268 :     pub async fn flush_buffer(&mut self, ctx: &RequestContext) -> Result<(), Error> {
     207        12268 :         let buf = std::mem::take(&mut self.buf);
     208        12268 :         let (slice, res) = self.inner.write_all(buf.slice_len(), ctx).await;
     209        12268 :         res?;
     210        12268 :         let mut buf = slice.into_raw_slice().into_inner();
     211        12268 :         buf.clear();
     212        12268 :         self.buf = buf;
     213        12268 :         Ok(())
     214        12268 :     }
     215              : 
     216              :     #[inline(always)]
      217              :     /// Writes as much of `src_buf` into the internal buffer as fits
     218     12969664 :     fn write_into_buffer(&mut self, src_buf: &[u8]) -> usize {
     219     12969664 :         let remaining = Self::CAPACITY - self.buf.len();
     220     12969664 :         let to_copy = src_buf.len().min(remaining);
     221     12969664 :         self.buf.extend_from_slice(&src_buf[..to_copy]);
     222     12969664 :         self.offset += to_copy as u64;
     223     12969664 :         to_copy
     224     12969664 :     }
     225              : 
     226              :     /// Internal, possibly buffered, write function
     227     14060108 :     async fn write_all<Buf: IoBuf + Send>(
     228     14060108 :         &mut self,
     229     14060108 :         src_buf: FullSlice<Buf>,
     230     14060108 :         ctx: &RequestContext,
     231     14060108 :     ) -> (FullSlice<Buf>, Result<(), Error>) {
     232     14060108 :         let src_buf = src_buf.into_raw_slice();
     233     14060108 :         let src_buf_bounds = src_buf.bounds();
     234     14060108 :         let restore = move |src_buf_slice: Slice<_>| {
     235     12958914 :             FullSlice::must_new(Slice::from_buf_bounds(
     236     12958914 :                 src_buf_slice.into_inner(),
     237     12958914 :                 src_buf_bounds,
     238     12958914 :             ))
     239     12958914 :         };
     240              : 
     241     14060108 :         if !BUFFERED {
     242      1100964 :             assert!(self.buf.is_empty());
     243      1100964 :             return self
     244      1100964 :                 .write_all_unbuffered(FullSlice::must_new(src_buf), ctx)
     245       559158 :                 .await;
     246     12959144 :         }
     247     12959144 :         let remaining = Self::CAPACITY - self.buf.len();
     248     12959144 :         let src_buf_len = src_buf.bytes_init();
     249     12959144 :         if src_buf_len == 0 {
     250           24 :             return (restore(src_buf), Ok(()));
     251     12959120 :         }
     252     12959120 :         let mut src_buf = src_buf.slice(0..src_buf_len);
     253     12959120 :         // First try to copy as much as we can into the buffer
     254     12959120 :         if remaining > 0 {
     255     12959120 :             let copied = self.write_into_buffer(&src_buf);
     256     12959120 :             src_buf = src_buf.slice(copied..);
     257     12959120 :         }
     258              :         // Then, if the buffer is full, flush it out
     259     12959120 :         if self.buf.len() == Self::CAPACITY {
     260        10814 :             if let Err(e) = self.flush_buffer(ctx).await {
     261            0 :                 return (restore(src_buf), Err(e));
     262        10814 :             }
     263     12948306 :         }
     264              :         // Finally, write the tail of src_buf:
     265              :         // If it wholly fits into the buffer without
     266              :         // completely filling it, then put it there.
     267              :         // If not, write it out directly.
     268     12959120 :         let src_buf = if !src_buf.is_empty() {
     269        10774 :             assert_eq!(self.buf.len(), 0);
     270        10774 :             if src_buf.len() < Self::CAPACITY {
     271        10544 :                 let copied = self.write_into_buffer(&src_buf);
     272        10544 :                 // We just verified above that src_buf fits into our internal buffer.
     273        10544 :                 assert_eq!(copied, src_buf.len());
     274        10544 :                 restore(src_buf)
     275              :             } else {
     276          230 :                 let (src_buf, res) = self
     277          230 :                     .write_all_unbuffered(FullSlice::must_new(src_buf), ctx)
     278          115 :                     .await;
     279          230 :                 if let Err(e) = res {
     280            0 :                     return (src_buf, Err(e));
     281          230 :                 }
     282          230 :                 src_buf
     283              :             }
     284              :         } else {
     285     12948346 :             restore(src_buf)
     286              :         };
     287     12959120 :         (src_buf, Ok(()))
     288     14060108 :     }
     289              : 
     290              :     /// Write a blob of data. Returns the offset that it was written to,
     291              :     /// which can be used to retrieve the data later.
     292        10348 :     pub async fn write_blob<Buf: IoBuf + Send>(
     293        10348 :         &mut self,
     294        10348 :         srcbuf: FullSlice<Buf>,
     295        10348 :         ctx: &RequestContext,
     296        10348 :     ) -> (FullSlice<Buf>, Result<u64, Error>) {
     297        10348 :         let (buf, res) = self
     298        10348 :             .write_blob_maybe_compressed(srcbuf, ctx, ImageCompressionAlgorithm::Disabled)
     299         4712 :             .await;
     300        10348 :         (buf, res.map(|(off, _compression_info)| off))
     301        10348 :     }
     302              : 
     303              :     /// Write a blob of data. Returns the offset that it was written to,
     304              :     /// which can be used to retrieve the data later.
     305      7030054 :     pub(crate) async fn write_blob_maybe_compressed<Buf: IoBuf + Send>(
     306      7030054 :         &mut self,
     307      7030054 :         srcbuf: FullSlice<Buf>,
     308      7030054 :         ctx: &RequestContext,
     309      7030054 :         algorithm: ImageCompressionAlgorithm,
     310      7030054 :     ) -> (FullSlice<Buf>, Result<(u64, CompressionInfo), Error>) {
     311      7030054 :         let offset = self.offset;
     312      7030054 :         let mut compression_info = CompressionInfo {
     313      7030054 :             written_compressed: false,
     314      7030054 :             compressed_size: None,
     315      7030054 :         };
     316      7030054 : 
     317      7030054 :         let len = srcbuf.len();
     318      7030054 : 
     319      7030054 :         let mut io_buf = self.io_buf.take().expect("we always put it back below");
     320      7030054 :         io_buf.clear();
     321      7030054 :         let mut compressed_buf = None;
     322      7030054 :         let ((io_buf_slice, hdr_res), srcbuf) = async {
     323      7030054 :             if len < 128 {
     324              :                 // Short blob. Write a 1-byte length header
     325      6993686 :                 io_buf.put_u8(len as u8);
     326      6993686 :                 (self.write_all(io_buf.slice_len(), ctx).await, srcbuf)
     327              :             } else {
     328              :                 // Write a 4-byte length header
     329        36368 :                 if len > MAX_SUPPORTED_BLOB_LEN {
     330            0 :                     return (
     331            0 :                         (
     332            0 :                             io_buf.slice_len(),
     333            0 :                             Err(Error::new(
     334            0 :                                 ErrorKind::Other,
     335            0 :                                 format!("blob too large ({len} bytes)"),
     336            0 :                             )),
     337            0 :                         ),
     338            0 :                         srcbuf,
     339            0 :                     );
     340        36368 :                 }
     341        36368 :                 let (high_bit_mask, len_written, srcbuf) = match algorithm {
     342        10052 :                     ImageCompressionAlgorithm::Zstd { level } => {
     343        10052 :                         let mut encoder = if let Some(level) = level {
     344        10052 :                             async_compression::tokio::write::ZstdEncoder::with_quality(
     345        10052 :                                 Vec::new(),
     346        10052 :                                 Level::Precise(level.into()),
     347        10052 :                             )
     348              :                         } else {
     349            0 :                             async_compression::tokio::write::ZstdEncoder::new(Vec::new())
     350              :                         };
     351        10052 :                         encoder.write_all(&srcbuf[..]).await.unwrap();
     352        10052 :                         encoder.shutdown().await.unwrap();
     353        10052 :                         let compressed = encoder.into_inner();
     354        10052 :                         compression_info.compressed_size = Some(compressed.len());
     355        10052 :                         if compressed.len() < len {
     356            6 :                             compression_info.written_compressed = true;
     357            6 :                             let compressed_len = compressed.len();
     358            6 :                             compressed_buf = Some(compressed);
     359            6 :                             (BYTE_ZSTD, compressed_len, srcbuf)
     360              :                         } else {
     361        10046 :                             (BYTE_UNCOMPRESSED, len, srcbuf)
     362              :                         }
     363              :                     }
     364        26316 :                     ImageCompressionAlgorithm::Disabled => (BYTE_UNCOMPRESSED, len, srcbuf),
     365              :                 };
     366        36368 :                 let mut len_buf = (len_written as u32).to_be_bytes();
     367        36368 :                 assert_eq!(len_buf[0] & 0xf0, 0);
     368        36368 :                 len_buf[0] |= high_bit_mask;
     369        36368 :                 io_buf.extend_from_slice(&len_buf[..]);
     370        36368 :                 (self.write_all(io_buf.slice_len(), ctx).await, srcbuf)
     371              :             }
     372      7030054 :         }
     373       280372 :         .await;
     374      7030054 :         self.io_buf = Some(io_buf_slice.into_raw_slice().into_inner());
     375      7030054 :         match hdr_res {
     376      7030054 :             Ok(_) => (),
     377            0 :             Err(e) => return (srcbuf, Err(e)),
     378              :         }
     379      7030054 :         let (srcbuf, res) = if let Some(compressed_buf) = compressed_buf {
     380            6 :             let (_buf, res) = self.write_all(compressed_buf.slice_len(), ctx).await;
     381            6 :             (srcbuf, res)
     382              :         } else {
     383      7030048 :             self.write_all(srcbuf, ctx).await
     384              :         };
     385      7030054 :         (srcbuf, res.map(|_| (offset, compression_info)))
     386      7030054 :     }
     387              : }
     388              : 
     389              : impl BlobWriter<true> {
     390              :     /// Access the underlying `VirtualFile`.
     391              :     ///
     392              :     /// This function flushes the internal buffer before giving access
     393              :     /// to the underlying `VirtualFile`.
     394         1414 :     pub async fn into_inner(mut self, ctx: &RequestContext) -> Result<VirtualFile, Error> {
     395         1414 :         self.flush_buffer(ctx).await?;
     396         1414 :         Ok(self.inner)
     397         1414 :     }
     398              : 
     399              :     /// Access the underlying `VirtualFile`.
     400              :     ///
     401              :     /// Unlike [`into_inner`](Self::into_inner), this doesn't flush
     402              :     /// the internal buffer before giving access.
     403           24 :     pub fn into_inner_no_flush(self) -> VirtualFile {
     404           24 :         self.inner
     405           24 :     }
     406              : }
     407              : 
     408              : impl BlobWriter<false> {
     409              :     /// Access the underlying `VirtualFile`.
     410          518 :     pub fn into_inner(self) -> VirtualFile {
     411          518 :         self.inner
     412          518 :     }
     413              : }
     414              : 
     415              : #[cfg(test)]
     416              : pub(crate) mod tests {
     417              :     use super::*;
     418              :     use crate::{context::DownloadBehavior, task_mgr::TaskKind, tenant::block_io::BlockReaderRef};
     419              :     use camino::Utf8PathBuf;
     420              :     use camino_tempfile::Utf8TempDir;
     421              :     use rand::{Rng, SeedableRng};
     422              : 
     423           24 :     async fn round_trip_test<const BUFFERED: bool>(blobs: &[Vec<u8>]) -> Result<(), Error> {
     424        15054 :         round_trip_test_compressed::<BUFFERED>(blobs, false).await
     425           24 :     }
     426              : 
     427           40 :     pub(crate) async fn write_maybe_compressed<const BUFFERED: bool>(
     428           40 :         blobs: &[Vec<u8>],
     429           40 :         compression: bool,
     430           40 :         ctx: &RequestContext,
     431           40 :     ) -> Result<(Utf8TempDir, Utf8PathBuf, Vec<u64>), Error> {
     432           40 :         let temp_dir = camino_tempfile::tempdir()?;
     433           40 :         let pathbuf = temp_dir.path().join("file");
     434           40 : 
     435           40 :         // Write part (in block to drop the file)
     436           40 :         let mut offsets = Vec::new();
     437              :         {
     438           40 :             let file = VirtualFile::create(pathbuf.as_path(), ctx).await?;
     439           40 :             let mut wtr = BlobWriter::<BUFFERED>::new(file, 0);
     440        12408 :             for blob in blobs.iter() {
     441        12408 :                 let (_, res) = if compression {
     442         2100 :                     let res = wtr
     443         2100 :                         .write_blob_maybe_compressed(
     444         2100 :                             blob.clone().slice_len(),
     445         2100 :                             ctx,
     446         2100 :                             ImageCompressionAlgorithm::Zstd { level: Some(1) },
     447         2100 :                         )
     448          154 :                         .await;
     449         2100 :                     (res.0, res.1.map(|(off, _)| off))
     450              :                 } else {
     451        10308 :                     wtr.write_blob(blob.clone().slice_len(), ctx).await
     452              :                 };
     453        12408 :                 let offs = res?;
     454        12408 :                 offsets.push(offs);
     455              :             }
     456              :             // Write out one page worth of zeros so that we can
     457              :             // read again with read_blk
     458           40 :             let (_, res) = wtr.write_blob(vec![0; PAGE_SZ].slice_len(), ctx).await;
     459           40 :             let offs = res?;
     460           40 :             println!("Writing final blob at offs={offs}");
     461           40 :             wtr.flush_buffer(ctx).await?;
     462              :         }
     463           40 :         Ok((temp_dir, pathbuf, offsets))
     464           40 :     }
     465              : 
     466           32 :     async fn round_trip_test_compressed<const BUFFERED: bool>(
     467           32 :         blobs: &[Vec<u8>],
     468           32 :         compression: bool,
     469           32 :     ) -> Result<(), Error> {
     470           32 :         let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
     471           32 :         let (_temp_dir, pathbuf, offsets) =
     472         4630 :             write_maybe_compressed::<BUFFERED>(blobs, compression, &ctx).await?;
     473              : 
     474           32 :         let file = VirtualFile::open(pathbuf, &ctx).await?;
     475           32 :         let rdr = BlockReaderRef::VirtualFile(&file);
     476           32 :         let rdr = BlockCursor::new_with_compression(rdr, compression);
     477         8288 :         for (idx, (blob, offset)) in blobs.iter().zip(offsets.iter()).enumerate() {
     478        10619 :             let blob_read = rdr.read_blob(*offset, &ctx).await?;
     479         8288 :             assert_eq!(
     480         8288 :                 blob, &blob_read,
     481            0 :                 "mismatch for idx={idx} at offset={offset}"
     482              :             );
     483              :         }
     484           32 :         Ok(())
     485           32 :     }
     486              : 
     487         6158 :     pub(crate) fn random_array(len: usize) -> Vec<u8> {
     488         6158 :         let mut rng = rand::thread_rng();
     489     68151904 :         (0..len).map(|_| rng.gen()).collect::<_>()
     490         6158 :     }
     491              : 
     492              :     #[tokio::test]
     493            2 :     async fn test_one() -> Result<(), Error> {
     494            2 :         let blobs = &[vec![12, 21, 22]];
     495            8 :         round_trip_test::<false>(blobs).await?;
     496            4 :         round_trip_test::<true>(blobs).await?;
     497            2 :         Ok(())
     498            2 :     }
     499              : 
     500              :     #[tokio::test]
     501            2 :     async fn test_hello_simple() -> Result<(), Error> {
     502            2 :         let blobs = &[
     503            2 :             vec![0, 1, 2, 3],
     504            2 :             b"Hello, World!".to_vec(),
     505            2 :             Vec::new(),
     506            2 :             b"foobar".to_vec(),
     507            2 :         ];
     508           16 :         round_trip_test::<false>(blobs).await?;
     509            7 :         round_trip_test::<true>(blobs).await?;
     510           15 :         round_trip_test_compressed::<false>(blobs, true).await?;
     511            7 :         round_trip_test_compressed::<true>(blobs, true).await?;
     512            2 :         Ok(())
     513            2 :     }
     514              : 
     515              :     #[tokio::test]
     516            2 :     async fn test_really_big_array() -> Result<(), Error> {
     517            2 :         let blobs = &[
     518            2 :             b"test".to_vec(),
     519            2 :             random_array(10 * PAGE_SZ),
     520            2 :             b"hello".to_vec(),
     521            2 :             random_array(66 * PAGE_SZ),
     522            2 :             vec![0xf3; 24 * PAGE_SZ],
     523            2 :             b"foobar".to_vec(),
     524            2 :         ];
     525          124 :         round_trip_test::<false>(blobs).await?;
     526          116 :         round_trip_test::<true>(blobs).await?;
     527          100 :         round_trip_test_compressed::<false>(blobs, true).await?;
     528           89 :         round_trip_test_compressed::<true>(blobs, true).await?;
     529            2 :         Ok(())
     530            2 :     }
     531              : 
     532              :     #[tokio::test]
     533            2 :     async fn test_arrays_inc() -> Result<(), Error> {
     534            2 :         let blobs = (0..PAGE_SZ / 8)
     535         2048 :             .map(|v| random_array(v * 16))
     536            2 :             .collect::<Vec<_>>();
     537         4162 :         round_trip_test::<false>(&blobs).await?;
     538         2212 :         round_trip_test::<true>(&blobs).await?;
     539            2 :         Ok(())
     540            2 :     }
     541              : 
     542              :     #[tokio::test]
     543            2 :     async fn test_arrays_random_size() -> Result<(), Error> {
     544            2 :         let mut rng = rand::rngs::StdRng::seed_from_u64(42);
     545            2 :         let blobs = (0..1024)
     546         2048 :             .map(|_| {
     547         2048 :                 let mut sz: u16 = rng.gen();
     548         2048 :                 // Make 50% of the arrays small
     549         2048 :                 if rng.gen() {
     550         1032 :                     sz &= 63;
     551         1032 :                 }
     552         2048 :                 random_array(sz.into())
     553         2048 :             })
     554            2 :             .collect::<Vec<_>>();
     555         5106 :         round_trip_test::<false>(&blobs).await?;
     556         3279 :         round_trip_test::<true>(&blobs).await?;
     557            2 :         Ok(())
     558            2 :     }
     559              : 
     560              :     #[tokio::test]
     561            2 :     async fn test_arrays_page_boundary() -> Result<(), Error> {
     562            2 :         let blobs = &[
     563            2 :             random_array(PAGE_SZ - 4),
     564            2 :             random_array(PAGE_SZ - 4),
     565            2 :             random_array(PAGE_SZ - 4),
     566            2 :         ];
     567           14 :         round_trip_test::<false>(blobs).await?;
     568            6 :         round_trip_test::<true>(blobs).await?;
     569            2 :         Ok(())
     570            2 :     }
     571              : }
        

Generated by: LCOV version 2.1-beta