//!
//! Functions for reading and writing variable-sized "blobs".
//!
//! Each blob begins with a 1- or 4-byte length field, followed by the
//! actual data. If the length is smaller than 128, it is written as a
//! single byte. If it's larger than that, the length is written as a
//! four-byte big-endian integer with the high bit set, so we can tell
//! a 1-byte header from a 4-byte header by peeking at the first byte.
//! For blobs of 128 bytes or more, the next three bits are reserved;
//! only one of the possible bit patterns is currently in use (0b001),
//! which signifies compression with zstd.
//!
//! len < 128:  0XXXXXXX
//! len >= 128: 1CCCXXXX XXXXXXXX XXXXXXXX XXXXXXXX
//!
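//! For example, a 3-byte uncompressed blob is stored as the single
//! length byte 0x03 followed by the data, while a 1000-byte uncompressed
//! blob gets the 4-byte header 0x80 0x00 0x03 0xE8 (1000 = 0x3E8, high
//! bit set, CCC = 0b000).
//!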
use std::cmp::min;
use std::io::Error;

use async_compression::Level;
use bytes::{BufMut, BytesMut};
use pageserver_api::models::ImageCompressionAlgorithm;
use tokio::io::AsyncWriteExt;
use tokio_epoll_uring::{BoundedBuf, IoBuf, Slice};
use tokio_util::sync::CancellationToken;
use tracing::warn;

use crate::context::RequestContext;
use crate::page_cache::PAGE_SZ;
use crate::tenant::block_io::BlockCursor;
use crate::virtual_file::VirtualFile;
use crate::virtual_file::owned_buffers_io::io_buf_ext::{FullSlice, IoBufExt};

#[derive(Copy, Clone, Debug)]
pub struct CompressionInfo {
    pub written_compressed: bool,
    pub compressed_size: Option<usize>,
}

/// A blob header, with header+data length and compression info.
///
/// TODO: use this more widely, and add an encode() method too.
/// TODO: document the header format.
#[derive(Clone, Copy, Default)]
pub struct Header {
    pub header_len: usize,
    pub data_len: usize,
    pub compression_bits: u8,
}

impl Header {
    /// Decodes a header from a byte slice.
    pub fn decode(bytes: &[u8]) -> Result<Self, std::io::Error> {
        let Some(&first_header_byte) = bytes.first() else {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "zero-length blob header",
            ));
        };

        // If the first bit is 0, this is just a 1-byte length prefix up to 127 bytes.
        if first_header_byte < 0x80 {
            return Ok(Self {
                header_len: 1, // by definition
                data_len: first_header_byte as usize,
                compression_bits: BYTE_UNCOMPRESSED,
            });
        }

        // Otherwise, this is a 4-byte header containing compression information and length.
        // Use `get()` rather than indexing, so that a short slice yields an error
        // instead of a panic.
        const HEADER_LEN: usize = 4;
        let mut header_buf: [u8; HEADER_LEN] = bytes
            .get(0..HEADER_LEN)
            .and_then(|slice| slice.try_into().ok())
            .ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    format!("blob header too short: {bytes:?}"),
                )
            })?;

        // TODO: verify the compression bits and convert to an enum.
        let compression_bits = header_buf[0] & LEN_COMPRESSION_BIT_MASK;
        header_buf[0] &= !LEN_COMPRESSION_BIT_MASK;
        let data_len = u32::from_be_bytes(header_buf) as usize;

        Ok(Self {
            header_len: HEADER_LEN,
            data_len,
            compression_bits,
        })
    }

    /// Returns the total header+data length.
    pub fn total_len(&self) -> usize {
        self.header_len + self.data_len
    }
}

impl BlockCursor<'_> {
    /// Read a blob into a new buffer.
    pub async fn read_blob(
        &self,
        offset: u64,
        ctx: &RequestContext,
    ) -> Result<Vec<u8>, std::io::Error> {
        let mut buf = Vec::new();
        self.read_blob_into_buf(offset, &mut buf, ctx).await?;
        Ok(buf)
    }

    /// Read a blob into the given buffer. Any previous contents in the buffer
    /// are overwritten.
    pub async fn read_blob_into_buf(
        &self,
        offset: u64,
        dstbuf: &mut Vec<u8>,
        ctx: &RequestContext,
    ) -> Result<(), std::io::Error> {
        let mut blknum = (offset / PAGE_SZ as u64) as u32;
        let mut off = (offset % PAGE_SZ as u64) as usize;

        let mut buf = self.read_blk(blknum, ctx).await?;

        // Peek at the first byte, to determine if it's a 1- or 4-byte length.
        let first_len_byte = buf[off];
        let len: usize = if first_len_byte < 0x80 {
            // 1-byte length header
            off += 1;
            first_len_byte as usize
        } else {
            // 4-byte length header
            let mut len_buf = [0u8; 4];
            let thislen = PAGE_SZ - off;
            if thislen < 4 {
                // it is split across two pages
                len_buf[..thislen].copy_from_slice(&buf[off..PAGE_SZ]);
                blknum += 1;
                buf = self.read_blk(blknum, ctx).await?;
                len_buf[thislen..].copy_from_slice(&buf[0..4 - thislen]);
                off = 4 - thislen;
            } else {
                len_buf.copy_from_slice(&buf[off..off + 4]);
                off += 4;
            }
            let bit_mask = if self.read_compressed {
                !LEN_COMPRESSION_BIT_MASK
            } else {
                // Without compression support, only the high (length-marker)
                // bit is masked off.
                0x7f
            };
            len_buf[0] &= bit_mask;
            u32::from_be_bytes(len_buf) as usize
        };
        let compression_bits = first_len_byte & LEN_COMPRESSION_BIT_MASK;

        let mut tmp_buf = Vec::new();
        let buf_to_write;
        let compression = if compression_bits <= BYTE_UNCOMPRESSED || !self.read_compressed {
            if compression_bits > BYTE_UNCOMPRESSED {
                warn!("reading key above future limit ({len} bytes)");
            }
            buf_to_write = dstbuf;
            None
        } else if compression_bits == BYTE_ZSTD {
            buf_to_write = &mut tmp_buf;
            Some(dstbuf)
        } else {
            let error = std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                format!("invalid compression byte {compression_bits:x}"),
            );
            return Err(error);
        };

        buf_to_write.clear();
        buf_to_write.reserve(len);

        // Read the payload.
        let mut remain = len;
        while remain > 0 {
            let mut page_remain = PAGE_SZ - off;
            if page_remain == 0 {
                // continue on next page
                blknum += 1;
                buf = self.read_blk(blknum, ctx).await?;
                off = 0;
                page_remain = PAGE_SZ;
            }
            let this_blk_len = min(remain, page_remain);
            buf_to_write.extend_from_slice(&buf[off..off + this_blk_len]);
            remain -= this_blk_len;
            off += this_blk_len;
        }

        if let Some(dstbuf) = compression {
            if compression_bits == BYTE_ZSTD {
                let mut decoder = async_compression::tokio::write::ZstdDecoder::new(dstbuf);
                decoder.write_all(buf_to_write).await?;
                decoder.flush().await?;
            } else {
                unreachable!("already checked above")
            }
        }

        Ok(())
    }
}

/// Reserved bits for length and compression
pub(super) const LEN_COMPRESSION_BIT_MASK: u8 = 0xf0;

/// The maximum size of blobs we support. The highest few bits
/// are reserved for compression and other further uses.
pub(crate) const MAX_SUPPORTED_BLOB_LEN: usize = 0x0fff_ffff;

pub(super) const BYTE_UNCOMPRESSED: u8 = 0x80;
pub(super) const BYTE_ZSTD: u8 = BYTE_UNCOMPRESSED | 0x10;
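// Concretely, the first byte of a 4-byte header is 0b1000_0000 (0x80) for
// uncompressed data and 0b1001_0000 (0x90) for zstd-compressed data;
// MAX_SUPPORTED_BLOB_LEN leaves the top four bits of the u32 length field
// free for these markers.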

/// A wrapper of `VirtualFile` that allows users to write blobs.
///
/// If a `BlobWriter` is dropped, the internal buffer will be
/// discarded. You need to call [`flush_buffer`](Self::flush_buffer)
/// manually before dropping.
pub struct BlobWriter<const BUFFERED: bool> {
    inner: VirtualFile,
    offset: u64,
    /// A buffer to save on write calls, only used if BUFFERED=true.
    buf: Vec<u8>,
    /// We do tiny writes for the length headers; they need to be in an owned buffer.
    io_buf: Option<BytesMut>,
}

impl<const BUFFERED: bool> BlobWriter<BUFFERED> {
    pub fn new(
        inner: VirtualFile,
        start_offset: u64,
        _gate: &utils::sync::gate::Gate,
        _cancel: CancellationToken,
        _ctx: &RequestContext,
    ) -> Self {
        Self {
            inner,
            offset: start_offset,
            buf: Vec::with_capacity(Self::CAPACITY),
            io_buf: Some(BytesMut::new()),
        }
    }

    pub fn size(&self) -> u64 {
        self.offset
    }

    const CAPACITY: usize = if BUFFERED { 64 * 1024 } else { 0 };

    /// Writes the given buffer directly to the underlying `VirtualFile`.
    /// You need to make sure that the internal buffer is empty, otherwise
    /// data will be written in the wrong order.
    #[inline(always)]
    async fn write_all_unbuffered<Buf: IoBuf + Send>(
        &mut self,
        src_buf: FullSlice<Buf>,
        ctx: &RequestContext,
    ) -> (FullSlice<Buf>, Result<(), Error>) {
        let (src_buf, res) = self.inner.write_all(src_buf, ctx).await;
        let nbytes = match res {
            Ok(nbytes) => nbytes,
            Err(e) => return (src_buf, Err(e)),
        };
        self.offset += nbytes as u64;
        (src_buf, Ok(()))
    }

    /// Flushes the internal buffer to the underlying `VirtualFile`.
    #[inline(always)]
    pub async fn flush_buffer(&mut self, ctx: &RequestContext) -> Result<(), Error> {
        let buf = std::mem::take(&mut self.buf);
        let (slice, res) = self.inner.write_all(buf.slice_len(), ctx).await;
        res?;
        let mut buf = slice.into_raw_slice().into_inner();
        buf.clear();
        self.buf = buf;
        Ok(())
    }

    /// Writes as much of `src_buf` into the internal buffer as fits.
    #[inline(always)]
    fn write_into_buffer(&mut self, src_buf: &[u8]) -> usize {
        let remaining = Self::CAPACITY - self.buf.len();
        let to_copy = src_buf.len().min(remaining);
        self.buf.extend_from_slice(&src_buf[..to_copy]);
        self.offset += to_copy as u64;
        to_copy
    }

    /// Internal, possibly buffered, write function.
    async fn write_all<Buf: IoBuf + Send>(
        &mut self,
        src_buf: FullSlice<Buf>,
        ctx: &RequestContext,
    ) -> (FullSlice<Buf>, Result<(), Error>) {
        let src_buf = src_buf.into_raw_slice();
        let src_buf_bounds = src_buf.bounds();
        let restore = move |src_buf_slice: Slice<_>| {
            FullSlice::must_new(Slice::from_buf_bounds(
                src_buf_slice.into_inner(),
                src_buf_bounds,
            ))
        };

        if !BUFFERED {
            assert!(self.buf.is_empty());
            return self
                .write_all_unbuffered(FullSlice::must_new(src_buf), ctx)
                .await;
        }
        let remaining = Self::CAPACITY - self.buf.len();
        let src_buf_len = src_buf.bytes_init();
        if src_buf_len == 0 {
            return (restore(src_buf), Ok(()));
        }
        let mut src_buf = src_buf.slice(0..src_buf_len);
        // First, try to copy as much as we can into the buffer.
        if remaining > 0 {
            let copied = self.write_into_buffer(&src_buf);
            src_buf = src_buf.slice(copied..);
        }
        // Then, if the buffer is full, flush it out.
        if self.buf.len() == Self::CAPACITY {
            if let Err(e) = self.flush_buffer(ctx).await {
                return (restore(src_buf), Err(e));
            }
        }
        // Finally, write the tail of src_buf:
        // If it wholly fits into the buffer without
        // completely filling it, then put it there.
        // If not, write it out directly.
        let src_buf = if !src_buf.is_empty() {
            assert_eq!(self.buf.len(), 0);
            if src_buf.len() < Self::CAPACITY {
                let copied = self.write_into_buffer(&src_buf);
                // We just verified above that src_buf fits into our internal buffer.
                assert_eq!(copied, src_buf.len());
                restore(src_buf)
            } else {
                let (src_buf, res) = self
                    .write_all_unbuffered(FullSlice::must_new(src_buf), ctx)
                    .await;
                if let Err(e) = res {
                    return (src_buf, Err(e));
                }
                src_buf
            }
        } else {
            restore(src_buf)
        };
        (src_buf, Ok(()))
    }

    /// Write a blob of data. Returns the offset that it was written to,
    /// which can be used to retrieve the data later.
    pub async fn write_blob<Buf: IoBuf + Send>(
        &mut self,
        srcbuf: FullSlice<Buf>,
        ctx: &RequestContext,
    ) -> (FullSlice<Buf>, Result<u64, Error>) {
        let (buf, res) = self
            .write_blob_maybe_compressed(srcbuf, ctx, ImageCompressionAlgorithm::Disabled)
            .await;
        (buf, res.map(|(off, _compression_info)| off))
    }

    /// Write a blob of data, compressing it with the given algorithm if
    /// enabled and beneficial. Returns the offset that it was written to,
    /// which can be used to retrieve the data later.
    pub(crate) async fn write_blob_maybe_compressed<Buf: IoBuf + Send>(
        &mut self,
        srcbuf: FullSlice<Buf>,
        ctx: &RequestContext,
        algorithm: ImageCompressionAlgorithm,
    ) -> (FullSlice<Buf>, Result<(u64, CompressionInfo), Error>) {
        let offset = self.offset;
        let mut compression_info = CompressionInfo {
            written_compressed: false,
            compressed_size: None,
        };

        let len = srcbuf.len();

        let mut io_buf = self.io_buf.take().expect("we always put it back below");
        io_buf.clear();
        let mut compressed_buf = None;
        let ((io_buf_slice, hdr_res), srcbuf) = async {
            if len < 128 {
                // Short blob. Write a 1-byte length header.
                io_buf.put_u8(len as u8);
                (self.write_all(io_buf.slice_len(), ctx).await, srcbuf)
            } else {
                // Write a 4-byte length header.
                if len > MAX_SUPPORTED_BLOB_LEN {
                    return (
                        (
                            io_buf.slice_len(),
                            Err(Error::other(format!("blob too large ({len} bytes)"))),
                        ),
                        srcbuf,
                    );
                }
                let (high_bit_mask, len_written, srcbuf) = match algorithm {
                    ImageCompressionAlgorithm::Zstd { level } => {
                        let mut encoder = if let Some(level) = level {
                            async_compression::tokio::write::ZstdEncoder::with_quality(
                                Vec::new(),
                                Level::Precise(level.into()),
                            )
                        } else {
                            async_compression::tokio::write::ZstdEncoder::new(Vec::new())
                        };
                        encoder.write_all(&srcbuf[..]).await.unwrap();
                        encoder.shutdown().await.unwrap();
                        let compressed = encoder.into_inner();
                        compression_info.compressed_size = Some(compressed.len());
                        // Only write the compressed version if it is actually smaller.
                        if compressed.len() < len {
                            compression_info.written_compressed = true;
                            let compressed_len = compressed.len();
                            compressed_buf = Some(compressed);
                            (BYTE_ZSTD, compressed_len, srcbuf)
                        } else {
                            (BYTE_UNCOMPRESSED, len, srcbuf)
                        }
                    }
                    ImageCompressionAlgorithm::Disabled => (BYTE_UNCOMPRESSED, len, srcbuf),
                };
                let mut len_buf = (len_written as u32).to_be_bytes();
                assert_eq!(len_buf[0] & 0xf0, 0);
                len_buf[0] |= high_bit_mask;
                io_buf.extend_from_slice(&len_buf[..]);
                (self.write_all(io_buf.slice_len(), ctx).await, srcbuf)
            }
        }
        .await;
        self.io_buf = Some(io_buf_slice.into_raw_slice().into_inner());
        match hdr_res {
            Ok(_) => (),
            Err(e) => return (srcbuf, Err(e)),
        }
        let (srcbuf, res) = if let Some(compressed_buf) = compressed_buf {
            let (_buf, res) = self.write_all(compressed_buf.slice_len(), ctx).await;
            (srcbuf, res)
        } else {
            self.write_all(srcbuf, ctx).await
        };
        (srcbuf, res.map(|_| (offset, compression_info)))
    }
}

impl BlobWriter<true> {
    /// Access the underlying `VirtualFile`.
    ///
    /// This function flushes the internal buffer before giving access
    /// to the underlying `VirtualFile`.
    pub async fn into_inner(mut self, ctx: &RequestContext) -> Result<VirtualFile, Error> {
        self.flush_buffer(ctx).await?;
        Ok(self.inner)
    }

    /// Access the underlying `VirtualFile`.
    ///
    /// Unlike [`into_inner`](Self::into_inner), this doesn't flush
    /// the internal buffer before giving access.
    pub fn into_inner_no_flush(self) -> VirtualFile {
        self.inner
    }
}

impl BlobWriter<false> {
    /// Access the underlying `VirtualFile`.
    pub fn into_inner(self) -> VirtualFile {
        self.inner
    }
}

#[cfg(test)]
pub(crate) mod tests {
    use camino::Utf8PathBuf;
    use camino_tempfile::Utf8TempDir;
    use rand::{Rng, SeedableRng};

    use super::*;
    use crate::context::DownloadBehavior;
    use crate::task_mgr::TaskKind;
    use crate::tenant::block_io::BlockReaderRef;

    async fn round_trip_test<const BUFFERED: bool>(blobs: &[Vec<u8>]) -> Result<(), Error> {
        round_trip_test_compressed::<BUFFERED>(blobs, false).await
    }

    pub(crate) async fn write_maybe_compressed<const BUFFERED: bool>(
        blobs: &[Vec<u8>],
        compression: bool,
        ctx: &RequestContext,
    ) -> Result<(Utf8TempDir, Utf8PathBuf, Vec<u64>), Error> {
        let temp_dir = camino_tempfile::tempdir()?;
        let pathbuf = temp_dir.path().join("file");
        let gate = utils::sync::gate::Gate::default();
        let cancel = CancellationToken::new();

        // Write part (in a block, so the file is dropped at the end)
        let mut offsets = Vec::new();
        {
            let file = VirtualFile::create(pathbuf.as_path(), ctx).await?;
            let mut wtr = BlobWriter::<BUFFERED>::new(file, 0, &gate, cancel.clone(), ctx);
            for blob in blobs.iter() {
                let (_, res) = if compression {
                    let res = wtr
                        .write_blob_maybe_compressed(
                            blob.clone().slice_len(),
                            ctx,
                            ImageCompressionAlgorithm::Zstd { level: Some(1) },
                        )
                        .await;
                    (res.0, res.1.map(|(off, _)| off))
                } else {
                    wtr.write_blob(blob.clone().slice_len(), ctx).await
                };
                let offs = res?;
                offsets.push(offs);
            }
            // Write out one page worth of zeros so that we can
            // read again with read_blk
            let (_, res) = wtr.write_blob(vec![0; PAGE_SZ].slice_len(), ctx).await;
            let offs = res?;
            println!("Writing final blob at offs={offs}");
            wtr.flush_buffer(ctx).await?;
        }
        Ok((temp_dir, pathbuf, offsets))
    }

    async fn round_trip_test_compressed<const BUFFERED: bool>(
        blobs: &[Vec<u8>],
        compression: bool,
    ) -> Result<(), Error> {
        let ctx =
            RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error).with_scope_unit_test();
        let (_temp_dir, pathbuf, offsets) =
            write_maybe_compressed::<BUFFERED>(blobs, compression, &ctx).await?;

        let file = VirtualFile::open(pathbuf, &ctx).await?;
        let rdr = BlockReaderRef::VirtualFile(&file);
        let rdr = BlockCursor::new_with_compression(rdr, compression);
        for (idx, (blob, offset)) in blobs.iter().zip(offsets.iter()).enumerate() {
            let blob_read = rdr.read_blob(*offset, &ctx).await?;
            assert_eq!(
                blob, &blob_read,
                "mismatch for idx={idx} at offset={offset}"
            );
        }
        Ok(())
    }

    pub(crate) fn random_array(len: usize) -> Vec<u8> {
        let mut rng = rand::thread_rng();
        (0..len).map(|_| rng.r#gen()).collect::<_>()
    }

    #[tokio::test]
    async fn test_one() -> Result<(), Error> {
        let blobs = &[vec![12, 21, 22]];
        round_trip_test::<false>(blobs).await?;
        round_trip_test::<true>(blobs).await?;
        Ok(())
    }

    #[tokio::test]
    async fn test_hello_simple() -> Result<(), Error> {
        let blobs = &[
            vec![0, 1, 2, 3],
            b"Hello, World!".to_vec(),
            Vec::new(),
            b"foobar".to_vec(),
        ];
        round_trip_test::<false>(blobs).await?;
        round_trip_test::<true>(blobs).await?;
        round_trip_test_compressed::<false>(blobs, true).await?;
        round_trip_test_compressed::<true>(blobs, true).await?;
        Ok(())
    }

    #[tokio::test]
    async fn test_really_big_array() -> Result<(), Error> {
        let blobs = &[
            b"test".to_vec(),
            random_array(10 * PAGE_SZ),
            b"hello".to_vec(),
            random_array(66 * PAGE_SZ),
            vec![0xf3; 24 * PAGE_SZ],
            b"foobar".to_vec(),
        ];
        round_trip_test::<false>(blobs).await?;
        round_trip_test::<true>(blobs).await?;
        round_trip_test_compressed::<false>(blobs, true).await?;
        round_trip_test_compressed::<true>(blobs, true).await?;
        Ok(())
    }

    #[tokio::test]
    async fn test_arrays_inc() -> Result<(), Error> {
        let blobs = (0..PAGE_SZ / 8)
            .map(|v| random_array(v * 16))
            .collect::<Vec<_>>();
        round_trip_test::<false>(&blobs).await?;
        round_trip_test::<true>(&blobs).await?;
        Ok(())
    }

    #[tokio::test]
    async fn test_arrays_random_size() -> Result<(), Error> {
        let mut rng = rand::rngs::StdRng::seed_from_u64(42);
        let blobs = (0..1024)
            .map(|_| {
                let mut sz: u16 = rng.r#gen();
                // Make 50% of the arrays small
                if rng.r#gen() {
                    sz &= 63;
                }
                random_array(sz.into())
            })
            .collect::<Vec<_>>();
        round_trip_test::<false>(&blobs).await?;
        round_trip_test::<true>(&blobs).await?;
        Ok(())
    }
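
    // Sketch: exercise the BUFFERED=true paths in `write_all` around the
    // internal buffer capacity (assuming `Self::CAPACITY` stays at 64 KiB).
    // Blobs just below, at, and just above the capacity hit both the
    // copy-into-buffer and the direct-write tails.
    #[tokio::test]
    async fn test_arrays_buffer_boundary() -> Result<(), Error> {
        let blobs = &[
            random_array(64 * 1024 - 1),
            random_array(64 * 1024),
            random_array(64 * 1024 + 1),
        ];
        round_trip_test::<false>(blobs).await?;
        round_trip_test::<true>(blobs).await?;
        Ok(())
    }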

    #[tokio::test]
    async fn test_arrays_page_boundary() -> Result<(), Error> {
        let blobs = &[
            random_array(PAGE_SZ - 4),
            random_array(PAGE_SZ - 4),
            random_array(PAGE_SZ - 4),
        ];
        round_trip_test::<false>(blobs).await?;
        round_trip_test::<true>(blobs).await?;
        Ok(())
    }
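
    // Sketch: sanity-check `Header::decode` against hand-built headers derived
    // from the format description at the top of this file (byte values are
    // illustrative, not taken from any on-disk file).
    #[test]
    fn test_header_decode() -> Result<(), Error> {
        // 1-byte header: length 3, uncompressed by definition.
        let hdr = Header::decode(&[0x03, 1, 2, 3])?;
        assert_eq!((hdr.header_len, hdr.data_len), (1, 3));
        assert_eq!(hdr.compression_bits, BYTE_UNCOMPRESSED);

        // 4-byte header: high bit set, CCC = 0b001 (zstd), length 1000 = 0x3E8.
        let hdr = Header::decode(&[0x90, 0x00, 0x03, 0xE8])?;
        assert_eq!((hdr.header_len, hdr.data_len), (4, 1000));
        assert_eq!(hdr.compression_bits, BYTE_ZSTD);
        assert_eq!(hdr.total_len(), 1004);

        // Zero-length input is rejected.
        assert!(Header::decode(&[]).is_err());
        Ok(())
    }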
}