//! A [`crate::virtual_file::owned_buffers_io::write::Buffer`] whose
//! unwritten range is guaranteed to be zero-initialized.
//! This is used by [`crate::tenant::ephemeral_file::zero_padded_read_write::RW::read_blk`]
//! to serve page-sized reads of the trailing page when the trailing page has only been partially filled.

use std::mem::MaybeUninit;

use crate::virtual_file::owned_buffers_io::io_buf_ext::FullSlice;

/// See module-level comment.
pub struct Buffer<const N: usize> {
    /// Heap allocation whose `written..N` suffix is always zero-filled.
    allocation: Box<[u8; N]>,
    /// Number of bytes filled in so far via `extend_from_slice`.
    written: usize,
}

impl<const N: usize> Default for Buffer<N> {
    fn default() -> Self {
        Self {
            allocation: Box::new(
                // SAFETY: zeroed memory is a valid [u8; N]
                unsafe { MaybeUninit::zeroed().assume_init() },
            ),
            written: 0,
        }
    }
}

impl<const N: usize> Buffer<N> {
    #[inline(always)]
    fn invariants(&self) {
        // don't check by default, unoptimized is too expensive even for debug mode
        if false {
            debug_assert!(self.written <= N, "{}", self.written);
            debug_assert!(self.allocation[self.written..N].iter().all(|v| *v == 0));
        }
    }

    pub fn as_zero_padded_slice(&self) -> &[u8; N] {
        &self.allocation
    }
}

impl<const N: usize> crate::virtual_file::owned_buffers_io::write::Buffer for Buffer<N> {
    type IoBuf = Self;

    fn cap(&self) -> usize {
        self.allocation.len()
    }

    fn extend_from_slice(&mut self, other: &[u8]) {
        self.invariants();
        let remaining = self.allocation.len() - self.written;
        if other.len() > remaining {
            panic!("calling extend_from_slice() with insufficient remaining capacity");
        }
        self.allocation[self.written..(self.written + other.len())].copy_from_slice(other);
        self.written += other.len();
        self.invariants();
    }

    fn pending(&self) -> usize {
        self.written
    }

    fn flush(self) -> FullSlice<Self> {
        self.invariants();
        let written = self.written;
        FullSlice::must_new(tokio_epoll_uring::BoundedBuf::slice(self, 0..written))
    }

    fn reuse_after_flush(iobuf: Self::IoBuf) -> Self {
        let Self {
            mut allocation,
            written,
        } = iobuf;
        allocation[0..written].fill(0);
        let new = Self {
            allocation,
            written: 0,
        };
        new.invariants();
        new
    }
}

/// We have this trait impl so that the `flush` method in the `Buffer` impl above can produce a
/// [`tokio_epoll_uring::BoundedBuf::slice`] of the [`Self::written`] range of the data.
///
/// Remember that bytes_init is generally _not_ a tracker of the amount
/// of valid data in the io buffer; we use `Slice` for that.
/// The `IoBuf` is _only_ for keeping track of uninitialized memory, a bit like MaybeUninit.
///
/// SAFETY:
///
/// The [`Self::allocation`] is stable because boxes are stable.
/// The memory is zero-initialized, so bytes_init is always N.
unsafe impl<const N: usize> tokio_epoll_uring::IoBuf for Buffer<N> {
    fn stable_ptr(&self) -> *const u8 {
        self.allocation.as_ptr()
    }

    fn bytes_init(&self) -> usize {
        // Yes, N, not self.written; Read the full comment of this impl block!
        N
    }

    fn bytes_total(&self) -> usize {
        N
    }
}
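
// --- Illustrative addition, not part of the original source ---
// A minimal sketch of unit tests exercising only the APIs defined in this file,
// to make the zero-padding invariant and the `bytes_init() == N` subtlety concrete.
// It assumes the surrounding crate builds `#[cfg(test)]` modules in the usual way.
#[cfg(test)]
mod tests {
    use super::Buffer;
    // Bring the trait methods (`cap`, `pending`, `extend_from_slice`) into scope.
    use crate::virtual_file::owned_buffers_io::write::Buffer as _;

    #[test]
    fn unwritten_range_stays_zeroed() {
        let mut buf = Buffer::<16>::default();
        assert_eq!(buf.cap(), 16);
        assert_eq!(buf.pending(), 0);

        buf.extend_from_slice(&[0xff, 0xff, 0xff]);
        assert_eq!(buf.pending(), 3);

        // The written prefix holds the data; the rest of the page reads as zeroes.
        let padded: &[u8; 16] = buf.as_zero_padded_slice();
        assert_eq!(&padded[..3], &[0xff, 0xff, 0xff]);
        assert!(padded[3..].iter().all(|b| *b == 0));
    }

    #[test]
    fn bytes_init_is_always_n() {
        use tokio_epoll_uring::IoBuf;

        let mut buf = Buffer::<16>::default();
        buf.extend_from_slice(&[1, 2, 3]);
        // `bytes_init` tracks initialized (not valid) bytes; the allocation is
        // fully zero-initialized, so it is always N regardless of `written`.
        assert_eq!(buf.bytes_init(), 16);
        assert_eq!(buf.bytes_total(), 16);
    }
}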