use bytes::BytesMut;
use tokio_epoll_uring::{BoundedBuf, IoBuf, Slice};

use crate::context::RequestContext;

/// A trait for doing owned-buffer write IO.
/// Think [`tokio::io::AsyncWrite`] but with owned buffers.
pub trait OwnedAsyncWriter {
    async fn write_all<B: BoundedBuf<Buf = Buf>, Buf: IoBuf + Send>(
        &mut self,
        buf: B,
        ctx: &RequestContext,
    ) -> std::io::Result<(usize, B::Buf)>;
}
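
// Sketch (illustrative helper, not part of this module's API): the owned-buffer
// convention means ownership of the buffer moves into `write_all`, and the
// underlying `B::Buf` is handed back on success so the allocation can be reused.
#[allow(dead_code)]
async fn demo_owned_buffer_round_trip<W: OwnedAsyncWriter>(
    writer: &mut W,
    buf: BytesMut,
    ctx: &RequestContext,
) -> std::io::Result<BytesMut> {
    let len = buf.len();
    // Ownership moves into `write_all`; the same `BytesMut` allocation comes
    // back as the second tuple element, ready to be cleared and reused.
    let (nwritten, buf) = writer.write_all(buf.slice_full(), ctx).await?;
    debug_assert_eq!(nwritten, len);
    Ok(buf)
}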

/// A wrapper around an [`OwnedAsyncWriter`] that uses a [`Buffer`] to batch
/// small writes into larger writes of size [`Buffer::cap`].
///
/// # Passthrough Of Large Writes
///
/// Calls to [`BufferedWriter::write_buffered`] that are larger than [`Buffer::cap`]
/// cause the internal buffer to be flushed prematurely so that the large
/// buffered write is passed through to the underlying [`OwnedAsyncWriter`].
///
/// This pass-through is generally beneficial for throughput, but if
/// the storage backend of the [`OwnedAsyncWriter`] is a shared resource,
/// unlimited large writes may cause latency or fairness issues.
///
/// In such cases, a different implementation that always buffers in memory
/// may be preferable.
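///
/// # Example
///
/// A minimal usage sketch; `MyWriter`, `ctx`, and `big: bytes::Bytes` are placeholders:
///
/// ```ignore
/// let mut writer = BufferedWriter::new(MyWriter::new(), BytesMut::with_capacity(64));
/// writer.write_buffered_borrowed(b"small", ctx).await?; // copied into the internal buffer
/// writer.write_buffered(big.slice_full(), ctx).await?; // >= cap: flushed, then passed through
/// let my_writer = writer.flush_and_into_inner(ctx).await?;
/// ```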
pub struct BufferedWriter<B, W> {
    writer: W,
    /// Invariant: this is always `Some(buf)`, except
    /// - while an IO is in flight: it becomes `Some(buf)` again once the IO completes successfully
    /// - after an IO error: it stays `None` forever
    buf: Option<B>,
}

impl<B, Buf, W> BufferedWriter<B, W>
where
    B: Buffer<IoBuf = Buf> + Send,
    Buf: IoBuf + Send,
    W: OwnedAsyncWriter,
{
    pub fn new(writer: W, buf: B) -> Self {
        Self {
            writer,
            buf: Some(buf),
        }
    }

    pub fn as_inner(&self) -> &W {
        &self.writer
    }

    /// Panics if used after any of the write paths returned an error.
    pub fn inspect_buffer(&self) -> &B {
        self.buf()
    }

    #[cfg_attr(target_os = "macos", allow(dead_code))]
    pub async fn flush_and_into_inner(mut self, ctx: &RequestContext) -> std::io::Result<W> {
        self.flush(ctx).await?;

        let Self { buf, writer } = self;
        assert!(buf.is_some());
        Ok(writer)
    }

    #[inline(always)]
    fn buf(&self) -> &B {
        self.buf
            .as_ref()
            .expect("must not use after we returned an error")
    }

    #[cfg_attr(target_os = "macos", allow(dead_code))]
    pub async fn write_buffered<S: IoBuf + Send>(
        &mut self,
        chunk: Slice<S>,
        ctx: &RequestContext,
    ) -> std::io::Result<(usize, S)> {
        let chunk_len = chunk.len();
        // avoid memcpy for the middle of the chunk
        if chunk.len() >= self.buf().cap() {
            self.flush(ctx).await?;
            // do a big write, bypassing `buf`
            assert_eq!(
                self.buf
                    .as_ref()
                    .expect("must not use after an error")
                    .pending(),
                0
            );
            let (nwritten, chunk) = self.writer.write_all(chunk, ctx).await?;
            assert_eq!(nwritten, chunk_len);
            return Ok((nwritten, chunk));
        }
        // in-memory copy the tail of the chunk, which is smaller than the buffer capacity
        assert!(chunk.len() < self.buf().cap());
        let mut slice = &chunk[..];
        while !slice.is_empty() {
            let buf = self.buf.as_mut().expect("must not use after an error");
            let need = buf.cap() - buf.pending();
            let have = slice.len();
            let n = std::cmp::min(need, have);
            buf.extend_from_slice(&slice[..n]);
            slice = &slice[n..];
            if buf.pending() >= buf.cap() {
                assert_eq!(buf.pending(), buf.cap());
                self.flush(ctx).await?;
            }
        }
        assert!(slice.is_empty(), "by now we should have drained the chunk");
        Ok((chunk_len, chunk.into_inner()))
    }

    /// Strictly less performant variant of [`Self::write_buffered`] that allows writing borrowed data.
    ///
    /// It is less performant because we always have to copy the borrowed data into the internal buffer
    /// before we can do the IO. [`Self::write_buffered`] can avoid this copy, which is more performant
    /// for large writes.
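    ///
    /// A minimal sketch of a call site (`writer` and `ctx` assumed in scope):
    ///
    /// ```ignore
    /// // The borrowed bytes are copied into the internal buffer; a flush to the
    /// // underlying writer happens whenever the buffer reaches `cap`.
    /// writer.write_buffered_borrowed(b"borrowed bytes", ctx).await?;
    /// ```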
    pub async fn write_buffered_borrowed(
        &mut self,
        mut chunk: &[u8],
        ctx: &RequestContext,
    ) -> std::io::Result<usize> {
        let chunk_len = chunk.len();
        while !chunk.is_empty() {
            let buf = self.buf.as_mut().expect("must not use after an error");
            let need = buf.cap() - buf.pending();
            let have = chunk.len();
            let n = std::cmp::min(need, have);
            buf.extend_from_slice(&chunk[..n]);
            chunk = &chunk[n..];
            if buf.pending() >= buf.cap() {
                assert_eq!(buf.pending(), buf.cap());
                self.flush(ctx).await?;
            }
        }
        Ok(chunk_len)
    }

    async fn flush(&mut self, ctx: &RequestContext) -> std::io::Result<()> {
        let buf = self.buf.take().expect("must not use after an error");
        let buf_len = buf.pending();
        if buf_len == 0 {
            self.buf = Some(buf);
            return Ok(());
        }
        let (nwritten, io_buf) = self.writer.write_all(buf.flush(), ctx).await?;
        assert_eq!(nwritten, buf_len);
        self.buf = Some(Buffer::reuse_after_flush(io_buf));
        Ok(())
    }
}

/// A [`Buffer`] is used by [`BufferedWriter`] to batch smaller writes into larger ones.
pub trait Buffer {
    type IoBuf: IoBuf;

    /// Capacity of the buffer. Must not change over the lifetime of `self`.
    fn cap(&self) -> usize;

    /// Add data to the buffer.
    /// Panics if there is not enough room to accommodate `other`'s content, i.e.,
    /// panics if `other.len() > self.cap() - self.pending()`.
    fn extend_from_slice(&mut self, other: &[u8]);

    /// Number of bytes in the buffer.
    fn pending(&self) -> usize;

    /// Turns `self` into a [`tokio_epoll_uring::Slice`] of the pending data
    /// so we can use [`tokio_epoll_uring`] to write it to disk.
    fn flush(self) -> Slice<Self::IoBuf>;

    /// After the write to disk is done and we have gotten back the slice,
    /// [`BufferedWriter`] uses this method to re-use the io buffer.
    fn reuse_after_flush(iobuf: Self::IoBuf) -> Self;
}

impl Buffer for BytesMut {
    type IoBuf = BytesMut;

    #[inline(always)]
    fn cap(&self) -> usize {
        self.capacity()
    }

    fn extend_from_slice(&mut self, other: &[u8]) {
        BytesMut::extend_from_slice(self, other)
    }

    #[inline(always)]
    fn pending(&self) -> usize {
        self.len()
    }

    fn flush(self) -> Slice<BytesMut> {
        if self.is_empty() {
            return self.slice_full();
        }
        let len = self.len();
        self.slice(0..len)
    }

    fn reuse_after_flush(mut iobuf: BytesMut) -> Self {
        iobuf.clear();
        iobuf
    }
}

impl OwnedAsyncWriter for Vec<u8> {
    async fn write_all<B: BoundedBuf<Buf = Buf>, Buf: IoBuf + Send>(
        &mut self,
        buf: B,
        _: &RequestContext,
    ) -> std::io::Result<(usize, B::Buf)> {
        let nbytes = buf.bytes_init();
        if nbytes == 0 {
            return Ok((0, Slice::into_inner(buf.slice_full())));
        }
        let buf = buf.slice(0..nbytes);
        self.extend_from_slice(&buf[..]);
        Ok((buf.len(), Slice::into_inner(buf)))
    }
}
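
// Sketch (illustrative, not part of this module's API): using `Vec<u8>` as the
// sink lets `BufferedWriter` be exercised end-to-end without touching real IO.
#[allow(dead_code)]
async fn demo_buffered_into_vec(ctx: &RequestContext) -> std::io::Result<Vec<u8>> {
    let mut writer = BufferedWriter::new(Vec::new(), BytesMut::with_capacity(64));
    writer.write_buffered_borrowed(b"hello ", ctx).await?;
    writer.write_buffered_borrowed(b"world", ctx).await?;
    // Flushes the pending "hello world" bytes and returns the underlying Vec.
    writer.flush_and_into_inner(ctx).await
}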

#[cfg(test)]
mod tests {
    use bytes::BytesMut;

    use super::*;
    use crate::context::{DownloadBehavior, RequestContext};
    use crate::task_mgr::TaskKind;

    #[derive(Default)]
    struct RecorderWriter {
        writes: Vec<Vec<u8>>,
    }
    impl OwnedAsyncWriter for RecorderWriter {
        async fn write_all<B: BoundedBuf<Buf = Buf>, Buf: IoBuf + Send>(
            &mut self,
            buf: B,
            _: &RequestContext,
        ) -> std::io::Result<(usize, B::Buf)> {
            let nbytes = buf.bytes_init();
            if nbytes == 0 {
                self.writes.push(vec![]);
                return Ok((0, Slice::into_inner(buf.slice_full())));
            }
            let buf = buf.slice(0..nbytes);
            self.writes.push(Vec::from(&buf[..]));
            Ok((buf.len(), Slice::into_inner(buf)))
        }
    }

    fn test_ctx() -> RequestContext {
        RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error)
    }

    macro_rules! write {
        ($writer:ident, $data:literal) => {{
            $writer
                .write_buffered(::bytes::Bytes::from_static($data).slice_full(), &test_ctx())
                .await?;
        }};
    }

    #[tokio::test]
    async fn test_buffered_writes_only() -> std::io::Result<()> {
        let recorder = RecorderWriter::default();
        let mut writer = BufferedWriter::new(recorder, BytesMut::with_capacity(2));
        write!(writer, b"a");
        write!(writer, b"b");
        write!(writer, b"c");
        write!(writer, b"d");
        write!(writer, b"e");
        let recorder = writer.flush_and_into_inner(&test_ctx()).await?;
        assert_eq!(
            recorder.writes,
            vec![Vec::from(b"ab"), Vec::from(b"cd"), Vec::from(b"e")]
        );
        Ok(())
    }

    #[tokio::test]
    async fn test_passthrough_writes_only() -> std::io::Result<()> {
        let recorder = RecorderWriter::default();
        let mut writer = BufferedWriter::new(recorder, BytesMut::with_capacity(2));
        write!(writer, b"abc");
        write!(writer, b"de");
        write!(writer, b"");
        write!(writer, b"fghijk");
        let recorder = writer.flush_and_into_inner(&test_ctx()).await?;
        assert_eq!(
            recorder.writes,
            vec![Vec::from(b"abc"), Vec::from(b"de"), Vec::from(b"fghijk")]
        );
        Ok(())
    }

    #[tokio::test]
    async fn test_passthrough_write_with_nonempty_buffer() -> std::io::Result<()> {
        let recorder = RecorderWriter::default();
        let mut writer = BufferedWriter::new(recorder, BytesMut::with_capacity(2));
        write!(writer, b"a");
        write!(writer, b"bc");
        write!(writer, b"d");
        write!(writer, b"e");
        let recorder = writer.flush_and_into_inner(&test_ctx()).await?;
        assert_eq!(
            recorder.writes,
            vec![Vec::from(b"a"), Vec::from(b"bc"), Vec::from(b"de")]
        );
        Ok(())
    }

    #[tokio::test]
    async fn test_write_all_borrowed_always_goes_through_buffer() -> std::io::Result<()> {
        let ctx = test_ctx();
        let ctx = &ctx;
        let recorder = RecorderWriter::default();
        let mut writer = BufferedWriter::new(recorder, BytesMut::with_capacity(2));

        writer.write_buffered_borrowed(b"abc", ctx).await?;
        writer.write_buffered_borrowed(b"d", ctx).await?;
        writer.write_buffered_borrowed(b"e", ctx).await?;
        writer.write_buffered_borrowed(b"fg", ctx).await?;
        writer.write_buffered_borrowed(b"hi", ctx).await?;
        writer.write_buffered_borrowed(b"j", ctx).await?;
        writer.write_buffered_borrowed(b"klmno", ctx).await?;

        let recorder = writer.flush_and_into_inner(ctx).await?;
        assert_eq!(
            recorder.writes,
            {
                let expect: &[&[u8]] = &[b"ab", b"cd", b"ef", b"gh", b"ij", b"kl", b"mn", b"o"];
                expect
            }
            .iter()
            .map(|v| v[..].to_vec())
            .collect::<Vec<_>>()
        );
        Ok(())
    }
}