Line data Source code
1 : //! VirtualFile is like a normal File, but it's not bound directly to
2 : //! a file descriptor.
3 : //!
4 : //! Instead, the file is opened when it's read from,
5 : //! and if too many files are open globally in the system, least-recently
6 : //! used ones are closed.
7 : //!
8 : //! To track which files have been recently used, we use the clock algorithm
9 : //! with a 'recently_used' flag on each slot.
10 : //!
11 : //! This is similar to PostgreSQL's virtual file descriptor facility in
12 : //! src/backend/storage/file/fd.c
13 : //!
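//! A minimal sketch of that eviction loop, independent of this module's types
//! (illustrative only; the real implementation, `OpenFiles::find_victim_slot`
//! below, additionally bounds its retries and takes the slot lock):
//!
//! ```ignore
//! fn find_victim(slots: &[Slot], next: &AtomicUsize) -> usize {
//!     loop {
//!         let i = next.fetch_add(1, Ordering::AcqRel) % slots.len();
//!         // A set flag means "recently used": clear it and give the slot a
//!         // second chance. A slot whose flag was already clear is the victim.
//!         if !slots[i].recently_used.swap(false, Ordering::AcqRel) {
//!             return i;
//!         }
//!     }
//! }
//! ```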
14 : use crate::context::RequestContext;
15 : use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC};
16 :
17 : use crate::page_cache::{PageWriteGuard, PAGE_SZ};
18 : use crate::tenant::TENANTS_SEGMENT_NAME;
19 : use camino::{Utf8Path, Utf8PathBuf};
20 : use once_cell::sync::OnceCell;
21 : use owned_buffers_io::aligned_buffer::buffer::AlignedBuffer;
22 : use owned_buffers_io::aligned_buffer::{AlignedBufferMut, AlignedSlice, ConstAlign};
23 : use owned_buffers_io::io_buf_aligned::{IoBufAligned, IoBufAlignedMut};
24 : use owned_buffers_io::io_buf_ext::FullSlice;
25 : use pageserver_api::config::defaults::DEFAULT_IO_BUFFER_ALIGNMENT;
26 : use pageserver_api::shard::TenantShardId;
27 : use std::fs::File;
28 : use std::io::{Error, ErrorKind, Seek, SeekFrom};
29 : #[cfg(target_os = "linux")]
30 : use std::os::unix::fs::OpenOptionsExt;
31 : use tokio_epoll_uring::{BoundedBuf, IoBuf, IoBufMut, Slice};
32 :
33 : use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
34 : use std::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering};
35 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
36 : use tokio::time::Instant;
37 :
38 : pub use pageserver_api::models::virtual_file as api;
39 : pub(crate) mod io_engine;
40 : pub use io_engine::feature_test as io_engine_feature_test;
41 : pub use io_engine::io_engine_for_bench;
42 : pub use io_engine::FeatureTestResult as IoEngineFeatureTestResult;
43 : mod metadata;
44 : mod open_options;
45 : use self::owned_buffers_io::write::OwnedAsyncWriter;
46 : pub(crate) use api::IoMode;
47 : pub(crate) use io_engine::IoEngineKind;
48 : pub(crate) use metadata::Metadata;
49 : pub(crate) use open_options::*;
50 :
51 : pub(crate) mod owned_buffers_io {
52 : //! Abstractions for IO with owned buffers.
53 : //!
54 :     //! Not actually tied to [`crate::virtual_file`] specifically, but it's the primary
55 : //! reason we need this abstraction.
56 : //!
57 : //! Over time, this could move into the `tokio-epoll-uring` crate, maybe `uring-common`,
58 : //! but for the time being we're proving out the primitives in the neon.git repo
59 : //! for faster iteration.
60 :
61 : pub(crate) mod aligned_buffer;
62 : pub(crate) mod io_buf_aligned;
63 : pub(crate) mod io_buf_ext;
64 : pub(crate) mod slice;
65 : pub(crate) mod write;
66 : }
67 :
68 : #[derive(Debug)]
69 : pub struct VirtualFile {
70 : inner: VirtualFileInner,
71 : _mode: IoMode,
72 : }
73 :
74 : impl VirtualFile {
75 : /// Open a file in read-only mode. Like File::open.
76 2096 : pub async fn open<P: AsRef<Utf8Path>>(
77 2096 : path: P,
78 2096 : ctx: &RequestContext,
79 2096 : ) -> Result<Self, std::io::Error> {
80 2096 : let inner = VirtualFileInner::open(path, ctx).await?;
81 2096 : Ok(VirtualFile {
82 2096 : inner,
83 2096 : _mode: IoMode::Buffered,
84 2096 : })
85 2096 : }
86 :
87 : /// Open a file in read-only mode. Like File::open.
88 : ///
89 :     /// `O_DIRECT` will be enabled based on `virtual_file_io_mode`.
90 2456 : pub async fn open_v2<P: AsRef<Utf8Path>>(
91 2456 : path: P,
92 2456 : ctx: &RequestContext,
93 2456 : ) -> Result<Self, std::io::Error> {
94 2456 : Self::open_with_options_v2(path.as_ref(), OpenOptions::new().read(true), ctx).await
95 2456 : }
96 :
97 3018 : pub async fn create<P: AsRef<Utf8Path>>(
98 3018 : path: P,
99 3018 : ctx: &RequestContext,
100 3018 : ) -> Result<Self, std::io::Error> {
101 3018 : let inner = VirtualFileInner::create(path, ctx).await?;
102 3018 : Ok(VirtualFile {
103 3018 : inner,
104 3018 : _mode: IoMode::Buffered,
105 3018 : })
106 3018 : }
107 :
108 0 : pub async fn create_v2<P: AsRef<Utf8Path>>(
109 0 : path: P,
110 0 : ctx: &RequestContext,
111 0 : ) -> Result<Self, std::io::Error> {
112 0 : VirtualFile::open_with_options_v2(
113 0 : path.as_ref(),
114 0 : OpenOptions::new().write(true).create(true).truncate(true),
115 0 : ctx,
116 0 : )
117 0 : .await
118 0 : }
119 :
120 1580 : pub async fn open_with_options<P: AsRef<Utf8Path>>(
121 1580 : path: P,
122 1580 : open_options: &OpenOptions,
123 1580 : ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
124 1580 : ) -> Result<Self, std::io::Error> {
125 1580 : let inner = VirtualFileInner::open_with_options(path, open_options, ctx).await?;
126 1580 : Ok(VirtualFile {
127 1580 : inner,
128 1580 : _mode: IoMode::Buffered,
129 1580 : })
130 1580 : }
131 :
132 5064 : pub async fn open_with_options_v2<P: AsRef<Utf8Path>>(
133 5064 : path: P,
134 5064 : open_options: &OpenOptions,
135 5064 : ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
136 5064 : ) -> Result<Self, std::io::Error> {
137 5064 : let file = match get_io_mode() {
138 : IoMode::Buffered => {
139 5064 : let inner = VirtualFileInner::open_with_options(path, open_options, ctx).await?;
140 5064 : VirtualFile {
141 5064 : inner,
142 5064 : _mode: IoMode::Buffered,
143 5064 : }
144 : }
145 : #[cfg(target_os = "linux")]
146 : IoMode::Direct => {
147 0 : let inner = VirtualFileInner::open_with_options(
148 0 : path,
149 0 : open_options.clone().custom_flags(nix::libc::O_DIRECT),
150 0 : ctx,
151 0 : )
152 0 : .await?;
153 0 : VirtualFile {
154 0 : inner,
155 0 : _mode: IoMode::Direct,
156 0 : }
157 : }
158 : };
159 5064 : Ok(file)
160 5064 : }
161 :
162 2372 : pub fn path(&self) -> &Utf8Path {
163 2372 : self.inner.path.as_path()
164 2372 : }
165 :
166 44 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
167 44 : final_path: Utf8PathBuf,
168 44 : tmp_path: Utf8PathBuf,
169 44 : content: B,
170 44 : ) -> std::io::Result<()> {
171 44 : VirtualFileInner::crashsafe_overwrite(final_path, tmp_path, content).await
172 44 : }
173 :
174 5598 : pub async fn sync_all(&self) -> Result<(), Error> {
175 5598 : if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 {
176 0 : return Ok(());
177 5598 : }
178 5598 : self.inner.sync_all().await
179 5598 : }
180 :
181 0 : pub async fn sync_data(&self) -> Result<(), Error> {
182 0 : if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 {
183 0 : return Ok(());
184 0 : }
185 0 : self.inner.sync_data().await
186 0 : }
187 :
188 3592 : pub async fn metadata(&self) -> Result<Metadata, Error> {
189 3592 : self.inner.metadata().await
190 3592 : }
191 :
192 512 : pub fn remove(self) {
193 512 : self.inner.remove();
194 512 : }
195 :
196 11312 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
197 11312 : self.inner.seek(pos).await
198 11312 : }
199 :
200 461683 : pub async fn read_exact_at<Buf>(
201 461683 : &self,
202 461683 : slice: Slice<Buf>,
203 461683 : offset: u64,
204 461683 : ctx: &RequestContext,
205 461683 : ) -> Result<Slice<Buf>, Error>
206 461683 : where
207 461683 : Buf: IoBufAlignedMut + Send,
208 461683 : {
209 461683 : self.inner.read_exact_at(slice, offset, ctx).await
210 461683 : }
211 :
212 63413 : pub async fn read_exact_at_page(
213 63413 : &self,
214 63413 : page: PageWriteGuard<'static>,
215 63413 : offset: u64,
216 63413 : ctx: &RequestContext,
217 63413 : ) -> Result<PageWriteGuard<'static>, Error> {
218 63413 : self.inner.read_exact_at_page(page, offset, ctx).await
219 63413 : }
220 :
221 13214 : pub async fn write_all_at<Buf: IoBufAligned + Send>(
222 13214 : &self,
223 13214 : buf: FullSlice<Buf>,
224 13214 : offset: u64,
225 13214 : ctx: &RequestContext,
226 13214 : ) -> (FullSlice<Buf>, Result<(), Error>) {
227 13214 : self.inner.write_all_at(buf, offset, ctx).await
228 13214 : }
229 :
230 2260692 : pub async fn write_all<Buf: IoBuf + Send>(
231 2260692 : &mut self,
232 2260692 : buf: FullSlice<Buf>,
233 2260692 : ctx: &RequestContext,
234 2260692 : ) -> (FullSlice<Buf>, Result<usize, Error>) {
235 2260692 : self.inner.write_all(buf, ctx).await
236 2260692 : }
237 :
238 448 : async fn read_to_end(&mut self, buf: &mut Vec<u8>, ctx: &RequestContext) -> Result<(), Error> {
239 448 : self.inner.read_to_end(buf, ctx).await
240 448 : }
241 :
242 0 : pub(crate) async fn read_to_string(
243 0 : &mut self,
244 0 : ctx: &RequestContext,
245 0 : ) -> Result<String, anyhow::Error> {
246 0 : let mut buf = Vec::new();
247 0 : self.read_to_end(&mut buf, ctx).await?;
248 0 : Ok(String::from_utf8(buf)?)
249 0 : }
250 : }
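// A hedged usage sketch of the API above, modeled on the unit tests at the
// bottom of this file (the path and `ctx` are placeholders; error handling
// elided):
//
// ```ignore
// let mut file = VirtualFile::create("some/path", &ctx).await?;
// let (_buf, res) = file.write_all(b"foobar".to_vec().slice_len(), &ctx).await;
// res?;
// let slice = IoBufferMut::with_capacity(6).slice_full();
// let slice = file.read_exact_at(slice, 0, &ctx).await?;
// assert_eq!(&slice[..], b"foobar");
// ```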
251 :
252 : /// Indicates whether to enable fsync, fdatasync, or O_SYNC/O_DSYNC when writing
253 : /// files. Switching this off is unsafe and only used for testing on machines
254 : /// with slow drives.
255 : #[repr(u8)]
256 : pub enum SyncMode {
257 : Sync,
258 : UnsafeNoSync,
259 : }
260 :
261 : impl TryFrom<u8> for SyncMode {
262 : type Error = u8;
263 :
264 0 : fn try_from(value: u8) -> Result<Self, Self::Error> {
265 0 : Ok(match value {
266 0 : v if v == (SyncMode::Sync as u8) => SyncMode::Sync,
267 0 : v if v == (SyncMode::UnsafeNoSync as u8) => SyncMode::UnsafeNoSync,
268 0 : x => return Err(x),
269 : })
270 0 : }
271 : }
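// Round-trip sketch for the discriminant encoding above (illustrative):
//
// ```ignore
// assert!(matches!(SyncMode::try_from(SyncMode::Sync as u8), Ok(SyncMode::Sync)));
// assert!(matches!(SyncMode::try_from(250), Err(250)));
// ```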
272 :
273 : ///
274 : /// A virtual file descriptor. You can use this just like std::fs::File, but internally
275 : /// the underlying file is closed if the system is low on file descriptors,
276 : /// and re-opened when it's accessed again.
277 : ///
278 : /// Like with std::fs::File, multiple threads can read/write the file concurrently,
279 : /// holding just a shared reference to the same VirtualFile, using the read_at() / write_at()
280 : /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
281 : /// require a mutable reference, because they modify the "current position".
282 : ///
283 : /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
284 : /// slot that 'handle' points to, if the underlying file is currently open. If it's not
285 : /// currently open, the 'handle' can still point to the slot where it was last kept. The
286 : /// 'tag' field is used to detect whether the handle is still valid.
287 : ///
288 : #[derive(Debug)]
289 : pub struct VirtualFileInner {
290 : /// Lazy handle to the global file descriptor cache. The slot that this points to
291 : /// might contain our File, or it may be empty, or it may contain a File that
292 : /// belongs to a different VirtualFile.
293 : handle: RwLock<SlotHandle>,
294 :
295 : /// Current file position
296 : pos: u64,
297 :
298 : /// File path and options to use to open it.
299 : ///
300 : /// Note: this only contains the options needed to re-open it. For example,
301 : /// if a new file is created, we only pass the create flag when it's initially
302 : /// opened, in the VirtualFile::create() function, and strip the flag before
303 : /// storing it here.
304 : pub path: Utf8PathBuf,
305 : open_options: OpenOptions,
306 :
307 :     // These are strings because we only use them for metrics, and those expect strings.
308 : // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
309 : // strings.
310 : tenant_id: String,
311 : shard_id: String,
312 : timeline_id: String,
313 : }
314 :
315 : #[derive(Debug, PartialEq, Clone, Copy)]
316 : struct SlotHandle {
317 : /// Index into OPEN_FILES.slots
318 : index: usize,
319 :
320 : /// Value of 'tag' in the slot. If slot's tag doesn't match, then the slot has
321 : /// been recycled and no longer contains the FD for this virtual file.
322 : tag: u64,
323 : }
324 :
325 : /// OPEN_FILES is the global array that holds the physical file descriptors that
326 : /// are currently open. Each slot in the array is protected by a separate lock,
327 : /// so that different files can be accessed independently. The lock must be held
328 : /// in write mode to replace the slot with a different file, but a read mode
329 : /// is enough to operate on the file, whether you're reading or writing to it.
330 : ///
331 : /// OPEN_FILES starts in an uninitialized state, and it's initialized by
332 : /// the virtual_file::init() function. It must be called exactly once at page
333 : /// server startup.
334 : static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();
335 :
336 : struct OpenFiles {
337 : slots: &'static [Slot],
338 :
339 : /// clock arm for the clock algorithm
340 : next: AtomicUsize,
341 : }
342 :
343 : struct Slot {
344 : inner: RwLock<SlotInner>,
345 :
346 : /// has this file been used since last clock sweep?
347 : recently_used: AtomicBool,
348 : }
349 :
350 : struct SlotInner {
351 : /// Counter that's incremented every time a different file is stored here.
352 : /// To avoid the ABA problem.
353 : tag: u64,
354 :
355 : /// the underlying file
356 : file: Option<OwnedFd>,
357 : }
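// The ABA hazard that `tag` guards against, concretely: a VirtualFile caches
// SlotHandle { index: 3, tag: 7 }; the slot is evicted and re-used for some
// other file. Without the tag bump in find_victim_slot(), the stale handle
// would still "match" slot 3 and the reader would operate on another file's
// fd; with it, tag 7 != 8 and the VirtualFile re-opens its own file instead.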
358 :
359 : /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
360 : struct PageWriteGuardBuf {
361 : page: PageWriteGuard<'static>,
362 : }
363 : // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
364 : // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
365 : // Page cache pages are zero-initialized, so, wrt uninitialized memory we're good.
366 : // (Page cache tracks separately whether the contents are valid, see `PageWriteGuard::mark_valid`.)
367 : unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
368 253671 : fn stable_ptr(&self) -> *const u8 {
369 253671 : self.page.as_ptr()
370 253671 : }
371 475626 : fn bytes_init(&self) -> usize {
372 475626 : self.page.len()
373 475626 : }
374 190239 : fn bytes_total(&self) -> usize {
375 190239 : self.page.len()
376 190239 : }
377 : }
378 : // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
379 : // hence it's safe to hand out the `stable_mut_ptr()`.
380 : unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
381 95129 : fn stable_mut_ptr(&mut self) -> *mut u8 {
382 95129 : self.page.as_mut_ptr()
383 95129 : }
384 :
385 63413 : unsafe fn set_init(&mut self, pos: usize) {
386 63413 : // There shouldn't really be any reason to call this API since bytes_init() == bytes_total().
387 63413 : assert!(pos <= self.page.len());
388 63413 : }
389 : }
390 :
391 : impl OpenFiles {
392 : /// Find a slot to use, evicting an existing file descriptor if needed.
393 : ///
394 :     /// On return, we hold a lock on the slot, its 'tag' has been updated, and
395 :     /// 'recently_used' has been set. It's all ready for reuse.
396 390607 : async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
397 390607 : //
398 390607 : // Run the clock algorithm to find a slot to replace.
399 390607 : //
400 390607 : let num_slots = self.slots.len();
401 390607 : let mut retries = 0;
402 : let mut slot;
403 : let mut slot_guard;
404 : let index;
405 : loop {
406 5124050 : let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
407 5124050 : slot = &self.slots[next];
408 5124050 :
409 5124050 : // If the recently_used flag on this slot is set, continue the clock
410 5124050 : // sweep. Otherwise try to use this slot. If we cannot acquire the
411 5124050 : // lock, also continue the clock sweep.
412 5124050 : //
413 5124050 : // We only continue in this manner for a while, though. If we loop
414 5124050 : // through the array twice without finding a victim, just pick the
415 5124050 : // next slot and wait until we can reuse it. This way, we avoid
416 5124050 : // spinning in the extreme case that all the slots are busy with an
417 5124050 : // I/O operation.
418 5124050 : if retries < num_slots * 2 {
419 4921980 : if !slot.recently_used.swap(false, Ordering::Release) {
420 4505418 : if let Ok(guard) = slot.inner.try_write() {
421 188537 : slot_guard = guard;
422 188537 : index = next;
423 188537 : break;
424 4316881 : }
425 416562 : }
426 4733443 : retries += 1;
427 : } else {
428 202070 : slot_guard = slot.inner.write().await;
429 202070 : index = next;
430 202070 : break;
431 : }
432 : }
433 :
434 : //
435 : // We now have the victim slot locked. If it was in use previously, close the
436 : // old file.
437 : //
438 390607 : if let Some(old_file) = slot_guard.file.take() {
439 380672 : // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
440 380672 : // distinguish the two.
441 380672 : STORAGE_IO_TIME_METRIC
442 380672 : .get(StorageIoOperation::CloseByReplace)
443 380672 : .observe_closure_duration(|| drop(old_file));
444 380672 : }
445 :
446 : // Prepare the slot for reuse and return it
447 390607 : slot_guard.tag += 1;
448 390607 : slot.recently_used.store(true, Ordering::Relaxed);
449 390607 : (
450 390607 : SlotHandle {
451 390607 : index,
452 390607 : tag: slot_guard.tag,
453 390607 : },
454 390607 : slot_guard,
455 390607 : )
456 390607 : }
457 : }
458 :
459 : /// Identify error types that should always terminate the process. Other
460 : /// error types may be eligible for retry.
461 4 : pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
462 : use nix::errno::Errno::*;
463 4 : match e.raw_os_error().map(nix::errno::from_i32) {
464 : Some(EIO) => {
465 : // Terminate on EIO because we no longer trust the device to store
466 : // data safely, or to uphold persistence guarantees on fsync.
467 0 : true
468 : }
469 : Some(EROFS) => {
470 : // Terminate on EROFS because a filesystem is usually remounted
471 : // readonly when it has experienced some critical issue, so the same
472 : // logic as EIO applies.
473 0 : true
474 : }
475 : Some(EACCES) => {
476 :             // Terminate on EACCES because we should always have permissions
477 : // for our own data dir: if we don't, then we can't do our job and
478 : // need administrative intervention to fix permissions. Terminating
479 : // is the best way to make sure we stop cleanly rather than going
480 : // into infinite retry loops, and will make it clear to the outside
481 : // world that we need help.
482 0 : true
483 : }
484 : _ => {
485 :             // Treat all other local file I/O errors as retryable. This includes:
486 : // - ENOSPC: we stay up and wait for eviction to free some space
487 : // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
488 :             // - WriteZero, Interrupted: these are used internally by VirtualFile
489 4 : false
490 : }
491 : }
492 4 : }
493 :
494 : /// Call this when the local filesystem gives us an error with an external
495 : /// cause: this includes EIO, EROFS, and EACCES: all these indicate either
496 : /// bad storage or bad configuration, and we can't fix that from inside
497 : /// a running process.
498 0 : pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
499 0 :     tracing::error!("Fatal I/O error: {e}: {context}");
500 0 : std::process::abort();
501 : }
502 :
503 : pub(crate) trait MaybeFatalIo<T> {
504 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
505 : fn fatal_err(self, context: &str) -> T;
506 : }
507 :
508 : impl<T> MaybeFatalIo<T> for std::io::Result<T> {
509 : /// Terminate the process if the result is an error of a fatal type, else pass it through
510 : ///
511 :     /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc, but
512 : /// not on ENOSPC.
513 2283134 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
514 2283134 : if let Err(e) = &self {
515 4 : if is_fatal_io_error(e) {
516 0 : on_fatal_io_error(e, context);
517 4 : }
518 2283130 : }
519 2283134 : self
520 2283134 : }
521 :
522 : /// Terminate the process on any I/O error.
523 : ///
524 : /// This is appropriate for reads on files that we know exist: they should always work.
525 4084 : fn fatal_err(self, context: &str) -> T {
526 4084 : match self {
527 4084 : Ok(v) => v,
528 0 : Err(e) => {
529 0 : on_fatal_io_error(&e, context);
530 : }
531 : }
532 4084 : }
533 : }
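// Hedged usage sketch for `MaybeFatalIo` (the path and messages are
// illustrative placeholders, not real call sites):
//
// ```ignore
// // Dies on EIO/EROFS/EACCES, passes e.g. ENOSPC through for retry:
// std::fs::write(&some_path, &bytes).maybe_fatal_err("write file")?;
// // Dies on any I/O error; for files that are known to exist:
// let meta = std::fs::metadata(&some_path).fatal_err("stat file");
// ```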
534 :
535 : /// Observe duration for the given storage I/O operation
536 : ///
537 : /// Unlike `observe_closure_duration`, this supports async,
538 : /// where "support" means that we measure wall clock time.
539 : macro_rules! observe_duration {
540 : ($op:expr, $($body:tt)*) => {{
541 : let instant = Instant::now();
542 : let result = $($body)*;
543 : let elapsed = instant.elapsed().as_secs_f64();
544 : STORAGE_IO_TIME_METRIC
545 : .get($op)
546 : .observe(elapsed);
547 : result
548 : }}
549 : }
550 :
551 : macro_rules! with_file {
552 : ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
553 : let $ident = $this.lock_file().await?;
554 : observe_duration!($op, $($body)*)
555 : }};
556 : ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
557 : let mut $ident = $this.lock_file().await?;
558 : observe_duration!($op, $($body)*)
559 : }};
560 : }
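// Illustrative expansion (not generated code): a call like
// `with_file!(self, StorageIoOperation::Fsync, |file_guard| body)`
// becomes roughly:
//
//     let file_guard = self.lock_file().await?;
//     let instant = Instant::now();
//     let result = body;
//     STORAGE_IO_TIME_METRIC
//         .get(StorageIoOperation::Fsync)
//         .observe(instant.elapsed().as_secs_f64());
//     result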
561 :
562 : impl VirtualFileInner {
563 : /// Open a file in read-only mode. Like File::open.
564 2096 : pub async fn open<P: AsRef<Utf8Path>>(
565 2096 : path: P,
566 2096 : ctx: &RequestContext,
567 2096 : ) -> Result<VirtualFileInner, std::io::Error> {
568 2096 : Self::open_with_options(path.as_ref(), OpenOptions::new().read(true), ctx).await
569 2096 : }
570 :
571 : /// Create a new file for writing. If the file exists, it will be truncated.
572 : /// Like File::create.
573 3018 : pub async fn create<P: AsRef<Utf8Path>>(
574 3018 : path: P,
575 3018 : ctx: &RequestContext,
576 3018 : ) -> Result<VirtualFileInner, std::io::Error> {
577 3018 : Self::open_with_options(
578 3018 : path.as_ref(),
579 3018 : OpenOptions::new().write(true).create(true).truncate(true),
580 3018 : ctx,
581 3018 : )
582 3018 : .await
583 3018 : }
584 :
585 : /// Open a file with given options.
586 : ///
587 : /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
588 :     /// they will also be applied when the file is subsequently re-opened, not only
589 : /// on the first time. Make sure that's sane!
590 12158 : pub async fn open_with_options<P: AsRef<Utf8Path>>(
591 12158 : path: P,
592 12158 : open_options: &OpenOptions,
593 12158 : _ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
594 12158 : ) -> Result<VirtualFileInner, std::io::Error> {
595 12158 : let path_ref = path.as_ref();
596 12158 : let path_str = path_ref.to_string();
597 12158 : let parts = path_str.split('/').collect::<Vec<&str>>();
598 12158 : let (tenant_id, shard_id, timeline_id) =
599 12158 : if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
600 9170 : let tenant_shard_part = parts[parts.len() - 4];
601 9170 : let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
602 9170 : Ok(tenant_shard_id) => (
603 9170 : tenant_shard_id.tenant_id.to_string(),
604 9170 : format!("{}", tenant_shard_id.shard_slug()),
605 9170 : ),
606 : Err(_) => {
607 : // Malformed path: this ID is just for observability, so tolerate it
608 : // and pass through
609 0 : (tenant_shard_part.to_string(), "*".to_string())
610 : }
611 : };
612 9170 : (tenant_id, shard_id, parts[parts.len() - 2].to_string())
613 : } else {
614 2988 : ("*".to_string(), "*".to_string(), "*".to_string())
615 : };
616 12158 : let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;
617 :
618 : // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
619 : // where our caller doesn't get to use the returned VirtualFile before its
620 : // slot gets re-used by someone else.
621 12158 : let file = observe_duration!(StorageIoOperation::Open, {
622 12158 : open_options.open(path_ref.as_std_path()).await?
623 : });
624 :
625 : // Strip all options other than read and write.
626 : //
627 : // It would perhaps be nicer to check just for the read and write flags
628 : // explicitly, but OpenOptions doesn't contain any functions to read flags,
629 : // only to set them.
630 12158 : let mut reopen_options = open_options.clone();
631 12158 : reopen_options.create(false);
632 12158 : reopen_options.create_new(false);
633 12158 : reopen_options.truncate(false);
634 12158 :
635 12158 : let vfile = VirtualFileInner {
636 12158 : handle: RwLock::new(handle),
637 12158 : pos: 0,
638 12158 : path: path_ref.to_path_buf(),
639 12158 : open_options: reopen_options,
640 12158 : tenant_id,
641 12158 : shard_id,
642 12158 : timeline_id,
643 12158 : };
644 12158 :
645 12158 : // TODO: Under pressure, it's likely the slot will get re-used and
646 12158 : // the underlying file closed before they get around to using it.
647 12158 : // => https://github.com/neondatabase/neon/issues/6065
648 12158 : slot_guard.file.replace(file);
649 12158 :
650 12158 : Ok(vfile)
651 12158 : }
652 :
653 : /// Async version of [`::utils::crashsafe::overwrite`].
654 : ///
655 : /// # NB:
656 : ///
657 :     /// Doesn't actually use the [`VirtualFile`] file descriptor cache, but
658 :     /// it did at an earlier time.
659 :     /// And it will use this module's [`io_engine`] in the near future, so it stays here.
660 56 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
661 56 : final_path: Utf8PathBuf,
662 56 : tmp_path: Utf8PathBuf,
663 56 : content: B,
664 56 : ) -> std::io::Result<()> {
665 56 : // TODO: use tokio_epoll_uring if configured as `io_engine`.
666 56 : // See https://github.com/neondatabase/neon/issues/6663
667 56 :
668 56 : tokio::task::spawn_blocking(move || {
669 56 : let slice_storage;
670 56 : let content_len = content.bytes_init();
671 56 : let content = if content.bytes_init() > 0 {
672 56 : slice_storage = Some(content.slice(0..content_len));
673 56 : slice_storage.as_deref().expect("just set it to Some()")
674 : } else {
675 0 : &[]
676 : };
677 56 : utils::crashsafe::overwrite(&final_path, &tmp_path, content)
678 56 : .maybe_fatal_err("crashsafe_overwrite")
679 56 : })
680 56 : .await
681 56 : .expect("blocking task is never aborted")
682 56 : }
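    // For reference, the write-to-temp-then-rename pattern that
    // `utils::crashsafe::overwrite` implements, sketched with std APIs (this
    // is an assumption about the helper's exact steps; its docs are
    // authoritative):
    //
    // ```ignore
    // let mut tmp = std::fs::File::create(&tmp_path)?;
    // tmp.write_all(content)?;
    // tmp.sync_all()?; // make the data durable before it becomes visible
    // std::fs::rename(&tmp_path, &final_path)?;
    // // fsync the parent directory so the rename itself survives a crash
    // std::fs::File::open(final_path.parent().unwrap())?.sync_all()?;
    // ```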
683 :
684 : /// Call File::sync_all() on the underlying File.
685 5598 : pub async fn sync_all(&self) -> Result<(), Error> {
686 5598 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
687 5598 : let (_file_guard, res) = io_engine::get().sync_all(file_guard).await;
688 5598 : res.maybe_fatal_err("sync_all")
689 : })
690 5598 : }
691 :
692 : /// Call File::sync_data() on the underlying File.
693 0 : pub async fn sync_data(&self) -> Result<(), Error> {
694 0 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
695 0 : let (_file_guard, res) = io_engine::get().sync_data(file_guard).await;
696 0 : res.maybe_fatal_err("sync_data")
697 : })
698 0 : }
699 :
700 3592 : pub async fn metadata(&self) -> Result<Metadata, Error> {
701 3592 : with_file!(self, StorageIoOperation::Metadata, |file_guard| {
702 3592 : let (_file_guard, res) = io_engine::get().metadata(file_guard).await;
703 3592 : res
704 : })
705 3592 : }
706 :
707 : /// Helper function internal to `VirtualFile` that looks up the underlying File,
708 :     /// opens it and evicts some other File if necessary. The parameter passed to
709 :     /// the `with_file!` macro is assumed to be an operation available on the physical `File`.
710 : ///
711 : /// We are doing it via a macro as Rust doesn't support async closures that
712 :     /// take parameters with lifetimes.
713 3250444 : async fn lock_file(&self) -> Result<FileGuard, Error> {
714 3250444 : let open_files = get_open_files();
715 :
716 378449 : let mut handle_guard = {
717 : // Read the cached slot handle, and see if the slot that it points to still
718 : // contains our File.
719 : //
720 : // We only need to hold the handle lock while we read the current handle. If
721 : // another thread closes the file and recycles the slot for a different file,
722 : // we will notice that the handle we read is no longer valid and retry.
723 3250444 : let mut handle = *self.handle.read().await;
724 : loop {
725 : // Check if the slot contains our File
726 : {
727 3458583 : let slot = &open_files.slots[handle.index];
728 3458583 : let slot_guard = slot.inner.read().await;
729 3458583 : if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
730 : // Found a cached file descriptor.
731 2871995 : slot.recently_used.store(true, Ordering::Relaxed);
732 2871995 : return Ok(FileGuard { slot_guard });
733 586588 : }
734 : }
735 :
736 : // The slot didn't contain our File. We will have to open it ourselves,
737 : // but before that, grab a write lock on handle in the VirtualFile, so
738 : // that no other thread will try to concurrently open the same file.
739 586588 : let handle_guard = self.handle.write().await;
740 :
741 : // If another thread changed the handle while we were not holding the lock,
742 : // then the handle might now be valid again. Loop back to retry.
743 586588 : if *handle_guard != handle {
744 208139 : handle = *handle_guard;
745 208139 : continue;
746 378449 : }
747 378449 : break handle_guard;
748 : }
749 : };
750 :
751 : // We need to open the file ourselves. The handle in the VirtualFile is
752 : // now locked in write-mode. Find a free slot to put it in.
753 378449 : let (handle, mut slot_guard) = open_files.find_victim_slot().await;
754 :
755 : // Re-open the physical file.
756 :         // NB: we use StorageIoOperation::OpenAfterReplace for this to distinguish this
757 : // case from StorageIoOperation::Open. This helps with identifying thrashing
758 : // of the virtual file descriptor cache.
759 378449 : let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
760 378449 : self.open_options.open(self.path.as_std_path()).await?
761 : });
762 :
763 : // Store the File in the slot and update the handle in the VirtualFile
764 : // to point to it.
765 378449 : slot_guard.file.replace(file);
766 378449 :
767 378449 : *handle_guard = handle;
768 378449 :
769 378449 : Ok(FileGuard {
770 378449 : slot_guard: slot_guard.downgrade(),
771 378449 : })
772 3250444 : }
773 :
774 512 : pub fn remove(self) {
775 512 : let path = self.path.clone();
776 512 : drop(self);
777 512 : std::fs::remove_file(path).expect("failed to remove the virtual file");
778 512 : }
779 :
780 11312 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
781 11312 : match pos {
782 11292 : SeekFrom::Start(offset) => {
783 11292 : self.pos = offset;
784 11292 : }
785 8 : SeekFrom::End(offset) => {
786 8 : self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
787 8 : .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
788 : }
789 12 : SeekFrom::Current(offset) => {
790 12 : let pos = self.pos as i128 + offset as i128;
791 12 : if pos < 0 {
792 4 : return Err(Error::new(
793 4 : ErrorKind::InvalidInput,
794 4 : "offset would be negative",
795 4 : ));
796 8 : }
797 8 : if pos > u64::MAX as i128 {
798 0 : return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
799 8 : }
800 8 : self.pos = pos as u64;
801 : }
802 : }
803 11304 : Ok(self.pos)
804 11312 : }
805 :
806 : /// Read the file contents in range `offset..(offset + slice.bytes_total())` into `slice[0..slice.bytes_total()]`.
807 : ///
808 : /// The returned `Slice<Buf>` is equivalent to the input `slice`, i.e., it's the same view into the same buffer.
809 966528 : pub async fn read_exact_at<Buf>(
810 966528 : &self,
811 966528 : slice: Slice<Buf>,
812 966528 : offset: u64,
813 966528 : ctx: &RequestContext,
814 966528 : ) -> Result<Slice<Buf>, Error>
815 966528 : where
816 966528 : Buf: IoBufAlignedMut + Send,
817 966528 : {
818 966528 : let assert_we_return_original_bounds = if cfg!(debug_assertions) {
819 966528 : Some((slice.stable_ptr() as usize, slice.bytes_total()))
820 : } else {
821 0 : None
822 : };
823 :
824 966528 : let original_bounds = slice.bounds();
825 966528 : let (buf, res) =
826 966528 : read_exact_at_impl(slice, offset, |buf, offset| self.read_at(buf, offset, ctx)).await;
827 966528 : let res = res.map(|_| buf.slice(original_bounds));
828 :
829 966528 : if let Some(original_bounds) = assert_we_return_original_bounds {
830 966528 : if let Ok(slice) = &res {
831 966528 : let returned_bounds = (slice.stable_ptr() as usize, slice.bytes_total());
832 966528 : assert_eq!(original_bounds, returned_bounds);
833 0 : }
834 0 : }
835 :
836 966528 : res
837 966528 : }
838 :
839 : /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
840 63413 : pub async fn read_exact_at_page(
841 63413 : &self,
842 63413 : page: PageWriteGuard<'static>,
843 63413 : offset: u64,
844 63413 : ctx: &RequestContext,
845 63413 : ) -> Result<PageWriteGuard<'static>, Error> {
846 63413 : let buf = PageWriteGuardBuf { page }.slice_full();
847 63413 : debug_assert_eq!(buf.bytes_total(), PAGE_SZ);
848 63413 : self.read_exact_at(buf, offset, ctx)
849 63413 : .await
850 63413 : .map(|slice| slice.into_inner().page)
851 63413 : }
852 :
853 : // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
854 13214 : pub async fn write_all_at<Buf: IoBuf + Send>(
855 13214 : &self,
856 13214 : buf: FullSlice<Buf>,
857 13214 : mut offset: u64,
858 13214 : ctx: &RequestContext,
859 13214 : ) -> (FullSlice<Buf>, Result<(), Error>) {
860 13214 : let buf = buf.into_raw_slice();
861 13214 : let bounds = buf.bounds();
862 13214 : let restore =
863 13214 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
864 13214 : let mut buf = buf;
865 26428 : while !buf.is_empty() {
866 13214 : let (tmp, res) = self.write_at(FullSlice::must_new(buf), offset, ctx).await;
867 13214 : buf = tmp.into_raw_slice();
868 0 : match res {
869 : Ok(0) => {
870 0 : return (
871 0 : restore(buf),
872 0 : Err(Error::new(
873 0 : std::io::ErrorKind::WriteZero,
874 0 : "failed to write whole buffer",
875 0 : )),
876 0 : );
877 : }
878 13214 : Ok(n) => {
879 13214 : buf = buf.slice(n..);
880 13214 : offset += n as u64;
881 13214 : }
882 0 : Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {}
883 0 : Err(e) => return (restore(buf), Err(e)),
884 : }
885 : }
886 13214 : (restore(buf), Ok(()))
887 13214 : }
888 :
889 : /// Writes `buf` to the file at the current offset.
890 : ///
891 : /// Panics if there is an uninitialized range in `buf`, as that is most likely a bug in the caller.
892 2260692 : pub async fn write_all<Buf: IoBuf + Send>(
893 2260692 : &mut self,
894 2260692 : buf: FullSlice<Buf>,
895 2260692 : ctx: &RequestContext,
896 2260692 : ) -> (FullSlice<Buf>, Result<usize, Error>) {
897 2260692 : let buf = buf.into_raw_slice();
898 2260692 : let bounds = buf.bounds();
899 2260692 : let restore =
900 2260692 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
901 2260692 : let nbytes = buf.len();
902 2260692 : let mut buf = buf;
903 4521304 : while !buf.is_empty() {
904 2260616 : let (tmp, res) = self.write(FullSlice::must_new(buf), ctx).await;
905 2260616 : buf = tmp.into_raw_slice();
906 4 : match res {
907 : Ok(0) => {
908 0 : return (
909 0 : restore(buf),
910 0 : Err(Error::new(
911 0 : std::io::ErrorKind::WriteZero,
912 0 : "failed to write whole buffer",
913 0 : )),
914 0 : );
915 : }
916 2260612 : Ok(n) => {
917 2260612 : buf = buf.slice(n..);
918 2260612 : }
919 4 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
920 4 : Err(e) => return (restore(buf), Err(e)),
921 : }
922 : }
923 2260688 : (restore(buf), Ok(nbytes))
924 2260692 : }
925 :
926 2260616 : async fn write<B: IoBuf + Send>(
927 2260616 : &mut self,
928 2260616 : buf: FullSlice<B>,
929 2260616 : ctx: &RequestContext,
930 2260616 : ) -> (FullSlice<B>, Result<usize, std::io::Error>) {
931 2260616 : let pos = self.pos;
932 2260616 : let (buf, res) = self.write_at(buf, pos, ctx).await;
933 2260616 : let n = match res {
934 2260612 : Ok(n) => n,
935 4 : Err(e) => return (buf, Err(e)),
936 : };
937 2260612 : self.pos += n as u64;
938 2260612 : (buf, Ok(n))
939 2260616 : }
940 :
941 967416 : pub(crate) async fn read_at<Buf>(
942 967416 : &self,
943 967416 : buf: tokio_epoll_uring::Slice<Buf>,
944 967416 : offset: u64,
945 967416 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
946 967416 : ) -> (tokio_epoll_uring::Slice<Buf>, Result<usize, Error>)
947 967416 : where
948 967416 : Buf: tokio_epoll_uring::IoBufMut + Send,
949 967416 : {
950 967416 : let file_guard = match self.lock_file().await {
951 967416 : Ok(file_guard) => file_guard,
952 0 : Err(e) => return (buf, Err(e)),
953 : };
954 :
955 967416 : observe_duration!(StorageIoOperation::Read, {
956 967416 : let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
957 967416 : if let Ok(size) = res {
958 967412 : STORAGE_IO_SIZE
959 967412 : .with_label_values(&[
960 967412 : "read",
961 967412 : &self.tenant_id,
962 967412 : &self.shard_id,
963 967412 : &self.timeline_id,
964 967412 : ])
965 967412 : .add(size as i64);
966 967412 : }
967 967416 : (buf, res)
968 : })
969 967416 : }
970 :
971 : /// The function aborts the process if the error is fatal.
972 2273830 : async fn write_at<B: IoBuf + Send>(
973 2273830 : &self,
974 2273830 : buf: FullSlice<B>,
975 2273830 : offset: u64,
976 2273830 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
977 2273830 : ) -> (FullSlice<B>, Result<usize, Error>) {
978 2273830 : let (slice, result) = self.write_at_inner(buf, offset, _ctx).await;
979 2273830 : let result = result.maybe_fatal_err("write_at");
980 2273830 : (slice, result)
981 2273830 : }
982 :
983 2273830 : async fn write_at_inner<B: IoBuf + Send>(
984 2273830 : &self,
985 2273830 : buf: FullSlice<B>,
986 2273830 : offset: u64,
987 2273830 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
988 2273830 : ) -> (FullSlice<B>, Result<usize, Error>) {
989 2273830 : let file_guard = match self.lock_file().await {
990 2273830 : Ok(file_guard) => file_guard,
991 0 : Err(e) => return (buf, Err(e)),
992 : };
993 2273830 : observe_duration!(StorageIoOperation::Write, {
994 2273830 : let ((_file_guard, buf), result) =
995 2273830 : io_engine::get().write_at(file_guard, offset, buf).await;
996 2273830 : if let Ok(size) = result {
997 2273826 : STORAGE_IO_SIZE
998 2273826 : .with_label_values(&[
999 2273826 : "write",
1000 2273826 : &self.tenant_id,
1001 2273826 : &self.shard_id,
1002 2273826 : &self.timeline_id,
1003 2273826 : ])
1004 2273826 : .add(size as i64);
1005 2273826 : }
1006 2273830 : (buf, result)
1007 : })
1008 2273830 : }
1009 :
1010 448 : async fn read_to_end(&mut self, buf: &mut Vec<u8>, ctx: &RequestContext) -> Result<(), Error> {
1011 448 : let mut tmp = vec![0; 128];
1012 : loop {
1013 888 : let slice = tmp.slice(..128);
1014 888 : let (slice, res) = self.read_at(slice, self.pos, ctx).await;
1015 4 : match res {
1016 444 : Ok(0) => return Ok(()),
1017 440 : Ok(n) => {
1018 440 : self.pos += n as u64;
1019 440 : buf.extend_from_slice(&slice[..n]);
1020 440 : }
1021 4 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
1022 4 : Err(e) => return Err(e),
1023 : }
1024 440 : tmp = slice.into_inner();
1025 : }
1026 448 : }
1027 : }
1028 :
1029 : // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
1030 966544 : pub async fn read_exact_at_impl<Buf, F, Fut>(
1031 966544 : mut buf: tokio_epoll_uring::Slice<Buf>,
1032 966544 : mut offset: u64,
1033 966544 : mut read_at: F,
1034 966544 : ) -> (Buf, std::io::Result<()>)
1035 966544 : where
1036 966544 : Buf: IoBufMut + Send,
1037 966544 : F: FnMut(tokio_epoll_uring::Slice<Buf>, u64) -> Fut,
1038 966544 : Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<Buf>, std::io::Result<usize>)>,
1039 966544 : {
1040 1933092 : while buf.bytes_total() != 0 {
1041 : let res;
1042 966552 : (buf, res) = read_at(buf, offset).await;
1043 0 : match res {
1044 4 : Ok(0) => break,
1045 966548 : Ok(n) => {
1046 966548 : buf = buf.slice(n..);
1047 966548 : offset += n as u64;
1048 966548 : }
1049 0 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
1050 0 : Err(e) => return (buf.into_inner(), Err(e)),
1051 : }
1052 : }
1053 : // NB: don't use `buf.is_empty()` here; it is from the
1054 : // `impl Deref for Slice { Target = [u8] }`; the &[u8]
1055 : // returned by it only covers the initialized portion of `buf`.
1056 : // Whereas we're interested in ensuring that we filled the entire
1057 : // buffer that the user passed in.
1058 966544 : if buf.bytes_total() != 0 {
1059 4 : (
1060 4 : buf.into_inner(),
1061 4 : Err(std::io::Error::new(
1062 4 : std::io::ErrorKind::UnexpectedEof,
1063 4 : "failed to fill whole buffer",
1064 4 : )),
1065 4 : )
1066 : } else {
1067 966540 : assert_eq!(buf.len(), buf.bytes_total());
1068 966540 : (buf.into_inner(), Ok(()))
1069 : }
1070 966544 : }
1071 :
1072 : #[cfg(test)]
1073 : mod test_read_exact_at_impl {
1074 :
1075 : use std::{collections::VecDeque, sync::Arc};
1076 :
1077 : use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};
1078 :
1079 : use super::read_exact_at_impl;
1080 :
1081 : struct Expectation {
1082 : offset: u64,
1083 : bytes_total: usize,
1084 : result: std::io::Result<Vec<u8>>,
1085 : }
1086 : struct MockReadAt {
1087 : expectations: VecDeque<Expectation>,
1088 : }
1089 :
1090 : impl MockReadAt {
1091 24 : async fn read_at(
1092 24 : &mut self,
1093 24 : mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
1094 24 : offset: u64,
1095 24 : ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
1096 24 : let exp = self
1097 24 : .expectations
1098 24 : .pop_front()
1099 24 : .expect("read_at called but we have no expectations left");
1100 24 : assert_eq!(exp.offset, offset);
1101 24 : assert_eq!(exp.bytes_total, buf.bytes_total());
1102 24 : match exp.result {
1103 24 : Ok(bytes) => {
1104 24 : assert!(bytes.len() <= buf.bytes_total());
1105 24 : buf.put_slice(&bytes);
1106 24 : (buf, Ok(bytes.len()))
1107 : }
1108 0 : Err(e) => (buf, Err(e)),
1109 : }
1110 24 : }
1111 : }
1112 :
1113 : impl Drop for MockReadAt {
1114 16 : fn drop(&mut self) {
1115 16 : assert_eq!(self.expectations.len(), 0);
1116 16 : }
1117 : }
1118 :
1119 : #[tokio::test]
1120 4 : async fn test_basic() {
1121 4 : let buf = Vec::with_capacity(5).slice_full();
1122 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1123 4 : expectations: VecDeque::from(vec![Expectation {
1124 4 : offset: 0,
1125 4 : bytes_total: 5,
1126 4 : result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
1127 4 : }]),
1128 4 : }));
1129 4 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1130 4 : let mock_read_at = Arc::clone(&mock_read_at);
1131 4 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1132 4 : })
1133 4 : .await;
1134 4 : assert!(res.is_ok());
1135 4 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
1136 4 : }
1137 :
1138 : #[tokio::test]
1139 4 : async fn test_empty_buf_issues_no_syscall() {
1140 4 : let buf = Vec::new().slice_full();
1141 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1142 4 : expectations: VecDeque::new(),
1143 4 : }));
1144 4 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1145 0 : let mock_read_at = Arc::clone(&mock_read_at);
1146 4 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1147 4 : })
1148 4 : .await;
1149 4 : assert!(res.is_ok());
1150 4 : }
1151 :
1152 : #[tokio::test]
1153 4 : async fn test_two_read_at_calls_needed_until_buf_filled() {
1154 4 : let buf = Vec::with_capacity(4).slice_full();
1155 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1156 4 : expectations: VecDeque::from(vec![
1157 4 : Expectation {
1158 4 : offset: 0,
1159 4 : bytes_total: 4,
1160 4 : result: Ok(vec![b'a', b'b']),
1161 4 : },
1162 4 : Expectation {
1163 4 : offset: 2,
1164 4 : bytes_total: 2,
1165 4 : result: Ok(vec![b'c', b'd']),
1166 4 : },
1167 4 : ]),
1168 4 : }));
1169 8 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1170 8 : let mock_read_at = Arc::clone(&mock_read_at);
1171 8 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1172 8 : })
1173 4 : .await;
1174 4 : assert!(res.is_ok());
1175 4 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
1176 4 : }
1177 :
1178 : #[tokio::test]
1179 4 : async fn test_eof_before_buffer_full() {
1180 4 : let buf = Vec::with_capacity(3).slice_full();
1181 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1182 4 : expectations: VecDeque::from(vec![
1183 4 : Expectation {
1184 4 : offset: 0,
1185 4 : bytes_total: 3,
1186 4 : result: Ok(vec![b'a']),
1187 4 : },
1188 4 : Expectation {
1189 4 : offset: 1,
1190 4 : bytes_total: 2,
1191 4 : result: Ok(vec![b'b']),
1192 4 : },
1193 4 : Expectation {
1194 4 : offset: 2,
1195 4 : bytes_total: 1,
1196 4 : result: Ok(vec![]),
1197 4 : },
1198 4 : ]),
1199 4 : }));
1200 12 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1201 12 : let mock_read_at = Arc::clone(&mock_read_at);
1202 12 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1203 12 : })
1204 4 : .await;
1205 4 : let Err(err) = res else {
1206 4 : panic!("should return an error");
1207 4 : };
1208 4 : assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
1209 4 : assert_eq!(format!("{err}"), "failed to fill whole buffer");
1210 4 : // buffer contents on error are unspecified
1211 4 : }
1212 : }
1213 :
1214 : struct FileGuard {
1215 : slot_guard: RwLockReadGuard<'static, SlotInner>,
1216 : }
1217 :
1218 : impl AsRef<OwnedFd> for FileGuard {
1219 3250444 : fn as_ref(&self) -> &OwnedFd {
1220 3250444 : // This unwrap is safe because we only create `FileGuard`s
1221 3250444 : // if we know that the file is Some.
1222 3250444 : self.slot_guard.file.as_ref().unwrap()
1223 3250444 : }
1224 : }
1225 :
1226 : impl FileGuard {
1227 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
1228 1625390 : fn with_std_file<F, R>(&self, with: F) -> R
1229 1625390 : where
1230 1625390 : F: FnOnce(&File) -> R,
1231 1625390 : {
1232 1625390 : // SAFETY:
1233 1625390 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1234 1625390 :         // - `&` usage below: `self` is `&`, hence the Rust typesystem guarantees there is no `&mut`
1235 1625390 : let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1236 1625390 : let res = with(&file);
1237 1625390 : let _ = file.into_raw_fd();
1238 1625390 : res
1239 1625390 : }
1240 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
1241 8 : fn with_std_file_mut<F, R>(&mut self, with: F) -> R
1242 8 : where
1243 8 : F: FnOnce(&mut File) -> R,
1244 8 : {
1245 8 : // SAFETY:
1246 8 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1247 8 : // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
1248 8 : let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1249 8 : let res = with(&mut file);
1250 8 : let _ = file.into_raw_fd();
1251 8 : res
1252 8 : }
1253 : }
1254 :
1255 : impl tokio_epoll_uring::IoFd for FileGuard {
1256 1625046 : unsafe fn as_fd(&self) -> RawFd {
1257 1625046 : let owned_fd: &OwnedFd = self.as_ref();
1258 1625046 : owned_fd.as_raw_fd()
1259 1625046 : }
1260 : }
1261 :
1262 : #[cfg(test)]
1263 : impl VirtualFile {
1264 41832 : pub(crate) async fn read_blk(
1265 41832 : &self,
1266 41832 : blknum: u32,
1267 41832 : ctx: &RequestContext,
1268 41832 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
1269 41832 : self.inner.read_blk(blknum, ctx).await
1270 41832 : }
1271 : }
1272 :
1273 : #[cfg(test)]
1274 : impl VirtualFileInner {
1275 41832 : pub(crate) async fn read_blk(
1276 41832 : &self,
1277 41832 : blknum: u32,
1278 41832 : ctx: &RequestContext,
1279 41832 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
1280 : use crate::page_cache::PAGE_SZ;
1281 41832 : let slice = IoBufferMut::with_capacity(PAGE_SZ).slice_full();
1282 41832 : assert_eq!(slice.bytes_total(), PAGE_SZ);
1283 41832 : let slice = self
1284 41832 : .read_exact_at(slice, blknum as u64 * (PAGE_SZ as u64), ctx)
1285 41832 : .await?;
1286 41832 : Ok(crate::tenant::block_io::BlockLease::IoBufferMut(
1287 41832 : slice.into_inner(),
1288 41832 : ))
1289 41832 : }
1290 : }
1291 :
1292 : impl Drop for VirtualFileInner {
1293 : /// If a VirtualFile is dropped, close the underlying file if it was open.
1294 10543 : fn drop(&mut self) {
1295 10543 : let handle = self.handle.get_mut();
1296 :
1297 10543 : fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
1298 10543 : if slot_guard.tag == tag {
1299 9388 : slot.recently_used.store(false, Ordering::Relaxed);
1300 : // there is also operation "close-by-replace" for closes done on eviction for
1301 : // comparison.
1302 9388 : if let Some(fd) = slot_guard.file.take() {
1303 9388 : STORAGE_IO_TIME_METRIC
1304 9388 : .get(StorageIoOperation::Close)
1305 9388 : .observe_closure_duration(|| drop(fd));
1306 9388 : }
1307 1155 : }
1308 10543 : }
1309 :
1310 : // We don't have async drop so we cannot directly await the lock here.
1311 : // Instead, first do a best-effort attempt at closing the underlying
1312 : // file descriptor by using `try_write`, and if that fails, spawn
1313 : // a tokio task to do it asynchronously: we just want it to be
1314 : // cleaned up eventually.
1315 :         // Most of the time, the `try_write` should succeed though,
1316 : // as we have `&mut self` access. In other words, if the slot
1317 : // is still occupied by our file, there should be no access from
1318 : // other I/O operations; the only other possible place to lock
1319 :         // the slot is the clock algorithm looking for free slots.
1320 10543 : let slot = &get_open_files().slots[handle.index];
1321 10543 : if let Ok(slot_guard) = slot.inner.try_write() {
1322 10543 : clean_slot(slot, slot_guard, handle.tag);
1323 10543 : } else {
1324 0 : let tag = handle.tag;
1325 0 : tokio::spawn(async move {
1326 0 : let slot_guard = slot.inner.write().await;
1327 0 : clean_slot(slot, slot_guard, tag);
1328 0 : });
1329 0 : };
1330 10543 : }
1331 : }
1332 :
1333 : impl OwnedAsyncWriter for VirtualFile {
1334 13206 : async fn write_all_at<Buf: IoBufAligned + Send>(
1335 13206 : &self,
1336 13206 : buf: FullSlice<Buf>,
1337 13206 : offset: u64,
1338 13206 : ctx: &RequestContext,
1339 13206 : ) -> std::io::Result<FullSlice<Buf>> {
1340 13206 : let (buf, res) = VirtualFile::write_all_at(self, buf, offset, ctx).await;
1341 13206 : res.map(|_| buf)
1342 13206 : }
1343 : }
1344 :
1345 : impl OpenFiles {
1346 460 : fn new(num_slots: usize) -> OpenFiles {
1347 460 : let mut slots = Box::new(Vec::with_capacity(num_slots));
1348 4600 : for _ in 0..num_slots {
1349 4600 : let slot = Slot {
1350 4600 : recently_used: AtomicBool::new(false),
1351 4600 : inner: RwLock::new(SlotInner { tag: 0, file: None }),
1352 4600 : };
1353 4600 : slots.push(slot);
1354 4600 : }
1355 :
1356 460 : OpenFiles {
1357 460 : next: AtomicUsize::new(0),
1358 460 : slots: Box::leak(slots),
1359 460 : }
1360 460 : }
1361 : }
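// Note on the `Box::leak` above: it trades one never-freed allocation for a
// `&'static [Slot]`, which is what the process-global OPEN_FILES cell needs.
// The same idiom in isolation (illustrative):
//
// ```ignore
// let slots: &'static [u8] = Box::leak(vec![0u8; 4].into_boxed_slice());
// ```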
1362 :
1363 : ///
1364 : /// Initialize the virtual file module. This must be called once at page
1365 : /// server startup.
1366 : ///
1367 : #[cfg(not(test))]
1368 0 : pub fn init(num_slots: usize, engine: IoEngineKind, mode: IoMode, sync_mode: SyncMode) {
1369 0 : if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
1370 0 : panic!("virtual_file::init called twice");
1371 0 : }
1372 0 : set_io_mode(mode);
1373 0 : io_engine::init(engine);
1374 0 : SYNC_MODE.store(sync_mode as u8, std::sync::atomic::Ordering::Relaxed);
1375 0 : crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
1376 0 : }
1377 :
1378 : const TEST_MAX_FILE_DESCRIPTORS: usize = 10;
1379 :
1380 : // Get a handle to the global slots array.
1381 3273145 : fn get_open_files() -> &'static OpenFiles {
1382 3273145 : //
1383 3273145 : // In unit tests, page server startup doesn't happen and no one calls
1384 3273145 : // virtual_file::init(). Initialize it here, with a small array.
1385 3273145 : //
1386 3273145 : // This applies to the virtual file tests below, but all other unit
1387 3273145 : // tests too, so the virtual file facility is always usable in
1388 3273145 : // unit tests.
1389 3273145 : //
1390 3273145 : if cfg!(test) {
1391 3273145 : OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
1392 : } else {
1393 0 : OPEN_FILES.get().expect("virtual_file::init not called yet")
1394 : }
1395 3273145 : }
1396 :
1397 : /// Gets the io buffer alignment.
1398 0 : pub(crate) const fn get_io_buffer_alignment() -> usize {
1399 0 : DEFAULT_IO_BUFFER_ALIGNMENT
1400 0 : }
1401 :
1402 : pub(crate) type IoBufferMut = AlignedBufferMut<ConstAlign<{ get_io_buffer_alignment() }>>;
1403 : pub(crate) type IoBuffer = AlignedBuffer<ConstAlign<{ get_io_buffer_alignment() }>>;
1404 : pub(crate) type IoPageSlice<'a> =
1405 : AlignedSlice<'a, PAGE_SZ, ConstAlign<{ get_io_buffer_alignment() }>>;
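// The aligned aliases matter for the O_DIRECT path, which requires I/O
// buffers aligned to the device's logical block size. A hedged usage sketch
// (mirroring the test helpers below):
//
// ```ignore
// let buf = IoBufferMut::with_capacity(PAGE_SZ); // alignment-aware allocation
// let slice = buf.slice_full(); // satisfies IoBufAlignedMut for read_exact_at
// ```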
1406 :
1407 : static IO_MODE: AtomicU8 = AtomicU8::new(IoMode::preferred() as u8);
1408 :
1409 0 : pub(crate) fn set_io_mode(mode: IoMode) {
1410 0 : IO_MODE.store(mode as u8, std::sync::atomic::Ordering::Relaxed);
1411 0 : }
1412 :
1413 5064 : pub(crate) fn get_io_mode() -> IoMode {
1414 5064 : IoMode::try_from(IO_MODE.load(Ordering::Relaxed)).unwrap()
1415 5064 : }
1416 :
1417 : static SYNC_MODE: AtomicU8 = AtomicU8::new(SyncMode::Sync as u8);
1418 :
1419 : #[cfg(test)]
1420 : mod tests {
1421 : use crate::context::DownloadBehavior;
1422 : use crate::task_mgr::TaskKind;
1423 :
1424 : use super::*;
1425 : use owned_buffers_io::io_buf_ext::IoBufExt;
1426 : use owned_buffers_io::slice::SliceMutExt;
1427 : use rand::seq::SliceRandom;
1428 : use rand::thread_rng;
1429 : use rand::Rng;
1430 : use std::io::Write;
1431 : use std::os::unix::fs::FileExt;
1432 : use std::sync::Arc;
1433 :
1434 : enum MaybeVirtualFile {
1435 : VirtualFile(VirtualFile),
1436 : File(File),
1437 : }
1438 :
1439 : impl From<VirtualFile> for MaybeVirtualFile {
1440 12 : fn from(vf: VirtualFile) -> Self {
1441 12 : MaybeVirtualFile::VirtualFile(vf)
1442 12 : }
1443 : }
1444 :
1445 : impl MaybeVirtualFile {
1446 808 : async fn read_exact_at(
1447 808 : &self,
1448 808 : mut slice: tokio_epoll_uring::Slice<IoBufferMut>,
1449 808 : offset: u64,
1450 808 : ctx: &RequestContext,
1451 808 : ) -> Result<tokio_epoll_uring::Slice<IoBufferMut>, Error> {
1452 808 : match self {
1453 404 : MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(slice, offset, ctx).await,
1454 404 : MaybeVirtualFile::File(file) => {
1455 404 : let rust_slice: &mut [u8] = slice.as_mut_rust_slice_full_zeroed();
1456 404 : file.read_exact_at(rust_slice, offset).map(|()| slice)
1457 : }
1458 : }
1459 808 : }
1460 16 : async fn write_all_at<Buf: IoBufAligned + Send>(
1461 16 : &self,
1462 16 : buf: FullSlice<Buf>,
1463 16 : offset: u64,
1464 16 : ctx: &RequestContext,
1465 16 : ) -> Result<(), Error> {
1466 16 : match self {
1467 8 : MaybeVirtualFile::VirtualFile(file) => {
1468 8 : let (_buf, res) = file.write_all_at(buf, offset, ctx).await;
1469 8 : res
1470 : }
1471 8 : MaybeVirtualFile::File(file) => file.write_all_at(&buf[..], offset),
1472 : }
1473 16 : }
1474 72 : async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
1475 72 : match self {
1476 36 : MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
1477 36 : MaybeVirtualFile::File(file) => file.seek(pos),
1478 : }
1479 72 : }
1480 16 : async fn write_all<Buf: IoBuf + Send>(
1481 16 : &mut self,
1482 16 : buf: FullSlice<Buf>,
1483 16 : ctx: &RequestContext,
1484 16 : ) -> Result<(), Error> {
1485 16 : match self {
1486 8 : MaybeVirtualFile::VirtualFile(file) => {
1487 8 : let (_buf, res) = file.write_all(buf, ctx).await;
1488 8 : res.map(|_| ())
1489 : }
1490 8 : MaybeVirtualFile::File(file) => file.write_all(&buf[..]),
1491 : }
1492 16 : }
1493 :
1494 : // Helper function to slurp contents of a file, starting at the current position,
1495 : // into a string
1496 884 : async fn read_string(&mut self, ctx: &RequestContext) -> Result<String, Error> {
1497 : use std::io::Read;
1498 884 : let mut buf = String::new();
1499 884 : match self {
1500 448 : MaybeVirtualFile::VirtualFile(file) => {
1501 448 : let mut buf = Vec::new();
1502 448 : file.read_to_end(&mut buf, ctx).await?;
1503 444 : return Ok(String::from_utf8(buf).unwrap());
1504 : }
1505 436 : MaybeVirtualFile::File(file) => {
1506 436 : file.read_to_string(&mut buf)?;
1507 : }
1508 : }
1509 432 : Ok(buf)
1510 884 : }
1511 :
1512 : // Helper function to slurp a portion of a file into a string
1513 808 : async fn read_string_at(
1514 808 : &mut self,
1515 808 : pos: u64,
1516 808 : len: usize,
1517 808 : ctx: &RequestContext,
1518 808 : ) -> Result<String, Error> {
1519 808 : let slice = IoBufferMut::with_capacity(len).slice_full();
1520 808 : assert_eq!(slice.bytes_total(), len);
1521 808 : let slice = self.read_exact_at(slice, pos, ctx).await?;
1522 808 : let buf = slice.into_inner();
1523 808 : assert_eq!(buf.len(), len);
1524 :
1525 808 : Ok(String::from_utf8(buf.to_vec()).unwrap())
1526 808 : }
1527 : }
1528 :
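     : // A minimal sketch, not part of the original suite: the IO-mode knob
     : // round-trips through the atomic. It only writes back the value that is
     : // already set, so it stays benign when tests run in parallel.
     : #[test]
     : fn io_mode_atomic_roundtrip_sketch() {
     :     let before = get_io_mode() as u8;
     :     set_io_mode(IoMode::try_from(before).unwrap());
     :     assert_eq!(get_io_mode() as u8, before);
     : }
     :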
1529 : #[tokio::test]
1530 4 : async fn test_virtual_files() -> anyhow::Result<()> {
1531 4 : // The real work is done in the test_files() helper function. This
1532 4 : // allows us to run the same set of tests against a native File, and
1533 4 : // VirtualFile. We trust the native Files and wouldn't need to test them,
1534 4 : // but this allows us to verify that the operations return the same
1535 4 : // results with VirtualFiles as with native Files. (Except that with
1536 4 : // native files, you will run out of file descriptors if the ulimit
1537 4 : // is low enough.)
1538 4 : struct A;
1539 4 :
1540 4 : impl Adapter for A {
1541 412 : async fn open(
1542 412 : path: Utf8PathBuf,
1543 412 : opts: OpenOptions,
1544 412 : ctx: &RequestContext,
1545 412 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1546 412 : let vf = VirtualFile::open_with_options(&path, &opts, ctx).await?;
1547 412 : Ok(MaybeVirtualFile::VirtualFile(vf))
1548 412 : }
1549 4 : }
1550 4 : test_files::<A>("virtual_files").await
1551 4 : }
1552 :
1553 : #[tokio::test]
1554 4 : async fn test_physical_files() -> anyhow::Result<()> {
1555 4 : struct B;
1556 4 :
1557 4 : impl Adapter for B {
1558 412 : async fn open(
1559 412 : path: Utf8PathBuf,
1560 412 : opts: OpenOptions,
1561 412 : _ctx: &RequestContext,
1562 412 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1563 4 : Ok(MaybeVirtualFile::File({
1564 412 : let owned_fd = opts.open(path.as_std_path()).await?;
1565 412 : File::from(owned_fd)
1566 4 : }))
1567 412 : }
1568 4 : }
1569 4 :
1570 4 : test_files::<B>("physical_files").await
1571 4 : }
1572 :
1573 : /// This is essentially a closure that returns a MaybeVirtualFile. Because Rust edition
1574 : /// 2024, with its new lifetime capture and outlives rules, is not out yet, we use an
1575 : /// async function in a trait, which already benefits from the new lifetime capture rules.
1576 : trait Adapter {
1577 : async fn open(
1578 : path: Utf8PathBuf,
1579 : opts: OpenOptions,
1580 : ctx: &RequestContext,
1581 : ) -> Result<MaybeVirtualFile, anyhow::Error>;
1582 : }
1583 :
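     : /// Runs the shared battery of read/write/seek assertions against whichever
     : /// file implementation the adapter `A` opens.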
1584 8 : async fn test_files<A>(testname: &str) -> anyhow::Result<()>
1585 8 : where
1586 8 : A: Adapter,
1587 8 : {
1588 8 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1589 8 : let testdir = crate::config::PageServerConf::test_repo_dir(testname);
1590 8 : std::fs::create_dir_all(&testdir)?;
1591 :
1592 8 : let path_a = testdir.join("file_a");
1593 8 : let mut file_a = A::open(
1594 8 : path_a.clone(),
1595 8 : OpenOptions::new()
1596 8 : .write(true)
1597 8 : .create(true)
1598 8 : .truncate(true)
1599 8 : .to_owned(),
1600 8 : &ctx,
1601 8 : )
1602 8 : .await?;
1603 :
1604 8 : file_a
1605 8 : .write_all(b"foobar".to_vec().slice_len(), &ctx)
1606 8 : .await?;
1607 :
1608 : // cannot read from a file opened in write-only mode
1609 8 : let _ = file_a.read_string(&ctx).await.unwrap_err();
1610 :
1611 : // Close the file and re-open for reading
1612 8 : let mut file_a = A::open(path_a, OpenOptions::new().read(true).to_owned(), &ctx).await?;
1613 :
1614 : // cannot write to a file opened in read-only mode
1615 8 : let _ = file_a
1616 8 : .write_all(b"bar".to_vec().slice_len(), &ctx)
1617 8 : .await
1618 8 : .unwrap_err();
1619 8 :
1620 8 : // Try simple read
1621 8 : assert_eq!("foobar", file_a.read_string(&ctx).await?);
1622 :
1623 : // It's positioned at the EOF now.
1624 8 : assert_eq!("", file_a.read_string(&ctx).await?);
1625 :
1626 : // Test seeks.
1627 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1628 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1629 :
1630 8 : assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
1631 8 : assert_eq!("ar", file_a.read_string(&ctx).await?);
1632 :
1633 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1634 8 : assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
1635 8 : assert_eq!("bar", file_a.read_string(&ctx).await?);
1636 :
1637 8 : assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
1638 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1639 :
1640 : // Test erroneous seeks to before byte 0
1641 8 : file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
1642 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1643 8 : file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();
1644 8 :
1645 8 : // the erroneous seek should have left the position unchanged
1646 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1647 :
1648 : // Create another test file, and try FileExt functions on it.
1649 8 : let path_b = testdir.join("file_b");
1650 8 : let mut file_b = A::open(
1651 8 : path_b.clone(),
1652 8 : OpenOptions::new()
1653 8 : .read(true)
1654 8 : .write(true)
1655 8 : .create(true)
1656 8 : .truncate(true)
1657 8 : .to_owned(),
1658 8 : &ctx,
1659 8 : )
1660 8 : .await?;
1661 8 : file_b
1662 8 : .write_all_at(IoBuffer::from(b"BAR").slice_len(), 3, &ctx)
1663 8 : .await?;
1664 8 : file_b
1665 8 : .write_all_at(IoBuffer::from(b"FOO").slice_len(), 0, &ctx)
1666 8 : .await?;
1667 :
1668 8 : assert_eq!(file_b.read_string_at(2, 3, &ctx).await?, "OBA");
1669 :
1670 : // Open a lot of files, enough to cause some evictions. (Or to be precise,
1671 : // open the same file many times. The effect is the same.)
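     : // Each open claims a slot in the global descriptor cache, so the clock
     : // algorithm has to evict (close) older slots, including file_a's.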
1672 : //
1673 : // leave file_a positioned at offset 1 before we start
1674 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1675 :
1676 8 : let mut vfiles = Vec::new();
1677 808 : for _ in 0..100 {
1678 800 : let mut vfile = A::open(
1679 800 : path_b.clone(),
1680 800 : OpenOptions::new().read(true).to_owned(),
1681 800 : &ctx,
1682 800 : )
1683 800 : .await?;
1684 800 : assert_eq!("FOOBAR", vfile.read_string(&ctx).await?);
1685 800 : vfiles.push(vfile);
1686 : }
1687 :
1688 : // make sure we opened enough files to definitely cause evictions.
1689 8 : assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);
1690 :
1691 : // The underlying file descriptor for 'file_a' should be closed now. Try to read
1692 : // from it again. We left the file positioned at offset 1 above.
1693 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1694 :
1695 : // Check that all the other FDs still work too. Use them in random order for
1696 : // good measure.
1697 8 : vfiles.as_mut_slice().shuffle(&mut thread_rng());
1698 800 : for vfile in vfiles.iter_mut() {
1699 800 : assert_eq!("OOBAR", vfile.read_string_at(1, 5, &ctx).await?);
1700 : }
1701 :
1702 8 : Ok(())
1703 8 : }
1704 :
1705 : /// Test using VirtualFiles from many threads concurrently. This covers two things:
1706 : /// using a lot of VirtualFiles at once (causing evictions), and sharing the same
1707 : /// VirtualFile between multiple threads.
1708 : #[tokio::test]
1709 4 : async fn test_vfile_concurrency() -> Result<(), Error> {
1710 4 : const SIZE: usize = 8 * 1024;
1711 4 : const VIRTUAL_FILES: usize = 100;
1712 4 : const THREADS: usize = 100;
1713 4 : const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];
1714 4 :
1715 4 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1716 4 : let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
1717 4 : std::fs::create_dir_all(&testdir)?;
1718 4 :
1719 4 : // Create a test file.
1720 4 : let test_file_path = testdir.join("concurrency_test_file");
1721 4 : {
1722 4 : let file = File::create(&test_file_path)?;
1723 4 : file.write_all_at(&SAMPLE, 0)?;
1724 4 : }
1725 4 :
1726 4 : // Open the file many times.
1727 4 : let mut files = Vec::new();
1728 404 : for _ in 0..VIRTUAL_FILES {
1729 400 : let f = VirtualFileInner::open_with_options(
1730 400 : &test_file_path,
1731 400 : OpenOptions::new().read(true),
1732 400 : &ctx,
1733 400 : )
1734 400 : .await?;
1735 400 : files.push(f);
1736 4 : }
1737 4 : let files = Arc::new(files);
1738 4 :
1739 4 : // Launch many threads, and use the virtual files concurrently in random order.
1740 4 : let rt = tokio::runtime::Builder::new_multi_thread()
1741 4 : .worker_threads(THREADS)
1742 4 : .thread_name("test_vfile_concurrency thread")
1743 4 : .build()
1744 4 : .unwrap();
1745 4 : let mut hdls = Vec::new();
1746 404 : for _threadno in 0..THREADS {
1747 400 : let files = files.clone();
1748 400 : let ctx = ctx.detached_child(TaskKind::UnitTest, DownloadBehavior::Error);
1749 400 : let hdl = rt.spawn(async move {
1750 400 : let mut buf = IoBufferMut::with_capacity_zeroed(SIZE);
1751 400 : let mut rng = rand::rngs::OsRng;
1752 400000 : for _ in 1..1000 {
1753 399600 : let f = &files[rng.gen_range(0..files.len())];
1754 399600 : buf = f
1755 399600 : .read_exact_at(buf.slice_full(), 0, &ctx)
1756 399600 : .await
1757 399600 : .unwrap()
1758 399600 : .into_inner();
1759 399600 : assert!(buf[..] == SAMPLE);
1760 4 : }
1761 400 : });
1762 400 : hdls.push(hdl);
1763 400 : }
1764 404 : for hdl in hdls {
1765 400 : hdl.await?;
1766 4 : }
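     : // Dropping a multi-thread tokio runtime from within async code panics, which
     : // is presumably why the runtime is deliberately leaked here.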
1767 4 : std::mem::forget(rt);
1768 4 :
1769 4 : Ok(())
1770 4 : }
1771 :
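     : // crashsafe_overwrite should leave the new content in place and clean up its
     : // temporary file; judging by the `tmp_path` parameter, it writes the temp
     : // file first and then renames it over the target.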
1772 : #[tokio::test]
1773 4 : async fn test_atomic_overwrite_basic() {
1774 4 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1775 4 : let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
1776 4 : std::fs::create_dir_all(&testdir).unwrap();
1777 4 :
1778 4 : let path = testdir.join("myfile");
1779 4 : let tmp_path = testdir.join("myfile.tmp");
1780 4 :
1781 4 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1782 4 : .await
1783 4 : .unwrap();
1784 4 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1785 4 : let post = file.read_string(&ctx).await.unwrap();
1786 4 : assert_eq!(post, "foo");
1787 4 : assert!(!tmp_path.exists());
1788 4 : drop(file);
1789 4 :
1790 4 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"bar".to_vec())
1791 4 : .await
1792 4 : .unwrap();
1793 4 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1794 4 : let post = file.read_string(&ctx).await.unwrap();
1795 4 : assert_eq!(post, "bar");
1796 4 : assert!(!tmp_path.exists());
1797 4 : drop(file);
1798 4 : }
1799 :
1800 : #[tokio::test]
1801 4 : async fn test_atomic_overwrite_preexisting_tmp() {
1802 4 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1803 4 : let testdir =
1804 4 : crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
1805 4 : std::fs::create_dir_all(&testdir).unwrap();
1806 4 :
1807 4 : let path = testdir.join("myfile");
1808 4 : let tmp_path = testdir.join("myfile.tmp");
1809 4 :
1810 4 : std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
1811 4 : assert!(tmp_path.exists());
1812 4 :
1813 4 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1814 4 : .await
1815 4 : .unwrap();
1816 4 :
1817 4 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1818 4 : let post = file.read_string(&ctx).await.unwrap();
1819 4 : assert_eq!(post, "foo");
1820 4 : assert!(!tmp_path.exists());
1821 4 : drop(file);
1822 4 : }
1823 : }