Line data Source code
1 : //! VirtualFile is like a normal File, but it's not bound directly to
2 : //! a file descriptor.
3 : //!
4 : //! Instead, the file is opened on demand when it's accessed,
5 : //! and if too many files are open globally in the system, least-recently
6 : //! used ones are closed.
7 : //!
8 : //! To track which files have been recently used, we use the clock algorithm
9 : //! with a 'recently_used' flag on each slot.
10 : //!
11 : //! This is similar to PostgreSQL's virtual file descriptor facility in
12 : //! src/backend/storage/file/fd.c
13 : //!
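     : //! Typical usage, in sketch form (error handling elided; `ctx` is a
     : //! `RequestContext`, and the buffer types are this module's aliases):
     : //!
     : //! ```ignore
     : //! let file = VirtualFile::open("some/path", &ctx).await?;
     : //! let slice = IoBufferMut::with_capacity(PAGE_SZ).slice_full();
     : //! let slice = file.read_exact_at(slice, 0, &ctx).await?;
     : //! ```
     : //!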
14 : use crate::context::RequestContext;
15 : use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC};
16 :
17 : use crate::page_cache::{PageWriteGuard, PAGE_SZ};
18 : use crate::tenant::TENANTS_SEGMENT_NAME;
19 : use camino::{Utf8Path, Utf8PathBuf};
20 : use once_cell::sync::OnceCell;
21 : use owned_buffers_io::aligned_buffer::buffer::AlignedBuffer;
22 : use owned_buffers_io::aligned_buffer::{AlignedBufferMut, AlignedSlice, ConstAlign};
23 : use owned_buffers_io::io_buf_aligned::IoBufAlignedMut;
24 : use owned_buffers_io::io_buf_ext::FullSlice;
25 : use pageserver_api::config::defaults::DEFAULT_IO_BUFFER_ALIGNMENT;
26 : use pageserver_api::shard::TenantShardId;
27 : use std::fs::File;
28 : use std::io::{Error, ErrorKind, Seek, SeekFrom};
29 : #[cfg(target_os = "linux")]
30 : use std::os::unix::fs::OpenOptionsExt;
31 : use tokio_epoll_uring::{BoundedBuf, IoBuf, IoBufMut, Slice};
32 :
33 : use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
34 : use std::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering};
35 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
36 : use tokio::time::Instant;
37 :
38 : pub use pageserver_api::models::virtual_file as api;
39 : pub(crate) mod io_engine;
40 : pub use io_engine::feature_test as io_engine_feature_test;
41 : pub use io_engine::io_engine_for_bench;
42 : pub use io_engine::FeatureTestResult as IoEngineFeatureTestResult;
43 : mod metadata;
44 : mod open_options;
45 : use self::owned_buffers_io::write::OwnedAsyncWriter;
46 : pub(crate) use api::IoMode;
47 : pub(crate) use io_engine::IoEngineKind;
48 : pub(crate) use metadata::Metadata;
49 : pub(crate) use open_options::*;
50 :
51 : pub(crate) mod owned_buffers_io {
52 : //! Abstractions for IO with owned buffers.
53 : //!
54 : //! Not actually tied to [`crate::virtual_file`] specifically, but it's the primary
55 : //! reason we need this abstraction.
56 : //!
57 : //! Over time, this could move into the `tokio-epoll-uring` crate, maybe `uring-common`,
58 : //! but for the time being we're proving out the primitives in the neon.git repo
59 : //! for faster iteration.
60 :
61 : pub(crate) mod aligned_buffer;
62 : pub(crate) mod io_buf_aligned;
63 : pub(crate) mod io_buf_ext;
64 : pub(crate) mod slice;
65 : pub(crate) mod write;
66 : pub(crate) mod util {
67 : pub(crate) mod size_tracking_writer;
68 : }
69 : }
70 :
71 : #[derive(Debug)]
72 : pub struct VirtualFile {
73 : inner: VirtualFileInner,
74 : _mode: IoMode,
75 : }
76 :
77 : impl VirtualFile {
78 : /// Open a file in read-only mode. Like File::open.
79 1048 : pub async fn open<P: AsRef<Utf8Path>>(
80 1048 : path: P,
81 1048 : ctx: &RequestContext,
82 1048 : ) -> Result<Self, std::io::Error> {
83 1048 : let inner = VirtualFileInner::open(path, ctx).await?;
84 1048 : Ok(VirtualFile {
85 1048 : inner,
86 1048 : _mode: IoMode::Buffered,
87 1048 : })
88 1048 : }
89 :
90 : /// Open a file in read-only mode. Like File::open.
91 : ///
92 : /// `O_DIRECT` will be enabled based on `virtual_file_io_mode`.
93 1190 : pub async fn open_v2<P: AsRef<Utf8Path>>(
94 1190 : path: P,
95 1190 : ctx: &RequestContext,
96 1190 : ) -> Result<Self, std::io::Error> {
97 1190 : Self::open_with_options_v2(path.as_ref(), OpenOptions::new().read(true), ctx).await
98 1190 : }
99 :
100 1481 : pub async fn create<P: AsRef<Utf8Path>>(
101 1481 : path: P,
102 1481 : ctx: &RequestContext,
103 1481 : ) -> Result<Self, std::io::Error> {
104 1481 : let inner = VirtualFileInner::create(path, ctx).await?;
105 1481 : Ok(VirtualFile {
106 1481 : inner,
107 1481 : _mode: IoMode::Buffered,
108 1481 : })
109 1481 : }
110 :
111 0 : pub async fn create_v2<P: AsRef<Utf8Path>>(
112 0 : path: P,
113 0 : ctx: &RequestContext,
114 0 : ) -> Result<Self, std::io::Error> {
115 0 : VirtualFile::open_with_options_v2(
116 0 : path.as_ref(),
117 0 : OpenOptions::new().write(true).create(true).truncate(true),
118 0 : ctx,
119 0 : )
120 0 : .await
121 0 : }
122 :
123 2000 : pub async fn open_with_options<P: AsRef<Utf8Path>>(
124 2000 : path: P,
125 2000 : open_options: &OpenOptions,
126 2000 : ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
127 2000 : ) -> Result<Self, std::io::Error> {
128 2000 : let inner = VirtualFileInner::open_with_options(path, open_options, ctx).await?;
129 2000 : Ok(VirtualFile {
130 2000 : inner,
131 2000 : _mode: IoMode::Buffered,
132 2000 : })
133 2000 : }
134 :
135 1190 : pub async fn open_with_options_v2<P: AsRef<Utf8Path>>(
136 1190 : path: P,
137 1190 : open_options: &OpenOptions,
138 1190 : ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
139 1190 : ) -> Result<Self, std::io::Error> {
140 1190 : let file = match get_io_mode() {
141 : IoMode::Buffered => {
142 1190 : let inner = VirtualFileInner::open_with_options(path, open_options, ctx).await?;
143 1190 : VirtualFile {
144 1190 : inner,
145 1190 : _mode: IoMode::Buffered,
146 1190 : }
147 : }
148 : #[cfg(target_os = "linux")]
149 : IoMode::Direct => {
150 0 : let inner = VirtualFileInner::open_with_options(
151 0 : path,
152 0 : open_options.clone().custom_flags(nix::libc::O_DIRECT),
153 0 : ctx,
154 0 : )
155 0 : .await?;
156 0 : VirtualFile {
157 0 : inner,
158 0 : _mode: IoMode::Direct,
159 0 : }
160 : }
161 : };
162 1190 : Ok(file)
163 1190 : }
164 :
165 1158 : pub fn path(&self) -> &Utf8Path {
166 1158 : self.inner.path.as_path()
167 1158 : }
168 :
169 22 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
170 22 : final_path: Utf8PathBuf,
171 22 : tmp_path: Utf8PathBuf,
172 22 : content: B,
173 22 : ) -> std::io::Result<()> {
174 22 : VirtualFileInner::crashsafe_overwrite(final_path, tmp_path, content).await
175 22 : }
176 :
177 2735 : pub async fn sync_all(&self) -> Result<(), Error> {
178 2735 : if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 {
179 0 : return Ok(());
180 2735 : }
181 2735 : self.inner.sync_all().await
182 2735 : }
183 :
184 0 : pub async fn sync_data(&self) -> Result<(), Error> {
185 0 : if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 {
186 0 : return Ok(());
187 0 : }
188 0 : self.inner.sync_data().await
189 0 : }
190 :
191 1732 : pub async fn metadata(&self) -> Result<Metadata, Error> {
192 1732 : self.inner.metadata().await
193 1732 : }
194 :
195 226 : pub fn remove(self) {
196 226 : self.inner.remove();
197 226 : }
198 :
199 5434 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
200 5434 : self.inner.seek(pos).await
201 5434 : }
202 :
203 512777 : pub async fn read_exact_at<Buf>(
204 512777 : &self,
205 512777 : slice: Slice<Buf>,
206 512777 : offset: u64,
207 512777 : ctx: &RequestContext,
208 512777 : ) -> Result<Slice<Buf>, Error>
209 512777 : where
210 512777 : Buf: IoBufAlignedMut + Send,
211 512777 : {
212 512777 : self.inner.read_exact_at(slice, offset, ctx).await
213 512777 : }
214 :
215 32131 : pub async fn read_exact_at_page(
216 32131 : &self,
217 32131 : page: PageWriteGuard<'static>,
218 32131 : offset: u64,
219 32131 : ctx: &RequestContext,
220 32131 : ) -> Result<PageWriteGuard<'static>, Error> {
221 32131 : self.inner.read_exact_at_page(page, offset, ctx).await
222 32131 : }
223 :
224 4 : pub async fn write_all_at<Buf: IoBuf + Send>(
225 4 : &self,
226 4 : buf: FullSlice<Buf>,
227 4 : offset: u64,
228 4 : ctx: &RequestContext,
229 4 : ) -> (FullSlice<Buf>, Result<(), Error>) {
230 4 : self.inner.write_all_at(buf, offset, ctx).await
231 4 : }
232 :
233 1136202 : pub async fn write_all<Buf: IoBuf + Send>(
234 1136202 : &mut self,
235 1136202 : buf: FullSlice<Buf>,
236 1136202 : ctx: &RequestContext,
237 1136202 : ) -> (FullSlice<Buf>, Result<usize, Error>) {
238 1136202 : self.inner.write_all(buf, ctx).await
239 1136202 : }
240 : }
241 :
242 : /// Indicates whether to enable fsync, fdatasync, or O_SYNC/O_DSYNC when writing
243 : /// files. Switching this off is unsafe and only used for testing on machines
244 : /// with slow drives.
245 : #[repr(u8)]
246 : pub enum SyncMode {
247 : Sync,
248 : UnsafeNoSync,
249 : }
250 :
251 : impl TryFrom<u8> for SyncMode {
252 : type Error = u8;
253 :
254 0 : fn try_from(value: u8) -> Result<Self, Self::Error> {
255 0 : Ok(match value {
256 0 : v if v == (SyncMode::Sync as u8) => SyncMode::Sync,
257 0 : v if v == (SyncMode::UnsafeNoSync as u8) => SyncMode::UnsafeNoSync,
258 0 : x => return Err(x),
259 : })
260 0 : }
261 : }
262 :
263 : ///
264 : /// A virtual file descriptor. You can use this just like std::fs::File, but internally
265 : /// the underlying file is closed if the system is low on file descriptors,
266 : /// and re-opened when it's accessed again.
267 : ///
268 : /// Like with std::fs::File, multiple threads can read/write the file concurrently,
269 : /// holding just a shared reference to the same VirtualFile, using the read_at() / write_at()
270 : /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
271 : /// require a mutable reference, because they modify the "current position".
272 : ///
273 : /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
274 : /// slot that 'handle' points to, if the underlying file is currently open. If it's not
275 : /// currently open, the 'handle' can still point to the slot where it was last kept. The
276 : /// 'tag' field is used to detect whether the handle still is valid or not.
277 : ///
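     : /// Handle validation, in sketch form (the real logic lives in `lock_file` below):
     : ///
     : /// ```ignore
     : /// let slot = &open_files.slots[handle.index];
     : /// if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
     : ///     /* the cached fd is still ours, use it */
     : /// } else {
     : ///     /* re-open the file into a victim slot and update the handle */
     : /// }
     : /// ```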
278 : #[derive(Debug)]
279 : pub struct VirtualFileInner {
280 : /// Lazy handle to the global file descriptor cache. The slot that this points to
281 : /// might contain our File, or it may be empty, or it may contain a File that
282 : /// belongs to a different VirtualFile.
283 : handle: RwLock<SlotHandle>,
284 :
285 : /// Current file position
286 : pos: u64,
287 :
288 : /// File path and options to use to open it.
289 : ///
290 : /// Note: this only contains the options needed to re-open it. For example,
291 : /// if a new file is created, we only pass the create flag when it's initially
292 : /// opened, in the VirtualFile::create() function, and strip the flag before
293 : /// storing it here.
294 : pub path: Utf8PathBuf,
295 : open_options: OpenOptions,
296 :
297 : // These are strings because we only use them for metrics, and those expect strings.
298 : // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
299 : // strings.
300 : tenant_id: String,
301 : shard_id: String,
302 : timeline_id: String,
303 : }
304 :
305 : #[derive(Debug, PartialEq, Clone, Copy)]
306 : struct SlotHandle {
307 : /// Index into OPEN_FILES.slots
308 : index: usize,
309 :
310 : /// Value of 'tag' in the slot. If slot's tag doesn't match, then the slot has
311 : /// been recycled and no longer contains the FD for this virtual file.
312 : tag: u64,
313 : }
314 :
315 : /// OPEN_FILES is the global array that holds the physical file descriptors that
316 : /// are currently open. Each slot in the array is protected by a separate lock,
317 : /// so that different files can be accessed independently. The lock must be held
318 : /// in write mode to replace the slot with a different file, but a read mode
319 : /// is enough to operate on the file, whether you're reading or writing to it.
320 : ///
321 : /// OPEN_FILES starts in uninitialized state, and it's initialized by
322 : /// the virtual_file::init() function. It must be called exactly once at page
323 : /// server startup.
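     : ///
     : /// The clock sweep, in sketch form (the real loop is in `find_victim_slot`):
     : ///
     : /// ```ignore
     : /// loop {
     : ///     let next = open_files.next.fetch_add(1, Ordering::AcqRel) % num_slots;
     : ///     let slot = &open_files.slots[next];
     : ///     // Clear the recently_used flag as we pass; skip slots used since the
     : ///     // last sweep, and take the first unused slot we can lock without waiting.
     : ///     if !slot.recently_used.swap(false, Ordering::Release) {
     : ///         if let Ok(guard) = slot.inner.try_write() {
     : ///             break; // victim found
     : ///         }
     : ///     }
     : /// }
     : /// ```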
324 : static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();
325 :
326 : struct OpenFiles {
327 : slots: &'static [Slot],
328 :
329 : /// clock arm for the clock algorithm
330 : next: AtomicUsize,
331 : }
332 :
333 : struct Slot {
334 : inner: RwLock<SlotInner>,
335 :
336 : /// has this file been used since the last clock sweep?
337 : recently_used: AtomicBool,
338 : }
339 :
340 : struct SlotInner {
341 : /// Counter that's incremented every time a different file is stored here.
342 : /// To avoid the ABA problem.
343 : tag: u64,
344 :
345 : /// the underlying file
346 : file: Option<OwnedFd>,
347 : }
348 :
349 : /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
350 : struct PageWriteGuardBuf {
351 : page: PageWriteGuard<'static>,
352 : }
353 : // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
354 : // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
355 : // Page cache pages are zero-initialized, so we're good wrt uninitialized memory.
356 : // (Page cache tracks separately whether the contents are valid, see `PageWriteGuard::mark_valid`.)
357 : unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
358 128535 : fn stable_ptr(&self) -> *const u8 {
359 128535 : self.page.as_ptr()
360 128535 : }
361 240999 : fn bytes_init(&self) -> usize {
362 240999 : self.page.len()
363 240999 : }
364 96393 : fn bytes_total(&self) -> usize {
365 96393 : self.page.len()
366 96393 : }
367 : }
368 : // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
369 : // hence it's safe to hand out the `stable_mut_ptr()`.
370 : unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
371 48202 : fn stable_mut_ptr(&mut self) -> *mut u8 {
372 48202 : self.page.as_mut_ptr()
373 48202 : }
374 :
375 32131 : unsafe fn set_init(&mut self, pos: usize) {
376 32131 : // There shouldn't really be any reason to call this API since bytes_init() == bytes_total().
377 32131 : assert!(pos <= self.page.len());
378 32131 : }
379 : }
380 :
381 : impl OpenFiles {
382 : /// Find a slot to use, evicting an existing file descriptor if needed.
383 : ///
384 : /// On return, we hold a lock on the slot, its 'tag' has been updated, and
385 : /// 'recently_used' has been set. It's all ready for reuse.
386 192667 : async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
387 192667 : //
388 192667 : // Run the clock algorithm to find a slot to replace.
389 192667 : //
390 192667 : let num_slots = self.slots.len();
391 192667 : let mut retries = 0;
392 : let mut slot;
393 : let mut slot_guard;
394 : let index;
395 : loop {
396 2259065 : let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
397 2259065 : slot = &self.slots[next];
398 2259065 :
399 2259065 : // If the recently_used flag on this slot is set, continue the clock
400 2259065 : // sweep. Otherwise try to use this slot. If we cannot acquire the
401 2259065 : // lock, also continue the clock sweep.
402 2259065 : //
403 2259065 : // We only continue in this manner for a while, though. If we loop
404 2259065 : // through the array twice without finding a victim, just pick the
405 2259065 : // next slot and wait until we can reuse it. This way, we avoid
406 2259065 : // spinning in the extreme case that all the slots are busy with an
407 2259065 : // I/O operation.
408 2259065 : if retries < num_slots * 2 {
409 2173593 : if !slot.recently_used.swap(false, Ordering::Release) {
410 1966253 : if let Ok(guard) = slot.inner.try_write() {
411 107195 : slot_guard = guard;
412 107195 : index = next;
413 107195 : break;
414 1859058 : }
415 207340 : }
416 2066398 : retries += 1;
417 : } else {
418 85472 : slot_guard = slot.inner.write().await;
419 85472 : index = next;
420 85472 : break;
421 : }
422 : }
423 :
424 : //
425 : // We now have the victim slot locked. If it was in use previously, close the
426 : // old file.
427 : //
428 192667 : if let Some(old_file) = slot_guard.file.take() {
429 187852 : // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
430 187852 : // distinguish the two.
431 187852 : STORAGE_IO_TIME_METRIC
432 187852 : .get(StorageIoOperation::CloseByReplace)
433 187852 : .observe_closure_duration(|| drop(old_file));
434 187852 : }
435 :
436 : // Prepare the slot for reuse and return it
437 192667 : slot_guard.tag += 1;
438 192667 : slot.recently_used.store(true, Ordering::Relaxed);
439 192667 : (
440 192667 : SlotHandle {
441 192667 : index,
442 192667 : tag: slot_guard.tag,
443 192667 : },
444 192667 : slot_guard,
445 192667 : )
446 192667 : }
447 : }
448 :
449 : /// Identify error types that should always terminate the process. Other
450 : /// error types may be eligible for retry.
451 2 : pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
452 : use nix::errno::Errno::*;
453 2 : match e.raw_os_error().map(nix::errno::from_i32) {
454 : Some(EIO) => {
455 : // Terminate on EIO because we no longer trust the device to store
456 : // data safely, or to uphold persistence guarantees on fsync.
457 0 : true
458 : }
459 : Some(EROFS) => {
460 : // Terminate on EROFS because a filesystem is usually remounted
461 : // readonly when it has experienced some critical issue, so the same
462 : // logic as EIO applies.
463 0 : true
464 : }
465 : Some(EACCES) => {
466 : // Terminate on EACCES because we should always have permissions
467 : // for our own data dir: if we don't, then we can't do our job and
468 : // need administrative intervention to fix permissions. Terminating
469 : // is the best way to make sure we stop cleanly rather than going
470 : // into infinite retry loops, and will make it clear to the outside
471 : // world that we need help.
472 0 : true
473 : }
474 : _ => {
475 : // Treat all other local file I/O errors as retryable. This includes:
476 : // - ENOSPC: we stay up and wait for eviction to free some space
477 : // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
478 : // - WriteZero, Interrupted: these are used internally by VirtualFile
479 2 : false
480 : }
481 : }
482 2 : }
483 :
484 : /// Call this when the local filesystem gives us an error with an external
485 : /// cause: this includes EIO, EROFS, and EACCES: all these indicate either
486 : /// bad storage or bad configuration, and we can't fix that from inside
487 : /// a running process.
488 0 : pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
489 0 : tracing::error!("Fatal I/O error: {e}: {context}");
490 0 : std::process::abort();
491 : }
492 :
493 : pub(crate) trait MaybeFatalIo<T> {
494 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
495 : fn fatal_err(self, context: &str) -> T;
496 : }
497 :
498 : impl<T> MaybeFatalIo<T> for std::io::Result<T> {
499 : /// Terminate the process if the result is an error of a fatal type, else pass it through
500 : ///
501 : /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc., but
502 : /// not on ENOSPC.
503 1140692 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
504 1140692 : if let Err(e) = &self {
505 2 : if is_fatal_io_error(e) {
506 0 : on_fatal_io_error(e, context);
507 2 : }
508 1140690 : }
509 1140692 : self
510 1140692 : }
511 :
512 : /// Terminate the process on any I/O error.
513 : ///
514 : /// This is appropriate for reads on files that we know exist: they should always work.
515 2042 : fn fatal_err(self, context: &str) -> T {
516 2042 : match self {
517 2042 : Ok(v) => v,
518 0 : Err(e) => {
519 0 : on_fatal_io_error(&e, context);
520 : }
521 : }
522 2042 : }
523 : }
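     : // Usage sketch for the above: a write path typically does
     : // `res.maybe_fatal_err("write_at")?`, so retryable errors (e.g. ENOSPC)
     : // propagate to the caller while EIO/EROFS/EACCES abort the process via
     : // `on_fatal_io_error`; reads of files known to exist use `fatal_err` instead.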
524 :
525 : /// Observe duration for the given storage I/O operation
526 : ///
527 : /// Unlike `observe_closure_duration`, this supports async,
528 : /// where "support" means that we measure wall clock time.
529 : macro_rules! observe_duration {
530 : ($op:expr, $($body:tt)*) => {{
531 : let instant = Instant::now();
532 : let result = $($body)*;
533 : let elapsed = instant.elapsed().as_secs_f64();
534 : STORAGE_IO_TIME_METRIC
535 : .get($op)
536 : .observe(elapsed);
537 : result
538 : }}
539 : }
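     : // Usage sketch: `observe_duration!(StorageIoOperation::Read, read_fut.await)`
     : // measures the wall-clock time of the expression (including await points) and
     : // records it in STORAGE_IO_TIME_METRIC under the given operation label.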
540 :
541 : macro_rules! with_file {
542 : ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
543 : let $ident = $this.lock_file().await?;
544 : observe_duration!($op, $($body)*)
545 : }};
546 : ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
547 : let mut $ident = $this.lock_file().await?;
548 : observe_duration!($op, $($body)*)
549 : }};
550 : }
551 :
552 : impl VirtualFileInner {
553 : /// Open a file in read-only mode. Like File::open.
554 1048 : pub async fn open<P: AsRef<Utf8Path>>(
555 1048 : path: P,
556 1048 : ctx: &RequestContext,
557 1048 : ) -> Result<VirtualFileInner, std::io::Error> {
558 1048 : Self::open_with_options(path.as_ref(), OpenOptions::new().read(true), ctx).await
559 1048 : }
560 :
561 : /// Create a new file for writing. If the file exists, it will be truncated.
562 : /// Like File::create.
563 1481 : pub async fn create<P: AsRef<Utf8Path>>(
564 1481 : path: P,
565 1481 : ctx: &RequestContext,
566 1481 : ) -> Result<VirtualFileInner, std::io::Error> {
567 1481 : Self::open_with_options(
568 1481 : path.as_ref(),
569 1481 : OpenOptions::new().write(true).create(true).truncate(true),
570 1481 : ctx,
571 1481 : )
572 763 : .await
573 1481 : }
574 :
575 : /// Open a file with given options.
576 : ///
577 : /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
578 : /// they will also be applied when the file is subsequently re-opened, not only
579 : /// the first time. Make sure that's sane!
580 5919 : pub async fn open_with_options<P: AsRef<Utf8Path>>(
581 5919 : path: P,
582 5919 : open_options: &OpenOptions,
583 5919 : _ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
584 5919 : ) -> Result<VirtualFileInner, std::io::Error> {
585 5919 : let path_ref = path.as_ref();
586 5919 : let path_str = path_ref.to_string();
587 5919 : let parts = path_str.split('/').collect::<Vec<&str>>();
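     : // Derive metrics labels from the path. For a path shaped like
     : // `…/tenants/<tenant_shard_id>/timelines/<timeline_id>/<file>` (layout implied
     : // by TENANTS_SEGMENT_NAME and the index arithmetic below), extract the tenant,
     : // shard and timeline; for anything else, fall back to "*" labels.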
588 5919 : let (tenant_id, shard_id, timeline_id) =
589 5919 : if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
590 4425 : let tenant_shard_part = parts[parts.len() - 4];
591 4425 : let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
592 4425 : Ok(tenant_shard_id) => (
593 4425 : tenant_shard_id.tenant_id.to_string(),
594 4425 : format!("{}", tenant_shard_id.shard_slug()),
595 4425 : ),
596 : Err(_) => {
597 : // Malformed path: this ID is just for observability, so tolerate it
598 : // and pass through
599 0 : (tenant_shard_part.to_string(), "*".to_string())
600 : }
601 : };
602 4425 : (tenant_id, shard_id, parts[parts.len() - 2].to_string())
603 : } else {
604 1494 : ("*".to_string(), "*".to_string(), "*".to_string())
605 : };
606 5919 : let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;
607 :
608 : // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
609 : // where our caller doesn't get to use the returned VirtualFile before its
610 : // slot gets re-used by someone else.
611 5919 : let file = observe_duration!(StorageIoOperation::Open, {
612 5919 : open_options.open(path_ref.as_std_path()).await?
613 : });
614 :
615 : // Strip all options other than read and write.
616 : //
617 : // It would perhaps be nicer to check just for the read and write flags
618 : // explicitly, but OpenOptions doesn't contain any functions to read flags,
619 : // only to set them.
620 5919 : let mut reopen_options = open_options.clone();
621 5919 : reopen_options.create(false);
622 5919 : reopen_options.create_new(false);
623 5919 : reopen_options.truncate(false);
624 5919 :
625 5919 : let vfile = VirtualFileInner {
626 5919 : handle: RwLock::new(handle),
627 5919 : pos: 0,
628 5919 : path: path_ref.to_path_buf(),
629 5919 : open_options: reopen_options,
630 5919 : tenant_id,
631 5919 : shard_id,
632 5919 : timeline_id,
633 5919 : };
634 5919 :
635 5919 : // TODO: Under pressure, it's likely the slot will get re-used and
636 5919 : // the underlying file closed before they get around to using it.
637 5919 : // => https://github.com/neondatabase/neon/issues/6065
638 5919 : slot_guard.file.replace(file);
639 5919 :
640 5919 : Ok(vfile)
641 5919 : }
642 :
643 : /// Async version of [`::utils::crashsafe::overwrite`].
644 : ///
645 : /// # NB:
646 : ///
647 : /// Doesn't actually use the [`VirtualFile`] file descriptor cache, but
648 : /// it did at an earlier time.
649 : /// And it will use this module's [`io_engine`] in the near future, so we leave it here.
650 28 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
651 28 : final_path: Utf8PathBuf,
652 28 : tmp_path: Utf8PathBuf,
653 28 : content: B,
654 28 : ) -> std::io::Result<()> {
655 28 : // TODO: use tokio_epoll_uring if configured as `io_engine`.
656 28 : // See https://github.com/neondatabase/neon/issues/6663
657 28 :
658 28 : tokio::task::spawn_blocking(move || {
659 28 : let slice_storage;
660 28 : let content_len = content.bytes_init();
661 28 : let content = if content.bytes_init() > 0 {
662 28 : slice_storage = Some(content.slice(0..content_len));
663 28 : slice_storage.as_deref().expect("just set it to Some()")
664 : } else {
665 0 : &[]
666 : };
667 28 : utils::crashsafe::overwrite(&final_path, &tmp_path, content)
668 28 : .maybe_fatal_err("crashsafe_overwrite")
669 28 : })
670 28 : .await
671 28 : .expect("blocking task is never aborted")
672 28 : }
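     : // The crash-safe sequence behind `utils::crashsafe::overwrite` (assumed from
     : // its name and the `tmp_path` parameter) is the classic one: write the content
     : // to `tmp_path`, fsync it, rename it over `final_path`, then fsync the parent
     : // directory so the rename itself is durable.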
673 :
674 : /// Call File::sync_all() on the underlying File.
675 2735 : pub async fn sync_all(&self) -> Result<(), Error> {
676 2735 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
677 2735 : let (_file_guard, res) = io_engine::get().sync_all(file_guard).await;
678 2735 : res.maybe_fatal_err("sync_all")
679 : })
680 2735 : }
681 :
682 : /// Call File::sync_data() on the underlying File.
683 0 : pub async fn sync_data(&self) -> Result<(), Error> {
684 0 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
685 0 : let (_file_guard, res) = io_engine::get().sync_data(file_guard).await;
686 0 : res.maybe_fatal_err("sync_data")
687 : })
688 0 : }
689 :
690 1732 : pub async fn metadata(&self) -> Result<Metadata, Error> {
691 1732 : with_file!(self, StorageIoOperation::Metadata, |file_guard| {
692 1732 : let (_file_guard, res) = io_engine::get().metadata(file_guard).await;
693 1732 : res
694 : })
695 1732 : }
696 :
697 : /// Helper function internal to `VirtualFile` that looks up the underlying File,
698 : /// opens it and evicts some other File if necessary. The operation passed in
699 : /// is assumed to be a function available on the physical `File`.
700 : ///
701 : /// We do this via a macro because Rust doesn't support async closures that
702 : /// take parameters with lifetimes.
703 1906707 : async fn lock_file(&self) -> Result<FileGuard, Error> {
704 1906707 : let open_files = get_open_files();
705 :
706 186748 : let mut handle_guard = {
707 : // Read the cached slot handle, and see if the slot that it points to still
708 : // contains our File.
709 : //
710 : // We only need to hold the handle lock while we read the current handle. If
711 : // another thread closes the file and recycles the slot for a different file,
712 : // we will notice that the handle we read is no longer valid and retry.
713 1906707 : let mut handle = *self.handle.read().await;
714 : loop {
715 : // Check if the slot contains our File
716 : {
717 1998744 : let slot = &open_files.slots[handle.index];
718 1998744 : let slot_guard = slot.inner.read().await;
719 1998744 : if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
720 : // Found a cached file descriptor.
721 1719959 : slot.recently_used.store(true, Ordering::Relaxed);
722 1719959 : return Ok(FileGuard { slot_guard });
723 278785 : }
724 : }
725 :
726 : // The slot didn't contain our File. We will have to open it ourselves,
727 : // but before that, grab a write lock on handle in the VirtualFile, so
728 : // that no other thread will try to concurrently open the same file.
729 278785 : let handle_guard = self.handle.write().await;
730 :
731 : // If another thread changed the handle while we were not holding the lock,
732 : // then the handle might now be valid again. Loop back to retry.
733 278785 : if *handle_guard != handle {
734 92037 : handle = *handle_guard;
735 92037 : continue;
736 186748 : }
737 186748 : break handle_guard;
738 : }
739 : };
740 :
741 : // We need to open the file ourselves. The handle in the VirtualFile is
742 : // now locked in write-mode. Find a free slot to put it in.
743 186748 : let (handle, mut slot_guard) = open_files.find_victim_slot().await;
744 :
745 : // Re-open the physical file.
746 : // NB: we use StorageIoOperation::OpenAfterReplace here to distinguish this
747 : // case from StorageIoOperation::Open. This helps with identifying thrashing
748 : // of the virtual file descriptor cache.
749 186748 : let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
750 186748 : self.open_options.open(self.path.as_std_path()).await?
751 : });
752 :
753 : // Store the File in the slot and update the handle in the VirtualFile
754 : // to point to it.
755 186748 : slot_guard.file.replace(file);
756 186748 :
757 186748 : *handle_guard = handle;
758 186748 :
759 186748 : Ok(FileGuard {
760 186748 : slot_guard: slot_guard.downgrade(),
761 186748 : })
762 1906707 : }
763 :
764 226 : pub fn remove(self) {
765 226 : let path = self.path.clone();
766 226 : drop(self);
767 226 : std::fs::remove_file(path).expect("failed to remove the virtual file");
768 226 : }
769 :
770 5434 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
771 5434 : match pos {
772 5424 : SeekFrom::Start(offset) => {
773 5424 : self.pos = offset;
774 5424 : }
775 4 : SeekFrom::End(offset) => {
776 4 : self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
777 4 : .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
778 : }
779 6 : SeekFrom::Current(offset) => {
780 6 : let pos = self.pos as i128 + offset as i128;
781 6 : if pos < 0 {
782 2 : return Err(Error::new(
783 2 : ErrorKind::InvalidInput,
784 2 : "offset would be negative",
785 2 : ));
786 4 : }
787 4 : if pos > u64::MAX as i128 {
788 0 : return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
789 4 : }
790 4 : self.pos = pos as u64;
791 : }
792 : }
793 5430 : Ok(self.pos)
794 5434 : }
795 :
796 : /// Read the file contents in range `offset..(offset + slice.bytes_total())` into `slice[0..slice.bytes_total()]`.
797 : ///
798 : /// The returned `Slice<Buf>` is equivalent to the input `slice`, i.e., it's the same view into the same buffer.
799 765624 : pub async fn read_exact_at<Buf>(
800 765624 : &self,
801 765624 : slice: Slice<Buf>,
802 765624 : offset: u64,
803 765624 : ctx: &RequestContext,
804 765624 : ) -> Result<Slice<Buf>, Error>
805 765624 : where
806 765624 : Buf: IoBufAlignedMut + Send,
807 765624 : {
808 765624 : let assert_we_return_original_bounds = if cfg!(debug_assertions) {
809 765624 : Some((slice.stable_ptr() as usize, slice.bytes_total()))
810 : } else {
811 0 : None
812 : };
813 :
814 765624 : let original_bounds = slice.bounds();
815 765624 : let (buf, res) =
816 870821 : read_exact_at_impl(slice, offset, |buf, offset| self.read_at(buf, offset, ctx)).await;
817 765624 : let res = res.map(|_| buf.slice(original_bounds));
818 :
819 765624 : if let Some(original_bounds) = assert_we_return_original_bounds {
820 765624 : if let Ok(slice) = &res {
821 765624 : let returned_bounds = (slice.stable_ptr() as usize, slice.bytes_total());
822 765624 : assert_eq!(original_bounds, returned_bounds);
823 0 : }
824 0 : }
825 :
826 765624 : res
827 765624 : }
828 :
829 : /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
830 32131 : pub async fn read_exact_at_page(
831 32131 : &self,
832 32131 : page: PageWriteGuard<'static>,
833 32131 : offset: u64,
834 32131 : ctx: &RequestContext,
835 32131 : ) -> Result<PageWriteGuard<'static>, Error> {
836 32131 : let buf = PageWriteGuardBuf { page }.slice_full();
837 32131 : debug_assert_eq!(buf.bytes_total(), PAGE_SZ);
838 32131 : self.read_exact_at(buf, offset, ctx)
839 21861 : .await
840 32131 : .map(|slice| slice.into_inner().page)
841 32131 : }
842 :
843 : // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
844 4 : pub async fn write_all_at<Buf: IoBuf + Send>(
845 4 : &self,
846 4 : buf: FullSlice<Buf>,
847 4 : mut offset: u64,
848 4 : ctx: &RequestContext,
849 4 : ) -> (FullSlice<Buf>, Result<(), Error>) {
850 4 : let buf = buf.into_raw_slice();
851 4 : let bounds = buf.bounds();
852 4 : let restore =
853 4 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
854 4 : let mut buf = buf;
855 8 : while !buf.is_empty() {
856 4 : let (tmp, res) = self.write_at(FullSlice::must_new(buf), offset, ctx).await;
857 4 : buf = tmp.into_raw_slice();
858 0 : match res {
859 : Ok(0) => {
860 0 : return (
861 0 : restore(buf),
862 0 : Err(Error::new(
863 0 : std::io::ErrorKind::WriteZero,
864 0 : "failed to write whole buffer",
865 0 : )),
866 0 : );
867 : }
868 4 : Ok(n) => {
869 4 : buf = buf.slice(n..);
870 4 : offset += n as u64;
871 4 : }
872 0 : Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {}
873 0 : Err(e) => return (restore(buf), Err(e)),
874 : }
875 : }
876 4 : (restore(buf), Ok(()))
877 4 : }
878 :
879 : /// Writes `buf` to the file at the current offset.
880 : ///
881 : /// Panics if there is an uninitialized range in `buf`, as that is most likely a bug in the caller.
882 1136202 : pub async fn write_all<Buf: IoBuf + Send>(
883 1136202 : &mut self,
884 1136202 : buf: FullSlice<Buf>,
885 1136202 : ctx: &RequestContext,
886 1136202 : ) -> (FullSlice<Buf>, Result<usize, Error>) {
887 1136202 : let buf = buf.into_raw_slice();
888 1136202 : let bounds = buf.bounds();
889 1136202 : let restore =
890 1136202 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
891 1136202 : let nbytes = buf.len();
892 1136202 : let mut buf = buf;
893 2272364 : while !buf.is_empty() {
894 1136164 : let (tmp, res) = self.write(FullSlice::must_new(buf), ctx).await;
895 1136164 : buf = tmp.into_raw_slice();
896 2 : match res {
897 : Ok(0) => {
898 0 : return (
899 0 : restore(buf),
900 0 : Err(Error::new(
901 0 : std::io::ErrorKind::WriteZero,
902 0 : "failed to write whole buffer",
903 0 : )),
904 0 : );
905 : }
906 1136162 : Ok(n) => {
907 1136162 : buf = buf.slice(n..);
908 1136162 : }
909 2 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
910 2 : Err(e) => return (restore(buf), Err(e)),
911 : }
912 : }
913 1136200 : (restore(buf), Ok(nbytes))
914 1136202 : }
915 :
916 1136164 : async fn write<B: IoBuf + Send>(
917 1136164 : &mut self,
918 1136164 : buf: FullSlice<B>,
919 1136164 : ctx: &RequestContext,
920 1136164 : ) -> (FullSlice<B>, Result<usize, std::io::Error>) {
921 1136164 : let pos = self.pos;
922 1136164 : let (buf, res) = self.write_at(buf, pos, ctx).await;
923 1136164 : let n = match res {
924 1136162 : Ok(n) => n,
925 2 : Err(e) => return (buf, Err(e)),
926 : };
927 1136162 : self.pos += n as u64;
928 1136162 : (buf, Ok(n))
929 1136164 : }
930 :
931 766068 : pub(crate) async fn read_at<Buf>(
932 766068 : &self,
933 766068 : buf: tokio_epoll_uring::Slice<Buf>,
934 766068 : offset: u64,
935 766068 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
936 766068 : ) -> (tokio_epoll_uring::Slice<Buf>, Result<usize, Error>)
937 766068 : where
938 766068 : Buf: tokio_epoll_uring::IoBufMut + Send,
939 766068 : {
940 766068 : let file_guard = match self.lock_file().await {
941 766068 : Ok(file_guard) => file_guard,
942 0 : Err(e) => return (buf, Err(e)),
943 : };
944 :
945 766068 : observe_duration!(StorageIoOperation::Read, {
946 766068 : let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
947 766068 : if let Ok(size) = res {
948 766066 : STORAGE_IO_SIZE
949 766066 : .with_label_values(&[
950 766066 : "read",
951 766066 : &self.tenant_id,
952 766066 : &self.shard_id,
953 766066 : &self.timeline_id,
954 766066 : ])
955 766066 : .add(size as i64);
956 766066 : }
957 766068 : (buf, res)
958 : })
959 766068 : }
960 :
961 : /// The function aborts the process if the error is fatal.
962 1136168 : async fn write_at<B: IoBuf + Send>(
963 1136168 : &self,
964 1136168 : buf: FullSlice<B>,
965 1136168 : offset: u64,
966 1136168 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
967 1136168 : ) -> (FullSlice<B>, Result<usize, Error>) {
968 1136168 : let (slice, result) = self.write_at_inner(buf, offset, _ctx).await;
969 1136168 : let result = result.maybe_fatal_err("write_at");
970 1136168 : (slice, result)
971 1136168 : }
972 :
973 1136168 : async fn write_at_inner<B: IoBuf + Send>(
974 1136168 : &self,
975 1136168 : buf: FullSlice<B>,
976 1136168 : offset: u64,
977 1136168 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
978 1136168 : ) -> (FullSlice<B>, Result<usize, Error>) {
979 1136168 : let file_guard = match self.lock_file().await {
980 1136168 : Ok(file_guard) => file_guard,
981 0 : Err(e) => return (buf, Err(e)),
982 : };
983 1136168 : observe_duration!(StorageIoOperation::Write, {
984 1136168 : let ((_file_guard, buf), result) =
985 1136168 : io_engine::get().write_at(file_guard, offset, buf).await;
986 1136168 : if let Ok(size) = result {
987 1136166 : STORAGE_IO_SIZE
988 1136166 : .with_label_values(&[
989 1136166 : "write",
990 1136166 : &self.tenant_id,
991 1136166 : &self.shard_id,
992 1136166 : &self.timeline_id,
993 1136166 : ])
994 1136166 : .add(size as i64);
995 1136166 : }
996 1136168 : (buf, result)
997 : })
998 1136168 : }
999 : }
1000 :
1001 : // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
1002 765632 : pub async fn read_exact_at_impl<Buf, F, Fut>(
1003 765632 : mut buf: tokio_epoll_uring::Slice<Buf>,
1004 765632 : mut offset: u64,
1005 765632 : mut read_at: F,
1006 765632 : ) -> (Buf, std::io::Result<()>)
1007 765632 : where
1008 765632 : Buf: IoBufMut + Send,
1009 765632 : F: FnMut(tokio_epoll_uring::Slice<Buf>, u64) -> Fut,
1010 765632 : Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<Buf>, std::io::Result<usize>)>,
1011 765632 : {
1012 1531266 : while buf.bytes_total() != 0 {
1013 : let res;
1014 870821 : (buf, res) = read_at(buf, offset).await;
1015 0 : match res {
1016 2 : Ok(0) => break,
1017 765634 : Ok(n) => {
1018 765634 : buf = buf.slice(n..);
1019 765634 : offset += n as u64;
1020 765634 : }
1021 0 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
1022 0 : Err(e) => return (buf.into_inner(), Err(e)),
1023 : }
1024 : }
1025 : // NB: don't use `buf.is_empty()` here; it is from the
1026 : // `impl Deref for Slice { Target = [u8] }`; the &[u8]
1027 : // returned by it only covers the initialized portion of `buf`.
1028 : // Whereas we're interested in ensuring that we filled the entire
1029 : // buffer that the user passed in.
1030 765632 : if buf.bytes_total() != 0 {
1031 2 : (
1032 2 : buf.into_inner(),
1033 2 : Err(std::io::Error::new(
1034 2 : std::io::ErrorKind::UnexpectedEof,
1035 2 : "failed to fill whole buffer",
1036 2 : )),
1037 2 : )
1038 : } else {
1039 765630 : assert_eq!(buf.len(), buf.bytes_total());
1040 765630 : (buf.into_inner(), Ok(()))
1041 : }
1042 765632 : }
1043 :
1044 : #[cfg(test)]
1045 : mod test_read_exact_at_impl {
1046 :
1047 : use std::{collections::VecDeque, sync::Arc};
1048 :
1049 : use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};
1050 :
1051 : use super::read_exact_at_impl;
1052 :
1053 : struct Expectation {
1054 : offset: u64,
1055 : bytes_total: usize,
1056 : result: std::io::Result<Vec<u8>>,
1057 : }
1058 : struct MockReadAt {
1059 : expectations: VecDeque<Expectation>,
1060 : }
1061 :
1062 : impl MockReadAt {
1063 12 : async fn read_at(
1064 12 : &mut self,
1065 12 : mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
1066 12 : offset: u64,
1067 12 : ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
1068 12 : let exp = self
1069 12 : .expectations
1070 12 : .pop_front()
1071 12 : .expect("read_at called but we have no expectations left");
1072 12 : assert_eq!(exp.offset, offset);
1073 12 : assert_eq!(exp.bytes_total, buf.bytes_total());
1074 12 : match exp.result {
1075 12 : Ok(bytes) => {
1076 12 : assert!(bytes.len() <= buf.bytes_total());
1077 12 : buf.put_slice(&bytes);
1078 12 : (buf, Ok(bytes.len()))
1079 : }
1080 0 : Err(e) => (buf, Err(e)),
1081 : }
1082 12 : }
1083 : }
1084 :
1085 : impl Drop for MockReadAt {
1086 8 : fn drop(&mut self) {
1087 8 : assert_eq!(self.expectations.len(), 0);
1088 8 : }
1089 : }
1090 :
1091 : #[tokio::test]
1092 2 : async fn test_basic() {
1093 2 : let buf = Vec::with_capacity(5).slice_full();
1094 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1095 2 : expectations: VecDeque::from(vec![Expectation {
1096 2 : offset: 0,
1097 2 : bytes_total: 5,
1098 2 : result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
1099 2 : }]),
1100 2 : }));
1101 2 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1102 2 : let mock_read_at = Arc::clone(&mock_read_at);
1103 2 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1104 2 : })
1105 2 : .await;
1106 2 : assert!(res.is_ok());
1107 2 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
1108 2 : }
1109 :
1110 : #[tokio::test]
1111 2 : async fn test_empty_buf_issues_no_syscall() {
1112 2 : let buf = Vec::new().slice_full();
1113 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1114 2 : expectations: VecDeque::new(),
1115 2 : }));
1116 2 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1117 0 : let mock_read_at = Arc::clone(&mock_read_at);
1118 2 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1119 2 : })
1120 2 : .await;
1121 2 : assert!(res.is_ok());
1122 2 : }
1123 :
1124 : #[tokio::test]
1125 2 : async fn test_two_read_at_calls_needed_until_buf_filled() {
1126 2 : let buf = Vec::with_capacity(4).slice_full();
1127 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1128 2 : expectations: VecDeque::from(vec![
1129 2 : Expectation {
1130 2 : offset: 0,
1131 2 : bytes_total: 4,
1132 2 : result: Ok(vec![b'a', b'b']),
1133 2 : },
1134 2 : Expectation {
1135 2 : offset: 2,
1136 2 : bytes_total: 2,
1137 2 : result: Ok(vec![b'c', b'd']),
1138 2 : },
1139 2 : ]),
1140 2 : }));
1141 4 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1142 4 : let mock_read_at = Arc::clone(&mock_read_at);
1143 4 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1144 4 : })
1145 2 : .await;
1146 2 : assert!(res.is_ok());
1147 2 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
1148 2 : }
1149 :
1150 : #[tokio::test]
1151 2 : async fn test_eof_before_buffer_full() {
1152 2 : let buf = Vec::with_capacity(3).slice_full();
1153 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1154 2 : expectations: VecDeque::from(vec![
1155 2 : Expectation {
1156 2 : offset: 0,
1157 2 : bytes_total: 3,
1158 2 : result: Ok(vec![b'a']),
1159 2 : },
1160 2 : Expectation {
1161 2 : offset: 1,
1162 2 : bytes_total: 2,
1163 2 : result: Ok(vec![b'b']),
1164 2 : },
1165 2 : Expectation {
1166 2 : offset: 2,
1167 2 : bytes_total: 1,
1168 2 : result: Ok(vec![]),
1169 2 : },
1170 2 : ]),
1171 2 : }));
1172 6 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1173 6 : let mock_read_at = Arc::clone(&mock_read_at);
1174 6 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1175 6 : })
1176 2 : .await;
1177 2 : let Err(err) = res else {
1178 2 : panic!("should return an error");
1179 2 : };
1180 2 : assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
1181 2 : assert_eq!(format!("{err}"), "failed to fill whole buffer");
1182 2 : // buffer contents on error are unspecified
1183 2 : }
1184 : }
1185 :
1186 : struct FileGuard {
1187 : slot_guard: RwLockReadGuard<'static, SlotInner>,
1188 : }
1189 :
1190 : impl AsRef<OwnedFd> for FileGuard {
1191 1906707 : fn as_ref(&self) -> &OwnedFd {
1192 1906707 : // This unwrap is safe because we only create `FileGuard`s
1193 1906707 : // if we know that the file is Some.
1194 1906707 : self.slot_guard.file.as_ref().unwrap()
1195 1906707 : }
1196 : }
1197 :
1198 : impl FileGuard {
1199 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
1200 953315 : fn with_std_file<F, R>(&self, with: F) -> R
1201 953315 : where
1202 953315 : F: FnOnce(&File) -> R,
1203 953315 : {
1204 953315 : // SAFETY:
1205 953315 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1206 953315 : // - `&` usage below: `self` is `&`, hence the Rust type system guarantees there is no `&mut`
1207 953315 : let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1208 953315 : let res = with(&file);
1209 953315 : let _ = file.into_raw_fd();
1210 953315 : res
1211 953315 : }
1212 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
1213 4 : fn with_std_file_mut<F, R>(&mut self, with: F) -> R
1214 4 : where
1215 4 : F: FnOnce(&mut File) -> R,
1216 4 : {
1217 4 : // SAFETY:
1218 4 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1219 4 : // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
1220 4 : let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1221 4 : let res = with(&mut file);
1222 4 : let _ = file.into_raw_fd();
1223 4 : res
1224 4 : }
1225 : }
1226 :
1227 : impl tokio_epoll_uring::IoFd for FileGuard {
1228 953388 : unsafe fn as_fd(&self) -> RawFd {
1229 953388 : let owned_fd: &OwnedFd = self.as_ref();
1230 953388 : owned_fd.as_raw_fd()
1231 953388 : }
1232 : }
1233 :
1234 : #[cfg(test)]
1235 : impl VirtualFile {
1236 20916 : pub(crate) async fn read_blk(
1237 20916 : &self,
1238 20916 : blknum: u32,
1239 20916 : ctx: &RequestContext,
1240 20916 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
1241 20916 : self.inner.read_blk(blknum, ctx).await
1242 20916 : }
1243 :
1244 224 : async fn read_to_end(&mut self, buf: &mut Vec<u8>, ctx: &RequestContext) -> Result<(), Error> {
1245 226 : self.inner.read_to_end(buf, ctx).await
1246 224 : }
1247 : }
1248 :
1249 : #[cfg(test)]
1250 : impl VirtualFileInner {
1251 20916 : pub(crate) async fn read_blk(
1252 20916 : &self,
1253 20916 : blknum: u32,
1254 20916 : ctx: &RequestContext,
1255 20916 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
1256 : use crate::page_cache::PAGE_SZ;
1257 20916 : let slice = IoBufferMut::with_capacity(PAGE_SZ).slice_full();
1258 20916 : assert_eq!(slice.bytes_total(), PAGE_SZ);
1259 20916 : let slice = self
1260 20916 : .read_exact_at(slice, blknum as u64 * (PAGE_SZ as u64), ctx)
1261 10619 : .await?;
1262 20916 : Ok(crate::tenant::block_io::BlockLease::IoBufferMut(
1263 20916 : slice.into_inner(),
1264 20916 : ))
1265 20916 : }
1266 :
1267 224 : async fn read_to_end(&mut self, buf: &mut Vec<u8>, ctx: &RequestContext) -> Result<(), Error> {
1268 224 : let mut tmp = vec![0; 128];
1269 : loop {
1270 444 : let slice = tmp.slice(..128);
1271 444 : let (slice, res) = self.read_at(slice, self.pos, ctx).await;
1272 2 : match res {
1273 222 : Ok(0) => return Ok(()),
1274 220 : Ok(n) => {
1275 220 : self.pos += n as u64;
1276 220 : buf.extend_from_slice(&slice[..n]);
1277 220 : }
1278 2 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
1279 2 : Err(e) => return Err(e),
1280 : }
1281 220 : tmp = slice.into_inner();
1282 : }
1283 224 : }
1284 : }
1285 :
1286 : impl Drop for VirtualFileInner {
1287 : /// If a VirtualFile is dropped, close the underlying file if it was open.
1288 5116 : fn drop(&mut self) {
1289 5116 : let handle = self.handle.get_mut();
1290 :
1291 5116 : fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
1292 5116 : if slot_guard.tag == tag {
1293 4556 : slot.recently_used.store(false, Ordering::Relaxed);
1294 : // there is also operation "close-by-replace" for closes done on eviction for
1295 : // comparison.
1296 4556 : if let Some(fd) = slot_guard.file.take() {
1297 4556 : STORAGE_IO_TIME_METRIC
1298 4556 : .get(StorageIoOperation::Close)
1299 4556 : .observe_closure_duration(|| drop(fd));
1300 4556 : }
1301 560 : }
1302 5116 : }
1303 :
1304 : // We don't have async drop so we cannot directly await the lock here.
1305 : // Instead, first do a best-effort attempt at closing the underlying
1306 : // file descriptor by using `try_write`, and if that fails, spawn
1307 : // a tokio task to do it asynchronously: we just want it to be
1308 : // cleaned up eventually.
1309 : // Most of the time, the `try_write` should succeed though,
1310 : // as we have `&mut self` access. In other words, if the slot
1311 : // is still occupied by our file, there should be no access from
1312 : // other I/O operations; the only other possible place to lock
1313 : // the slot is the clock algorithm looking for free slots.
1314 5116 : let slot = &get_open_files().slots[handle.index];
1315 5116 : if let Ok(slot_guard) = slot.inner.try_write() {
1316 5116 : clean_slot(slot, slot_guard, handle.tag);
1317 5116 : } else {
1318 0 : let tag = handle.tag;
1319 0 : tokio::spawn(async move {
1320 0 : let slot_guard = slot.inner.write().await;
1321 0 : clean_slot(slot, slot_guard, tag);
1322 0 : });
1323 0 : };
1324 5116 : }
1325 : }
1326 :
1327 : impl OwnedAsyncWriter for VirtualFile {
1328 : #[inline(always)]
1329 6597 : async fn write_all<Buf: IoBuf + Send>(
1330 6597 : &mut self,
1331 6597 : buf: FullSlice<Buf>,
1332 6597 : ctx: &RequestContext,
1333 6597 : ) -> std::io::Result<(usize, FullSlice<Buf>)> {
1334 6597 : let (buf, res) = VirtualFile::write_all(self, buf, ctx).await;
1335 6597 : res.map(move |v| (v, buf))
1336 6597 : }
1337 : }
1338 :
1339 : impl OpenFiles {
1340 202 : fn new(num_slots: usize) -> OpenFiles {
1341 202 : let mut slots = Box::new(Vec::with_capacity(num_slots));
1342 2020 : for _ in 0..num_slots {
1343 2020 : let slot = Slot {
1344 2020 : recently_used: AtomicBool::new(false),
1345 2020 : inner: RwLock::new(SlotInner { tag: 0, file: None }),
1346 2020 : };
1347 2020 : slots.push(slot);
1348 2020 : }
1349 :
1350 202 : OpenFiles {
1351 202 : next: AtomicUsize::new(0),
1352 202 : slots: Box::leak(slots),
1353 202 : }
1354 202 : }
1355 : }
1356 :
1357 : ///
1358 : /// Initialize the virtual file module. This must be called once at page
1359 : /// server startup.
1360 : ///
1361 : #[cfg(not(test))]
1362 0 : pub fn init(num_slots: usize, engine: IoEngineKind, mode: IoMode, sync_mode: SyncMode) {
1363 0 : if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
1364 0 : panic!("virtual_file::init called twice");
1365 0 : }
1366 0 : set_io_mode(mode);
1367 0 : io_engine::init(engine);
1368 0 : SYNC_MODE.store(sync_mode as u8, std::sync::atomic::Ordering::Relaxed);
1369 0 : crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
1370 0 : }
1371 :
1372 : const TEST_MAX_FILE_DESCRIPTORS: usize = 10;
1373 :
1374 : // Get a handle to the global slots array.
1375 1917742 : fn get_open_files() -> &'static OpenFiles {
1376 1917742 : //
1377 1917742 : // In unit tests, page server startup doesn't happen and no one calls
1378 1917742 : // virtual_file::init(). Initialize it here, with a small array.
1379 1917742 : //
1380 1917742 : // This applies to the virtual file tests below, but all other unit
1381 1917742 : // tests too, so the virtual file facility is always usable in
1382 1917742 : // unit tests.
1383 1917742 : //
1384 1917742 : if cfg!(test) {
1385 1917742 : OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
1386 : } else {
1387 0 : OPEN_FILES.get().expect("virtual_file::init not called yet")
1388 : }
1389 1917742 : }
1390 :
1391 : /// Gets the io buffer alignment.
1392 0 : pub(crate) const fn get_io_buffer_alignment() -> usize {
1393 0 : DEFAULT_IO_BUFFER_ALIGNMENT
1394 0 : }
1395 :
1396 : pub(crate) type IoBufferMut = AlignedBufferMut<ConstAlign<{ get_io_buffer_alignment() }>>;
1397 : pub(crate) type IoBuffer = AlignedBuffer<ConstAlign<{ get_io_buffer_alignment() }>>;
1398 : pub(crate) type IoPageSlice<'a> =
1399 : AlignedSlice<'a, PAGE_SZ, ConstAlign<{ get_io_buffer_alignment() }>>;
1400 :
1401 : static IO_MODE: AtomicU8 = AtomicU8::new(IoMode::preferred() as u8);
1402 :
1403 0 : pub(crate) fn set_io_mode(mode: IoMode) {
1404 0 : IO_MODE.store(mode as u8, std::sync::atomic::Ordering::Relaxed);
1405 0 : }
1406 :
1407 1190 : pub(crate) fn get_io_mode() -> IoMode {
1408 1190 : IoMode::try_from(IO_MODE.load(Ordering::Relaxed)).unwrap()
1409 1190 : }
1410 :
1411 : static SYNC_MODE: AtomicU8 = AtomicU8::new(SyncMode::Sync as u8);
1412 :
1413 : #[cfg(test)]
1414 : mod tests {
1415 : use crate::context::DownloadBehavior;
1416 : use crate::task_mgr::TaskKind;
1417 :
1418 : use super::*;
1419 : use owned_buffers_io::io_buf_ext::IoBufExt;
1420 : use owned_buffers_io::slice::SliceMutExt;
1421 : use rand::seq::SliceRandom;
1422 : use rand::thread_rng;
1423 : use rand::Rng;
1424 : use std::io::Write;
1425 : use std::os::unix::fs::FileExt;
1426 : use std::sync::Arc;
1427 :
1428 : enum MaybeVirtualFile {
1429 : VirtualFile(VirtualFile),
1430 : File(File),
1431 : }
1432 :
1433 : impl From<VirtualFile> for MaybeVirtualFile {
1434 6 : fn from(vf: VirtualFile) -> Self {
1435 6 : MaybeVirtualFile::VirtualFile(vf)
1436 6 : }
1437 : }
1438 :
1439 : impl MaybeVirtualFile {
1440 404 : async fn read_exact_at(
1441 404 : &self,
1442 404 : mut slice: tokio_epoll_uring::Slice<IoBufferMut>,
1443 404 : offset: u64,
1444 404 : ctx: &RequestContext,
1445 404 : ) -> Result<tokio_epoll_uring::Slice<IoBufferMut>, Error> {
1446 404 : match self {
1447 203 : MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(slice, offset, ctx).await,
1448 202 : MaybeVirtualFile::File(file) => {
1449 202 : let rust_slice: &mut [u8] = slice.as_mut_rust_slice_full_zeroed();
1450 202 : file.read_exact_at(rust_slice, offset).map(|()| slice)
1451 : }
1452 : }
1453 404 : }
1454 8 : async fn write_all_at<Buf: IoBuf + Send>(
1455 8 : &self,
1456 8 : buf: FullSlice<Buf>,
1457 8 : offset: u64,
1458 8 : ctx: &RequestContext,
1459 8 : ) -> Result<(), Error> {
1460 8 : match self {
1461 4 : MaybeVirtualFile::VirtualFile(file) => {
1462 4 : let (_buf, res) = file.write_all_at(buf, offset, ctx).await;
1463 4 : res
1464 : }
1465 4 : MaybeVirtualFile::File(file) => file.write_all_at(&buf[..], offset),
1466 : }
1467 8 : }
1468 36 : async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
1469 36 : match self {
1470 18 : MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
1471 18 : MaybeVirtualFile::File(file) => file.seek(pos),
1472 : }
1473 36 : }
1474 8 : async fn write_all<Buf: IoBuf + Send>(
1475 8 : &mut self,
1476 8 : buf: FullSlice<Buf>,
1477 8 : ctx: &RequestContext,
1478 8 : ) -> Result<(), Error> {
1479 8 : match self {
1480 4 : MaybeVirtualFile::VirtualFile(file) => {
1481 4 : let (_buf, res) = file.write_all(buf, ctx).await;
1482 4 : res.map(|_| ())
1483 : }
1484 4 : MaybeVirtualFile::File(file) => file.write_all(&buf[..]),
1485 : }
1486 8 : }
1487 :
1488 : // Helper function to slurp contents of a file, starting at the current position,
1489 : // into a string
1490 442 : async fn read_string(&mut self, ctx: &RequestContext) -> Result<String, Error> {
1491 : use std::io::Read;
1492 442 : let mut buf = String::new();
1493 442 : match self {
1494 224 : MaybeVirtualFile::VirtualFile(file) => {
1495 224 : let mut buf = Vec::new();
1496 226 : file.read_to_end(&mut buf, ctx).await?;
1497 222 : return Ok(String::from_utf8(buf).unwrap());
1498 : }
1499 218 : MaybeVirtualFile::File(file) => {
1500 218 : file.read_to_string(&mut buf)?;
1501 : }
1502 : }
1503 216 : Ok(buf)
1504 442 : }
1505 :
1506 : // Helper function to slurp a portion of a file into a string
1507 404 : async fn read_string_at(
1508 404 : &mut self,
1509 404 : pos: u64,
1510 404 : len: usize,
1511 404 : ctx: &RequestContext,
1512 404 : ) -> Result<String, Error> {
1513 404 : let slice = IoBufferMut::with_capacity(len).slice_full();
1514 404 : assert_eq!(slice.bytes_total(), len);
1515 404 : let slice = self.read_exact_at(slice, pos, ctx).await?;
1516 404 : let buf = slice.into_inner();
1517 404 : assert_eq!(buf.len(), len);
1518 :
1519 404 : Ok(String::from_utf8(buf.to_vec()).unwrap())
1520 404 : }
1521 : }
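         : // With both backends behind one enum, a single test body can drive either
         : // implementation and compare results; a usage sketch (path and ctx as set
         : // up in the tests below):
         : //
         : //     let mut f: MaybeVirtualFile = VirtualFile::open(&path, &ctx).await?.into();
         : //     assert_eq!("foobar", f.read_string(&ctx).await?);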
1522 :
1523 : #[tokio::test]
1524 2 : async fn test_virtual_files() -> anyhow::Result<()> {
1525 2 : // The real work is done in the test_files() helper function. This
1526 2 : // allows us to run the same set of tests against both a native File
1527 2 : // and a VirtualFile. We trust native Files and don't need to test them,
1528 2 : // but running both lets us verify that the operations return the same
1529 2 : // results with VirtualFiles as with native Files. (Except that with
1530 2 : // native files, you will run out of file descriptors if the ulimit
1531 2 : // is low enough.)
1532 2 : struct A;
1533 2 :
1534 2 : impl Adapter for A {
1535 206 : async fn open(
1536 206 : path: Utf8PathBuf,
1537 206 : opts: OpenOptions,
1538 206 : ctx: &RequestContext,
1539 206 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1540 206 : let vf = VirtualFile::open_with_options(&path, &opts, ctx).await?;
1541 206 : Ok(MaybeVirtualFile::VirtualFile(vf))
1542 206 : }
1543 2 : }
1544 531 : test_files::<A>("virtual_files").await
1545 2 : }
1546 :
1547 : #[tokio::test]
1548 2 : async fn test_physical_files() -> anyhow::Result<()> {
1549 2 : struct B;
1550 2 :
1551 2 : impl Adapter for B {
1552 206 : async fn open(
1553 206 : path: Utf8PathBuf,
1554 206 : opts: OpenOptions,
1555 206 : _ctx: &RequestContext,
1556 206 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1557 2 : Ok(MaybeVirtualFile::File({
1558 206 : let owned_fd = opts.open(path.as_std_path()).await?;
1559 206 : File::from(owned_fd)
1560 2 : }))
1561 206 : }
1562 2 : }
1563 2 :
1564 104 : test_files::<B>("physical_files").await
1565 2 : }
1566 :
1567 : /// This is essentially a closure that returns a MaybeVirtualFile. Because Rust edition
1568 : /// 2024, with its new lifetime capture and outlives rules, is not out yet, this is an
1569 : /// async function in a trait, which already benefits from the new lifetime capture rules.
1570 : trait Adapter {
1571 : async fn open(
1572 : path: Utf8PathBuf,
1573 : opts: OpenOptions,
1574 : ctx: &RequestContext,
1575 : ) -> Result<MaybeVirtualFile, anyhow::Error>;
1576 : }
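         : // For contrast, a closure-based version of this adapter would need a boxed
         : // future with an explicit lifetime tying the future to the borrowed ctx;
         : // roughly (a hypothetical sketch, not used by these tests):
         : //
         : //     type OpenFn = for<'a> fn(
         : //         Utf8PathBuf,
         : //         OpenOptions,
         : //         &'a RequestContext,
         : //     ) -> std::pin::Pin<
         : //         Box<dyn std::future::Future<Output = anyhow::Result<MaybeVirtualFile>> + 'a>,
         : //     >;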
1577 :
1578 4 : async fn test_files<A>(testname: &str) -> anyhow::Result<()>
1579 4 : where
1580 4 : A: Adapter,
1581 4 : {
1582 4 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1583 4 : let testdir = crate::config::PageServerConf::test_repo_dir(testname);
1584 4 : std::fs::create_dir_all(&testdir)?;
1585 :
1586 4 : let path_a = testdir.join("file_a");
1587 4 : let mut file_a = A::open(
1588 4 : path_a.clone(),
1589 4 : OpenOptions::new()
1590 4 : .write(true)
1591 4 : .create(true)
1592 4 : .truncate(true)
1593 4 : .to_owned(),
1594 4 : &ctx,
1595 4 : )
1596 4 : .await?;
1597 4 : file_a
1598 4 : .write_all(b"foobar".to_vec().slice_len(), &ctx)
1599 1 : .await?;
1600 :
1601 : // cannot read from a file opened in write-only mode
1602 4 : let _ = file_a.read_string(&ctx).await.unwrap_err();
1603 :
1604 : // Close the file and re-open for reading
1605 4 : let mut file_a = A::open(path_a, OpenOptions::new().read(true).to_owned(), &ctx).await?;
1606 :
1607 : // cannot write to a file opened in read-only mode
1608 4 : let _ = file_a
1609 4 : .write_all(b"bar".to_vec().slice_len(), &ctx)
1610 1 : .await
1611 4 : .unwrap_err();
1612 4 :
1613 4 : // Try simple read
1614 4 : assert_eq!("foobar", file_a.read_string(&ctx).await?);
1615 :
1616 : // It's positioned at the EOF now.
1617 4 : assert_eq!("", file_a.read_string(&ctx).await?);
1618 :
1619 : // Test seeks.
1620 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1621 4 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1622 :
1623 4 : assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
1624 4 : assert_eq!("ar", file_a.read_string(&ctx).await?);
1625 :
1626 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1627 4 : assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
1628 4 : assert_eq!("bar", file_a.read_string(&ctx).await?);
1629 :
1630 4 : assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
1631 4 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1632 :
1633 : // Test erroneous seeks to before byte 0
1634 4 : file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
1635 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1636 4 : file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();
1637 4 :
1638 4 : // the erroneous seek should have left the position unchanged
1639 4 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1640 :
1641 : // Create another test file, and try FileExt functions on it.
1642 4 : let path_b = testdir.join("file_b");
1643 4 : let mut file_b = A::open(
1644 4 : path_b.clone(),
1645 4 : OpenOptions::new()
1646 4 : .read(true)
1647 4 : .write(true)
1648 4 : .create(true)
1649 4 : .truncate(true)
1650 4 : .to_owned(),
1651 4 : &ctx,
1652 4 : )
1653 2 : .await?;
1654 4 : file_b
1655 4 : .write_all_at(b"BAR".to_vec().slice_len(), 3, &ctx)
1656 1 : .await?;
1657 4 : file_b
1658 4 : .write_all_at(b"FOO".to_vec().slice_len(), 0, &ctx)
1659 1 : .await?;
1660 :
1661 4 : assert_eq!(file_b.read_string_at(2, 3, &ctx).await?, "OBA");
1662 :
1663 : // Open a lot of files, enough to cause some evictions. (Or to be precise,
1664 : // open the same file many times. The effect is the same.)
1665 : //
1666 : // leave file_a positioned at offset 1 before we start
1667 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1668 :
1669 4 : let mut vfiles = Vec::new();
1670 404 : for _ in 0..100 {
1671 400 : let mut vfile = A::open(
1672 400 : path_b.clone(),
1673 400 : OpenOptions::new().read(true).to_owned(),
1674 400 : &ctx,
1675 400 : )
1676 200 : .await?;
1677 400 : assert_eq!("FOOBAR", vfile.read_string(&ctx).await?);
1678 400 : vfiles.push(vfile);
1679 : }
1680 :
1681 : // make sure we opened enough files to definitely cause evictions.
1682 4 : assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);
1683 :
1684 : // The underlying file descriptor for 'file_a' should be closed now. Try to read
1685 : // from it again. We left the file positioned at offset 1 above.
1686 4 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1687 :
1688 : // Check that all the other FDs still work too. Use them in random order for
1689 : // good measure.
1690 4 : vfiles.as_mut_slice().shuffle(&mut thread_rng());
1691 400 : for vfile in vfiles.iter_mut() {
1692 400 : assert_eq!("OOBAR", vfile.read_string_at(1, 5, &ctx).await?);
1693 : }
1694 :
1695 4 : Ok(())
1696 4 : }
1697 :
1698 : /// Test using VirtualFiles from many threads concurrently. This exercises both using
1699 : /// a lot of VirtualFiles concurrently, causing evictions, and using the same
1700 : /// VirtualFile from multiple threads concurrently.
1701 : #[tokio::test]
1702 2 : async fn test_vfile_concurrency() -> Result<(), Error> {
1703 2 : const SIZE: usize = 8 * 1024;
1704 2 : const VIRTUAL_FILES: usize = 100;
1705 2 : const THREADS: usize = 100;
1706 2 : const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];
1707 2 :
1708 2 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1709 2 : let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
1710 2 : std::fs::create_dir_all(&testdir)?;
1711 2 :
1712 2 : // Create a test file.
1713 2 : let test_file_path = testdir.join("concurrency_test_file");
1714 2 : {
1715 2 : let file = File::create(&test_file_path)?;
1716 2 : file.write_all_at(&SAMPLE, 0)?;
1717 2 : }
1718 2 :
1719 2 : // Open the file many times.
1720 2 : let mut files = Vec::new();
1721 202 : for _ in 0..VIRTUAL_FILES {
1722 200 : let f = VirtualFileInner::open_with_options(
1723 200 : &test_file_path,
1724 200 : OpenOptions::new().read(true),
1725 200 : &ctx,
1726 200 : )
1727 101 : .await?;
1728 200 : files.push(f);
1729 2 : }
1730 2 : let files = Arc::new(files);
1731 2 :
1732 2 : // Launch many threads, and use the virtual files concurrently in random order.
1733 2 : let rt = tokio::runtime::Builder::new_multi_thread()
1734 2 : .worker_threads(THREADS)
1735 2 : .thread_name("test_vfile_concurrency thread")
1736 2 : .build()
1737 2 : .unwrap();
1738 2 : let mut hdls = Vec::new();
1739 202 : for _threadno in 0..THREADS {
1740 200 : let files = files.clone();
1741 200 : let ctx = ctx.detached_child(TaskKind::UnitTest, DownloadBehavior::Error);
1742 200 : let hdl = rt.spawn(async move {
1743 200 : let mut buf = IoBufferMut::with_capacity_zeroed(SIZE);
1744 200 : let mut rng = rand::rngs::OsRng;
1745 200000 : for _ in 1..1000 {
1746 199800 : let f = &files[rng.gen_range(0..files.len())];
1747 199800 : buf = f
1748 199800 : .read_exact_at(buf.slice_full(), 0, &ctx)
1749 577648 : .await
1750 199800 : .unwrap()
1751 199800 : .into_inner();
1752 199800 : assert!(buf[..] == SAMPLE);
1753 2 : }
1754 200 : });
1755 200 : hdls.push(hdl);
1756 200 : }
1757 202 : for hdl in hdls {
1758 200 : hdl.await?;
1759 2 : }
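         : // The runtime is deliberately leaked below: dropping a multi-threaded
         : // Runtime from within an async context (here, the #[tokio::test] runtime)
         : // panics in tokio, so the test forgets it instead of dropping it.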
1760 2 : std::mem::forget(rt);
1761 2 :
1762 2 : Ok(())
1763 2 : }
1764 :
1765 : #[tokio::test]
1766 2 : async fn test_atomic_overwrite_basic() {
1767 2 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1768 2 : let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
1769 2 : std::fs::create_dir_all(&testdir).unwrap();
1770 2 :
1771 2 : let path = testdir.join("myfile");
1772 2 : let tmp_path = testdir.join("myfile.tmp");
1773 2 :
1774 2 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1775 2 : .await
1776 2 : .unwrap();
1777 2 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1778 2 : let post = file.read_string(&ctx).await.unwrap();
1779 2 : assert_eq!(post, "foo");
1780 2 : assert!(!tmp_path.exists());
1781 2 : drop(file);
1782 2 :
1783 2 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"bar".to_vec())
1784 2 : .await
1785 2 : .unwrap();
1786 2 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1787 2 : let post = file.read_string(&ctx).await.unwrap();
1788 2 : assert_eq!(post, "bar");
1789 2 : assert!(!tmp_path.exists());
1790 2 : drop(file);
1791 2 : }
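         : // Editor's sketch of the conventional tmp-file-plus-rename protocol that
         : // these tests exercise; an illustration only, not the actual
         : // crashsafe_overwrite implementation (the fn name is hypothetical):
         : //
         : //     fn overwrite_crashsafe_sketch(
         : //         path: &Utf8Path,
         : //         tmp_path: &Utf8Path,
         : //         content: &[u8],
         : //     ) -> std::io::Result<()> {
         : //         use std::io::Write;
         : //         // Write the new content to the tmp file (truncating any
         : //         // preexisting junk) and make it durable before the rename.
         : //         let mut tmp = File::create(tmp_path)?;
         : //         tmp.write_all(content)?;
         : //         tmp.sync_all()?;
         : //         // rename() atomically replaces the target ...
         : //         std::fs::rename(tmp_path, path)?;
         : //         // ... and fsyncing the parent directory persists the rename itself.
         : //         File::open(path.parent().expect("test paths have a parent"))?.sync_all()?;
         : //         Ok(())
         : //     }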
1792 :
1793 : #[tokio::test]
1794 2 : async fn test_atomic_overwrite_preexisting_tmp() {
1795 2 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1796 2 : let testdir =
1797 2 : crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
1798 2 : std::fs::create_dir_all(&testdir).unwrap();
1799 2 :
1800 2 : let path = testdir.join("myfile");
1801 2 : let tmp_path = testdir.join("myfile.tmp");
1802 2 :
1803 2 : std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
1804 2 : assert!(tmp_path.exists());
1805 2 :
1806 2 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1807 2 : .await
1808 2 : .unwrap();
1809 2 :
1810 2 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1811 2 : let post = file.read_string(&ctx).await.unwrap();
1812 2 : assert_eq!(post, "foo");
1813 2 : assert!(!tmp_path.exists());
1814 2 : drop(file);
1815 2 : }
1816 : }