LCOV - code coverage report
Current view: top level - pageserver/src - virtual_file.rs (source / functions)
Test:         32f4a56327bc9da697706839ed4836b2a00a408f.info
Test Date:    2024-02-07 07:37:29

                  Coverage    Total    Hit
Lines:            93.8 %      806      756
Functions:        93.4 %      151      141

            Line data    Source code
       1              : //!
       2              : //! VirtualFile is like a normal File, but it's not bound directly to
       3              : //! a file descriptor. Instead, the file is opened when it's read from,
       4              : //! and if too many files are open globally in the system, least-recently
       5              : //! used ones are closed.
       6              : //!
       7              : //! To track which files have been recently used, we use the clock algorithm
       8              : //! with a 'recently_used' flag on each slot.
       9              : //!
      10              : //! This is similar to PostgreSQL's virtual file descriptor facility in
      11              : //! src/backend/storage/file/fd.c
      12              : //!
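//!
//! Conceptually, the clock sweep looks roughly like the sketch below. This is
//! an illustrative sketch only; the real logic, including the retry limit and
//! the fallback to a blocking lock, lives in `OpenFiles::find_victim_slot`
//! further down in this file.
//!
//! ```ignore
//! loop {
//!     let next = open_files.next.fetch_add(1, Ordering::AcqRel) % open_files.slots.len();
//!     let slot = &open_files.slots[next];
//!     // Second chance: clear the flag, and only evict slots that were not
//!     // recently used and whose lock we can grab without waiting.
//!     if !slot.recently_used.swap(false, Ordering::Release) {
//!         if let Ok(guard) = slot.inner.try_write() {
//!             break (next, guard); // found a victim
//!         }
//!     }
//! }
//! ```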
      13              : use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC};
      14              : 
      15              : use crate::page_cache::PageWriteGuard;
      16              : use crate::tenant::TENANTS_SEGMENT_NAME;
      17              : use camino::{Utf8Path, Utf8PathBuf};
      18              : use once_cell::sync::OnceCell;
      19              : use pageserver_api::shard::TenantShardId;
      20              : use std::fs::{self, File};
      21              : use std::io::{Error, ErrorKind, Seek, SeekFrom};
      22              : use tokio_epoll_uring::IoBufMut;
      23              : 
      24              : use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
      25              : use std::os::unix::fs::FileExt;
      26              : use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
      27              : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
      28              : use tokio::time::Instant;
      29              : use utils::fs_ext;
      30              : 
      31              : mod io_engine;
      32              : mod open_options;
      33              : pub use io_engine::IoEngineKind;
      34              : pub(crate) use open_options::*;
      35              : 
      36              : ///
      37              : /// A virtual file descriptor. You can use this just like std::fs::File, but internally
      38              : /// the underlying file is closed if the system is low on file descriptors,
      39              : /// and re-opened when it's accessed again.
      40              : ///
      41              : /// Like with std::fs::File, multiple threads can read/write the file concurrently,
       42              : /// holding just a shared reference to the same VirtualFile, using the read_at() / write_at()
      43              : /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
      44              : /// require a mutable reference, because they modify the "current position".
      45              : ///
      46              : /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
       47              : /// slot that 'handle' points to, if the underlying file is currently open. If it's not
      48              : /// currently open, the 'handle' can still point to the slot where it was last kept. The
      49              : /// 'tag' field is used to detect whether the handle still is valid or not.
      50              : ///
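/// # Example
///
/// A minimal usage sketch, added here for illustration (not part of the
/// original docs). It assumes a tokio runtime, that `virtual_file::init()` has
/// already been called, and uses a made-up path:
///
/// ```ignore
/// let path = Utf8Path::new("/some/made-up/path");
/// let mut file = VirtualFile::create(path).await?;
/// file.write_all(b"hello").await?;
/// file.sync_all().await?;
/// drop(file); // `create()` opens write-only, so re-open for reading
///
/// let file = VirtualFile::open(path).await?;
/// // Positional reads take ownership of the buffer and hand it back.
/// let buf = file.read_exact_at(vec![0u8; 5], 0).await?;
/// assert_eq!(buf, b"hello");
/// ```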
      51            0 : #[derive(Debug)]
      52              : pub struct VirtualFile {
      53              :     /// Lazy handle to the global file descriptor cache. The slot that this points to
      54              :     /// might contain our File, or it may be empty, or it may contain a File that
      55              :     /// belongs to a different VirtualFile.
      56              :     handle: RwLock<SlotHandle>,
      57              : 
      58              :     /// Current file position
      59              :     pos: u64,
      60              : 
      61              :     /// File path and options to use to open it.
      62              :     ///
      63              :     /// Note: this only contains the options needed to re-open it. For example,
      64              :     /// if a new file is created, we only pass the create flag when it's initially
      65              :     /// opened, in the VirtualFile::create() function, and strip the flag before
      66              :     /// storing it here.
      67              :     pub path: Utf8PathBuf,
      68              :     open_options: OpenOptions,
      69              : 
       70              :     // These are strings because we only use them for metrics, and those expect strings.
      71              :     // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
      72              :     // strings.
      73              :     tenant_id: String,
      74              :     shard_id: String,
      75              :     timeline_id: String,
      76              : }
      77              : 
      78       298020 : #[derive(Debug, PartialEq, Clone, Copy)]
      79              : struct SlotHandle {
      80              :     /// Index into OPEN_FILES.slots
      81              :     index: usize,
      82              : 
      83              :     /// Value of 'tag' in the slot. If slot's tag doesn't match, then the slot has
      84              :     /// been recycled and no longer contains the FD for this virtual file.
      85              :     tag: u64,
      86              : }
      87              : 
      88              : /// OPEN_FILES is the global array that holds the physical file descriptors that
      89              : /// are currently open. Each slot in the array is protected by a separate lock,
      90              : /// so that different files can be accessed independently. The lock must be held
      91              : /// in write mode to replace the slot with a different file, but a read mode
      92              : /// is enough to operate on the file, whether you're reading or writing to it.
      93              : ///
      94              : /// OPEN_FILES starts in uninitialized state, and it's initialized by
      95              : /// the virtual_file::init() function. It must be called exactly once at page
      96              : /// server startup.
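///
/// A rough sketch of what that initialization could look like is shown below.
/// This is an assumption for illustration: the real `init()` is defined
/// elsewhere in this module and its signature may differ (for example, it may
/// also take the [`IoEngineKind`] to use).
///
/// ```ignore
/// pub fn init(num_slots: usize) {
///     if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
///         panic!("virtual_file::init called twice");
///     }
/// }
/// ```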
      97              : static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();
      98              : 
      99              : struct OpenFiles {
     100              :     slots: &'static [Slot],
     101              : 
     102              :     /// clock arm for the clock algorithm
     103              :     next: AtomicUsize,
     104              : }
     105              : 
     106              : struct Slot {
     107              :     inner: RwLock<SlotInner>,
     108              : 
     109              :     /// has this file been used since last clock sweep?
     110              :     recently_used: AtomicBool,
     111              : }
     112              : 
     113              : struct SlotInner {
     114              :     /// Counter that's incremented every time a different file is stored here.
     115              :     /// To avoid the ABA problem.
     116              :     tag: u64,
     117              : 
     118              :     /// the underlying file
     119              :     file: Option<OwnedFd>,
     120              : }
     121              : 
     122              : /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
     123              : struct PageWriteGuardBuf {
     124              :     page: PageWriteGuard<'static>,
     125              :     init_up_to: usize,
     126              : }
     127              : // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
     128              : // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
     129              : unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
     130      6296273 :     fn stable_ptr(&self) -> *const u8 {
     131      6296273 :         self.page.as_ptr()
     132      6296273 :     }
     133     18888819 :     fn bytes_init(&self) -> usize {
     134     18888819 :         self.init_up_to
     135     18888819 :     }
     136      6296273 :     fn bytes_total(&self) -> usize {
     137      6296273 :         self.page.len()
     138      6296273 :     }
     139              : }
     140              : // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
     141              : // hence it's safe to hand out the `stable_mut_ptr()`.
     142              : unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
     143      6296273 :     fn stable_mut_ptr(&mut self) -> *mut u8 {
     144      6296273 :         self.page.as_mut_ptr()
     145      6296273 :     }
     146              : 
     147      6296273 :     unsafe fn set_init(&mut self, pos: usize) {
     148      6296273 :         assert!(pos <= self.page.len());
     149      6296273 :         self.init_up_to = pos;
     150      6296273 :     }
     151              : }
     152              : 
     153              : impl OpenFiles {
     154              :     /// Find a slot to use, evicting an existing file descriptor if needed.
     155              :     ///
      156              :     /// On return, we hold a lock on the slot, its 'tag' has been updated, and
      157              :     /// recently_used has been set. It's all ready for reuse.
     158       284742 :     async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
     159       284742 :         //
     160       284742 :         // Run the clock algorithm to find a slot to replace.
     161       284742 :         //
     162       284742 :         let num_slots = self.slots.len();
     163       284742 :         let mut retries = 0;
     164              :         let mut slot;
     165              :         let mut slot_guard;
     166              :         let index;
     167      2393041 :         loop {
     168      2393041 :             let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
     169      2393041 :             slot = &self.slots[next];
     170      2393041 : 
     171      2393041 :             // If the recently_used flag on this slot is set, continue the clock
     172      2393041 :             // sweep. Otherwise try to use this slot. If we cannot acquire the
     173      2393041 :             // lock, also continue the clock sweep.
     174      2393041 :             //
     175      2393041 :             // We only continue in this manner for a while, though. If we loop
     176      2393041 :             // through the array twice without finding a victim, just pick the
     177      2393041 :             // next slot and wait until we can reuse it. This way, we avoid
     178      2393041 :             // spinning in the extreme case that all the slots are busy with an
     179      2393041 :             // I/O operation.
     180      2393041 :             if retries < num_slots * 2 {
     181      2310796 :                 if !slot.recently_used.swap(false, Ordering::Release) {
     182      2039372 :                     if let Ok(guard) = slot.inner.try_write() {
     183       202497 :                         slot_guard = guard;
     184       202497 :                         index = next;
     185       202497 :                         break;
     186      1836875 :                     }
     187       271424 :                 }
     188      2108299 :                 retries += 1;
     189              :             } else {
     190        82245 :                 slot_guard = slot.inner.write().await;
     191        82245 :                 index = next;
     192        82245 :                 break;
     193              :             }
     194              :         }
     195              : 
     196              :         //
     197              :         // We now have the victim slot locked. If it was in use previously, close the
     198              :         // old file.
     199              :         //
     200       284742 :         if let Some(old_file) = slot_guard.file.take() {
     201       223737 :             // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
     202       223737 :             // distinguish the two.
     203       223737 :             STORAGE_IO_TIME_METRIC
     204       223737 :                 .get(StorageIoOperation::CloseByReplace)
     205       223737 :                 .observe_closure_duration(|| drop(old_file));
     206       223737 :         }
     207              : 
     208              :         // Prepare the slot for reuse and return it
     209       284742 :         slot_guard.tag += 1;
     210       284742 :         slot.recently_used.store(true, Ordering::Relaxed);
     211       284742 :         (
     212       284742 :             SlotHandle {
     213       284742 :                 index,
     214       284742 :                 tag: slot_guard.tag,
     215       284742 :             },
     216       284742 :             slot_guard,
     217       284742 :         )
     218       284742 :     }
     219              : }
     220              : 
      221              : /// Identify error types that should always terminate the process.  Other
      222              : /// error types may be eligible for retry.
     223            0 : pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
     224            0 :     use nix::errno::Errno::*;
     225            0 :     match e.raw_os_error().map(nix::errno::from_i32) {
     226              :         Some(EIO) => {
     227              :             // Terminate on EIO because we no longer trust the device to store
     228              :             // data safely, or to uphold persistence guarantees on fsync.
     229            0 :             true
     230              :         }
     231              :         Some(EROFS) => {
     232              :             // Terminate on EROFS because a filesystem is usually remounted
     233              :             // readonly when it has experienced some critical issue, so the same
     234              :             // logic as EIO applies.
     235            0 :             true
     236              :         }
     237              :         Some(EACCES) => {
      238              :             // Terminate on EACCES because we should always have permissions
     239              :             // for our own data dir: if we don't, then we can't do our job and
     240              :             // need administrative intervention to fix permissions.  Terminating
     241              :             // is the best way to make sure we stop cleanly rather than going
     242              :             // into infinite retry loops, and will make it clear to the outside
     243              :             // world that we need help.
     244            0 :             true
     245              :         }
     246              :         _ => {
      247              :             // Treat all other local file I/O errors as retryable.  This includes:
     248              :             // - ENOSPC: we stay up and wait for eviction to free some space
     249              :             // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
      250              :             // - WriteZero, Interrupted: these are used internally by VirtualFile
     251            0 :             false
     252              :         }
     253              :     }
     254            0 : }
     255              : 
     256              : /// Call this when the local filesystem gives us an error with an external
      257              : /// cause: this includes EIO, EROFS, and EACCES: all these indicate either
     258              : /// bad storage or bad configuration, and we can't fix that from inside
     259              : /// a running process.
     260            0 : pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
      261            0 :     tracing::error!("Fatal I/O error: {e}: {context}");
     262            0 :     std::process::abort();
     263              : }
     264              : 
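/// Convenience methods on `std::io::Result` for escalating fatal I/O errors
/// (see `is_fatal_io_error` above).
///
/// Illustrative usage, added as a sketch for clarity; the file names and
/// context strings below are made up:
///
/// ```ignore
/// // Writes: terminate on EIO/EROFS/EACCES, but return ENOSPC etc. to the caller.
/// std::fs::write("layer.tmp", b"bytes").maybe_fatal_err("write layer file")?;
///
/// // Reads of files we know exist: treat any error as fatal.
/// let meta = std::fs::metadata("metadata.json").fatal_err("stat metadata file");
/// ```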
     265              : pub(crate) trait MaybeFatalIo<T> {
     266              :     fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
     267              :     fn fatal_err(self, context: &str) -> T;
     268              : }
     269              : 
     270              : impl<T> MaybeFatalIo<T> for std::io::Result<T> {
     271              :     /// Terminate the process if the result is an error of a fatal type, else pass it through
     272              :     ///
      273              :     /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc, but
     274              :     /// not on ENOSPC.
     275          119 :     fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
     276          119 :         if let Err(e) = &self {
     277            0 :             if is_fatal_io_error(e) {
     278            0 :                 on_fatal_io_error(e, context);
     279            0 :             }
     280          119 :         }
     281          119 :         self
     282          119 :     }
     283              : 
     284              :     /// Terminate the process on any I/O error.
     285              :     ///
     286              :     /// This is appropriate for reads on files that we know exist: they should always work.
     287         1434 :     fn fatal_err(self, context: &str) -> T {
     288         1434 :         match self {
     289         1434 :             Ok(v) => v,
     290            0 :             Err(e) => {
     291            0 :                 on_fatal_io_error(&e, context);
     292              :             }
     293              :         }
     294         1434 :     }
     295              : }
     296              : 
     297              : /// Observe duration for the given storage I/O operation
     298              : ///
     299              : /// Unlike `observe_closure_duration`, this supports async,
     300              : /// where "support" means that we measure wall clock time.
     301              : macro_rules! observe_duration {
     302              :     ($op:expr, $($body:tt)*) => {{
     303              :         let instant = Instant::now();
     304              :         let result = $($body)*;
     305              :         let elapsed = instant.elapsed().as_secs_f64();
     306              :         STORAGE_IO_TIME_METRIC
     307              :             .get($op)
     308              :             .observe(elapsed);
     309              :         result
     310              :     }}
     311              : }
     312              : 
     313              : macro_rules! with_file {
     314              :     ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
     315              :         let $ident = $this.lock_file().await?;
     316              :         observe_duration!($op, $($body)*)
     317              :     }};
     318              :     ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
     319              :         let mut $ident = $this.lock_file().await?;
     320              :         observe_duration!($op, $($body)*)
     321              :     }};
     322              : }
     323              : 
     324              : impl VirtualFile {
     325              :     /// Open a file in read-only mode. Like File::open.
     326        34437 :     pub async fn open(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
     327        34437 :         Self::open_with_options(path, OpenOptions::new().read(true)).await
     328        34437 :     }
     329              : 
     330              :     /// Create a new file for writing. If the file exists, it will be truncated.
     331              :     /// Like File::create.
     332        15942 :     pub async fn create(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
     333        15942 :         Self::open_with_options(
     334        15942 :             path,
     335        15942 :             OpenOptions::new().write(true).create(true).truncate(true),
     336        15942 :         )
     337          233 :         .await
     338        15942 :     }
     339              : 
     340              :     /// Open a file with given options.
     341              :     ///
     342              :     /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
      343              :     /// they will also be applied when the file is subsequently re-opened, not only
      344              :     /// the first time. Make sure that's sane!
     345        80227 :     pub async fn open_with_options(
     346        80227 :         path: &Utf8Path,
     347        80227 :         open_options: &OpenOptions,
     348        80227 :     ) -> Result<VirtualFile, std::io::Error> {
     349        80227 :         let path_str = path.to_string();
     350        80227 :         let parts = path_str.split('/').collect::<Vec<&str>>();
     351        80227 :         let (tenant_id, shard_id, timeline_id) =
     352        80227 :             if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
     353        68903 :                 let tenant_shard_part = parts[parts.len() - 4];
     354        68903 :                 let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
     355        68903 :                     Ok(tenant_shard_id) => (
     356        68903 :                         tenant_shard_id.tenant_id.to_string(),
     357        68903 :                         format!("{}", tenant_shard_id.shard_slug()),
     358        68903 :                     ),
     359              :                     Err(_) => {
     360              :                         // Malformed path: this ID is just for observability, so tolerate it
     361              :                         // and pass through
     362            0 :                         (tenant_shard_part.to_string(), "*".to_string())
     363              :                     }
     364              :                 };
     365        68903 :                 (tenant_id, shard_id, parts[parts.len() - 2].to_string())
     366              :             } else {
     367        11324 :                 ("*".to_string(), "*".to_string(), "*".to_string())
     368              :             };
     369        80227 :         let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;
     370              : 
     371              :         // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
     372              :         // where our caller doesn't get to use the returned VirtualFile before its
     373              :         // slot gets re-used by someone else.
     374        80227 :         let file = observe_duration!(StorageIoOperation::Open, {
     375        80227 :             open_options.open(path.as_std_path()).await?
     376              :         });
     377              : 
     378              :         // Strip all options other than read and write.
     379              :         //
     380              :         // It would perhaps be nicer to check just for the read and write flags
     381              :         // explicitly, but OpenOptions doesn't contain any functions to read flags,
     382              :         // only to set them.
     383        80227 :         let mut reopen_options = open_options.clone();
     384        80227 :         reopen_options.create(false);
     385        80227 :         reopen_options.create_new(false);
     386        80227 :         reopen_options.truncate(false);
     387        80227 : 
     388        80227 :         let vfile = VirtualFile {
     389        80227 :             handle: RwLock::new(handle),
     390        80227 :             pos: 0,
     391        80227 :             path: path.to_path_buf(),
     392        80227 :             open_options: reopen_options,
     393        80227 :             tenant_id,
     394        80227 :             shard_id,
     395        80227 :             timeline_id,
     396        80227 :         };
     397        80227 : 
     398        80227 :         // TODO: Under pressure, it's likely the slot will get re-used and
     399        80227 :         // the underlying file closed before they get around to using it.
     400        80227 :         // => https://github.com/neondatabase/neon/issues/6065
     401        80227 :         slot_guard.file.replace(file);
     402        80227 : 
     403        80227 :         Ok(vfile)
     404        80227 :     }
     405              : 
      406              :     /// Writes a file to the specified `final_path` in a crash-safe fashion
     407              :     ///
     408              :     /// The file is first written to the specified tmp_path, and in a second
     409              :     /// step, the tmp path is renamed to the final path. As renames are
     410              :     /// atomic, a crash during the write operation will never leave behind a
     411              :     /// partially written file.
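    ///
    /// Usage sketch (illustrative only; the paths and contents here are made
    /// up, and errors are simply propagated with `?`):
    ///
    /// ```ignore
    /// VirtualFile::crashsafe_overwrite(
    ///     Utf8Path::new("/data/config.json"),
    ///     Utf8Path::new("/data/config.json.tmp"),
    ///     b"new contents",
    /// )
    /// .await?;
    /// ```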
     412         8849 :     pub async fn crashsafe_overwrite(
     413         8849 :         final_path: &Utf8Path,
     414         8849 :         tmp_path: &Utf8Path,
     415         8849 :         content: &[u8],
     416         8849 :     ) -> std::io::Result<()> {
     417         8849 :         let Some(final_path_parent) = final_path.parent() else {
     418            0 :             return Err(std::io::Error::from_raw_os_error(
     419            0 :                 nix::errno::Errno::EINVAL as i32,
     420            0 :             ));
     421              :         };
     422         8849 :         std::fs::remove_file(tmp_path).or_else(fs_ext::ignore_not_found)?;
     423         8849 :         let mut file = Self::open_with_options(
     424         8849 :             tmp_path,
     425         8849 :             OpenOptions::new()
     426         8849 :                 .write(true)
     427         8849 :                 // Use `create_new` so that, if we race with ourselves or something else,
     428         8849 :                 // we bail out instead of causing damage.
     429         8849 :                 .create_new(true),
     430         8849 :         )
     431          449 :         .await?;
     432         8849 :         file.write_all(content).await?;
     433         8849 :         file.sync_all().await?;
     434         8849 :         drop(file); // before the rename, that's important!
     435         8849 :                     // renames are atomic
     436         8849 :         std::fs::rename(tmp_path, final_path)?;
     437              :         // Only open final path parent dirfd now, so that this operation only
     438              :         // ever holds one VirtualFile fd at a time.  That's important because
     439              :         // the current `find_victim_slot` impl might pick the same slot for both
      440              :         // VirtualFiles, and it eventually does a blocking write lock instead of
     441              :         // try_lock.
     442         8849 :         let final_parent_dirfd =
     443         8849 :             Self::open_with_options(final_path_parent, OpenOptions::new().read(true)).await?;
     444         8849 :         final_parent_dirfd.sync_all().await?;
     445         8849 :         Ok(())
     446         8849 :     }
     447              : 
     448              :     /// Call File::sync_all() on the underlying File.
     449        39642 :     pub async fn sync_all(&self) -> Result<(), Error> {
     450        39642 :         with_file!(self, StorageIoOperation::Fsync, |file_guard| file_guard
     451        39642 :             .with_std_file(|std_file| std_file.sync_all()))
     452        39642 :     }
     453              : 
     454        21944 :     pub async fn metadata(&self) -> Result<fs::Metadata, Error> {
     455        21944 :         with_file!(self, StorageIoOperation::Metadata, |file_guard| file_guard
     456        21944 :             .with_std_file(|std_file| std_file.metadata()))
     457        21944 :     }
     458              : 
     459              :     /// Helper function internal to `VirtualFile` that looks up the underlying File,
     460              :     /// opens it and evicts some other File if necessary. The passed parameter is
     461              :     /// assumed to be a function available for the physical `File`.
     462              :     ///
     463              :     /// We are doing it via a macro as Rust doesn't support async closures that
      464              :     /// take parameters with lifetimes.
     465     15504468 :     async fn lock_file(&self) -> Result<FileGuard, Error> {
     466     15504447 :         let open_files = get_open_files();
     467              : 
     468       204515 :         let mut handle_guard = {
     469              :             // Read the cached slot handle, and see if the slot that it points to still
     470              :             // contains our File.
     471              :             //
     472              :             // We only need to hold the handle lock while we read the current handle. If
     473              :             // another thread closes the file and recycles the slot for a different file,
     474              :             // we will notice that the handle we read is no longer valid and retry.
     475     15504447 :             let mut handle = *self.handle.read().await;
     476     15597952 :             loop {
     477     15597952 :                 // Check if the slot contains our File
     478     15597952 :                 {
     479     15597952 :                     let slot = &open_files.slots[handle.index];
     480     15597952 :                     let slot_guard = slot.inner.read().await;
     481     15597952 :                     if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
     482              :                         // Found a cached file descriptor.
     483     15299932 :                         slot.recently_used.store(true, Ordering::Relaxed);
     484     15299932 :                         return Ok(FileGuard { slot_guard });
     485       298020 :                     }
     486              :                 }
     487              : 
     488              :                 // The slot didn't contain our File. We will have to open it ourselves,
     489              :                 // but before that, grab a write lock on handle in the VirtualFile, so
     490              :                 // that no other thread will try to concurrently open the same file.
     491       298020 :                 let handle_guard = self.handle.write().await;
     492              : 
     493              :                 // If another thread changed the handle while we were not holding the lock,
     494              :                 // then the handle might now be valid again. Loop back to retry.
     495       298020 :                 if *handle_guard != handle {
     496        93505 :                     handle = *handle_guard;
     497        93505 :                     continue;
     498       204515 :                 }
     499       204515 :                 break handle_guard;
     500              :             }
     501              :         };
     502              : 
     503              :         // We need to open the file ourselves. The handle in the VirtualFile is
     504              :         // now locked in write-mode. Find a free slot to put it in.
     505       204515 :         let (handle, mut slot_guard) = open_files.find_victim_slot().await;
     506              : 
     507              :         // Re-open the physical file.
      508              :         // NB: we use StorageIoOperation::OpenAfterReplace for this to distinguish this
     509              :         // case from StorageIoOperation::Open. This helps with identifying thrashing
     510              :         // of the virtual file descriptor cache.
     511       204515 :         let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
     512       204515 :             self.open_options.open(self.path.as_std_path()).await?
     513              :         });
     514              : 
     515              :         // Store the File in the slot and update the handle in the VirtualFile
     516              :         // to point to it.
     517       204515 :         slot_guard.file.replace(file);
     518       204515 : 
     519       204515 :         *handle_guard = handle;
     520       204515 : 
     521       204515 :         return Ok(FileGuard {
     522       204515 :             slot_guard: slot_guard.downgrade(),
     523       204515 :         });
     524     15504447 :     }
     525              : 
     526            0 :     pub fn remove(self) {
     527            0 :         let path = self.path.clone();
     528            0 :         drop(self);
     529            0 :         std::fs::remove_file(path).expect("failed to remove the virtual file");
     530            0 :     }
     531              : 
     532        65853 :     pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
     533        65853 :         match pos {
     534        65843 :             SeekFrom::Start(offset) => {
     535        65843 :                 self.pos = offset;
     536        65843 :             }
     537            4 :             SeekFrom::End(offset) => {
     538            4 :                 self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
     539            4 :                     .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
     540              :             }
     541            6 :             SeekFrom::Current(offset) => {
     542            6 :                 let pos = self.pos as i128 + offset as i128;
     543            6 :                 if pos < 0 {
     544            2 :                     return Err(Error::new(
     545            2 :                         ErrorKind::InvalidInput,
     546            2 :                         "offset would be negative",
     547            2 :                     ));
     548            4 :                 }
     549            4 :                 if pos > u64::MAX as i128 {
     550            0 :                     return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
     551            4 :                 }
     552            4 :                 self.pos = pos as u64;
     553              :             }
     554              :         }
     555        65849 :         Ok(self.pos)
     556        65853 :     }
     557              : 
     558      6516461 :     pub async fn read_exact_at<B>(&self, buf: B, offset: u64) -> Result<B, Error>
     559      6516461 :     where
     560      6516461 :         B: IoBufMut + Send,
     561      6516461 :     {
     562      6516461 :         let (buf, res) =
     563      6516461 :             read_exact_at_impl(buf, offset, |buf, offset| self.read_at(buf, offset)).await;
     564      6516461 :         res.map(|()| buf)
     565      6516461 :     }
     566              : 
     567              :     /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
     568      6296273 :     pub async fn read_exact_at_page(
     569      6296273 :         &self,
     570      6296273 :         page: PageWriteGuard<'static>,
     571      6296273 :         offset: u64,
     572      6296273 :     ) -> Result<PageWriteGuard<'static>, Error> {
     573      6296259 :         let buf = PageWriteGuardBuf {
     574      6296259 :             page,
     575      6296259 :             init_up_to: 0,
     576      6296259 :         };
     577      6296259 :         let res = self.read_exact_at(buf, offset).await;
     578      6296259 :         res.map(|PageWriteGuardBuf { page, .. }| page)
     579      6296259 :             .map_err(|e| Error::new(ErrorKind::Other, e))
     580      6296259 :     }
     581              : 
     582              :     // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
     583      3717709 :     pub async fn write_all_at(&self, mut buf: &[u8], mut offset: u64) -> Result<(), Error> {
     584      7435417 :         while !buf.is_empty() {
     585      3717709 :             match self.write_at(buf, offset).await {
     586              :                 Ok(0) => {
     587            0 :                     return Err(Error::new(
     588            0 :                         std::io::ErrorKind::WriteZero,
     589            0 :                         "failed to write whole buffer",
     590            0 :                     ));
     591              :                 }
     592      3717708 :                 Ok(n) => {
     593      3717708 :                     buf = &buf[n..];
     594      3717708 :                     offset += n as u64;
     595      3717708 :                 }
     596            0 :                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     597            0 :                 Err(e) => return Err(e),
     598              :             }
     599              :         }
     600      3717708 :         Ok(())
     601      3717708 :     }
     602              : 
     603      5208282 :     pub async fn write_all(&mut self, mut buf: &[u8]) -> Result<(), Error> {
     604     10416516 :         while !buf.is_empty() {
     605      5208243 :             match self.write(buf).await {
     606              :                 Ok(0) => {
     607            0 :                     return Err(Error::new(
     608            0 :                         std::io::ErrorKind::WriteZero,
     609            0 :                         "failed to write whole buffer",
     610            0 :                     ));
     611              :                 }
     612      5208241 :                 Ok(n) => {
     613      5208241 :                     buf = &buf[n..];
     614      5208241 :                 }
     615            2 :                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     616            2 :                 Err(e) => return Err(e),
     617              :             }
     618              :         }
     619      5208273 :         Ok(())
     620      5208275 :     }
     621              : 
     622      5208250 :     async fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
     623      5208243 :         let pos = self.pos;
     624      5208243 :         let n = self.write_at(buf, pos).await?;
     625      5208241 :         self.pos += n as u64;
     626      5208241 :         Ok(n)
     627      5208243 :     }
     628              : 
     629      6516905 :     pub(crate) async fn read_at<B>(&self, buf: B, offset: u64) -> (B, Result<usize, Error>)
     630      6516905 :     where
     631      6516905 :         B: tokio_epoll_uring::BoundedBufMut + Send,
     632      6516905 :     {
     633      6516905 :         let file_guard = match self.lock_file().await {
     634      6516905 :             Ok(file_guard) => file_guard,
     635            0 :             Err(e) => return (buf, Err(e)),
     636              :         };
     637              : 
     638      6516905 :         observe_duration!(StorageIoOperation::Read, {
     639      6516905 :             let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
     640      6516905 :             if let Ok(size) = res {
     641      6516903 :                 STORAGE_IO_SIZE
     642      6516903 :                     .with_label_values(&[
     643      6516903 :                         "read",
     644      6516903 :                         &self.tenant_id,
     645      6516903 :                         &self.shard_id,
     646      6516903 :                         &self.timeline_id,
     647      6516903 :                     ])
     648      6516903 :                     .add(size as i64);
     649      6516903 :             }
     650      6516905 :             (buf, res)
     651              :         })
     652      6516905 :     }
     653              : 
     654      8925959 :     async fn write_at(&self, buf: &[u8], offset: u64) -> Result<usize, Error> {
     655      8925952 :         let result = with_file!(self, StorageIoOperation::Write, |file_guard| {
     656      8925952 :             file_guard.with_std_file(|std_file| std_file.write_at(buf, offset))
     657              :         });
     658      8925952 :         if let Ok(size) = result {
     659      8925949 :             STORAGE_IO_SIZE
     660      8925949 :                 .with_label_values(&["write", &self.tenant_id, &self.shard_id, &self.timeline_id])
     661      8925949 :                 .add(size as i64);
     662      8925949 :         }
     663      8925951 :         result
     664      8925951 :     }
     665              : }
     666              : 
     667              : // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
     668      6516469 : pub async fn read_exact_at_impl<B, F, Fut>(
     669      6516469 :     buf: B,
     670      6516469 :     mut offset: u64,
     671      6516469 :     mut read_at: F,
     672      6516469 : ) -> (B, std::io::Result<()>)
     673      6516469 : where
     674      6516469 :     B: IoBufMut + Send,
     675      6516469 :     F: FnMut(tokio_epoll_uring::Slice<B>, u64) -> Fut,
     676      6516469 :     Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<B>, std::io::Result<usize>)>,
     677      6516469 : {
     678      6516469 :     use tokio_epoll_uring::BoundedBuf;
     679      6516469 :     let mut buf: tokio_epoll_uring::Slice<B> = buf.slice_full(); // includes all the uninitialized memory
     680     13032940 :     while buf.bytes_total() != 0 {
     681              :         let res;
     682      6516473 :         (buf, res) = read_at(buf, offset).await;
     683            0 :         match res {
     684            2 :             Ok(0) => break,
     685      6516471 :             Ok(n) => {
     686      6516471 :                 buf = buf.slice(n..);
     687      6516471 :                 offset += n as u64;
     688      6516471 :             }
     689            0 :             Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     690            0 :             Err(e) => return (buf.into_inner(), Err(e)),
     691              :         }
     692              :     }
     693              :     // NB: don't use `buf.is_empty()` here; it is from the
      694              :     // `impl Deref for Slice { Target = [u8] }`; the &[u8]
     695              :     // returned by it only covers the initialized portion of `buf`.
     696              :     // Whereas we're interested in ensuring that we filled the entire
     697              :     // buffer that the user passed in.
     698      6516469 :     if buf.bytes_total() != 0 {
     699            2 :         (
     700            2 :             buf.into_inner(),
     701            2 :             Err(std::io::Error::new(
     702            2 :                 std::io::ErrorKind::UnexpectedEof,
     703            2 :                 "failed to fill whole buffer",
     704            2 :             )),
     705            2 :         )
     706              :     } else {
     707      6516467 :         assert_eq!(buf.len(), buf.bytes_total());
     708      6516467 :         (buf.into_inner(), Ok(()))
     709              :     }
     710      6516469 : }
     711              : 
     712              : #[cfg(test)]
     713              : mod test_read_exact_at_impl {
     714              : 
     715              :     use std::{collections::VecDeque, sync::Arc};
     716              : 
     717              :     use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};
     718              : 
     719              :     use super::read_exact_at_impl;
     720              : 
     721              :     struct Expectation {
     722              :         offset: u64,
     723              :         bytes_total: usize,
     724              :         result: std::io::Result<Vec<u8>>,
     725              :     }
     726              :     struct MockReadAt {
     727              :         expectations: VecDeque<Expectation>,
     728              :     }
     729              : 
     730              :     impl MockReadAt {
     731           12 :         async fn read_at(
     732           12 :             &mut self,
     733           12 :             mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
     734           12 :             offset: u64,
     735           12 :         ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
     736           12 :             let exp = self
     737           12 :                 .expectations
     738           12 :                 .pop_front()
     739           12 :                 .expect("read_at called but we have no expectations left");
     740           12 :             assert_eq!(exp.offset, offset);
     741           12 :             assert_eq!(exp.bytes_total, buf.bytes_total());
     742           12 :             match exp.result {
     743           12 :                 Ok(bytes) => {
     744           12 :                     assert!(bytes.len() <= buf.bytes_total());
     745           12 :                     buf.put_slice(&bytes);
     746           12 :                     (buf, Ok(bytes.len()))
     747              :                 }
     748            0 :                 Err(e) => (buf, Err(e)),
     749              :             }
     750           12 :         }
     751              :     }
     752              : 
     753              :     impl Drop for MockReadAt {
     754            8 :         fn drop(&mut self) {
     755            8 :             assert_eq!(self.expectations.len(), 0);
     756            8 :         }
     757              :     }
     758              : 
     759            2 :     #[tokio::test]
     760            2 :     async fn test_basic() {
     761            2 :         let buf = Vec::with_capacity(5);
     762            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     763            2 :             expectations: VecDeque::from(vec![Expectation {
     764            2 :                 offset: 0,
     765            2 :                 bytes_total: 5,
     766            2 :                 result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
     767            2 :             }]),
     768            2 :         }));
     769            2 :         let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     770            2 :             let mock_read_at = Arc::clone(&mock_read_at);
     771            2 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     772            2 :         })
     773            0 :         .await;
     774            2 :         assert!(res.is_ok());
     775            2 :         assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
     776              :     }
     777              : 
     778            2 :     #[tokio::test]
     779            2 :     async fn test_empty_buf_issues_no_syscall() {
     780            2 :         let buf = Vec::new();
     781            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     782            2 :             expectations: VecDeque::new(),
     783            2 :         }));
     784            2 :         let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     785            0 :             let mock_read_at = Arc::clone(&mock_read_at);
     786            0 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     787            2 :         })
     788            0 :         .await;
     789            2 :         assert!(res.is_ok());
     790              :     }
     791              : 
     792            2 :     #[tokio::test]
     793            2 :     async fn test_two_read_at_calls_needed_until_buf_filled() {
     794            2 :         let buf = Vec::with_capacity(4);
     795            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     796            2 :             expectations: VecDeque::from(vec![
     797            2 :                 Expectation {
     798            2 :                     offset: 0,
     799            2 :                     bytes_total: 4,
     800            2 :                     result: Ok(vec![b'a', b'b']),
     801            2 :                 },
     802            2 :                 Expectation {
     803            2 :                     offset: 2,
     804            2 :                     bytes_total: 2,
     805            2 :                     result: Ok(vec![b'c', b'd']),
     806            2 :                 },
     807            2 :             ]),
     808            2 :         }));
     809            4 :         let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     810            4 :             let mock_read_at = Arc::clone(&mock_read_at);
     811            4 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     812            4 :         })
     813            0 :         .await;
     814            2 :         assert!(res.is_ok());
     815            2 :         assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
     816              :     }
     817              : 
     818            2 :     #[tokio::test]
     819            2 :     async fn test_eof_before_buffer_full() {
     820            2 :         let buf = Vec::with_capacity(3);
     821            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     822            2 :             expectations: VecDeque::from(vec![
     823            2 :                 Expectation {
     824            2 :                     offset: 0,
     825            2 :                     bytes_total: 3,
     826            2 :                     result: Ok(vec![b'a']),
     827            2 :                 },
     828            2 :                 Expectation {
     829            2 :                     offset: 1,
     830            2 :                     bytes_total: 2,
     831            2 :                     result: Ok(vec![b'b']),
     832            2 :                 },
     833            2 :                 Expectation {
     834            2 :                     offset: 2,
     835            2 :                     bytes_total: 1,
     836            2 :                     result: Ok(vec![]),
     837            2 :                 },
     838            2 :             ]),
     839            2 :         }));
     840            6 :         let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     841            6 :             let mock_read_at = Arc::clone(&mock_read_at);
     842            6 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     843            6 :         })
     844            0 :         .await;
     845            2 :         let Err(err) = res else {
     846            0 :             panic!("should return an error");
     847              :         };
     848            2 :         assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
     849            2 :         assert_eq!(format!("{err}"), "failed to fill whole buffer");
     850              :         // buffer contents on error are unspecified
     851              :     }
     852              : }
     853              : 
     854              : struct FileGuard {
     855              :     slot_guard: RwLockReadGuard<'static, SlotInner>,
     856              : }
     857              : 
     858              : impl AsRef<OwnedFd> for FileGuard {
     859     15504468 :     fn as_ref(&self) -> &OwnedFd {
     860     15504468 :         // This unwrap is safe because we only create `FileGuard`s
     861     15504468 :         // if we know that the file is Some.
     862     15504468 :         self.slot_guard.file.as_ref().unwrap()
     863     15504468 :     }
     864              : }
     865              : 
     866              : impl FileGuard {
     867              :     /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
     868     15359618 :     fn with_std_file<F, R>(&self, with: F) -> R
     869     15359618 :     where
     870     15359618 :         F: FnOnce(&File) -> R,
     871     15359618 :     {
     872     15359618 :         // SAFETY:
     873     15359618 :         // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
      874     15359618 :         // - `&` usage below: `self` is `&`, hence the Rust typesystem guarantees there is no `&mut`
     875     15359618 :         let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
     876     15359618 :         let res = with(&file);
     877     15359618 :         let _ = file.into_raw_fd();
     878     15359618 :         res
     879     15359618 :     }
     880              :     /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
     881            4 :     fn with_std_file_mut<F, R>(&mut self, with: F) -> R
     882            4 :     where
     883            4 :         F: FnOnce(&mut File) -> R,
     884            4 :     {
     885            4 :         // SAFETY:
     886            4 :         // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
     887            4 :         // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
     888            4 :         let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
     889            4 :         let res = with(&mut file);
     890            4 :         let _ = file.into_raw_fd();
     891            4 :         res
     892            4 :     }
     893              : }
     894              : 
     895              : impl tokio_epoll_uring::IoFd for FileGuard {
     896       144825 :     unsafe fn as_fd(&self) -> RawFd {
     897       144825 :         let owned_fd: &OwnedFd = self.as_ref();
     898       144825 :         owned_fd.as_raw_fd()
     899       144825 :     }
     900              : }
     901              : 
     902              : #[cfg(test)]
     903              : impl VirtualFile {
     904        20200 :     pub(crate) async fn read_blk(
     905        20200 :         &self,
     906        20200 :         blknum: u32,
     907        20200 :     ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
     908        20200 :         use crate::page_cache::PAGE_SZ;
     909        20200 :         let buf = vec![0; PAGE_SZ];
     910        20200 :         let buf = self
     911        20200 :             .read_exact_at(buf, blknum as u64 * (PAGE_SZ as u64))
     912        10256 :             .await?;
     913        20200 :         Ok(crate::tenant::block_io::BlockLease::Vec(buf))
     914        20200 :     }
     915              : 
     916          224 :     async fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<(), Error> {
     917          224 :         let mut tmp = vec![0; 128];
     918              :         loop {
     919              :             let res;
     920          444 :             (tmp, res) = self.read_at(tmp, self.pos).await;
     921            2 :             match res {
     922          222 :                 Ok(0) => return Ok(()),
     923          220 :                 Ok(n) => {
     924          220 :                     self.pos += n as u64;
     925          220 :                     buf.extend_from_slice(&tmp[..n]);
     926          220 :                 }
     927            2 :                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     928            2 :                 Err(e) => return Err(e),
     929              :             }
     930              :         }
     931          224 :     }
     932              : }
     933              : 
     934              : impl Drop for VirtualFile {
     935              :     /// If a VirtualFile is dropped, close the underlying file if it was open.
     936        66379 :     fn drop(&mut self) {
     937        66379 :         let handle = self.handle.get_mut();
     938        66379 : 
     939        66379 :         fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
     940        66379 :             if slot_guard.tag == tag {
     941        66379 :                 slot.recently_used.store(false, Ordering::Relaxed);
     942        66379 :                 // for comparison, there is also a separate "close-by-replace" operation
     943        66379 :                 // for closes done on eviction.
     944        66379 :                 if let Some(fd) = slot_guard.file.take() {
     945        55667 :                     STORAGE_IO_TIME_METRIC
     946        55667 :                         .get(StorageIoOperation::Close)
     947        55667 :                         .observe_closure_duration(|| drop(fd));
     948        55667 :                 }
     949        66379 :             }
     950        66379 :         }
     951        66379 : 
     952        66379 :         // We don't have async drop so we cannot directly await the lock here.
     953        66379 :         // Instead, first do a best-effort attempt at closing the underlying
     954        66379 :         // file descriptor by using `try_write`, and if that fails, spawn
     955        66379 :         // a tokio task to do it asynchronously: we just want it to be
     956        66379 :         // cleaned up eventually.
     957        66379 :         // Most of the time, the `try_write` should succeed though,
     958        66379 :         // as we have `&mut self` access. In other words, if the slot
     959        66379 :         // is still occupied by our file, there should be no access from
     960        66379 :         // other I/O operations; the only other possible place to lock
     961        66379 :         // the slot is the clock algorithm looking for free slots.
     962        66379 :         let slot = &get_open_files().slots[handle.index];
     963        66379 :         if let Ok(slot_guard) = slot.inner.try_write() {
     964        66374 :             clean_slot(slot, slot_guard, handle.tag);
     965        66374 :         } else {
     966            5 :             let tag = handle.tag;
     967            5 :             tokio::spawn(async move {
     968            5 :                 let slot_guard = slot.inner.write().await;
     969            5 :                 clean_slot(slot, slot_guard, tag);
     970            5 :             });
     971            5 :         };
     972        66379 :     }
     973              : }
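
Because Rust has no async drop, the `Drop` impl above uses a general workaround:
try to take the lock synchronously with `try_write`, and if that fails, hand the
cleanup to a spawned task so it still happens eventually. A minimal sketch of the
same idea for a hypothetical `AsyncCleanup` type (the name and the `Arc<Mutex<_>>`
layout are assumptions for illustration; like the code above, it must be dropped
inside a Tokio runtime for `tokio::spawn` to work):

    use std::sync::Arc;
    use tokio::sync::Mutex;

    struct AsyncCleanup {
        // Stand-in for the slot contents that must be released under the lock.
        resource: Arc<Mutex<Option<std::fs::File>>>,
    }

    impl Drop for AsyncCleanup {
        fn drop(&mut self) {
            let resource = Arc::clone(&self.resource);
            match Arc::clone(&resource).try_lock_owned() {
                // Fast path: nobody holds the lock, release the resource inline.
                Ok(mut guard) => drop(guard.take()),
                // Slow path: the lock is busy; spawn a task so the resource
                // is still released eventually.
                Err(_) => {
                    tokio::spawn(async move {
                        drop(resource.lock().await.take());
                    });
                }
            }
        }
    }
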
     974              : 
     975              : impl OpenFiles {
     976          706 :     fn new(num_slots: usize) -> OpenFiles {
     977          706 :         let mut slots = Box::new(Vec::with_capacity(num_slots));
     978        61420 :         for _ in 0..num_slots {
     979        61420 :             let slot = Slot {
     980        61420 :                 recently_used: AtomicBool::new(false),
     981        61420 :                 inner: RwLock::new(SlotInner { tag: 0, file: None }),
     982        61420 :             };
     983        61420 :             slots.push(slot);
     984        61420 :         }
     985              : 
     986          706 :         OpenFiles {
     987          706 :             next: AtomicUsize::new(0),
     988          706 :             slots: Box::leak(slots),
     989          706 :         }
     990          706 :     }
     991              : }
     992              : 
     993              : ///
     994              : /// Initialize the virtual file module. This must be called once at page
     995              : /// server startup.
     996              : ///
     997              : #[cfg(not(test))]
     998          604 : pub fn init(num_slots: usize, engine: IoEngineKind) {
     999          604 :     if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
    1000            0 :         panic!("virtual_file::init called twice");
    1001          604 :     }
    1002          604 :     io_engine::init(engine);
    1003          604 :     crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
    1004          604 : }
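
As the doc comment says, `init()` must run exactly once before the first VirtualFile
is opened; a second call panics via the `OnceCell::set` check above. A hypothetical
startup sketch (the `start_pageserver` function, its parameters, and the `pageserver`
crate path are assumptions for illustration):

    use pageserver::virtual_file::{self, IoEngineKind};

    fn start_pageserver(fd_cache_size: usize, engine: IoEngineKind) {
        // Size the fd cache and pick the I/O engine before any file access happens.
        virtual_file::init(fd_cache_size, engine);
        // ... load tenant state, start listeners, etc. (elided)
    }
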
    1005              : 
    1006              : const TEST_MAX_FILE_DESCRIPTORS: usize = 10;
    1007              : 
    1008              : // Get a handle to the global slots array.
    1009     15651074 : fn get_open_files() -> &'static OpenFiles {
    1010     15651074 :     //
    1011     15651074 :     // In unit tests, page server startup doesn't happen and no one calls
    1012     15651074 :     // virtual_file::init(). Initialize it here, with a small array.
    1013     15651074 :     //
    1014     15651074 :     // This applies to the virtual file tests below, but also to all other
    1015     15651074 :     // unit tests, so the virtual file facility is always usable in
    1016     15651074 :     // unit tests.
    1017     15651074 :     //
    1018     15651074 :     if cfg!(test) {
    1019       370653 :         OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
    1020              :     } else {
    1021     15280421 :         OPEN_FILES.get().expect("virtual_file::init not called yet")
    1022              :     }
    1023     15651074 : }
    1024              : 
    1025              : #[cfg(test)]
    1026              : mod tests {
    1027              :     use super::*;
    1028              :     use rand::seq::SliceRandom;
    1029              :     use rand::thread_rng;
    1030              :     use rand::Rng;
    1031              :     use std::future::Future;
    1032              :     use std::io::Write;
    1033              :     use std::sync::Arc;
    1034              : 
    1035              :     enum MaybeVirtualFile {
    1036              :         VirtualFile(VirtualFile),
    1037              :         File(File),
    1038              :     }
    1039              : 
    1040              :     impl From<VirtualFile> for MaybeVirtualFile {
    1041            6 :         fn from(vf: VirtualFile) -> Self {
    1042            6 :             MaybeVirtualFile::VirtualFile(vf)
    1043            6 :         }
    1044              :     }
    1045              : 
    1046              :     impl MaybeVirtualFile {
    1047          404 :         async fn read_exact_at(&self, mut buf: Vec<u8>, offset: u64) -> Result<Vec<u8>, Error> {
    1048          404 :             match self {
    1049          203 :                 MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(buf, offset).await,
    1050          202 :                 MaybeVirtualFile::File(file) => file.read_exact_at(&mut buf, offset).map(|()| buf),
    1051              :             }
    1052          404 :         }
    1053            8 :         async fn write_all_at(&self, buf: &[u8], offset: u64) -> Result<(), Error> {
    1054            8 :             match self {
    1055            4 :                 MaybeVirtualFile::VirtualFile(file) => file.write_all_at(buf, offset).await,
    1056            4 :                 MaybeVirtualFile::File(file) => file.write_all_at(buf, offset),
    1057              :             }
    1058            8 :         }
    1059           36 :         async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
    1060           36 :             match self {
    1061           18 :                 MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
    1062           18 :                 MaybeVirtualFile::File(file) => file.seek(pos),
    1063              :             }
    1064           36 :         }
    1065            8 :         async fn write_all(&mut self, buf: &[u8]) -> Result<(), Error> {
    1066            8 :             match self {
    1067            4 :                 MaybeVirtualFile::VirtualFile(file) => file.write_all(buf).await,
    1068            4 :                 MaybeVirtualFile::File(file) => file.write_all(buf),
    1069              :             }
    1070            8 :         }
    1071              : 
    1072              :         // Helper function to slurp contents of a file, starting at the current position,
    1073              :         // into a string
    1074          442 :         async fn read_string(&mut self) -> Result<String, Error> {
    1075          442 :             use std::io::Read;
    1076          442 :             let mut buf = String::new();
    1077          442 :             match self {
    1078          224 :                 MaybeVirtualFile::VirtualFile(file) => {
    1079          224 :                     let mut buf = Vec::new();
    1080          226 :                     file.read_to_end(&mut buf).await?;
    1081          222 :                     return Ok(String::from_utf8(buf).unwrap());
    1082              :                 }
    1083          218 :                 MaybeVirtualFile::File(file) => {
    1084          218 :                     file.read_to_string(&mut buf)?;
    1085              :                 }
    1086              :             }
    1087          216 :             Ok(buf)
    1088          442 :         }
    1089              : 
    1090              :         // Helper function to slurp a portion of a file into a string
    1091          404 :         async fn read_string_at(&mut self, pos: u64, len: usize) -> Result<String, Error> {
    1092          404 :             let buf = vec![0; len];
    1093          404 :             let buf = self.read_exact_at(buf, pos).await?;
    1094          404 :             Ok(String::from_utf8(buf).unwrap())
    1095          404 :         }
    1096              :     }
    1097              : 
    1098            2 :     #[tokio::test]
    1099            2 :     async fn test_virtual_files() -> anyhow::Result<()> {
    1100            2 :         // The real work is done in the test_files() helper function. This
    1101            2 :         // allows us to run the same set of tests against a native File, and
    1102            2 :         // allows us to run the same set of tests against a native File and a
    1103            2 :         // but this allows us to verify that the operations return the same
    1104            2 :         // results with VirtualFiles as with native Files. (Except that with
    1105            2 :         // native files, you will run out of file descriptors if the ulimit
    1106            2 :         // is low enough.)
    1107          206 :         test_files("virtual_files", |path, open_options| async move {
    1108          206 :             let vf = VirtualFile::open_with_options(&path, &open_options).await?;
    1109          206 :             Ok(MaybeVirtualFile::VirtualFile(vf))
    1110          206 :         })
    1111          527 :         .await
    1112              :     }
    1113              : 
    1114            2 :     #[tokio::test]
    1115            2 :     async fn test_physical_files() -> anyhow::Result<()> {
    1116          206 :         test_files("physical_files", |path, open_options| async move {
    1117          206 :             Ok(MaybeVirtualFile::File({
    1118          206 :                 let owned_fd = open_options.open(path.as_std_path()).await?;
    1119          206 :                 File::from(owned_fd)
    1120              :             }))
    1121          206 :         })
    1122          104 :         .await
    1123              :     }
    1124              : 
    1125            4 :     async fn test_files<OF, FT>(testname: &str, openfunc: OF) -> anyhow::Result<()>
    1126            4 :     where
    1127            4 :         OF: Fn(Utf8PathBuf, OpenOptions) -> FT,
    1128            4 :         FT: Future<Output = Result<MaybeVirtualFile, std::io::Error>>,
    1129            4 :     {
    1130            4 :         let testdir = crate::config::PageServerConf::test_repo_dir(testname);
    1131            4 :         std::fs::create_dir_all(&testdir)?;
    1132              : 
    1133            4 :         let path_a = testdir.join("file_a");
    1134            4 :         let mut file_a = openfunc(
    1135            4 :             path_a.clone(),
    1136            4 :             OpenOptions::new()
    1137            4 :                 .write(true)
    1138            4 :                 .create(true)
    1139            4 :                 .truncate(true)
    1140            4 :                 .to_owned(),
    1141            4 :         )
    1142            4 :         .await?;
    1143            4 :         file_a.write_all(b"foobar").await?;
    1144              : 
    1145              :         // cannot read from a file opened in write-only mode
    1146            4 :         let _ = file_a.read_string().await.unwrap_err();
    1147              : 
    1148              :         // Close the file and re-open for reading
    1149            4 :         let mut file_a = openfunc(path_a, OpenOptions::new().read(true).to_owned()).await?;
    1150              : 
    1151              :         // cannot write to a file opened in read-only mode
    1152            4 :         let _ = file_a.write_all(b"bar").await.unwrap_err();
    1153            4 : 
    1154            4 :         // Try simple read
    1155            4 :         assert_eq!("foobar", file_a.read_string().await?);
    1156              : 
    1157              :         // It's positioned at the EOF now.
    1158            4 :         assert_eq!("", file_a.read_string().await?);
    1159              : 
    1160              :         // Test seeks.
    1161            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1162            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1163              : 
    1164            4 :         assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
    1165            4 :         assert_eq!("ar", file_a.read_string().await?);
    1166              : 
    1167            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1168            4 :         assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
    1169            4 :         assert_eq!("bar", file_a.read_string().await?);
    1170              : 
    1171            4 :         assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
    1172            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1173              : 
    1174              :         // Test erroneous seeks to before byte 0
    1175            4 :         file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
    1176            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1177            4 :         file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();
    1178            4 : 
    1179            4 :         // the erroneous seek should have left the position unchanged
    1180            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1181              : 
    1182              :         // Create another test file, and try FileExt functions on it.
    1183            4 :         let path_b = testdir.join("file_b");
    1184            4 :         let mut file_b = openfunc(
    1185            4 :             path_b.clone(),
    1186            4 :             OpenOptions::new()
    1187            4 :                 .read(true)
    1188            4 :                 .write(true)
    1189            4 :                 .create(true)
    1190            4 :                 .truncate(true)
    1191            4 :                 .to_owned(),
    1192            4 :         )
    1193            2 :         .await?;
    1194            4 :         file_b.write_all_at(b"BAR", 3).await?;
    1195            4 :         file_b.write_all_at(b"FOO", 0).await?;
    1196              : 
    1197            4 :         assert_eq!(file_b.read_string_at(2, 3).await?, "OBA");
    1198              : 
    1199              :         // Open a lot of files, enough to cause some evictions. (Or to be precise,
    1200              :         // open the same file many times. The effect is the same.)
    1201              :         //
    1202              :         // leave file_a positioned at offset 1 before we start
    1203            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1204              : 
    1205            4 :         let mut vfiles = Vec::new();
    1206          404 :         for _ in 0..100 {
    1207          400 :             let mut vfile =
    1208          400 :                 openfunc(path_b.clone(), OpenOptions::new().read(true).to_owned()).await?;
    1209          400 :             assert_eq!("FOOBAR", vfile.read_string().await?);
    1210          400 :             vfiles.push(vfile);
    1211              :         }
    1212              : 
    1213              :         // make sure we opened enough files to definitely cause evictions.
    1214            4 :         assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);
    1215              : 
    1216              :         // The underlying file descriptor for 'file_a' should be closed now. Try to read
    1217              :         // from it again. We left the file positioned at offset 1 above.
    1218            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1219              : 
    1220              :         // Check that all the other FDs still work too. Use them in random order for
    1221              :         // good measure.
    1222            4 :         vfiles.as_mut_slice().shuffle(&mut thread_rng());
    1223          400 :         for vfile in vfiles.iter_mut() {
    1224          400 :             assert_eq!("OOBAR", vfile.read_string_at(1, 5).await?);
    1225              :         }
    1226              : 
    1227            4 :         Ok(())
    1228            4 :     }
    1229              : 
    1230              :     /// Test using VirtualFiles from many threads concurrently. This tests both using
    1231              :     /// a lot of VirtualFiles concurrently (causing evictions) and using the same
    1232              :     /// VirtualFile from multiple threads concurrently.
    1233            2 :     #[tokio::test]
    1234            2 :     async fn test_vfile_concurrency() -> Result<(), Error> {
    1235            2 :         const SIZE: usize = 8 * 1024;
    1236            2 :         const VIRTUAL_FILES: usize = 100;
    1237            2 :         const THREADS: usize = 100;
    1238            2 :         const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];
    1239            2 : 
    1240            2 :         let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
    1241            2 :         std::fs::create_dir_all(&testdir)?;
    1242              : 
    1243              :         // Create a test file.
    1244            2 :         let test_file_path = testdir.join("concurrency_test_file");
    1245              :         {
    1246            2 :             let file = File::create(&test_file_path)?;
    1247            2 :             file.write_all_at(&SAMPLE, 0)?;
    1248              :         }
    1249              : 
    1250              :         // Open the file many times.
    1251            2 :         let mut files = Vec::new();
    1252          202 :         for _ in 0..VIRTUAL_FILES {
    1253          200 :             let f = VirtualFile::open_with_options(&test_file_path, OpenOptions::new().read(true))
    1254          101 :                 .await?;
    1255          200 :             files.push(f);
    1256              :         }
    1257            2 :         let files = Arc::new(files);
    1258            2 : 
    1259            2 :         // Launch many threads, and use the virtual files concurrently in random order.
    1260            2 :         let rt = tokio::runtime::Builder::new_multi_thread()
    1261            2 :             .worker_threads(THREADS)
    1262            2 :             .thread_name("test_vfile_concurrency thread")
    1263            2 :             .build()
    1264            2 :             .unwrap();
    1265            2 :         let mut hdls = Vec::new();
    1266          202 :         for _threadno in 0..THREADS {
    1267          200 :             let files = files.clone();
    1268          200 :             let hdl = rt.spawn(async move {
    1269          200 :                 let mut buf = vec![0u8; SIZE];
    1270          200 :                 let mut rng = rand::rngs::OsRng;
    1271       200000 :                 for _ in 1..1000 {
    1272       199800 :                     let f = &files[rng.gen_range(0..files.len())];
    1273       569784 :                     buf = f.read_exact_at(buf, 0).await.unwrap();
    1274       199800 :                     assert!(buf == SAMPLE);
    1275              :                 }
    1276          200 :             });
    1277          200 :             hdls.push(hdl);
    1278          200 :         }
    1279          202 :         for hdl in hdls {
    1280          200 :             hdl.await?;
    1281              :         }
    1282            2 :         std::mem::forget(rt);
    1283            2 : 
    1284            2 :         Ok(())
    1285              :     }
    1286              : 
    1287            2 :     #[tokio::test]
    1288            2 :     async fn test_atomic_overwrite_basic() {
    1289            2 :         let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
    1290            2 :         std::fs::create_dir_all(&testdir).unwrap();
    1291            2 : 
    1292            2 :         let path = testdir.join("myfile");
    1293            2 :         let tmp_path = testdir.join("myfile.tmp");
    1294            2 : 
    1295            2 :         VirtualFile::crashsafe_overwrite(&path, &tmp_path, b"foo")
    1296            3 :             .await
    1297            2 :             .unwrap();
    1298            2 :         let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
    1299            2 :         let post = file.read_string().await.unwrap();
    1300            2 :         assert_eq!(post, "foo");
    1301            2 :         assert!(!tmp_path.exists());
    1302            2 :         drop(file);
    1303            2 : 
    1304            2 :         VirtualFile::crashsafe_overwrite(&path, &tmp_path, b"bar")
    1305            2 :             .await
    1306            2 :             .unwrap();
    1307            2 :         let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
    1308            2 :         let post = file.read_string().await.unwrap();
    1309            2 :         assert_eq!(post, "bar");
    1310            2 :         assert!(!tmp_path.exists());
    1311            2 :         drop(file);
    1312              :     }
    1313              : 
    1314            2 :     #[tokio::test]
    1315            2 :     async fn test_atomic_overwrite_preexisting_tmp() {
    1316            2 :         let testdir =
    1317            2 :             crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
    1318            2 :         std::fs::create_dir_all(&testdir).unwrap();
    1319            2 : 
    1320            2 :         let path = testdir.join("myfile");
    1321            2 :         let tmp_path = testdir.join("myfile.tmp");
    1322            2 : 
    1323            2 :         std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
    1324            2 :         assert!(tmp_path.exists());
    1325              : 
    1326            2 :         VirtualFile::crashsafe_overwrite(&path, &tmp_path, b"foo")
    1327            3 :             .await
    1328            2 :             .unwrap();
    1329              : 
    1330            2 :         let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
    1331            2 :         let post = file.read_string().await.unwrap();
    1332            2 :         assert_eq!(post, "foo");
    1333            2 :         assert!(!tmp_path.exists());
    1334            2 :         drop(file);
    1335              :     }
    1336              : }
        

Generated by: LCOV version 2.1-beta