LCOV - code coverage report
Current view: top level - pageserver/src - virtual_file.rs (source / functions)
Test:      aca8877be6ceba750c1be359ed71bc1799d52b30.info
Test Date: 2024-02-14 18:05:35
Coverage:  Lines:     93.3 % (796 of 853 hit)
           Functions: 90.4 % (151 of 167 hit)

            Line data    Source code
       1              : //!
       2              : //! VirtualFile is like a normal File, but it's not bound directly to
       3              : //! a file descriptor. Instead, the file is opened when it's read from,
       4              : //! and if too many files are open globally in the system, least-recently
       5              : //! used ones are closed.
       6              : //!
       7              : //! To track which files have been recently used, we use the clock algorithm
       8              : //! with a 'recently_used' flag on each slot.
       9              : //!
      10              : //! This is similar to PostgreSQL's virtual file descriptor facility in
      11              : //! src/backend/storage/file/fd.c
      12              : //!
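For orientation, a minimal usage sketch from the caller's side (not part of this file; it assumes `virtual_file::init()` has already run and `path` is some hypothetical `Utf8Path`). The point is that the underlying file descriptor may be evicted and transparently re-opened between the two calls:

    async fn read_first_page(path: &Utf8Path) -> Result<Vec<u8>, std::io::Error> {
        // Opening registers the fd in the global descriptor cache (OPEN_FILES).
        let file = VirtualFile::open(path).await?;
        // read_exact_at takes ownership of the buffer and returns it once filled.
        let buf = vec![0u8; 8192];
        let buf = file.read_exact_at(buf, 0).await?;
        Ok(buf)
    }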
      13              : use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC};
      14              : 
      15              : use crate::page_cache::PageWriteGuard;
      16              : use crate::tenant::TENANTS_SEGMENT_NAME;
      17              : use camino::{Utf8Path, Utf8PathBuf};
      18              : use once_cell::sync::OnceCell;
      19              : use pageserver_api::shard::TenantShardId;
      20              : use std::fs::{self, File};
      21              : use std::io::{Error, ErrorKind, Seek, SeekFrom};
      22              : use tokio_epoll_uring::{BoundedBuf, IoBuf, IoBufMut, Slice};
      23              : 
      24              : use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
      25              : use std::os::unix::fs::FileExt;
      26              : use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
      27              : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
      28              : use tokio::time::Instant;
      29              : 
      30              : pub use pageserver_api::models::virtual_file as api;
      31              : pub(crate) mod io_engine;
      32              : mod open_options;
      33              : pub(crate) use io_engine::IoEngineKind;
      34              : pub(crate) use open_options::*;
      35              : 
      36              : ///
      37              : /// A virtual file descriptor. You can use this just like std::fs::File, but internally
      38              : /// the underlying file is closed if the system is low on file descriptors,
      39              : /// and re-opened when it's accessed again.
      40              : ///
      41              : /// Like with std::fs::File, multiple threads can read/write the file concurrently,
       42              : /// holding just a shared reference to the same VirtualFile, using the read_at() / write_at()
      43              : /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
      44              : /// require a mutable reference, because they modify the "current position".
      45              : ///
      46              : /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
       47              : /// slot that 'handle' points to, if the underlying file is currently open. If it's not
       48              : /// currently open, the 'handle' can still point to the slot where it was last kept. The
       49              : /// 'tag' field is used to detect whether the handle is still valid or not.
      50              : ///
      51            0 : #[derive(Debug)]
      52              : pub struct VirtualFile {
      53              :     /// Lazy handle to the global file descriptor cache. The slot that this points to
      54              :     /// might contain our File, or it may be empty, or it may contain a File that
      55              :     /// belongs to a different VirtualFile.
      56              :     handle: RwLock<SlotHandle>,
      57              : 
      58              :     /// Current file position
      59              :     pos: u64,
      60              : 
      61              :     /// File path and options to use to open it.
      62              :     ///
      63              :     /// Note: this only contains the options needed to re-open it. For example,
      64              :     /// if a new file is created, we only pass the create flag when it's initially
      65              :     /// opened, in the VirtualFile::create() function, and strip the flag before
      66              :     /// storing it here.
      67              :     pub path: Utf8PathBuf,
      68              :     open_options: OpenOptions,
      69              : 
       70              :     // These are strings because we only use them for metrics, and those expect strings.
      71              :     // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
      72              :     // strings.
      73              :     tenant_id: String,
      74              :     shard_id: String,
      75              :     timeline_id: String,
      76              : }
      77              : 
      78       304215 : #[derive(Debug, PartialEq, Clone, Copy)]
      79              : struct SlotHandle {
      80              :     /// Index into OPEN_FILES.slots
      81              :     index: usize,
      82              : 
       83              :     /// Value of 'tag' in the slot. If the slot's tag doesn't match, then the slot has
      84              :     /// been recycled and no longer contains the FD for this virtual file.
      85              :     tag: u64,
      86              : }
      87              : 
      88              : /// OPEN_FILES is the global array that holds the physical file descriptors that
      89              : /// are currently open. Each slot in the array is protected by a separate lock,
      90              : /// so that different files can be accessed independently. The lock must be held
       91              : /// in write mode to replace the slot with a different file, but read mode
      92              : /// is enough to operate on the file, whether you're reading or writing to it.
      93              : ///
      94              : /// OPEN_FILES starts in uninitialized state, and it's initialized by
      95              : /// the virtual_file::init() function. It must be called exactly once at page
      96              : /// server startup.
      97              : static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();
      98              : 
      99              : struct OpenFiles {
     100              :     slots: &'static [Slot],
     101              : 
     102              :     /// clock arm for the clock algorithm
     103              :     next: AtomicUsize,
     104              : }
     105              : 
     106              : struct Slot {
     107              :     inner: RwLock<SlotInner>,
     108              : 
     109              :     /// has this file been used since last clock sweep?
     110              :     recently_used: AtomicBool,
     111              : }
     112              : 
     113              : struct SlotInner {
     114              :     /// Counter that's incremented every time a different file is stored here.
     115              :     /// To avoid the ABA problem.
     116              :     tag: u64,
     117              : 
     118              :     /// the underlying file
     119              :     file: Option<OwnedFd>,
     120              : }
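As a reading aid, the validity check that guards reuse of a cached handle can be written as a standalone predicate (a hypothetical helper for illustration; the real check lives inline in `VirtualFile::lock_file` below):

    fn slot_still_holds_our_file(slot_inner: &SlotInner, handle: SlotHandle) -> bool {
        // The cached handle is only usable if the slot has not been recycled for a
        // different file since the handle was stored (tag match) and a file is present.
        slot_inner.tag == handle.tag && slot_inner.file.is_some()
    }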
     121              : 
     122              : /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
     123              : struct PageWriteGuardBuf {
     124              :     page: PageWriteGuard<'static>,
     125              :     init_up_to: usize,
     126              : }
     127              : // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
     128              : // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
     129              : unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
     130      4925336 :     fn stable_ptr(&self) -> *const u8 {
     131      4925336 :         self.page.as_ptr()
     132      4925336 :     }
     133     14776009 :     fn bytes_init(&self) -> usize {
     134     14776009 :         self.init_up_to
     135     14776009 :     }
     136      4925338 :     fn bytes_total(&self) -> usize {
     137      4925338 :         self.page.len()
     138      4925338 :     }
     139              : }
     140              : // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
     141              : // hence it's safe to hand out the `stable_mut_ptr()`.
     142              : unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
     143      4925337 :     fn stable_mut_ptr(&mut self) -> *mut u8 {
     144      4925337 :         self.page.as_mut_ptr()
     145      4925337 :     }
     146              : 
     147      4925337 :     unsafe fn set_init(&mut self, pos: usize) {
     148      4925337 :         assert!(pos <= self.page.len());
     149      4925337 :         self.init_up_to = pos;
     150      4925337 :     }
     151              : }
     152              : 
     153              : impl OpenFiles {
     154              :     /// Find a slot to use, evicting an existing file descriptor if needed.
     155              :     ///
      156              :     /// On return, we hold a lock on the slot, its 'tag' has been updated, and
      157              :     /// recently_used has been set. It's all ready for reuse.
     158       259472 :     async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
     159       259472 :         //
     160       259472 :         // Run the clock algorithm to find a slot to replace.
     161       259472 :         //
     162       259472 :         let num_slots = self.slots.len();
     163       259472 :         let mut retries = 0;
     164              :         let mut slot;
     165              :         let mut slot_guard;
     166              :         let index;
     167      2787480 :         loop {
     168      2787480 :             let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
     169      2787480 :             slot = &self.slots[next];
     170      2787480 : 
     171      2787480 :             // If the recently_used flag on this slot is set, continue the clock
     172      2787480 :             // sweep. Otherwise try to use this slot. If we cannot acquire the
     173      2787480 :             // lock, also continue the clock sweep.
     174      2787480 :             //
     175      2787480 :             // We only continue in this manner for a while, though. If we loop
     176      2787480 :             // through the array twice without finding a victim, just pick the
     177      2787480 :             // next slot and wait until we can reuse it. This way, we avoid
     178      2787480 :             // spinning in the extreme case that all the slots are busy with an
     179      2787480 :             // I/O operation.
     180      2787480 :             if retries < num_slots * 2 {
     181      2682253 :                 if !slot.recently_used.swap(false, Ordering::Release) {
     182      2435337 :                     if let Ok(guard) = slot.inner.try_write() {
     183       154245 :                         slot_guard = guard;
     184       154245 :                         index = next;
     185       154245 :                         break;
     186      2281092 :                     }
     187       246916 :                 }
     188      2528008 :                 retries += 1;
     189              :             } else {
     190       105227 :                 slot_guard = slot.inner.write().await;
     191       105227 :                 index = next;
     192       105227 :                 break;
     193              :             }
     194              :         }
     195              : 
     196              :         //
     197              :         // We now have the victim slot locked. If it was in use previously, close the
     198              :         // old file.
     199              :         //
     200       259472 :         if let Some(old_file) = slot_guard.file.take() {
     201       216342 :             // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
     202       216342 :             // distinguish the two.
     203       216342 :             STORAGE_IO_TIME_METRIC
     204       216342 :                 .get(StorageIoOperation::CloseByReplace)
     205       216342 :                 .observe_closure_duration(|| drop(old_file));
     206       216342 :         }
     207              : 
     208              :         // Prepare the slot for reuse and return it
     209       259472 :         slot_guard.tag += 1;
     210       259472 :         slot.recently_used.store(true, Ordering::Relaxed);
     211       259472 :         (
     212       259472 :             SlotHandle {
     213       259472 :                 index,
     214       259472 :                 tag: slot_guard.tag,
     215       259472 :             },
     216       259472 :             slot_guard,
     217       259472 :         )
     218       259472 :     }
     219              : }
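To make the sweep bound concrete: with, say, 100 slots, a slot becomes the victim the first time it is probed with `recently_used` already cleared and its lock immediately available; after 200 unsuccessful probes (two full sweeps) the search stops spinning and blocks on whichever slot comes up next. A simplified restatement of that cut-over, as a hypothetical helper:

    fn should_block_instead_of_spinning(retries: usize, num_slots: usize) -> bool {
        // After two full sweeps without finding a victim, stop spinning and wait on
        // the next slot's lock rather than looping while every slot is busy with I/O.
        retries >= num_slots * 2
    }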
     220              : 
      221              : /// Identify error types that should always terminate the process.  Other
      222              : /// error types may be eligible for retry.
     223            0 : pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
     224            0 :     use nix::errno::Errno::*;
     225            0 :     match e.raw_os_error().map(nix::errno::from_i32) {
     226              :         Some(EIO) => {
     227              :             // Terminate on EIO because we no longer trust the device to store
     228              :             // data safely, or to uphold persistence guarantees on fsync.
     229            0 :             true
     230              :         }
     231              :         Some(EROFS) => {
     232              :             // Terminate on EROFS because a filesystem is usually remounted
     233              :             // readonly when it has experienced some critical issue, so the same
     234              :             // logic as EIO applies.
     235            0 :             true
     236              :         }
     237              :         Some(EACCES) => {
      238              :             // Terminate on EACCES because we should always have permissions
     239              :             // for our own data dir: if we don't, then we can't do our job and
     240              :             // need administrative intervention to fix permissions.  Terminating
     241              :             // is the best way to make sure we stop cleanly rather than going
     242              :             // into infinite retry loops, and will make it clear to the outside
     243              :             // world that we need help.
     244            0 :             true
     245              :         }
     246              :         _ => {
      247              :             // Treat all other local file I/O errors as retryable.  This includes:
     248              :             // - ENOSPC: we stay up and wait for eviction to free some space
     249              :             // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
      250              :             // - WriteZero, Interrupted: these are used internally by VirtualFile
     251            0 :             false
     252              :         }
     253              :     }
     254            0 : }
     255              : 
     256              : /// Call this when the local filesystem gives us an error with an external
      257              : /// cause: this includes EIO, EROFS, and EACCES: all these indicate either
     258              : /// bad storage or bad configuration, and we can't fix that from inside
     259              : /// a running process.
     260            0 : pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
      261            0 :     tracing::error!("Fatal I/O error: {e}: {context}");
     262            0 :     std::process::abort();
     263              : }
     264              : 
     265              : pub(crate) trait MaybeFatalIo<T> {
     266              :     fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
     267              :     fn fatal_err(self, context: &str) -> T;
     268              : }
     269              : 
     270              : impl<T> MaybeFatalIo<T> for std::io::Result<T> {
     271              :     /// Terminate the process if the result is an error of a fatal type, else pass it through
     272              :     ///
      273              :     /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc, but
     274              :     /// not on ENOSPC.
     275          115 :     fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
     276          115 :         if let Err(e) = &self {
     277            0 :             if is_fatal_io_error(e) {
     278            0 :                 on_fatal_io_error(e, context);
     279            0 :             }
     280          115 :         }
     281          115 :         self
     282          115 :     }
     283              : 
     284              :     /// Terminate the process on any I/O error.
     285              :     ///
     286              :     /// This is appropriate for reads on files that we know exist: they should always work.
     287         1401 :     fn fatal_err(self, context: &str) -> T {
     288         1401 :         match self {
     289         1401 :             Ok(v) => v,
     290            0 :             Err(e) => {
     291            0 :                 on_fatal_io_error(&e, context);
     292              :             }
     293              :         }
     294         1401 :     }
     295              : }
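A hedged usage sketch of the two helpers (the path and payload are made up for illustration); it follows the split described above: writes pass retryable errors such as ENOSPC back to the caller, while reads of files we know exist terminate on any error:

    fn io_error_policy_example() -> std::io::Result<()> {
        // A write: terminate on EIO/EROFS/EACCES, but surface e.g. ENOSPC to the caller.
        std::fs::write("datadir/example_file", b"payload").maybe_fatal_err("write example_file")?;
        // A read of the file we just wrote, so it is known to exist: any failure is fatal.
        let _bytes = std::fs::read("datadir/example_file").fatal_err("read example_file");
        Ok(())
    }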
     296              : 
     297              : /// Observe duration for the given storage I/O operation
     298              : ///
     299              : /// Unlike `observe_closure_duration`, this supports async,
     300              : /// where "support" means that we measure wall clock time.
     301              : macro_rules! observe_duration {
     302              :     ($op:expr, $($body:tt)*) => {{
     303              :         let instant = Instant::now();
     304              :         let result = $($body)*;
     305              :         let elapsed = instant.elapsed().as_secs_f64();
     306              :         STORAGE_IO_TIME_METRIC
     307              :             .get($op)
     308              :             .observe(elapsed);
     309              :         result
     310              :     }}
     311              : }
     312              : 
     313              : macro_rules! with_file {
     314              :     ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
     315              :         let $ident = $this.lock_file().await?;
     316              :         observe_duration!($op, $($body)*)
     317              :     }};
     318              :     ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
     319              :         let mut $ident = $this.lock_file().await?;
     320              :         observe_duration!($op, $($body)*)
     321              :     }};
     322              : }
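Roughly what a `with_file!` call expands to, hand-expanded for the `sync_all` call site below (a sketch of the macro output, not code that appears in this file):

    // with_file!(self, StorageIoOperation::Fsync, |file_guard| file_guard
    //     .with_std_file(|std_file| std_file.sync_all()))
    // expands, approximately, to:
    let file_guard = self.lock_file().await?;
    let instant = Instant::now();
    let result = file_guard.with_std_file(|std_file| std_file.sync_all());
    STORAGE_IO_TIME_METRIC
        .get(StorageIoOperation::Fsync)
        .observe(instant.elapsed().as_secs_f64());
    result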
     323              : 
     324              : impl VirtualFile {
     325              :     /// Open a file in read-only mode. Like File::open.
     326        33410 :     pub async fn open(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
     327        33410 :         Self::open_with_options(path, OpenOptions::new().read(true)).await
     328        33410 :     }
     329              : 
     330              :     /// Create a new file for writing. If the file exists, it will be truncated.
     331              :     /// Like File::create.
     332        15857 :     pub async fn create(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
     333        15857 :         Self::open_with_options(
     334        15857 :             path,
     335        15857 :             OpenOptions::new().write(true).create(true).truncate(true),
     336        15857 :         )
     337          232 :         .await
     338        15857 :     }
     339              : 
     340              :     /// Open a file with given options.
     341              :     ///
     342              :     /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
      343              :     /// they will also be applied when the file is subsequently re-opened, not only
     344              :     /// on the first time. Make sure that's sane!
     345        61790 :     pub async fn open_with_options(
     346        61790 :         path: &Utf8Path,
     347        61790 :         open_options: &OpenOptions,
     348        61790 :     ) -> Result<VirtualFile, std::io::Error> {
     349        61790 :         let path_str = path.to_string();
     350        61790 :         let parts = path_str.split('/').collect::<Vec<&str>>();
     351        61790 :         let (tenant_id, shard_id, timeline_id) =
     352        61790 :             if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
     353        61330 :                 let tenant_shard_part = parts[parts.len() - 4];
     354        61330 :                 let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
     355        61330 :                     Ok(tenant_shard_id) => (
     356        61330 :                         tenant_shard_id.tenant_id.to_string(),
     357        61330 :                         format!("{}", tenant_shard_id.shard_slug()),
     358        61330 :                     ),
     359              :                     Err(_) => {
     360              :                         // Malformed path: this ID is just for observability, so tolerate it
     361              :                         // and pass through
     362            0 :                         (tenant_shard_part.to_string(), "*".to_string())
     363              :                     }
     364              :                 };
     365        61330 :                 (tenant_id, shard_id, parts[parts.len() - 2].to_string())
     366              :             } else {
     367          460 :                 ("*".to_string(), "*".to_string(), "*".to_string())
     368              :             };
     369        61790 :         let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;
     370              : 
     371              :         // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
     372              :         // where our caller doesn't get to use the returned VirtualFile before its
     373              :         // slot gets re-used by someone else.
     374        61790 :         let file = observe_duration!(StorageIoOperation::Open, {
     375        61790 :             open_options.open(path.as_std_path()).await?
     376              :         });
     377              : 
     378              :         // Strip all options other than read and write.
     379              :         //
     380              :         // It would perhaps be nicer to check just for the read and write flags
     381              :         // explicitly, but OpenOptions doesn't contain any functions to read flags,
     382              :         // only to set them.
     383        61790 :         let mut reopen_options = open_options.clone();
     384        61790 :         reopen_options.create(false);
     385        61790 :         reopen_options.create_new(false);
     386        61790 :         reopen_options.truncate(false);
     387        61790 : 
     388        61790 :         let vfile = VirtualFile {
     389        61790 :             handle: RwLock::new(handle),
     390        61790 :             pos: 0,
     391        61790 :             path: path.to_path_buf(),
     392        61790 :             open_options: reopen_options,
     393        61790 :             tenant_id,
     394        61790 :             shard_id,
     395        61790 :             timeline_id,
     396        61790 :         };
     397        61790 : 
     398        61790 :         // TODO: Under pressure, it's likely the slot will get re-used and
     399        61790 :         // the underlying file closed before they get around to using it.
     400        61790 :         // => https://github.com/neondatabase/neon/issues/6065
     401        61790 :         slot_guard.file.replace(file);
     402        61790 : 
     403        61790 :         Ok(vfile)
     404        61790 :     }
     405              : 
     406              :     /// Async version of [`::utils::crashsafe::overwrite`].
     407              :     ///
     408              :     /// # NB:
     409              :     ///
      410              :     /// Doesn't actually use the [`VirtualFile`] file descriptor cache, but
      411              :     /// it did at an earlier time.
      412              :     /// It will also use this module's [`io_engine`] in the near future, so it is left here.
     413         8859 :     pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
     414         8859 :         final_path: Utf8PathBuf,
     415         8859 :         tmp_path: Utf8PathBuf,
     416         8859 :         content: B,
     417         8859 :     ) -> std::io::Result<()> {
     418         8859 :         // TODO: use tokio_epoll_uring if configured as `io_engine`.
     419         8859 :         // See https://github.com/neondatabase/neon/issues/6663
     420         8859 : 
     421         8859 :         tokio::task::spawn_blocking(move || {
     422         8859 :             let slice_storage;
     423         8859 :             let content_len = content.bytes_init();
     424         8859 :             let content = if content.bytes_init() > 0 {
     425         8859 :                 slice_storage = Some(content.slice(0..content_len));
     426         8859 :                 slice_storage.as_deref().expect("just set it to Some()")
     427              :             } else {
     428            0 :                 &[]
     429              :             };
     430         8859 :             utils::crashsafe::overwrite(&final_path, &tmp_path, content)
     431         8859 :         })
     432         8896 :         .await
     433         8859 :         .expect("blocking task is never aborted")
     434         8859 :     }
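An illustrative call (the paths are hypothetical): the content is staged at the temporary path before replacing the file at the final path, so a crash does not leave a half-written final file:

    async fn overwrite_config_example(content: Vec<u8>) -> std::io::Result<()> {
        VirtualFile::crashsafe_overwrite(
            Utf8PathBuf::from("some_dir/config"),
            Utf8PathBuf::from("some_dir/config.tmp"),
            content,
        )
        .await
    }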
     435              : 
     436              :     /// Call File::sync_all() on the underlying File.
     437        22304 :     pub async fn sync_all(&self) -> Result<(), Error> {
     438        22304 :         with_file!(self, StorageIoOperation::Fsync, |file_guard| file_guard
     439        22304 :             .with_std_file(|std_file| std_file.sync_all()))
     440        22304 :     }
     441              : 
     442        22304 :     pub async fn metadata(&self) -> Result<fs::Metadata, Error> {
     443        22304 :         with_file!(self, StorageIoOperation::Metadata, |file_guard| file_guard
     444        22304 :             .with_std_file(|std_file| std_file.metadata()))
     445        22304 :     }
     446              : 
     447              :     /// Helper function internal to `VirtualFile` that looks up the underlying File,
      448              :     /// opens it and evicts some other File if necessary. The operation passed in
      449              :     /// via the `with_file!` macro is assumed to be a function available on the physical `File`.
      450              :     ///
      451              :     /// We do it via a macro because Rust doesn't support async closures that
      452              :     /// take parameters with lifetimes.
     453     13854409 :     async fn lock_file(&self) -> Result<FileGuard, Error> {
     454     13854409 :         let open_files = get_open_files();
     455              : 
     456       197682 :         let mut handle_guard = {
     457              :             // Read the cached slot handle, and see if the slot that it points to still
     458              :             // contains our File.
     459              :             //
     460              :             // We only need to hold the handle lock while we read the current handle. If
     461              :             // another thread closes the file and recycles the slot for a different file,
     462              :             // we will notice that the handle we read is no longer valid and retry.
     463     13854409 :             let mut handle = *self.handle.read().await;
     464     13960942 :             loop {
     465     13960942 :                 // Check if the slot contains our File
     466     13960942 :                 {
     467     13960942 :                     let slot = &open_files.slots[handle.index];
     468     13960942 :                     let slot_guard = slot.inner.read().await;
     469     13960942 :                     if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
     470              :                         // Found a cached file descriptor.
     471     13656727 :                         slot.recently_used.store(true, Ordering::Relaxed);
     472     13656727 :                         return Ok(FileGuard { slot_guard });
     473       304215 :                     }
     474              :                 }
     475              : 
     476              :                 // The slot didn't contain our File. We will have to open it ourselves,
     477              :                 // but before that, grab a write lock on handle in the VirtualFile, so
     478              :                 // that no other thread will try to concurrently open the same file.
     479       304215 :                 let handle_guard = self.handle.write().await;
     480              : 
     481              :                 // If another thread changed the handle while we were not holding the lock,
     482              :                 // then the handle might now be valid again. Loop back to retry.
     483       304215 :                 if *handle_guard != handle {
     484       106533 :                     handle = *handle_guard;
     485       106533 :                     continue;
     486       197682 :                 }
     487       197682 :                 break handle_guard;
     488              :             }
     489              :         };
     490              : 
     491              :         // We need to open the file ourselves. The handle in the VirtualFile is
     492              :         // now locked in write-mode. Find a free slot to put it in.
     493       197682 :         let (handle, mut slot_guard) = open_files.find_victim_slot().await;
     494              : 
     495              :         // Re-open the physical file.
      496              :         // NB: we use StorageIoOperation::OpenAfterReplace for this to distinguish this
     497              :         // case from StorageIoOperation::Open. This helps with identifying thrashing
     498              :         // of the virtual file descriptor cache.
     499       197682 :         let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
     500       197682 :             self.open_options.open(self.path.as_std_path()).await?
     501              :         });
     502              : 
     503              :         // Store the File in the slot and update the handle in the VirtualFile
     504              :         // to point to it.
     505       197682 :         slot_guard.file.replace(file);
     506       197682 : 
     507       197682 :         *handle_guard = handle;
     508       197682 : 
     509       197682 :         return Ok(FileGuard {
     510       197682 :             slot_guard: slot_guard.downgrade(),
     511       197682 :         });
     512     13854409 :     }
     513              : 
     514            0 :     pub fn remove(self) {
     515            0 :         let path = self.path.clone();
     516            0 :         drop(self);
     517            0 :         std::fs::remove_file(path).expect("failed to remove the virtual file");
     518            0 :     }
     519              : 
     520        66934 :     pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
     521        66934 :         match pos {
     522        66924 :             SeekFrom::Start(offset) => {
     523        66924 :                 self.pos = offset;
     524        66924 :             }
     525            4 :             SeekFrom::End(offset) => {
     526            4 :                 self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
     527            4 :                     .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
     528              :             }
     529            6 :             SeekFrom::Current(offset) => {
     530            6 :                 let pos = self.pos as i128 + offset as i128;
     531            6 :                 if pos < 0 {
     532            2 :                     return Err(Error::new(
     533            2 :                         ErrorKind::InvalidInput,
     534            2 :                         "offset would be negative",
     535            2 :                     ));
     536            4 :                 }
     537            4 :                 if pos > u64::MAX as i128 {
     538            0 :                     return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
     539            4 :                 }
     540            4 :                 self.pos = pos as u64;
     541              :             }
     542              :         }
     543        66930 :         Ok(self.pos)
     544        66934 :     }
     545              : 
     546      5145545 :     pub async fn read_exact_at<B>(&self, buf: B, offset: u64) -> Result<B, Error>
     547      5145545 :     where
     548      5145545 :         B: IoBufMut + Send,
     549      5145545 :     {
     550      5145545 :         let (buf, res) =
     551      5145545 :             read_exact_at_impl(buf, offset, |buf, offset| self.read_at(buf, offset)).await;
     552      5145545 :         res.map(|()| buf)
     553      5145545 :     }
     554              : 
     555              :     /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
     556      4925338 :     pub async fn read_exact_at_page(
     557      4925338 :         &self,
     558      4925338 :         page: PageWriteGuard<'static>,
     559      4925338 :         offset: u64,
     560      4925343 :     ) -> Result<PageWriteGuard<'static>, Error> {
     561      4925343 :         let buf = PageWriteGuardBuf {
     562      4925343 :             page,
     563      4925343 :             init_up_to: 0,
     564      4925343 :         };
     565      4925343 :         let res = self.read_exact_at(buf, offset).await;
     566      4925343 :         res.map(|PageWriteGuardBuf { page, .. }| page)
     567      4925343 :             .map_err(|e| Error::new(ErrorKind::Other, e))
     568      4925343 :     }
     569              : 
     570              :     // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
     571      3696832 :     pub async fn write_all_at<B: BoundedBuf>(
     572      3696832 :         &self,
     573      3696832 :         buf: B,
     574      3696832 :         mut offset: u64,
     575      3696832 :     ) -> (B::Buf, Result<(), Error>) {
     576      3696832 :         let buf_len = buf.bytes_init();
     577      3696832 :         if buf_len == 0 {
     578            0 :             return (Slice::into_inner(buf.slice_full()), Ok(()));
     579      3696832 :         }
     580      3696832 :         let mut buf = buf.slice(0..buf_len);
     581      7393664 :         while !buf.is_empty() {
     582              :             // TODO: push `buf` further down
     583      3696832 :             match self.write_at(&buf, offset).await {
     584              :                 Ok(0) => {
     585            0 :                     return (
     586            0 :                         Slice::into_inner(buf),
     587            0 :                         Err(Error::new(
     588            0 :                             std::io::ErrorKind::WriteZero,
     589            0 :                             "failed to write whole buffer",
     590            0 :                         )),
     591            0 :                     );
     592              :                 }
     593      3696832 :                 Ok(n) => {
     594      3696832 :                     buf = buf.slice(n..);
     595      3696832 :                     offset += n as u64;
     596      3696832 :                 }
     597            0 :                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     598            0 :                 Err(e) => return (Slice::into_inner(buf), Err(e)),
     599              :             }
     600              :         }
     601      3696832 :         (Slice::into_inner(buf), Ok(()))
     602      3696832 :     }
     603              : 
     604              :     /// Writes `buf.slice(0..buf.bytes_init())`.
      605              :     /// Returns the IoBuf underlying the BoundedBuf `buf`.
      606              :     /// I.e., the returned value's `bytes_init()` method may return something different from the `bytes_init()` that was passed in.
      607              :     /// That is quite brittle and easy to mis-use, so we return the size in the Ok() variant.
     608      4967006 :     pub async fn write_all<B: BoundedBuf>(&mut self, buf: B) -> (B::Buf, Result<usize, Error>) {
     609      4967006 :         let nbytes = buf.bytes_init();
     610      4967006 :         if nbytes == 0 {
     611           30 :             return (Slice::into_inner(buf.slice_full()), Ok(0));
     612      4966976 :         }
     613      4966976 :         let mut buf = buf.slice(0..nbytes);
     614      9933950 :         while !buf.is_empty() {
     615              :             // TODO: push `Slice` further down
     616      4966976 :             match self.write(&buf).await {
     617              :                 Ok(0) => {
     618            0 :                     return (
     619            0 :                         Slice::into_inner(buf),
     620            0 :                         Err(Error::new(
     621            0 :                             std::io::ErrorKind::WriteZero,
     622            0 :                             "failed to write whole buffer",
     623            0 :                         )),
     624            0 :                     );
     625              :                 }
     626      4966974 :                 Ok(n) => {
     627      4966974 :                     buf = buf.slice(n..);
     628      4966974 :                 }
     629            2 :                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     630            2 :                 Err(e) => return (Slice::into_inner(buf), Err(e)),
     631              :             }
     632              :         }
     633      4966974 :         (Slice::into_inner(buf), Ok(nbytes))
     634      4967006 :     }
     635              : 
     636      4966976 :     async fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
     637      4966976 :         let pos = self.pos;
     638      4966976 :         let n = self.write_at(buf, pos).await?;
     639      4966974 :         self.pos += n as u64;
     640      4966974 :         Ok(n)
     641      4966976 :     }
     642              : 
     643      5145989 :     pub(crate) async fn read_at<B>(&self, buf: B, offset: u64) -> (B, Result<usize, Error>)
     644      5145989 :     where
     645      5145989 :         B: tokio_epoll_uring::BoundedBufMut + Send,
     646      5145989 :     {
     647      5145989 :         let file_guard = match self.lock_file().await {
     648      5145989 :             Ok(file_guard) => file_guard,
     649            0 :             Err(e) => return (buf, Err(e)),
     650              :         };
     651              : 
     652      5145989 :         observe_duration!(StorageIoOperation::Read, {
     653      5145989 :             let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
     654      5145989 :             if let Ok(size) = res {
     655      5145987 :                 STORAGE_IO_SIZE
     656      5145987 :                     .with_label_values(&[
     657      5145987 :                         "read",
     658      5145987 :                         &self.tenant_id,
     659      5145987 :                         &self.shard_id,
     660      5145987 :                         &self.timeline_id,
     661      5145987 :                     ])
     662      5145987 :                     .add(size as i64);
     663      5145987 :             }
     664      5145989 :             (buf, res)
     665              :         })
     666      5145989 :     }
     667              : 
     668      8663808 :     async fn write_at(&self, buf: &[u8], offset: u64) -> Result<usize, Error> {
     669      8663808 :         let result = with_file!(self, StorageIoOperation::Write, |file_guard| {
     670      8663808 :             file_guard.with_std_file(|std_file| std_file.write_at(buf, offset))
     671              :         });
     672      8663808 :         if let Ok(size) = result {
     673      8663806 :             STORAGE_IO_SIZE
     674      8663806 :                 .with_label_values(&["write", &self.tenant_id, &self.shard_id, &self.timeline_id])
     675      8663806 :                 .add(size as i64);
     676      8663806 :         }
     677      8663808 :         result
     678      8663808 :     }
     679              : }
     680              : 
     681              : // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
     682      5145553 : pub async fn read_exact_at_impl<B, F, Fut>(
     683      5145553 :     buf: B,
     684      5145553 :     mut offset: u64,
     685      5145553 :     mut read_at: F,
     686      5145553 : ) -> (B, std::io::Result<()>)
     687      5145553 : where
     688      5145553 :     B: IoBufMut + Send,
     689      5145553 :     F: FnMut(tokio_epoll_uring::Slice<B>, u64) -> Fut,
     690      5145553 :     Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<B>, std::io::Result<usize>)>,
     691      5145553 : {
     692      5145553 :     let mut buf: tokio_epoll_uring::Slice<B> = buf.slice_full(); // includes all the uninitialized memory
     693     10291108 :     while buf.bytes_total() != 0 {
     694              :         let res;
     695      5145557 :         (buf, res) = read_at(buf, offset).await;
     696            0 :         match res {
     697            2 :             Ok(0) => break,
     698      5145555 :             Ok(n) => {
     699      5145555 :                 buf = buf.slice(n..);
     700      5145555 :                 offset += n as u64;
     701      5145555 :             }
     702            0 :             Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     703            0 :             Err(e) => return (buf.into_inner(), Err(e)),
     704              :         }
     705              :     }
     706              :     // NB: don't use `buf.is_empty()` here; it is from the
      707              :     // `impl Deref for Slice { Target = [u8] }`; the &[u8]
     708              :     // returned by it only covers the initialized portion of `buf`.
     709              :     // Whereas we're interested in ensuring that we filled the entire
     710              :     // buffer that the user passed in.
     711      5145553 :     if buf.bytes_total() != 0 {
     712            2 :         (
     713            2 :             buf.into_inner(),
     714            2 :             Err(std::io::Error::new(
     715            2 :                 std::io::ErrorKind::UnexpectedEof,
     716            2 :                 "failed to fill whole buffer",
     717            2 :             )),
     718            2 :         )
     719              :     } else {
     720      5145551 :         assert_eq!(buf.len(), buf.bytes_total());
     721      5145551 :         (buf.into_inner(), Ok(()))
     722              :     }
     723      5145553 : }
     724              : 
     725              : #[cfg(test)]
     726              : mod test_read_exact_at_impl {
     727              : 
     728              :     use std::{collections::VecDeque, sync::Arc};
     729              : 
     730              :     use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};
     731              : 
     732              :     use super::read_exact_at_impl;
     733              : 
     734              :     struct Expectation {
     735              :         offset: u64,
     736              :         bytes_total: usize,
     737              :         result: std::io::Result<Vec<u8>>,
     738              :     }
     739              :     struct MockReadAt {
     740              :         expectations: VecDeque<Expectation>,
     741              :     }
     742              : 
     743              :     impl MockReadAt {
     744           12 :         async fn read_at(
     745           12 :             &mut self,
     746           12 :             mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
     747           12 :             offset: u64,
     748           12 :         ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
     749           12 :             let exp = self
     750           12 :                 .expectations
     751           12 :                 .pop_front()
     752           12 :                 .expect("read_at called but we have no expectations left");
     753           12 :             assert_eq!(exp.offset, offset);
     754           12 :             assert_eq!(exp.bytes_total, buf.bytes_total());
     755           12 :             match exp.result {
     756           12 :                 Ok(bytes) => {
     757           12 :                     assert!(bytes.len() <= buf.bytes_total());
     758           12 :                     buf.put_slice(&bytes);
     759           12 :                     (buf, Ok(bytes.len()))
     760              :                 }
     761            0 :                 Err(e) => (buf, Err(e)),
     762              :             }
     763           12 :         }
     764              :     }
     765              : 
     766              :     impl Drop for MockReadAt {
     767            8 :         fn drop(&mut self) {
     768            8 :             assert_eq!(self.expectations.len(), 0);
     769            8 :         }
     770              :     }
     771              : 
     772            2 :     #[tokio::test]
     773            2 :     async fn test_basic() {
     774            2 :         let buf = Vec::with_capacity(5);
     775            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     776            2 :             expectations: VecDeque::from(vec![Expectation {
     777            2 :                 offset: 0,
     778            2 :                 bytes_total: 5,
     779            2 :                 result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
     780            2 :             }]),
     781            2 :         }));
     782            2 :         let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     783            2 :             let mock_read_at = Arc::clone(&mock_read_at);
     784            2 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     785            2 :         })
     786            2 :         .await;
     787            2 :         assert!(res.is_ok());
     788            2 :         assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
     789            2 :     }
     790              : 
     791            2 :     #[tokio::test]
     792            2 :     async fn test_empty_buf_issues_no_syscall() {
     793            2 :         let buf = Vec::new();
     794            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     795            2 :             expectations: VecDeque::new(),
     796            2 :         }));
     797            2 :         let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     798            0 :             let mock_read_at = Arc::clone(&mock_read_at);
     799            2 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     800            2 :         })
     801            2 :         .await;
     802            2 :         assert!(res.is_ok());
     803            2 :     }
     804              : 
     805            2 :     #[tokio::test]
     806            2 :     async fn test_two_read_at_calls_needed_until_buf_filled() {
     807            2 :         let buf = Vec::with_capacity(4);
     808            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     809            2 :             expectations: VecDeque::from(vec![
     810            2 :                 Expectation {
     811            2 :                     offset: 0,
     812            2 :                     bytes_total: 4,
     813            2 :                     result: Ok(vec![b'a', b'b']),
     814            2 :                 },
     815            2 :                 Expectation {
     816            2 :                     offset: 2,
     817            2 :                     bytes_total: 2,
     818            2 :                     result: Ok(vec![b'c', b'd']),
     819            2 :                 },
     820            2 :             ]),
     821            2 :         }));
     822            4 :         let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     823            4 :             let mock_read_at = Arc::clone(&mock_read_at);
     824            4 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     825            4 :         })
     826            2 :         .await;
     827            2 :         assert!(res.is_ok());
     828            2 :         assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
     829            2 :     }
     830              : 
     831            2 :     #[tokio::test]
     832            2 :     async fn test_eof_before_buffer_full() {
     833            2 :         let buf = Vec::with_capacity(3);
     834            2 :         let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
     835            2 :             expectations: VecDeque::from(vec![
     836            2 :                 Expectation {
     837            2 :                     offset: 0,
     838            2 :                     bytes_total: 3,
     839            2 :                     result: Ok(vec![b'a']),
     840            2 :                 },
     841            2 :                 Expectation {
     842            2 :                     offset: 1,
     843            2 :                     bytes_total: 2,
     844            2 :                     result: Ok(vec![b'b']),
     845            2 :                 },
     846            2 :                 Expectation {
     847            2 :                     offset: 2,
     848            2 :                     bytes_total: 1,
     849            2 :                     result: Ok(vec![]),
     850            2 :                 },
     851            2 :             ]),
     852            2 :         }));
     853            6 :         let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
     854            6 :             let mock_read_at = Arc::clone(&mock_read_at);
     855            6 :             async move { mock_read_at.lock().await.read_at(buf, offset).await }
     856            6 :         })
     857            2 :         .await;
     858            2 :         let Err(err) = res else {
     859            2 :             panic!("should return an error");
     860            2 :         };
     861            2 :         assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
     862            2 :         assert_eq!(format!("{err}"), "failed to fill whole buffer");
     863            2 :         // buffer contents on error are unspecified
     864            2 :     }
     865              : }
     866              : 
     867              : struct FileGuard {
     868              :     slot_guard: RwLockReadGuard<'static, SlotInner>,
     869              : }
     870              : 
     871              : impl AsRef<OwnedFd> for FileGuard {
     872     13854401 :     fn as_ref(&self) -> &OwnedFd {
     873     13854401 :         // This unwrap is safe because we only create `FileGuard`s
     874     13854401 :         // if we know that the file is Some.
     875     13854401 :         self.slot_guard.file.as_ref().unwrap()
     876     13854401 :     }
     877              : }
     878              : 
     879              : impl FileGuard {
     880              :     /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
     881     13709694 :     fn with_std_file<F, R>(&self, with: F) -> R
     882     13709694 :     where
     883     13709694 :         F: FnOnce(&File) -> R,
     884     13709694 :     {
     885     13709694 :         // SAFETY:
     886     13709694 :         // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
      887     13709694 :         // - `&` usage below: `self` is `&`, hence Rust typesystem guarantees there is no `&mut`
     888     13709694 :         let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
     889     13709694 :         let res = with(&file);
     890     13709694 :         let _ = file.into_raw_fd();
     891     13709694 :         res
     892     13709694 :     }
     893              :     /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
     894            4 :     fn with_std_file_mut<F, R>(&mut self, with: F) -> R
     895            4 :     where
     896            4 :         F: FnOnce(&mut File) -> R,
     897            4 :     {
     898            4 :         // SAFETY:
     899            4 :         // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
     900            4 :         // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
     901            4 :         let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
     902            4 :         let res = with(&mut file);
     903            4 :         let _ = file.into_raw_fd();
     904            4 :         res
     905            4 :     }
     906              : }
     907              : 
     908              : impl tokio_epoll_uring::IoFd for FileGuard {
     909       144711 :     unsafe fn as_fd(&self) -> RawFd {
     910       144711 :         let owned_fd: &OwnedFd = self.as_ref();
     911       144711 :         owned_fd.as_raw_fd()
     912       144711 :     }
     913              : }
     914              : 
     915              : #[cfg(test)]
     916              : impl VirtualFile {
     917        20200 :     pub(crate) async fn read_blk(
     918        20200 :         &self,
     919        20200 :         blknum: u32,
     920        20200 :     ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
     921        20200 :         use crate::page_cache::PAGE_SZ;
     922        20200 :         let buf = vec![0; PAGE_SZ];
     923        20200 :         let buf = self
     924        20200 :             .read_exact_at(buf, blknum as u64 * (PAGE_SZ as u64))
     925        10256 :             .await?;
     926        20200 :         Ok(crate::tenant::block_io::BlockLease::Vec(buf))
     927        20200 :     }
     928              : 
     929          224 :     async fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<(), Error> {
     930          224 :         let mut tmp = vec![0; 128];
     931              :         loop {
     932              :             let res;
     933          444 :             (tmp, res) = self.read_at(tmp, self.pos).await;
     934            2 :             match res {
     935          222 :                 Ok(0) => return Ok(()),
     936          220 :                 Ok(n) => {
     937          220 :                     self.pos += n as u64;
     938          220 :                     buf.extend_from_slice(&tmp[..n]);
     939          220 :                 }
     940            2 :                 Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
     941            2 :                 Err(e) => return Err(e),
     942              :             }
     943              :         }
     944          224 :     }
     945              : }
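// A minimal sketch of the owned-buffer convention that read_to_end() above is
// built on: the read call takes the buffer by value and always hands it back
// together with the result, so one allocation is reused across iterations, and
// ErrorKind::Interrupted is retried. The trait and impl here are hypothetical
// stand-ins, not the pageserver's actual read_at() API.
use std::io::{Error, ErrorKind};

// Hypothetical owned-buffer positional reader.
trait OwnedReadAt {
    fn read_at_owned(&self, buf: Vec<u8>, pos: u64) -> (Vec<u8>, Result<usize, Error>);
}

fn read_to_end_owned<R: OwnedReadAt>(r: &R, mut pos: u64) -> Result<Vec<u8>, Error> {
    let mut out = Vec::new();
    let mut tmp = vec![0u8; 128];
    loop {
        let res;
        // Move `tmp` into the call and get it back with the result.
        (tmp, res) = r.read_at_owned(tmp, pos);
        match res {
            Ok(0) => return Ok(out), // EOF
            Ok(n) => {
                pos += n as u64;
                out.extend_from_slice(&tmp[..n]);
            }
            Err(ref e) if e.kind() == ErrorKind::Interrupted => {} // retry
            Err(e) => return Err(e),
        }
    }
}

// Trivial in-memory backend so the sketch runs on its own.
impl OwnedReadAt for Vec<u8> {
    fn read_at_owned(&self, mut buf: Vec<u8>, pos: u64) -> (Vec<u8>, Result<usize, Error>) {
        let pos = pos as usize;
        let n = self.len().saturating_sub(pos).min(buf.len());
        if n > 0 {
            buf[..n].copy_from_slice(&self[pos..pos + n]);
        }
        (buf, Ok(n))
    }
}

fn main() {
    let data: Vec<u8> = (0..300u32).map(|i| (i % 251) as u8).collect();
    let out = read_to_end_owned(&data, 0).unwrap();
    assert_eq!(out, data);
}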
     946              : 
     947              : impl Drop for VirtualFile {
     948              :     /// If a VirtualFile is dropped, close the underlying file if it was open.
     949        47312 :     fn drop(&mut self) {
     950        47312 :         let handle = self.handle.get_mut();
     951        47312 : 
     952        47312 :         fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
     953        47312 :             if slot_guard.tag == tag {
     954        47312 :                 slot.recently_used.store(false, Ordering::Relaxed);
      955        47312 :                 // (for comparison, there is also the "close-by-replace" operation for
      956        47312 :                 // closes done on eviction.)
     957        47312 :                 if let Some(fd) = slot_guard.file.take() {
     958        37605 :                     STORAGE_IO_TIME_METRIC
     959        37605 :                         .get(StorageIoOperation::Close)
     960        37605 :                         .observe_closure_duration(|| drop(fd));
     961        37605 :                 }
     962        47312 :             }
     963        47312 :         }
     964        47312 : 
     965        47312 :         // We don't have async drop so we cannot directly await the lock here.
     966        47312 :         // Instead, first do a best-effort attempt at closing the underlying
     967        47312 :         // file descriptor by using `try_write`, and if that fails, spawn
     968        47312 :         // a tokio task to do it asynchronously: we just want it to be
     969        47312 :         // cleaned up eventually.
      970        47312 :         // Most of the time, the `try_write` should succeed though,
     971        47312 :         // as we have `&mut self` access. In other words, if the slot
     972        47312 :         // is still occupied by our file, there should be no access from
     973        47312 :         // other I/O operations; the only other possible place to lock
      974        47312 :         // the slot is the clock algorithm looking for free slots.
     975        47312 :         let slot = &get_open_files().slots[handle.index];
     976        47312 :         if let Ok(slot_guard) = slot.inner.try_write() {
     977        47312 :             clean_slot(slot, slot_guard, handle.tag);
     978        47312 :         } else {
     979            0 :             let tag = handle.tag;
     980            0 :             tokio::spawn(async move {
     981            0 :                 let slot_guard = slot.inner.write().await;
     982            0 :                 clean_slot(slot, slot_guard, tag);
     983            0 :             });
     984            0 :         };
     985        47312 :     }
     986              : }
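// A minimal sketch of the Drop pattern above: Drop is synchronous and cannot
// await, so it first tries a non-blocking lock and only falls back to spawning
// an async cleanup task if somebody else currently holds the lock. The types
// here (Resource, the Arc<Mutex<..>> slot) are hypothetical; the real code uses
// a &'static slot array and an RwLock, but the shape is the same. Assumes the
// tokio crate and that drop() runs inside a tokio runtime when the slow path
// is taken.
use std::sync::Arc;
use tokio::sync::Mutex;

struct Resource {
    slot: Arc<Mutex<Option<String>>>,
}

impl Drop for Resource {
    fn drop(&mut self) {
        fn clean(mut guard: tokio::sync::MutexGuard<'_, Option<String>>) {
            if let Some(name) = guard.take() {
                println!("closed {name}");
            }
        }
        if let Ok(guard) = self.slot.try_lock() {
            // Fast path: nobody else holds the lock, clean up inline.
            clean(guard);
        } else {
            // Slow path: defer the cleanup to a background task; we only
            // need it to happen eventually.
            let slot = Arc::clone(&self.slot);
            tokio::spawn(async move {
                clean(slot.lock().await);
            });
        }
    }
}

#[tokio::main]
async fn main() {
    let r = Resource {
        slot: Arc::new(Mutex::new(Some("file_a".to_string()))),
    };
    drop(r); // takes the fast path and prints "closed file_a"
}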
     987              : 
     988              : impl OpenFiles {
     989          721 :     fn new(num_slots: usize) -> OpenFiles {
     990          721 :         let mut slots = Box::new(Vec::with_capacity(num_slots));
     991        63460 :         for _ in 0..num_slots {
     992        63460 :             let slot = Slot {
     993        63460 :                 recently_used: AtomicBool::new(false),
     994        63460 :                 inner: RwLock::new(SlotInner { tag: 0, file: None }),
     995        63460 :             };
     996        63460 :             slots.push(slot);
     997        63460 :         }
     998              : 
     999          721 :         OpenFiles {
    1000          721 :             next: AtomicUsize::new(0),
    1001          721 :             slots: Box::leak(slots),
    1002          721 :         }
    1003          721 :     }
    1004              : }
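// A minimal sketch of the allocation pattern in OpenFiles::new() above: the
// slot array lives for the whole lifetime of the process, so it is allocated
// once and deliberately leaked with Box::leak to obtain a &'static reference
// without a lifetime parameter. The DemoSlot/DemoOpenFiles names are
// hypothetical stand-ins for the real Slot/OpenFiles types.
use std::sync::atomic::{AtomicBool, Ordering};

struct DemoSlot {
    recently_used: AtomicBool,
}

struct DemoOpenFiles {
    slots: &'static [DemoSlot],
}

fn new_demo(num_slots: usize) -> DemoOpenFiles {
    let mut slots = Vec::with_capacity(num_slots);
    for _ in 0..num_slots {
        slots.push(DemoSlot {
            recently_used: AtomicBool::new(false),
        });
    }
    DemoOpenFiles {
        // Intentional leak: the allocation is never freed, which is fine for
        // a process-lifetime singleton.
        slots: Box::leak(slots.into_boxed_slice()),
    }
}

fn main() {
    let of = new_demo(4);
    assert_eq!(of.slots.len(), 4);
    assert!(!of.slots[0].recently_used.load(Ordering::Relaxed));
}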
    1005              : 
    1006              : ///
    1007              : /// Initialize the virtual file module. This must be called once at page
    1008              : /// server startup.
    1009              : ///
    1010              : #[cfg(not(test))]
    1011          625 : pub fn init(num_slots: usize, engine: IoEngineKind) {
    1012          625 :     if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
    1013            0 :         panic!("virtual_file::init called twice");
    1014          625 :     }
    1015          625 :     io_engine::init(engine);
    1016          625 :     crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
    1017          625 : }
    1018              : 
    1019              : const TEST_MAX_FILE_DESCRIPTORS: usize = 10;
    1020              : 
    1021              : // Get a handle to the global slots array.
    1022     13963504 : fn get_open_files() -> &'static OpenFiles {
    1023     13963504 :     //
    1024     13963504 :     // In unit tests, page server startup doesn't happen and no one calls
    1025     13963504 :     // virtual_file::init(). Initialize it here, with a small array.
    1026     13963504 :     //
     1027     13963504 :     // This applies to the virtual file tests below, but to all other unit
     1028     13963504 :     // tests too, so the virtual file facility is always usable in
    1029     13963504 :     // unit tests.
    1030     13963504 :     //
    1031     13963504 :     if cfg!(test) {
    1032       365255 :         OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
    1033              :     } else {
    1034     13598249 :         OPEN_FILES.get().expect("virtual_file::init not called yet")
    1035              :     }
    1036     13963504 : }
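// A minimal sketch of the initialization scheme used by init() and
// get_open_files() above: production code must call init() exactly once at
// startup, while cfg!(test) builds fall back to lazy initialization with a
// small default so unit tests never have to call init() themselves. The
// Registry name and capacity field are hypothetical; only the OnceCell
// pattern itself mirrors the code above.
use once_cell::sync::OnceCell;

struct Registry {
    capacity: usize,
}

static REGISTRY: OnceCell<Registry> = OnceCell::new();

pub fn init(capacity: usize) {
    if REGISTRY.set(Registry { capacity }).is_err() {
        panic!("init called twice");
    }
}

fn get_registry() -> &'static Registry {
    if cfg!(test) {
        // Unit tests never call init(); initialize lazily with a small default.
        REGISTRY.get_or_init(|| Registry { capacity: 10 })
    } else {
        REGISTRY.get().expect("init not called yet")
    }
}

fn main() {
    init(100);
    assert_eq!(get_registry().capacity, 100);
}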
    1037              : 
    1038              : #[cfg(test)]
    1039              : mod tests {
    1040              :     use super::*;
    1041              :     use rand::seq::SliceRandom;
    1042              :     use rand::thread_rng;
    1043              :     use rand::Rng;
    1044              :     use std::future::Future;
    1045              :     use std::io::Write;
    1046              :     use std::sync::Arc;
    1047              : 
    1048              :     enum MaybeVirtualFile {
    1049              :         VirtualFile(VirtualFile),
    1050              :         File(File),
    1051              :     }
    1052              : 
    1053              :     impl From<VirtualFile> for MaybeVirtualFile {
    1054            6 :         fn from(vf: VirtualFile) -> Self {
    1055            6 :             MaybeVirtualFile::VirtualFile(vf)
    1056            6 :         }
    1057              :     }
    1058              : 
    1059              :     impl MaybeVirtualFile {
    1060          404 :         async fn read_exact_at(&self, mut buf: Vec<u8>, offset: u64) -> Result<Vec<u8>, Error> {
    1061          404 :             match self {
    1062          203 :                 MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(buf, offset).await,
    1063          202 :                 MaybeVirtualFile::File(file) => file.read_exact_at(&mut buf, offset).map(|()| buf),
    1064              :             }
    1065          404 :         }
    1066            8 :         async fn write_all_at<B: BoundedBuf>(&self, buf: B, offset: u64) -> Result<(), Error> {
    1067            8 :             match self {
    1068            4 :                 MaybeVirtualFile::VirtualFile(file) => {
    1069            4 :                     let (_buf, res) = file.write_all_at(buf, offset).await;
    1070            4 :                     res
    1071              :                 }
    1072            4 :                 MaybeVirtualFile::File(file) => {
    1073            4 :                     let buf_len = buf.bytes_init();
    1074            4 :                     if buf_len == 0 {
    1075            0 :                         return Ok(());
    1076            4 :                     }
    1077            4 :                     file.write_all_at(&buf.slice(0..buf_len), offset)
    1078              :                 }
    1079              :             }
    1080            8 :         }
    1081           36 :         async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
    1082           36 :             match self {
    1083           18 :                 MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
    1084           18 :                 MaybeVirtualFile::File(file) => file.seek(pos),
    1085              :             }
    1086           36 :         }
    1087            8 :         async fn write_all<B: BoundedBuf>(&mut self, buf: B) -> Result<(), Error> {
    1088            8 :             match self {
    1089            4 :                 MaybeVirtualFile::VirtualFile(file) => {
    1090            4 :                     let (_buf, res) = file.write_all(buf).await;
    1091            4 :                     res.map(|_| ())
    1092              :                 }
    1093            4 :                 MaybeVirtualFile::File(file) => {
    1094            4 :                     let buf_len = buf.bytes_init();
    1095            4 :                     if buf_len == 0 {
    1096            0 :                         return Ok(());
    1097            4 :                     }
    1098            4 :                     file.write_all(&buf.slice(0..buf_len))
    1099              :                 }
    1100              :             }
    1101            8 :         }
    1102              : 
    1103              :         // Helper function to slurp contents of a file, starting at the current position,
    1104              :         // into a string
    1105          442 :         async fn read_string(&mut self) -> Result<String, Error> {
    1106          442 :             use std::io::Read;
    1107          442 :             let mut buf = String::new();
    1108          442 :             match self {
    1109          224 :                 MaybeVirtualFile::VirtualFile(file) => {
    1110          224 :                     let mut buf = Vec::new();
    1111          226 :                     file.read_to_end(&mut buf).await?;
    1112          222 :                     return Ok(String::from_utf8(buf).unwrap());
    1113              :                 }
    1114          218 :                 MaybeVirtualFile::File(file) => {
    1115          218 :                     file.read_to_string(&mut buf)?;
    1116              :                 }
    1117              :             }
    1118          216 :             Ok(buf)
    1119          442 :         }
    1120              : 
    1121              :         // Helper function to slurp a portion of a file into a string
    1122          404 :         async fn read_string_at(&mut self, pos: u64, len: usize) -> Result<String, Error> {
    1123          404 :             let buf = vec![0; len];
    1124          404 :             let buf = self.read_exact_at(buf, pos).await?;
    1125          404 :             Ok(String::from_utf8(buf).unwrap())
    1126          404 :         }
    1127              :     }
    1128              : 
    1129            2 :     #[tokio::test]
    1130            2 :     async fn test_virtual_files() -> anyhow::Result<()> {
    1131            2 :         // The real work is done in the test_files() helper function. This
     1132            2 :         // allows us to run the same set of tests against both a native File and a
     1133            2 :         // VirtualFile. We trust the native Files and wouldn't need to test them,
    1134            2 :         // but this allows us to verify that the operations return the same
    1135            2 :         // results with VirtualFiles as with native Files. (Except that with
    1136            2 :         // native files, you will run out of file descriptors if the ulimit
    1137            2 :         // is low enough.)
    1138          206 :         test_files("virtual_files", |path, open_options| async move {
    1139          206 :             let vf = VirtualFile::open_with_options(&path, &open_options).await?;
    1140          206 :             Ok(MaybeVirtualFile::VirtualFile(vf))
    1141          412 :         })
    1142          527 :         .await
    1143            2 :     }
    1144              : 
    1145            2 :     #[tokio::test]
    1146            2 :     async fn test_physical_files() -> anyhow::Result<()> {
    1147          206 :         test_files("physical_files", |path, open_options| async move {
    1148          206 :             Ok(MaybeVirtualFile::File({
    1149          206 :                 let owned_fd = open_options.open(path.as_std_path()).await?;
    1150          206 :                 File::from(owned_fd)
    1151            2 :             }))
    1152          412 :         })
    1153          104 :         .await
    1154            2 :     }
    1155              : 
    1156            4 :     async fn test_files<OF, FT>(testname: &str, openfunc: OF) -> anyhow::Result<()>
    1157            4 :     where
    1158            4 :         OF: Fn(Utf8PathBuf, OpenOptions) -> FT,
    1159            4 :         FT: Future<Output = Result<MaybeVirtualFile, std::io::Error>>,
    1160            4 :     {
    1161            4 :         let testdir = crate::config::PageServerConf::test_repo_dir(testname);
    1162            4 :         std::fs::create_dir_all(&testdir)?;
    1163              : 
    1164            4 :         let path_a = testdir.join("file_a");
    1165            4 :         let mut file_a = openfunc(
    1166            4 :             path_a.clone(),
    1167            4 :             OpenOptions::new()
    1168            4 :                 .write(true)
    1169            4 :                 .create(true)
    1170            4 :                 .truncate(true)
    1171            4 :                 .to_owned(),
    1172            4 :         )
    1173            4 :         .await?;
    1174            4 :         file_a.write_all(b"foobar".to_vec()).await?;
    1175              : 
    1176              :         // cannot read from a file opened in write-only mode
    1177            4 :         let _ = file_a.read_string().await.unwrap_err();
    1178              : 
    1179              :         // Close the file and re-open for reading
    1180            4 :         let mut file_a = openfunc(path_a, OpenOptions::new().read(true).to_owned()).await?;
    1181              : 
    1182              :         // cannot write to a file opened in read-only mode
    1183            4 :         let _ = file_a.write_all(b"bar".to_vec()).await.unwrap_err();
    1184            4 : 
    1185            4 :         // Try simple read
    1186            4 :         assert_eq!("foobar", file_a.read_string().await?);
    1187              : 
    1188              :         // It's positioned at the EOF now.
    1189            4 :         assert_eq!("", file_a.read_string().await?);
    1190              : 
    1191              :         // Test seeks.
    1192            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1193            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1194              : 
    1195            4 :         assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
    1196            4 :         assert_eq!("ar", file_a.read_string().await?);
    1197              : 
    1198            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1199            4 :         assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
    1200            4 :         assert_eq!("bar", file_a.read_string().await?);
    1201              : 
    1202            4 :         assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
    1203            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1204              : 
    1205              :         // Test erroneous seeks to before byte 0
    1206            4 :         file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
    1207            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1208            4 :         file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();
    1209            4 : 
    1210            4 :         // the erroneous seek should have left the position unchanged
    1211            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1212              : 
    1213              :         // Create another test file, and try FileExt functions on it.
    1214            4 :         let path_b = testdir.join("file_b");
    1215            4 :         let mut file_b = openfunc(
    1216            4 :             path_b.clone(),
    1217            4 :             OpenOptions::new()
    1218            4 :                 .read(true)
    1219            4 :                 .write(true)
    1220            4 :                 .create(true)
    1221            4 :                 .truncate(true)
    1222            4 :                 .to_owned(),
    1223            4 :         )
    1224            2 :         .await?;
    1225            4 :         file_b.write_all_at(b"BAR".to_vec(), 3).await?;
    1226            4 :         file_b.write_all_at(b"FOO".to_vec(), 0).await?;
    1227              : 
    1228            4 :         assert_eq!(file_b.read_string_at(2, 3).await?, "OBA");
    1229              : 
    1230              :         // Open a lot of files, enough to cause some evictions. (Or to be precise,
    1231              :         // open the same file many times. The effect is the same.)
    1232              :         //
    1233              :         // leave file_a positioned at offset 1 before we start
    1234            4 :         assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
    1235              : 
    1236            4 :         let mut vfiles = Vec::new();
    1237          404 :         for _ in 0..100 {
    1238          400 :             let mut vfile =
    1239          400 :                 openfunc(path_b.clone(), OpenOptions::new().read(true).to_owned()).await?;
    1240          400 :             assert_eq!("FOOBAR", vfile.read_string().await?);
    1241          400 :             vfiles.push(vfile);
    1242              :         }
    1243              : 
    1244              :         // make sure we opened enough files to definitely cause evictions.
    1245            4 :         assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);
    1246              : 
    1247              :         // The underlying file descriptor for 'file_a' should be closed now. Try to read
    1248              :         // from it again. We left the file positioned at offset 1 above.
    1249            4 :         assert_eq!("oobar", file_a.read_string().await?);
    1250              : 
    1251              :         // Check that all the other FDs still work too. Use them in random order for
    1252              :         // good measure.
    1253            4 :         vfiles.as_mut_slice().shuffle(&mut thread_rng());
    1254          400 :         for vfile in vfiles.iter_mut() {
    1255          400 :             assert_eq!("OOBAR", vfile.read_string_at(1, 5).await?);
    1256              :         }
    1257              : 
    1258            4 :         Ok(())
    1259            4 :     }
    1260              : 
     1261              :     /// Test using VirtualFiles from many threads concurrently. This exercises both
     1262              :     /// using a lot of VirtualFiles concurrently, causing evictions, and using the
     1263              :     /// same VirtualFile from multiple threads concurrently.
    1264            2 :     #[tokio::test]
    1265            2 :     async fn test_vfile_concurrency() -> Result<(), Error> {
    1266            2 :         const SIZE: usize = 8 * 1024;
    1267            2 :         const VIRTUAL_FILES: usize = 100;
    1268            2 :         const THREADS: usize = 100;
    1269            2 :         const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];
    1270            2 : 
    1271            2 :         let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
    1272            2 :         std::fs::create_dir_all(&testdir)?;
    1273            2 : 
    1274            2 :         // Create a test file.
    1275            2 :         let test_file_path = testdir.join("concurrency_test_file");
    1276            2 :         {
    1277            2 :             let file = File::create(&test_file_path)?;
    1278            2 :             file.write_all_at(&SAMPLE, 0)?;
    1279            2 :         }
    1280            2 : 
    1281            2 :         // Open the file many times.
    1282            2 :         let mut files = Vec::new();
    1283          202 :         for _ in 0..VIRTUAL_FILES {
    1284          200 :             let f = VirtualFile::open_with_options(&test_file_path, OpenOptions::new().read(true))
    1285          101 :                 .await?;
    1286          200 :             files.push(f);
    1287            2 :         }
    1288            2 :         let files = Arc::new(files);
    1289            2 : 
    1290            2 :         // Launch many threads, and use the virtual files concurrently in random order.
    1291            2 :         let rt = tokio::runtime::Builder::new_multi_thread()
    1292            2 :             .worker_threads(THREADS)
    1293            2 :             .thread_name("test_vfile_concurrency thread")
    1294            2 :             .build()
    1295            2 :             .unwrap();
    1296            2 :         let mut hdls = Vec::new();
    1297          202 :         for _threadno in 0..THREADS {
    1298          200 :             let files = files.clone();
    1299          200 :             let hdl = rt.spawn(async move {
    1300          200 :                 let mut buf = vec![0u8; SIZE];
    1301          200 :                 let mut rng = rand::rngs::OsRng;
    1302       200000 :                 for _ in 1..1000 {
    1303       199800 :                     let f = &files[rng.gen_range(0..files.len())];
    1304       652455 :                     buf = f.read_exact_at(buf, 0).await.unwrap();
    1305       199800 :                     assert!(buf == SAMPLE);
    1306            2 :                 }
    1307          200 :             });
    1308          200 :             hdls.push(hdl);
    1309          200 :         }
    1310          202 :         for hdl in hdls {
    1311          200 :             hdl.await?;
    1312            2 :         }
    1313            2 :         std::mem::forget(rt);
    1314            2 : 
    1315            2 :         Ok(())
    1316            2 :     }
    1317              : 
    1318            2 :     #[tokio::test]
    1319            2 :     async fn test_atomic_overwrite_basic() {
    1320            2 :         let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
    1321            2 :         std::fs::create_dir_all(&testdir).unwrap();
    1322            2 : 
    1323            2 :         let path = testdir.join("myfile");
    1324            2 :         let tmp_path = testdir.join("myfile.tmp");
    1325            2 : 
    1326            2 :         VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
    1327            2 :             .await
    1328            2 :             .unwrap();
    1329            2 :         let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
    1330            2 :         let post = file.read_string().await.unwrap();
    1331            2 :         assert_eq!(post, "foo");
    1332            2 :         assert!(!tmp_path.exists());
    1333            2 :         drop(file);
    1334            2 : 
    1335            2 :         VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"bar".to_vec())
    1336            2 :             .await
    1337            2 :             .unwrap();
    1338            2 :         let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
    1339            2 :         let post = file.read_string().await.unwrap();
    1340            2 :         assert_eq!(post, "bar");
    1341            2 :         assert!(!tmp_path.exists());
    1342            2 :         drop(file);
    1343            2 :     }
    1344              : 
    1345            2 :     #[tokio::test]
    1346            2 :     async fn test_atomic_overwrite_preexisting_tmp() {
    1347            2 :         let testdir =
    1348            2 :             crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
    1349            2 :         std::fs::create_dir_all(&testdir).unwrap();
    1350            2 : 
    1351            2 :         let path = testdir.join("myfile");
    1352            2 :         let tmp_path = testdir.join("myfile.tmp");
    1353            2 : 
    1354            2 :         std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
    1355            2 :         assert!(tmp_path.exists());
    1356            2 : 
    1357            2 :         VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
    1358            2 :             .await
    1359            2 :             .unwrap();
    1360            2 : 
    1361            2 :         let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
    1362            2 :         let post = file.read_string().await.unwrap();
    1363            2 :         assert_eq!(post, "foo");
    1364            2 :         assert!(!tmp_path.exists());
    1365            2 :         drop(file);
    1366            2 :     }
    1367              : }
        

Generated by: LCOV version 2.1-beta