//!
//! VirtualFile is like a normal File, but it's not bound directly to
//! a file descriptor. Instead, the file is opened when it's read from,
//! and if too many files are open globally in the system, least-recently
//! used ones are closed.
//!
//! To track which files have been recently used, we use the clock algorithm
//! with a 'recently_used' flag on each slot.
//!
//! This is similar to PostgreSQL's virtual file descriptor facility in
//! src/backend/storage/file/fd.c
//!
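//! A minimal usage sketch (the path is hypothetical; marked `ignore` because
//! it needs a prior `virtual_file::init()` call and an async runtime):
//!
//! ```ignore
//! let file = VirtualFile::open(Utf8Path::new("/some/file")).await?;
//! // Re-opens the underlying FD transparently if it was evicted.
//! let buf = file.read_exact_at(vec![0u8; 8192], 0).await?;
//! ```
//!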
use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC};

use crate::page_cache::PageWriteGuard;
use crate::tenant::TENANTS_SEGMENT_NAME;
use camino::{Utf8Path, Utf8PathBuf};
use once_cell::sync::OnceCell;
use pageserver_api::shard::TenantShardId;
use std::fs::{self, File};
use std::io::{Error, ErrorKind, Seek, SeekFrom};
use tokio_epoll_uring::{BoundedBuf, IoBufMut, Slice};

use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
use std::os::unix::fs::FileExt;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
use tokio::time::Instant;
use utils::fs_ext;

pub use pageserver_api::models::virtual_file as api;
pub(crate) mod io_engine;
mod open_options;
pub(crate) use io_engine::IoEngineKind;
pub(crate) use open_options::*;

///
/// A virtual file descriptor. You can use this just like std::fs::File, but internally
/// the underlying file is closed if the system is low on file descriptors,
/// and re-opened when it's accessed again.
///
/// Like with std::fs::File, multiple threads can read/write the file concurrently,
/// holding just a shared reference to the same VirtualFile, using the read_at() / write_at()
/// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
/// require a mutable reference, because they modify the "current position".
///
/// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
/// slot that 'handle' points to, if the underlying file is currently open. If it's not
/// currently open, the 'handle' can still point to the slot where it was last kept. The
/// 'tag' field is used to detect whether the handle is still valid or not.
///
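/// A sketch of concurrent use through a shared reference (offsets are
/// illustrative; marked `ignore`):
///
/// ```ignore
/// // Positional I/O takes `&self`, so multiple tasks can share one VirtualFile.
/// let a = file.read_exact_at(vec![0u8; 512], 0).await?;
/// let b = file.read_exact_at(vec![0u8; 512], 512).await?;
/// ```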
#[derive(Debug)]
pub struct VirtualFile {
    /// Lazy handle to the global file descriptor cache. The slot that this points to
    /// might contain our File, or it may be empty, or it may contain a File that
    /// belongs to a different VirtualFile.
    handle: RwLock<SlotHandle>,

    /// Current file position
    pos: u64,

    /// File path and options to use to open it.
    ///
    /// Note: this only contains the options needed to re-open it. For example,
    /// if a new file is created, we only pass the create flag when it's initially
    /// opened, in the VirtualFile::create() function, and strip the flag before
    /// storing it here.
    pub path: Utf8PathBuf,
    open_options: OpenOptions,

    // These are strings because we only use them for metrics, and those expect strings.
    // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
    // strings.
    tenant_id: String,
    shard_id: String,
    timeline_id: String,
}

#[derive(Debug, PartialEq, Clone, Copy)]
struct SlotHandle {
    /// Index into OPEN_FILES.slots
    index: usize,

    /// Value of 'tag' in the slot. If the slot's tag doesn't match, then the slot has
    /// been recycled and no longer contains the FD for this virtual file.
    tag: u64,
}

/// OPEN_FILES is the global array that holds the physical file descriptors that
/// are currently open. Each slot in the array is protected by a separate lock,
/// so that different files can be accessed independently. The lock must be held
/// in write mode to replace the slot with a different file, but read mode
/// is enough to operate on the file, whether you're reading or writing to it.
///
/// OPEN_FILES starts in an uninitialized state, and it's initialized by
/// the virtual_file::init() function. It must be called exactly once at page
/// server startup.
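/// A sketch of that startup call (slot count and engine choice are illustrative):
///
/// ```ignore
/// // Once, at startup, before the first VirtualFile is opened:
/// virtual_file::init(100, IoEngineKind::StdFs);
/// ```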
static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();

struct OpenFiles {
    slots: &'static [Slot],

    /// clock arm for the clock algorithm
    next: AtomicUsize,
}

struct Slot {
    inner: RwLock<SlotInner>,

    /// has this file been used since last clock sweep?
    recently_used: AtomicBool,
}

struct SlotInner {
    /// Counter that's incremented every time a different file is stored here.
    /// To avoid the ABA problem.
    tag: u64,

    /// the underlying file
    file: Option<OwnedFd>,
}

/// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
struct PageWriteGuardBuf {
    page: PageWriteGuard<'static>,
    init_up_to: usize,
}
// Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
// and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
    fn stable_ptr(&self) -> *const u8 {
        self.page.as_ptr()
    }
    fn bytes_init(&self) -> usize {
        self.init_up_to
    }
    fn bytes_total(&self) -> usize {
        self.page.len()
    }
}
// Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
// hence it's safe to hand out the `stable_mut_ptr()`.
unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
    fn stable_mut_ptr(&mut self) -> *mut u8 {
        self.page.as_mut_ptr()
    }

    unsafe fn set_init(&mut self, pos: usize) {
        assert!(pos <= self.page.len());
        self.init_up_to = pos;
    }
}

impl OpenFiles {
    /// Find a slot to use, evicting an existing file descriptor if needed.
    ///
    /// On return, we hold a lock on the slot, its 'tag' has been updated,
    /// and 'recently_used' has been set. It's all ready for reuse.
    async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
        //
        // Run the clock algorithm to find a slot to replace.
        //
        let num_slots = self.slots.len();
        let mut retries = 0;
        let mut slot;
        let mut slot_guard;
        let index;
        loop {
            let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
            slot = &self.slots[next];

            // If the recently_used flag on this slot is set, continue the clock
            // sweep. Otherwise try to use this slot. If we cannot acquire the
            // lock, also continue the clock sweep.
            //
            // We only continue in this manner for a while, though. If we loop
            // through the array twice without finding a victim, just pick the
            // next slot and wait until we can reuse it. This way, we avoid
            // spinning in the extreme case that all the slots are busy with an
            // I/O operation.
            if retries < num_slots * 2 {
                if !slot.recently_used.swap(false, Ordering::Release) {
                    if let Ok(guard) = slot.inner.try_write() {
                        slot_guard = guard;
                        index = next;
                        break;
                    }
                }
                retries += 1;
            } else {
                slot_guard = slot.inner.write().await;
                index = next;
                break;
            }
        }

        //
        // We now have the victim slot locked. If it was in use previously, close the
        // old file.
        //
        if let Some(old_file) = slot_guard.file.take() {
            // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
            // distinguish the two.
            STORAGE_IO_TIME_METRIC
                .get(StorageIoOperation::CloseByReplace)
                .observe_closure_duration(|| drop(old_file));
        }

        // Prepare the slot for reuse and return it
        slot_guard.tag += 1;
        slot.recently_used.store(true, Ordering::Relaxed);
        (
            SlotHandle {
                index,
                tag: slot_guard.tag,
            },
            slot_guard,
        )
    }
}

/// Identify error types that should always terminate the process. Other
/// error types may be eligible for retry.
pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
    use nix::errno::Errno::*;
    match e.raw_os_error().map(nix::errno::from_i32) {
        Some(EIO) => {
            // Terminate on EIO because we no longer trust the device to store
            // data safely, or to uphold persistence guarantees on fsync.
            true
        }
        Some(EROFS) => {
            // Terminate on EROFS because a filesystem is usually remounted
            // readonly when it has experienced some critical issue, so the same
            // logic as EIO applies.
            true
        }
        Some(EACCES) => {
            // Terminate on EACCES because we should always have permissions
            // for our own data dir: if we don't, then we can't do our job and
            // need administrative intervention to fix permissions. Terminating
            // is the best way to make sure we stop cleanly rather than going
            // into infinite retry loops, and will make it clear to the outside
            // world that we need help.
            true
        }
        _ => {
            // Treat all other local file I/O errors as retryable. This includes:
            // - ENOSPC: we stay up and wait for eviction to free some space
            // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
            // - WriteZero, Interrupted: these are used internally by VirtualFile
            false
        }
    }
}

/// Call this when the local filesystem gives us an error with an external
/// cause: this includes EIO, EROFS, and EACCES: all these indicate either
/// bad storage or bad configuration, and we can't fix that from inside
/// a running process.
pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
    tracing::error!("Fatal I/O error: {e}: {context}");
    std::process::abort();
}

pub(crate) trait MaybeFatalIo<T> {
    fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
    fn fatal_err(self, context: &str) -> T;
}

impl<T> MaybeFatalIo<T> for std::io::Result<T> {
    /// Terminate the process if the result is an error of a fatal type, else pass it through
    ///
    /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc., but
    /// not on ENOSPC.
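    /// A usage sketch (the path is hypothetical; marked `ignore`):
    ///
    /// ```ignore
    /// // Propagates e.g. ENOSPC to the caller, but aborts the process on EIO/EROFS/EACCES.
    /// std::fs::write("a/path", b"data").maybe_fatal_err("writing a/path")?;
    /// ```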
    fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
        if let Err(e) = &self {
            if is_fatal_io_error(e) {
                on_fatal_io_error(e, context);
            }
        }
        self
    }

    /// Terminate the process on any I/O error.
    ///
    /// This is appropriate for reads on files that we know exist: they should always work.
    fn fatal_err(self, context: &str) -> T {
        match self {
            Ok(v) => v,
            Err(e) => {
                on_fatal_io_error(&e, context);
            }
        }
    }
}

/// Observe duration for the given storage I/O operation
///
/// Unlike `observe_closure_duration`, this supports async,
/// where "support" means that we measure wall clock time.
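/// A call sketch (`do_read` is a hypothetical async operation; the macro wraps
/// any expression, including one containing `.await`):
///
/// ```ignore
/// let result = observe_duration!(StorageIoOperation::Read, do_read().await);
/// ```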
macro_rules! observe_duration {
    ($op:expr, $($body:tt)*) => {{
        let instant = Instant::now();
        let result = $($body)*;
        let elapsed = instant.elapsed().as_secs_f64();
        STORAGE_IO_TIME_METRIC
            .get($op)
            .observe(elapsed);
        result
    }}
}

macro_rules! with_file {
    ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
        let $ident = $this.lock_file().await?;
        observe_duration!($op, $($body)*)
    }};
    ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
        let mut $ident = $this.lock_file().await?;
        observe_duration!($op, $($body)*)
    }};
}

impl VirtualFile {
    /// Open a file in read-only mode. Like File::open.
    pub async fn open(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
        Self::open_with_options(path, OpenOptions::new().read(true)).await
    }

    /// Create a new file for writing. If the file exists, it will be truncated.
    /// Like File::create.
    pub async fn create(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
        Self::open_with_options(
            path,
            OpenOptions::new().write(true).create(true).truncate(true),
        )
        .await
    }

    /// Open a file with given options.
    ///
    /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
    /// they will also be applied when the file is subsequently re-opened, not only
    /// the first time. Make sure that's sane!
    pub async fn open_with_options(
        path: &Utf8Path,
        open_options: &OpenOptions,
    ) -> Result<VirtualFile, std::io::Error> {
        let path_str = path.to_string();
        let parts = path_str.split('/').collect::<Vec<&str>>();
        let (tenant_id, shard_id, timeline_id) =
            if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
                let tenant_shard_part = parts[parts.len() - 4];
                let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
                    Ok(tenant_shard_id) => (
                        tenant_shard_id.tenant_id.to_string(),
                        format!("{}", tenant_shard_id.shard_slug()),
                    ),
                    Err(_) => {
                        // Malformed path: this ID is just for observability, so tolerate it
                        // and pass through
                        (tenant_shard_part.to_string(), "*".to_string())
                    }
                };
                (tenant_id, shard_id, parts[parts.len() - 2].to_string())
            } else {
                ("*".to_string(), "*".to_string(), "*".to_string())
            };
        let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;

        // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
        // where our caller doesn't get to use the returned VirtualFile before its
        // slot gets re-used by someone else.
        let file = observe_duration!(StorageIoOperation::Open, {
            open_options.open(path.as_std_path()).await?
        });

        // Strip all options other than read and write.
        //
        // It would perhaps be nicer to check just for the read and write flags
        // explicitly, but OpenOptions doesn't contain any functions to read flags,
        // only to set them.
        let mut reopen_options = open_options.clone();
        reopen_options.create(false);
        reopen_options.create_new(false);
        reopen_options.truncate(false);

        let vfile = VirtualFile {
            handle: RwLock::new(handle),
            pos: 0,
            path: path.to_path_buf(),
            open_options: reopen_options,
            tenant_id,
            shard_id,
            timeline_id,
        };

        // TODO: Under pressure, it's likely the slot will get re-used and
        // the underlying file closed before they get around to using it.
        // => https://github.com/neondatabase/neon/issues/6065
        slot_guard.file.replace(file);

        Ok(vfile)
    }

    /// Writes a file to the specified `final_path` in a crash-safe fashion
    ///
    /// The file is first written to the specified tmp_path, and in a second
    /// step, the tmp path is renamed to the final path. As renames are
    /// atomic, a crash during the write operation will never leave behind a
    /// partially written file.
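    /// A call sketch (paths and contents are illustrative; marked `ignore`):
    ///
    /// ```ignore
    /// VirtualFile::crashsafe_overwrite(
    ///     Utf8Path::new("cfg/settings.json"),
    ///     Utf8Path::new("cfg/settings.json.tmp"),
    ///     b"new contents".to_vec(),
    /// )
    /// .await?;
    /// ```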
    pub async fn crashsafe_overwrite<B: BoundedBuf>(
        final_path: &Utf8Path,
        tmp_path: &Utf8Path,
        content: B,
    ) -> std::io::Result<()> {
        let Some(final_path_parent) = final_path.parent() else {
            return Err(std::io::Error::from_raw_os_error(
                nix::errno::Errno::EINVAL as i32,
            ));
        };
        std::fs::remove_file(tmp_path).or_else(fs_ext::ignore_not_found)?;
        let mut file = Self::open_with_options(
            tmp_path,
            OpenOptions::new()
                .write(true)
                // Use `create_new` so that, if we race with ourselves or something else,
                // we bail out instead of causing damage.
                .create_new(true),
        )
        .await?;
        let (_content, res) = file.write_all(content).await;
        res?;
        file.sync_all().await?;
        drop(file); // before the rename, that's important!
        // renames are atomic
        std::fs::rename(tmp_path, final_path)?;
        // Only open the final path's parent dirfd now, so that this operation only
        // ever holds one VirtualFile fd at a time. That's important because
        // the current `find_victim_slot` impl might pick the same slot for both
        // VirtualFiles, and it eventually takes a blocking write lock instead of
        // try_lock.
        let final_parent_dirfd =
            Self::open_with_options(final_path_parent, OpenOptions::new().read(true)).await?;
        final_parent_dirfd.sync_all().await?;
        Ok(())
    }

    /// Call File::sync_all() on the underlying File.
    pub async fn sync_all(&self) -> Result<(), Error> {
        with_file!(self, StorageIoOperation::Fsync, |file_guard| file_guard
            .with_std_file(|std_file| std_file.sync_all()))
    }

    pub async fn metadata(&self) -> Result<fs::Metadata, Error> {
        with_file!(self, StorageIoOperation::Metadata, |file_guard| file_guard
            .with_std_file(|std_file| std_file.metadata()))
    }

    /// Helper function internal to `VirtualFile` that looks up the underlying File,
    /// opens it and evicts some other File if necessary. The passed parameter is
    /// assumed to be a function available for the physical `File`.
    ///
    /// We are doing it via a macro as Rust doesn't support async closures that
    /// take parameters with lifetimes.
    async fn lock_file(&self) -> Result<FileGuard, Error> {
        let open_files = get_open_files();

        let mut handle_guard = {
            // Read the cached slot handle, and see if the slot that it points to still
            // contains our File.
            //
            // We only need to hold the handle lock while we read the current handle. If
            // another thread closes the file and recycles the slot for a different file,
            // we will notice that the handle we read is no longer valid and retry.
            let mut handle = *self.handle.read().await;
            loop {
                // Check if the slot contains our File
                {
                    let slot = &open_files.slots[handle.index];
                    let slot_guard = slot.inner.read().await;
                    if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
                        // Found a cached file descriptor.
                        slot.recently_used.store(true, Ordering::Relaxed);
                        return Ok(FileGuard { slot_guard });
                    }
                }

                // The slot didn't contain our File. We will have to open it ourselves,
                // but before that, grab a write lock on handle in the VirtualFile, so
                // that no other thread will try to concurrently open the same file.
                let handle_guard = self.handle.write().await;

                // If another thread changed the handle while we were not holding the lock,
                // then the handle might now be valid again. Loop back to retry.
                if *handle_guard != handle {
                    handle = *handle_guard;
                    continue;
                }
                break handle_guard;
            }
        };

        // We need to open the file ourselves. The handle in the VirtualFile is
        // now locked in write-mode. Find a free slot to put it in.
        let (handle, mut slot_guard) = open_files.find_victim_slot().await;

        // Re-open the physical file.
        // NB: we use StorageIoOperation::OpenAfterReplace for this to distinguish this
        // case from StorageIoOperation::Open. This helps with identifying thrashing
        // of the virtual file descriptor cache.
        let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
            self.open_options.open(self.path.as_std_path()).await?
        });

        // Store the File in the slot and update the handle in the VirtualFile
        // to point to it.
        slot_guard.file.replace(file);

        *handle_guard = handle;

        return Ok(FileGuard {
            slot_guard: slot_guard.downgrade(),
        });
    }

    pub fn remove(self) {
        let path = self.path.clone();
        drop(self);
        std::fs::remove_file(path).expect("failed to remove the virtual file");
    }

    pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
        match pos {
            SeekFrom::Start(offset) => {
                self.pos = offset;
            }
            SeekFrom::End(offset) => {
                self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
                    .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
            }
            SeekFrom::Current(offset) => {
                let pos = self.pos as i128 + offset as i128;
                if pos < 0 {
                    return Err(Error::new(
                        ErrorKind::InvalidInput,
                        "offset would be negative",
                    ));
                }
                if pos > u64::MAX as i128 {
                    return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
                }
                self.pos = pos as u64;
            }
        }
        Ok(self.pos)
    }

    pub async fn read_exact_at<B>(&self, buf: B, offset: u64) -> Result<B, Error>
    where
        B: IoBufMut + Send,
    {
        let (buf, res) =
            read_exact_at_impl(buf, offset, |buf, offset| self.read_at(buf, offset)).await;
        res.map(|()| buf)
    }

    /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
    pub async fn read_exact_at_page(
        &self,
        page: PageWriteGuard<'static>,
        offset: u64,
    ) -> Result<PageWriteGuard<'static>, Error> {
        let buf = PageWriteGuardBuf {
            page,
            init_up_to: 0,
        };
        let res = self.read_exact_at(buf, offset).await;
        res.map(|PageWriteGuardBuf { page, .. }| page)
            .map_err(|e| Error::new(ErrorKind::Other, e))
    }

    // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
    pub async fn write_all_at<B: BoundedBuf>(
        &self,
        buf: B,
        mut offset: u64,
    ) -> (B::Buf, Result<(), Error>) {
        let buf_len = buf.bytes_init();
        if buf_len == 0 {
            return (Slice::into_inner(buf.slice_full()), Ok(()));
        }
        let mut buf = buf.slice(0..buf_len);
        while !buf.is_empty() {
            // TODO: push `buf` further down
            match self.write_at(&buf, offset).await {
                Ok(0) => {
                    return (
                        Slice::into_inner(buf),
                        Err(Error::new(
                            std::io::ErrorKind::WriteZero,
                            "failed to write whole buffer",
                        )),
                    );
                }
                Ok(n) => {
                    buf = buf.slice(n..);
                    offset += n as u64;
                }
                Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
                Err(e) => return (Slice::into_inner(buf), Err(e)),
            }
        }
        (Slice::into_inner(buf), Ok(()))
    }

    /// Writes `buf.slice(0..buf.bytes_init())`.
    /// Returns the IoBuf that underlies the BoundedBuf `buf`.
    /// I.e., the returned value's `bytes_init()` may differ from the `bytes_init()`
    /// of the `buf` that was passed in.
    /// It's quite brittle and easy to misuse, so we return the size in the Ok() variant.
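    /// A call sketch (buffer contents are illustrative; marked `ignore`):
    ///
    /// ```ignore
    /// let (_buf, res) = file.write_all(b"blob".to_vec()).await;
    /// let nbytes = res?; // number of bytes written
    /// ```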
    pub async fn write_all<B: BoundedBuf>(&mut self, buf: B) -> (B::Buf, Result<usize, Error>) {
        let nbytes = buf.bytes_init();
        if nbytes == 0 {
            return (Slice::into_inner(buf.slice_full()), Ok(0));
        }
        let mut buf = buf.slice(0..nbytes);
        while !buf.is_empty() {
            // TODO: push `Slice` further down
            match self.write(&buf).await {
                Ok(0) => {
                    return (
                        Slice::into_inner(buf),
                        Err(Error::new(
                            std::io::ErrorKind::WriteZero,
                            "failed to write whole buffer",
                        )),
                    );
                }
                Ok(n) => {
                    buf = buf.slice(n..);
                }
                Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
                Err(e) => return (Slice::into_inner(buf), Err(e)),
            }
        }
        (Slice::into_inner(buf), Ok(nbytes))
    }

    async fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
        let pos = self.pos;
        let n = self.write_at(buf, pos).await?;
        self.pos += n as u64;
        Ok(n)
    }

    pub(crate) async fn read_at<B>(&self, buf: B, offset: u64) -> (B, Result<usize, Error>)
    where
        B: tokio_epoll_uring::BoundedBufMut + Send,
    {
        let file_guard = match self.lock_file().await {
            Ok(file_guard) => file_guard,
            Err(e) => return (buf, Err(e)),
        };

        observe_duration!(StorageIoOperation::Read, {
            let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
            if let Ok(size) = res {
                STORAGE_IO_SIZE
                    .with_label_values(&[
                        "read",
                        &self.tenant_id,
                        &self.shard_id,
                        &self.timeline_id,
                    ])
                    .add(size as i64);
            }
            (buf, res)
        })
    }

    async fn write_at(&self, buf: &[u8], offset: u64) -> Result<usize, Error> {
        let result = with_file!(self, StorageIoOperation::Write, |file_guard| {
            file_guard.with_std_file(|std_file| std_file.write_at(buf, offset))
        });
        if let Ok(size) = result {
            STORAGE_IO_SIZE
                .with_label_values(&["write", &self.tenant_id, &self.shard_id, &self.timeline_id])
                .add(size as i64);
        }
        result
    }
}

// Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
pub async fn read_exact_at_impl<B, F, Fut>(
    buf: B,
    mut offset: u64,
    mut read_at: F,
) -> (B, std::io::Result<()>)
where
    B: IoBufMut + Send,
    F: FnMut(tokio_epoll_uring::Slice<B>, u64) -> Fut,
    Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<B>, std::io::Result<usize>)>,
{
    let mut buf: tokio_epoll_uring::Slice<B> = buf.slice_full(); // includes all the uninitialized memory
    while buf.bytes_total() != 0 {
        let res;
        (buf, res) = read_at(buf, offset).await;
        match res {
            Ok(0) => break,
            Ok(n) => {
                buf = buf.slice(n..);
                offset += n as u64;
            }
            Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
            Err(e) => return (buf.into_inner(), Err(e)),
        }
    }
    // NB: don't use `buf.is_empty()` here; it is from the
    // `impl Deref for Slice { Target = [u8] }`; the &[u8]
    // returned by it only covers the initialized portion of `buf`.
    // Whereas we're interested in ensuring that we filled the entire
    // buffer that the user passed in.
    if buf.bytes_total() != 0 {
        (
            buf.into_inner(),
            Err(std::io::Error::new(
                std::io::ErrorKind::UnexpectedEof,
                "failed to fill whole buffer",
            )),
        )
    } else {
        assert_eq!(buf.len(), buf.bytes_total());
        (buf.into_inner(), Ok(()))
    }
}

#[cfg(test)]
mod test_read_exact_at_impl {

    use std::{collections::VecDeque, sync::Arc};

    use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};

    use super::read_exact_at_impl;

    struct Expectation {
        offset: u64,
        bytes_total: usize,
        result: std::io::Result<Vec<u8>>,
    }
    struct MockReadAt {
        expectations: VecDeque<Expectation>,
    }

    impl MockReadAt {
        async fn read_at(
            &mut self,
            mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
            offset: u64,
        ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
            let exp = self
                .expectations
                .pop_front()
                .expect("read_at called but we have no expectations left");
            assert_eq!(exp.offset, offset);
            assert_eq!(exp.bytes_total, buf.bytes_total());
            match exp.result {
                Ok(bytes) => {
                    assert!(bytes.len() <= buf.bytes_total());
                    buf.put_slice(&bytes);
                    (buf, Ok(bytes.len()))
                }
                Err(e) => (buf, Err(e)),
            }
        }
    }

    impl Drop for MockReadAt {
        fn drop(&mut self) {
            assert_eq!(self.expectations.len(), 0);
        }
    }

    #[tokio::test]
    async fn test_basic() {
        let buf = Vec::with_capacity(5);
        let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
            expectations: VecDeque::from(vec![Expectation {
                offset: 0,
                bytes_total: 5,
                result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
            }]),
        }));
        let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
            let mock_read_at = Arc::clone(&mock_read_at);
            async move { mock_read_at.lock().await.read_at(buf, offset).await }
        })
        .await;
        assert!(res.is_ok());
        assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
    }

    #[tokio::test]
    async fn test_empty_buf_issues_no_syscall() {
        let buf = Vec::new();
        let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
            expectations: VecDeque::new(),
        }));
        let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
            let mock_read_at = Arc::clone(&mock_read_at);
            async move { mock_read_at.lock().await.read_at(buf, offset).await }
        })
        .await;
        assert!(res.is_ok());
    }

    #[tokio::test]
    async fn test_two_read_at_calls_needed_until_buf_filled() {
        let buf = Vec::with_capacity(4);
        let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
            expectations: VecDeque::from(vec![
                Expectation {
                    offset: 0,
                    bytes_total: 4,
                    result: Ok(vec![b'a', b'b']),
                },
                Expectation {
                    offset: 2,
                    bytes_total: 2,
                    result: Ok(vec![b'c', b'd']),
                },
            ]),
        }));
        let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
            let mock_read_at = Arc::clone(&mock_read_at);
            async move { mock_read_at.lock().await.read_at(buf, offset).await }
        })
        .await;
        assert!(res.is_ok());
        assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
    }

    #[tokio::test]
    async fn test_eof_before_buffer_full() {
        let buf = Vec::with_capacity(3);
        let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
            expectations: VecDeque::from(vec![
                Expectation {
                    offset: 0,
                    bytes_total: 3,
                    result: Ok(vec![b'a']),
                },
                Expectation {
                    offset: 1,
                    bytes_total: 2,
                    result: Ok(vec![b'b']),
                },
                Expectation {
                    offset: 2,
                    bytes_total: 1,
                    result: Ok(vec![]),
                },
            ]),
        }));
        let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
            let mock_read_at = Arc::clone(&mock_read_at);
            async move { mock_read_at.lock().await.read_at(buf, offset).await }
        })
        .await;
        let Err(err) = res else {
            panic!("should return an error");
        };
        assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
        assert_eq!(format!("{err}"), "failed to fill whole buffer");
        // buffer contents on error are unspecified
    }
}

struct FileGuard {
    slot_guard: RwLockReadGuard<'static, SlotInner>,
}

impl AsRef<OwnedFd> for FileGuard {
    fn as_ref(&self) -> &OwnedFd {
        // This unwrap is safe because we only create `FileGuard`s
        // if we know that the file is Some.
        self.slot_guard.file.as_ref().unwrap()
    }
}

impl FileGuard {
    /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
    fn with_std_file<F, R>(&self, with: F) -> R
    where
        F: FnOnce(&File) -> R,
    {
        // SAFETY:
        // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
        // - `&` usage below: `self` is `&`, hence the Rust type system guarantees there is no `&mut`
        let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
        let res = with(&file);
        let _ = file.into_raw_fd();
        res
    }
    /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
    fn with_std_file_mut<F, R>(&mut self, with: F) -> R
    where
        F: FnOnce(&mut File) -> R,
    {
        // SAFETY:
        // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
        // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
        let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
        let res = with(&mut file);
        let _ = file.into_raw_fd();
        res
    }
}

impl tokio_epoll_uring::IoFd for FileGuard {
    unsafe fn as_fd(&self) -> RawFd {
        let owned_fd: &OwnedFd = self.as_ref();
        owned_fd.as_raw_fd()
    }
}

#[cfg(test)]
impl VirtualFile {
    pub(crate) async fn read_blk(
        &self,
        blknum: u32,
    ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
        use crate::page_cache::PAGE_SZ;
        let buf = vec![0; PAGE_SZ];
        let buf = self
            .read_exact_at(buf, blknum as u64 * (PAGE_SZ as u64))
            .await?;
        Ok(crate::tenant::block_io::BlockLease::Vec(buf))
    }

    async fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<(), Error> {
        let mut tmp = vec![0; 128];
        loop {
            let res;
            (tmp, res) = self.read_at(tmp, self.pos).await;
            match res {
                Ok(0) => return Ok(()),
                Ok(n) => {
                    self.pos += n as u64;
                    buf.extend_from_slice(&tmp[..n]);
                }
                Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
        }
    }
}

impl Drop for VirtualFile {
    /// If a VirtualFile is dropped, close the underlying file if it was open.
    fn drop(&mut self) {
        let handle = self.handle.get_mut();

        fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
            if slot_guard.tag == tag {
                slot.recently_used.store(false, Ordering::Relaxed);
                // there is also operation "close-by-replace" for closes done on eviction for
                // comparison.
                if let Some(fd) = slot_guard.file.take() {
                    STORAGE_IO_TIME_METRIC
                        .get(StorageIoOperation::Close)
                        .observe_closure_duration(|| drop(fd));
                }
            }
        }

        // We don't have async drop so we cannot directly await the lock here.
        // Instead, first do a best-effort attempt at closing the underlying
        // file descriptor by using `try_write`, and if that fails, spawn
        // a tokio task to do it asynchronously: we just want it to be
        // cleaned up eventually.
        // Most of the time, the `try_lock` should succeed though,
        // as we have `&mut self` access. In other words, if the slot
        // is still occupied by our file, there should be no access from
        // other I/O operations; the only other possible place to lock
        // the slot is the clock algorithm looking for free slots.
        let slot = &get_open_files().slots[handle.index];
        if let Ok(slot_guard) = slot.inner.try_write() {
            clean_slot(slot, slot_guard, handle.tag);
        } else {
            let tag = handle.tag;
            tokio::spawn(async move {
                let slot_guard = slot.inner.write().await;
                clean_slot(slot, slot_guard, tag);
            });
        };
    }
}

impl OpenFiles {
    fn new(num_slots: usize) -> OpenFiles {
        let mut slots = Box::new(Vec::with_capacity(num_slots));
        for _ in 0..num_slots {
            let slot = Slot {
                recently_used: AtomicBool::new(false),
                inner: RwLock::new(SlotInner { tag: 0, file: None }),
            };
            slots.push(slot);
        }

        OpenFiles {
            next: AtomicUsize::new(0),
            slots: Box::leak(slots),
        }
    }
}

///
/// Initialize the virtual file module. This must be called once at page
/// server startup.
///
#[cfg(not(test))]
pub fn init(num_slots: usize, engine: IoEngineKind) {
    if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
        panic!("virtual_file::init called twice");
    }
    io_engine::init(engine);
    crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
}

const TEST_MAX_FILE_DESCRIPTORS: usize = 10;

// Get a handle to the global slots array.
fn get_open_files() -> &'static OpenFiles {
    //
    // In unit tests, page server startup doesn't happen and no one calls
    // virtual_file::init(). Initialize it here, with a small array.
    //
    // This applies to the virtual file tests below, but all other unit
    // tests too, so the virtual file facility is always usable in
    // unit tests.
    //
    if cfg!(test) {
        OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
    } else {
        OPEN_FILES.get().expect("virtual_file::init not called yet")
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::seq::SliceRandom;
    use rand::thread_rng;
    use rand::Rng;
    use std::future::Future;
    use std::io::Write;
    use std::sync::Arc;

    enum MaybeVirtualFile {
        VirtualFile(VirtualFile),
        File(File),
    }

    impl From<VirtualFile> for MaybeVirtualFile {
        fn from(vf: VirtualFile) -> Self {
            MaybeVirtualFile::VirtualFile(vf)
        }
    }

    impl MaybeVirtualFile {
        async fn read_exact_at(&self, mut buf: Vec<u8>, offset: u64) -> Result<Vec<u8>, Error> {
            match self {
                MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(buf, offset).await,
                MaybeVirtualFile::File(file) => file.read_exact_at(&mut buf, offset).map(|()| buf),
            }
        }
        async fn write_all_at<B: BoundedBuf>(&self, buf: B, offset: u64) -> Result<(), Error> {
            match self {
                MaybeVirtualFile::VirtualFile(file) => {
                    let (_buf, res) = file.write_all_at(buf, offset).await;
                    res
                }
                MaybeVirtualFile::File(file) => {
                    let buf_len = buf.bytes_init();
                    if buf_len == 0 {
                        return Ok(());
                    }
                    file.write_all_at(&buf.slice(0..buf_len), offset)
                }
            }
        }
        async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
            match self {
                MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
                MaybeVirtualFile::File(file) => file.seek(pos),
            }
        }
        async fn write_all<B: BoundedBuf>(&mut self, buf: B) -> Result<(), Error> {
            match self {
                MaybeVirtualFile::VirtualFile(file) => {
                    let (_buf, res) = file.write_all(buf).await;
                    res.map(|_| ())
                }
                MaybeVirtualFile::File(file) => {
                    let buf_len = buf.bytes_init();
                    if buf_len == 0 {
                        return Ok(());
                    }
                    file.write_all(&buf.slice(0..buf_len))
                }
            }
        }

        // Helper function to slurp contents of a file, starting at the current position,
        // into a string
        async fn read_string(&mut self) -> Result<String, Error> {
            use std::io::Read;
            let mut buf = String::new();
            match self {
                MaybeVirtualFile::VirtualFile(file) => {
                    let mut buf = Vec::new();
                    file.read_to_end(&mut buf).await?;
                    return Ok(String::from_utf8(buf).unwrap());
                }
                MaybeVirtualFile::File(file) => {
                    file.read_to_string(&mut buf)?;
                }
            }
            Ok(buf)
        }

        // Helper function to slurp a portion of a file into a string
        async fn read_string_at(&mut self, pos: u64, len: usize) -> Result<String, Error> {
            let buf = vec![0; len];
            let buf = self.read_exact_at(buf, pos).await?;
            Ok(String::from_utf8(buf).unwrap())
        }
    }

    #[tokio::test]
    async fn test_virtual_files() -> anyhow::Result<()> {
        // The real work is done in the test_files() helper function. This
        // allows us to run the same set of tests against a native File, and
        // VirtualFile. We trust the native Files and wouldn't need to test them,
        // but this allows us to verify that the operations return the same
        // results with VirtualFiles as with native Files. (Except that with
        // native files, you will run out of file descriptors if the ulimit
        // is low enough.)
        test_files("virtual_files", |path, open_options| async move {
            let vf = VirtualFile::open_with_options(&path, &open_options).await?;
            Ok(MaybeVirtualFile::VirtualFile(vf))
        })
        .await
    }

    #[tokio::test]
    async fn test_physical_files() -> anyhow::Result<()> {
        test_files("physical_files", |path, open_options| async move {
            Ok(MaybeVirtualFile::File({
                let owned_fd = open_options.open(path.as_std_path()).await?;
                File::from(owned_fd)
            }))
        })
        .await
    }

    async fn test_files<OF, FT>(testname: &str, openfunc: OF) -> anyhow::Result<()>
    where
        OF: Fn(Utf8PathBuf, OpenOptions) -> FT,
        FT: Future<Output = Result<MaybeVirtualFile, std::io::Error>>,
    {
        let testdir = crate::config::PageServerConf::test_repo_dir(testname);
        std::fs::create_dir_all(&testdir)?;

        let path_a = testdir.join("file_a");
        let mut file_a = openfunc(
            path_a.clone(),
            OpenOptions::new()
                .write(true)
                .create(true)
                .truncate(true)
                .to_owned(),
        )
        .await?;
        file_a.write_all(b"foobar".to_vec()).await?;

        // cannot read from a file opened in write-only mode
        let _ = file_a.read_string().await.unwrap_err();

        // Close the file and re-open for reading
        let mut file_a = openfunc(path_a, OpenOptions::new().read(true).to_owned()).await?;

        // cannot write to a file opened in read-only mode
        let _ = file_a.write_all(b"bar".to_vec()).await.unwrap_err();

        // Try simple read
        assert_eq!("foobar", file_a.read_string().await?);

        // It's positioned at the EOF now.
        assert_eq!("", file_a.read_string().await?);

        // Test seeks.
        assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
        assert_eq!("oobar", file_a.read_string().await?);

        assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
        assert_eq!("ar", file_a.read_string().await?);

        assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
        assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
        assert_eq!("bar", file_a.read_string().await?);

        assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
        assert_eq!("oobar", file_a.read_string().await?);

        // Test erroneous seeks to before byte 0
        file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
        assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
        file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();

        // the erroneous seek should have left the position unchanged
        assert_eq!("oobar", file_a.read_string().await?);

        // Create another test file, and try FileExt functions on it.
        let path_b = testdir.join("file_b");
        let mut file_b = openfunc(
            path_b.clone(),
            OpenOptions::new()
                .read(true)
                .write(true)
                .create(true)
                .truncate(true)
                .to_owned(),
        )
        .await?;
        file_b.write_all_at(b"BAR".to_vec(), 3).await?;
        file_b.write_all_at(b"FOO".to_vec(), 0).await?;

        assert_eq!(file_b.read_string_at(2, 3).await?, "OBA");

        // Open a lot of files, enough to cause some evictions. (Or to be precise,
        // open the same file many times. The effect is the same.)
        //
        // leave file_a positioned at offset 1 before we start
        assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);

        let mut vfiles = Vec::new();
        for _ in 0..100 {
            let mut vfile =
                openfunc(path_b.clone(), OpenOptions::new().read(true).to_owned()).await?;
            assert_eq!("FOOBAR", vfile.read_string().await?);
            vfiles.push(vfile);
        }

        // make sure we opened enough files to definitely cause evictions.
        assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);

        // The underlying file descriptor for 'file_a' should be closed now. Try to read
        // from it again. We left the file positioned at offset 1 above.
        assert_eq!("oobar", file_a.read_string().await?);

        // Check that all the other FDs still work too. Use them in random order for
        // good measure.
        vfiles.as_mut_slice().shuffle(&mut thread_rng());
        for vfile in vfiles.iter_mut() {
            assert_eq!("OOBAR", vfile.read_string_at(1, 5).await?);
        }

        Ok(())
    }

    /// Test using VirtualFiles from many threads concurrently. This tests both using
    /// a lot of VirtualFiles concurrently, causing evictions, and also using the same
    /// VirtualFile from multiple threads concurrently.
    #[tokio::test]
    async fn test_vfile_concurrency() -> Result<(), Error> {
        const SIZE: usize = 8 * 1024;
        const VIRTUAL_FILES: usize = 100;
        const THREADS: usize = 100;
        const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];

        let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
        std::fs::create_dir_all(&testdir)?;

        // Create a test file.
        let test_file_path = testdir.join("concurrency_test_file");
        {
            let file = File::create(&test_file_path)?;
            file.write_all_at(&SAMPLE, 0)?;
        }

        // Open the file many times.
        let mut files = Vec::new();
        for _ in 0..VIRTUAL_FILES {
            let f = VirtualFile::open_with_options(&test_file_path, OpenOptions::new().read(true))
                .await?;
            files.push(f);
        }
        let files = Arc::new(files);

        // Launch many threads, and use the virtual files concurrently in random order.
        let rt = tokio::runtime::Builder::new_multi_thread()
            .worker_threads(THREADS)
            .thread_name("test_vfile_concurrency thread")
            .build()
            .unwrap();
        let mut hdls = Vec::new();
        for _threadno in 0..THREADS {
            let files = files.clone();
            let hdl = rt.spawn(async move {
                let mut buf = vec![0u8; SIZE];
                let mut rng = rand::rngs::OsRng;
                for _ in 1..1000 {
                    let f = &files[rng.gen_range(0..files.len())];
                    buf = f.read_exact_at(buf, 0).await.unwrap();
                    assert!(buf == SAMPLE);
                }
            });
            hdls.push(hdl);
        }
        for hdl in hdls {
            hdl.await?;
        }
        std::mem::forget(rt);

        Ok(())
    }

    #[tokio::test]
    async fn test_atomic_overwrite_basic() {
        let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
        std::fs::create_dir_all(&testdir).unwrap();

        let path = testdir.join("myfile");
        let tmp_path = testdir.join("myfile.tmp");

        VirtualFile::crashsafe_overwrite(&path, &tmp_path, b"foo".to_vec())
            .await
            .unwrap();
        let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
        let post = file.read_string().await.unwrap();
        assert_eq!(post, "foo");
        assert!(!tmp_path.exists());
        drop(file);

        VirtualFile::crashsafe_overwrite(&path, &tmp_path, b"bar".to_vec())
            .await
            .unwrap();
        let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
        let post = file.read_string().await.unwrap();
        assert_eq!(post, "bar");
        assert!(!tmp_path.exists());
        drop(file);
    }

    #[tokio::test]
    async fn test_atomic_overwrite_preexisting_tmp() {
        let testdir =
            crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
        std::fs::create_dir_all(&testdir).unwrap();

        let path = testdir.join("myfile");
        let tmp_path = testdir.join("myfile.tmp");

        std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
        assert!(tmp_path.exists());

        VirtualFile::crashsafe_overwrite(&path, &tmp_path, b"foo".to_vec())
            .await
            .unwrap();

        let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
        let post = file.read_string().await.unwrap();
        assert_eq!(post, "foo");
        assert!(!tmp_path.exists());
        drop(file);
    }
}