1 : //!
2 : //! VirtualFile is like a normal File, but it's not bound directly to
3 : //! a file descriptor. Instead, the file is opened when it's read from,
4 : //! and if too many files are open globally in the system, least-recently
5 : //! used ones are closed.
6 : //!
7 : //! To track which files have been recently used, we use the clock algorithm
8 : //! with a 'recently_used' flag on each slot.
9 : //!
10 : //! This is similar to PostgreSQL's virtual file descriptor facility in
11 : //! src/backend/storage/file/fd.c
12 : //!
13 : use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC};
14 :
15 : use crate::page_cache::PageWriteGuard;
16 : use crate::tenant::TENANTS_SEGMENT_NAME;
17 : use camino::{Utf8Path, Utf8PathBuf};
18 : use once_cell::sync::OnceCell;
19 : use pageserver_api::shard::TenantShardId;
20 : use std::fs::{self, File};
21 : use std::io::{Error, ErrorKind, Seek, SeekFrom};
22 : use tokio_epoll_uring::{BoundedBuf, IoBuf, IoBufMut, Slice};
23 :
24 : use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
25 : use std::os::unix::fs::FileExt;
26 : use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
27 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
28 : use tokio::time::Instant;
29 :
30 : pub use pageserver_api::models::virtual_file as api;
31 : pub(crate) mod io_engine;
32 : mod open_options;
33 : pub(crate) use io_engine::IoEngineKind;
34 : pub(crate) use open_options::*;
35 :
36 : ///
37 : /// A virtual file descriptor. You can use this just like std::fs::File, but internally
38 : /// the underlying file is closed if the system is low on file descriptors,
39 : /// and re-opened when it's accessed again.
40 : ///
41 : /// Like with std::fs::File, multiple threads can read/write the file concurrently,
42 : /// holding just a shared reference to the same VirtualFile, using the read_at() / write_at()
43 : /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
44 : /// require a mutable reference, because they modify the "current position".
45 : ///
46 : /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
47 : /// slot that 'handle' points to, if the underlying file is currently open. If it's not
48 : /// currently open, the 'handle' can still point to the slot where it was last kept. The
49 : /// 'tag' field is used to detect whether the handle still is valid or not.
50 : ///
51 0 : #[derive(Debug)]
52 : pub struct VirtualFile {
53 : /// Lazy handle to the global file descriptor cache. The slot that this points to
54 : /// might contain our File, or it may be empty, or it may contain a File that
55 : /// belongs to a different VirtualFile.
56 : handle: RwLock<SlotHandle>,
57 :
58 : /// Current file position
59 : pos: u64,
60 :
61 : /// File path and options to use to open it.
62 : ///
63 : /// Note: this only contains the options needed to re-open it. For example,
64 : /// if a new file is created, we only pass the create flag when it's initially
65 : /// opened, in the VirtualFile::create() function, and strip the flag before
66 : /// storing it here.
67 : pub path: Utf8PathBuf,
68 : open_options: OpenOptions,
69 :
70 : // These are strings because we only use them for metrics, and those expect strings.
71 : // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
72 : // strings.
73 : tenant_id: String,
74 : shard_id: String,
75 : timeline_id: String,
76 : }
77 :
78 296375 : #[derive(Debug, PartialEq, Clone, Copy)]
79 : struct SlotHandle {
80 : /// Index into OPEN_FILES.slots
81 : index: usize,
82 :
83 : /// Value of 'tag' in the slot. If the slot's tag doesn't match, then the slot has
84 : /// been recycled and no longer contains the FD for this virtual file.
85 : tag: u64,
86 : }
87 :
88 : /// OPEN_FILES is the global array that holds the physical file descriptors that
89 : /// are currently open. Each slot in the array is protected by a separate lock,
90 : /// so that different files can be accessed independently. The lock must be held
91 : /// in write mode to replace the slot with a different file, but read mode
92 : /// is enough to operate on the file, whether you're reading or writing to it.
93 : ///
94 : /// OPEN_FILES starts in uninitialized state, and it's initialized by
95 : /// the virtual_file::init() function. It must be called exactly once at page
96 : /// server startup.
97 : static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();
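// Startup sketch (illustrative; `num_slots` and `engine` are placeholders for
// values chosen by pageserver startup code, see `init()` at the bottom of this
// module):
//
//     virtual_file::init(num_slots, engine);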
98 :
99 : struct OpenFiles {
100 : slots: &'static [Slot],
101 :
102 : /// clock arm for the clock algorithm
103 : next: AtomicUsize,
104 : }
105 :
106 : struct Slot {
107 : inner: RwLock<SlotInner>,
108 :
109 : /// has this file been used since last clock sweep?
110 : recently_used: AtomicBool,
111 : }
112 :
113 : struct SlotInner {
114 : /// Counter that's incremented every time a different file is stored here.
115 : /// To avoid the ABA problem.
116 : tag: u64,
117 :
118 : /// the underlying file
119 : file: Option<OwnedFd>,
120 : }
121 :
122 : /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
123 : struct PageWriteGuardBuf {
124 : page: PageWriteGuard<'static>,
125 : init_up_to: usize,
126 : }
127 : // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
128 : // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
129 : unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
130 90082 : fn stable_ptr(&self) -> *const u8 {
131 90082 : self.page.as_ptr()
132 90082 : }
133 270246 : fn bytes_init(&self) -> usize {
134 270246 : self.init_up_to
135 270246 : }
136 90082 : fn bytes_total(&self) -> usize {
137 90082 : self.page.len()
138 90082 : }
139 : }
140 : // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
141 : // hence it's safe to hand out the `stable_mut_ptr()`.
142 : unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
143 90082 : fn stable_mut_ptr(&mut self) -> *mut u8 {
144 90082 : self.page.as_mut_ptr()
145 90082 : }
146 :
147 90082 : unsafe fn set_init(&mut self, pos: usize) {
148 90082 : assert!(pos <= self.page.len());
149 90082 : self.init_up_to = pos;
150 90082 : }
151 : }
152 :
153 : impl OpenFiles {
154 : /// Find a slot to use, evicting an existing file descriptor if needed.
155 : ///
156 : /// On return, we hold a lock on the slot, its 'tag' has been updated, and
157 : /// 'recently_used' has been set. It's all ready for reuse.
158 192705 : async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
159 192705 : //
160 192705 : // Run the clock algorithm to find a slot to replace.
161 192705 : //
162 192705 : let num_slots = self.slots.len();
163 192705 : let mut retries = 0;
164 : let mut slot;
165 : let mut slot_guard;
166 : let index;
167 2635924 : loop {
168 2635924 : let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
169 2635924 : slot = &self.slots[next];
170 2635924 :
171 2635924 : // If the recently_used flag on this slot is set, continue the clock
172 2635924 : // sweep. Otherwise try to use this slot. If we cannot acquire the
173 2635924 : // lock, also continue the clock sweep.
174 2635924 : //
175 2635924 : // We only continue in this manner for a while, though. If we loop
176 2635924 : // through the array twice without finding a victim, just pick the
177 2635924 : // next slot and wait until we can reuse it. This way, we avoid
178 2635924 : // spinning in the extreme case that all the slots are busy with an
179 2635924 : // I/O operation.
180 2635924 : if retries < num_slots * 2 {
181 2533369 : if !slot.recently_used.swap(false, Ordering::Release) {
182 2324668 : if let Ok(guard) = slot.inner.try_write() {
183 90150 : slot_guard = guard;
184 90150 : index = next;
185 90150 : break;
186 2234518 : }
187 208701 : }
188 2443219 : retries += 1;
189 : } else {
190 102555 : slot_guard = slot.inner.write().await;
191 102555 : index = next;
192 102555 : break;
193 : }
194 : }
195 :
196 : //
197 : // We now have the victim slot locked. If it was in use previously, close the
198 : // old file.
199 : //
200 192705 : if let Some(old_file) = slot_guard.file.take() {
201 191175 : // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
202 191175 : // distinguish the two.
203 191175 : STORAGE_IO_TIME_METRIC
204 191175 : .get(StorageIoOperation::CloseByReplace)
205 191175 : .observe_closure_duration(|| drop(old_file));
206 191175 : }
207 :
208 : // Prepare the slot for reuse and return it
209 192705 : slot_guard.tag += 1;
210 192705 : slot.recently_used.store(true, Ordering::Relaxed);
211 192705 : (
212 192705 : SlotHandle {
213 192705 : index,
214 192705 : tag: slot_guard.tag,
215 192705 : },
216 192705 : slot_guard,
217 192705 : )
218 192705 : }
219 : }
220 :
221 : /// Identify error types that should always terminate the process. Other
222 : /// error types may be eligible for retry.
223 0 : pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
224 0 : use nix::errno::Errno::*;
225 0 : match e.raw_os_error().map(nix::errno::from_i32) {
226 : Some(EIO) => {
227 : // Terminate on EIO because we no longer trust the device to store
228 : // data safely, or to uphold persistence guarantees on fsync.
229 0 : true
230 : }
231 : Some(EROFS) => {
232 : // Terminate on EROFS because a filesystem is usually remounted
233 : // readonly when it has experienced some critical issue, so the same
234 : // logic as EIO applies.
235 0 : true
236 : }
237 : Some(EACCES) => {
238 : // Terminate on EACCES because we should always have permissions
239 : // for our own data dir: if we don't, then we can't do our job and
240 : // need administrative intervention to fix permissions. Terminating
241 : // is the best way to make sure we stop cleanly rather than going
242 : // into infinite retry loops, and will make it clear to the outside
243 : // world that we need help.
244 0 : true
245 : }
246 : _ => {
247 : // Treat all other local file I/O errors as retryable. This includes:
248 : // - ENOSPC: we stay up and wait for eviction to free some space
249 : // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
250 : // - WriteZero, Interrupted: these are used internally by VirtualFile
251 0 : false
252 : }
253 : }
254 0 : }
255 :
256 : /// Call this when the local filesystem gives us an error with an external
257 : /// cause: this includes EIO, EROFS, and EACCES: all these indicate either
258 : /// bad storage or bad configuration, and we can't fix that from inside
259 : /// a running process.
260 0 : pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
261 0 : tracing::error!("Fatal I/O error: {e}: {context}");
262 0 : std::process::abort();
263 : }
264 :
265 : pub(crate) trait MaybeFatalIo<T> {
266 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
267 : fn fatal_err(self, context: &str) -> T;
268 : }
269 :
270 : impl<T> MaybeFatalIo<T> for std::io::Result<T> {
271 : /// Terminate the process if the result is an error of a fatal type, else pass it through
272 : ///
273 : /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc., but
274 : /// not on ENOSPC.
275 23 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
276 23 : if let Err(e) = &self {
277 0 : if is_fatal_io_error(e) {
278 0 : on_fatal_io_error(e, context);
279 0 : }
280 23 : }
281 23 : self
282 23 : }
283 :
284 : /// Terminate the process on any I/O error.
285 : ///
286 : /// This is appropriate for reads on files that we know exist: they should always work.
287 36 : fn fatal_err(self, context: &str) -> T {
288 36 : match self {
289 36 : Ok(v) => v,
290 0 : Err(e) => {
291 0 : on_fatal_io_error(&e, context);
292 : }
293 : }
294 36 : }
295 : }
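// Usage sketch (illustrative; the paths and messages are hypothetical):
//
//     // A write: die on EIO/EROFS/EACCES, but surface e.g. ENOSPC to the caller.
//     std::fs::write(&tmp_path, b"data").maybe_fatal_err("write tmp file")?;
//     // A read of a file we know exists: any error terminates the process.
//     let meta = std::fs::metadata(&path).fatal_err("stat known file");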
296 :
297 : /// Observe duration for the given storage I/O operation
298 : ///
299 : /// Unlike `observe_closure_duration`, this supports async,
300 : /// where "support" means that we measure wall clock time.
301 : macro_rules! observe_duration {
302 : ($op:expr, $($body:tt)*) => {{
303 : let instant = Instant::now();
304 : let result = $($body)*;
305 : let elapsed = instant.elapsed().as_secs_f64();
306 : STORAGE_IO_TIME_METRIC
307 : .get($op)
308 : .observe(elapsed);
309 : result
310 : }}
311 : }
312 :
313 : macro_rules! with_file {
314 : ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
315 : let $ident = $this.lock_file().await?;
316 : observe_duration!($op, $($body)*)
317 : }};
318 : ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
319 : let mut $ident = $this.lock_file().await?;
320 : observe_duration!($op, $($body)*)
321 : }};
322 : }
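// Expansion sketch (illustrative): a call like
// `with_file!(self, StorageIoOperation::Fsync, |file_guard| ...)` expands to
// roughly:
//
//     let file_guard = self.lock_file().await?;
//     let instant = Instant::now();
//     let result = /* body, using `file_guard` */;
//     STORAGE_IO_TIME_METRIC
//         .get(StorageIoOperation::Fsync)
//         .observe(instant.elapsed().as_secs_f64());
//     result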
323 :
324 : impl VirtualFile {
325 : /// Open a file in read-only mode. Like File::open.
326 480 : pub async fn open(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
327 480 : Self::open_with_options(path, OpenOptions::new().read(true)).await
328 480 : }
329 :
330 : /// Create a new file for writing. If the file exists, it will be truncated.
331 : /// Like File::create.
332 504 : pub async fn create(path: &Utf8Path) -> Result<VirtualFile, std::io::Error> {
333 504 : Self::open_with_options(
334 504 : path,
335 504 : OpenOptions::new().write(true).create(true).truncate(true),
336 504 : )
337 300 : .await
338 504 : }
339 :
340 : /// Open a file with given options.
341 : ///
342 : /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
343 : /// they will also be applied when the file is subsequently re-opened, not
344 : /// only the first time. Make sure that's sane!
345 2110 : pub async fn open_with_options(
346 2110 : path: &Utf8Path,
347 2110 : open_options: &OpenOptions,
348 2110 : ) -> Result<VirtualFile, std::io::Error> {
349 2110 : let path_str = path.to_string();
350 2110 : let parts = path_str.split('/').collect::<Vec<&str>>();
351 2110 : let (tenant_id, shard_id, timeline_id) =
352 2110 : if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
353 1650 : let tenant_shard_part = parts[parts.len() - 4];
354 1650 : let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
355 1650 : Ok(tenant_shard_id) => (
356 1650 : tenant_shard_id.tenant_id.to_string(),
357 1650 : format!("{}", tenant_shard_id.shard_slug()),
358 1650 : ),
359 : Err(_) => {
360 : // Malformed path: this ID is just for observability, so tolerate it
361 : // and pass through
362 0 : (tenant_shard_part.to_string(), "*".to_string())
363 : }
364 : };
365 1650 : (tenant_id, shard_id, parts[parts.len() - 2].to_string())
366 : } else {
367 460 : ("*".to_string(), "*".to_string(), "*".to_string())
368 : };
369 2110 : let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;
370 :
371 : // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
372 : // where our caller doesn't get to use the returned VirtualFile before its
373 : // slot gets re-used by someone else.
374 2110 : let file = observe_duration!(StorageIoOperation::Open, {
375 2110 : open_options.open(path.as_std_path()).await?
376 : });
377 :
378 : // Strip all options other than read and write.
379 : //
380 : // It would perhaps be nicer to check just for the read and write flags
381 : // explicitly, but OpenOptions doesn't contain any functions to read flags,
382 : // only to set them.
383 2110 : let mut reopen_options = open_options.clone();
384 2110 : reopen_options.create(false);
385 2110 : reopen_options.create_new(false);
386 2110 : reopen_options.truncate(false);
387 2110 :
388 2110 : let vfile = VirtualFile {
389 2110 : handle: RwLock::new(handle),
390 2110 : pos: 0,
391 2110 : path: path.to_path_buf(),
392 2110 : open_options: reopen_options,
393 2110 : tenant_id,
394 2110 : shard_id,
395 2110 : timeline_id,
396 2110 : };
397 2110 :
398 2110 : // TODO: Under pressure, it's likely the slot will get re-used and
399 2110 : // the underlying file closed before they get around to using it.
400 2110 : // => https://github.com/neondatabase/neon/issues/6065
401 2110 : slot_guard.file.replace(file);
402 2110 :
403 2110 : Ok(vfile)
404 2110 : }
405 :
406 : /// Async version of [`::utils::crashsafe::overwrite`].
407 : ///
408 : /// # NB:
409 : ///
410 : /// Doesn't actually use the [`VirtualFile`] file descriptor cache, but it
411 : /// did at an earlier time.
412 : /// And it will use this module's [`io_engine`] in the near future, so it stays here.
413 30 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
414 30 : final_path: Utf8PathBuf,
415 30 : tmp_path: Utf8PathBuf,
416 30 : content: B,
417 30 : ) -> std::io::Result<()> {
418 30 : // TODO: use tokio_epoll_uring if configured as `io_engine`.
419 30 : // See https://github.com/neondatabase/neon/issues/6663
420 30 :
421 30 : tokio::task::spawn_blocking(move || {
422 30 : let slice_storage;
423 30 : let content_len = content.bytes_init();
424 30 : let content = if content.bytes_init() > 0 {
425 30 : slice_storage = Some(content.slice(0..content_len));
426 30 : slice_storage.as_deref().expect("just set it to Some()")
427 : } else {
428 0 : &[]
429 : };
430 30 : utils::crashsafe::overwrite(&final_path, &tmp_path, content)
431 30 : })
432 29 : .await
433 29 : .expect("blocking task is never aborted")
434 29 : }
435 :
436 : /// Call File::sync_all() on the underlying File.
437 554 : pub async fn sync_all(&self) -> Result<(), Error> {
438 554 : with_file!(self, StorageIoOperation::Fsync, |file_guard| file_guard
439 554 : .with_std_file(|std_file| std_file.sync_all()))
440 554 : }
441 :
442 554 : pub async fn metadata(&self) -> Result<fs::Metadata, Error> {
443 554 : with_file!(self, StorageIoOperation::Metadata, |file_guard| file_guard
444 554 : .with_std_file(|std_file| std_file.metadata()))
445 554 : }
446 :
447 : /// Helper function internal to `VirtualFile` that looks up the underlying File,
448 : /// re-opening it and evicting some other File if necessary. The function passed
449 : /// in via `with_file!` is then run against the physical `File`.
450 : ///
451 : /// We do this via a macro because Rust doesn't support async closures that
452 : /// take parameters with lifetimes.
453 415477 : async fn lock_file(&self) -> Result<FileGuard, Error> {
454 415477 : let open_files = get_open_files();
455 :
456 190595 : let mut handle_guard = {
457 : // Read the cached slot handle, and see if the slot that it points to still
458 : // contains our File.
459 : //
460 : // We only need to hold the handle lock while we read the current handle. If
461 : // another thread closes the file and recycles the slot for a different file,
462 : // we will notice that the handle we read is no longer valid and retry.
463 415477 : let mut handle = *self.handle.read().await;
464 521257 : loop {
465 521257 : // Check if the slot contains our File
466 521257 : {
467 521257 : let slot = &open_files.slots[handle.index];
468 521257 : let slot_guard = slot.inner.read().await;
469 521257 : if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
470 : // Found a cached file descriptor.
471 224882 : slot.recently_used.store(true, Ordering::Relaxed);
472 224882 : return Ok(FileGuard { slot_guard });
473 296375 : }
474 : }
475 :
476 : // The slot didn't contain our File. We will have to open it ourselves,
477 : // but before that, grab a write lock on handle in the VirtualFile, so
478 : // that no other thread will try to concurrently open the same file.
479 296375 : let handle_guard = self.handle.write().await;
480 :
481 : // If another thread changed the handle while we were not holding the lock,
482 : // then the handle might now be valid again. Loop back to retry.
483 296375 : if *handle_guard != handle {
484 105780 : handle = *handle_guard;
485 105780 : continue;
486 190595 : }
487 190595 : break handle_guard;
488 : }
489 : };
490 :
491 : // We need to open the file ourselves. The handle in the VirtualFile is
492 : // now locked in write-mode. Find a free slot to put it in.
493 190595 : let (handle, mut slot_guard) = open_files.find_victim_slot().await;
494 :
495 : // Re-open the physical file.
496 : // NB: we use StorageIoOperation::OpenAfterReplace for this to distinguish this
497 : // case from StorageIoOperation::Open. This helps with identifying thrashing
498 : // of the virtual file descriptor cache.
499 190595 : let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
500 190595 : self.open_options.open(self.path.as_std_path()).await?
501 : });
502 :
503 : // Store the File in the slot and update the handle in the VirtualFile
504 : // to point to it.
505 190595 : slot_guard.file.replace(file);
506 190595 :
507 190595 : *handle_guard = handle;
508 190595 :
509 190595 : return Ok(FileGuard {
510 190595 : slot_guard: slot_guard.downgrade(),
511 190595 : });
512 415477 : }
513 :
514 0 : pub fn remove(self) {
515 0 : let path = self.path.clone();
516 0 : drop(self);
517 0 : std::fs::remove_file(path).expect("failed to remove the virtual file");
518 0 : }
519 :
520 1680 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
521 1680 : match pos {
522 1670 : SeekFrom::Start(offset) => {
523 1670 : self.pos = offset;
524 1670 : }
525 4 : SeekFrom::End(offset) => {
526 4 : self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
527 4 : .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
528 : }
529 6 : SeekFrom::Current(offset) => {
530 6 : let pos = self.pos as i128 + offset as i128;
531 6 : if pos < 0 {
532 2 : return Err(Error::new(
533 2 : ErrorKind::InvalidInput,
534 2 : "offset would be negative",
535 2 : ));
536 4 : }
537 4 : if pos > u64::MAX as i128 {
538 0 : return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
539 4 : }
540 4 : self.pos = pos as u64;
541 : }
542 : }
543 1676 : Ok(self.pos)
544 1680 : }
545 :
546 310284 : pub async fn read_exact_at<B>(&self, buf: B, offset: u64) -> Result<B, Error>
547 310284 : where
548 310284 : B: IoBufMut + Send,
549 310284 : {
550 310284 : let (buf, res) =
551 708071 : read_exact_at_impl(buf, offset, None, |buf, offset| self.read_at(buf, offset)).await;
552 310284 : res.map(|()| buf)
553 310284 : }
554 :
555 10 : pub async fn read_exact_at_n<B>(&self, buf: B, offset: u64, count: usize) -> Result<B, Error>
556 10 : where
557 10 : B: IoBufMut + Send,
558 10 : {
559 10 : let (buf, res) = read_exact_at_impl(buf, offset, Some(count), |buf, offset| {
560 10 : self.read_at(buf, offset)
561 10 : })
562 5 : .await;
563 10 : res.map(|()| buf)
564 10 : }
565 :
566 : /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
567 90082 : pub async fn read_exact_at_page(
568 90082 : &self,
569 90082 : page: PageWriteGuard<'static>,
570 90082 : offset: u64,
571 90082 : ) -> Result<PageWriteGuard<'static>, Error> {
572 90082 : let buf = PageWriteGuardBuf {
573 90082 : page,
574 90082 : init_up_to: 0,
575 90082 : };
576 90082 : let res = self.read_exact_at(buf, offset).await;
577 90082 : res.map(|PageWriteGuardBuf { page, .. }| page)
578 90082 : .map_err(|e| Error::new(ErrorKind::Other, e))
579 90082 : }
580 :
581 : // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
582 35260 : pub async fn write_all_at<B: BoundedBuf>(
583 35260 : &self,
584 35260 : buf: B,
585 35260 : mut offset: u64,
586 35260 : ) -> (B::Buf, Result<(), Error>) {
587 35260 : let buf_len = buf.bytes_init();
588 35260 : if buf_len == 0 {
589 0 : return (Slice::into_inner(buf.slice_full()), Ok(()));
590 35260 : }
591 35260 : let mut buf = buf.slice(0..buf_len);
592 70520 : while !buf.is_empty() {
593 : // TODO: push `buf` further down
594 35260 : match self.write_at(&buf, offset).await {
595 : Ok(0) => {
596 0 : return (
597 0 : Slice::into_inner(buf),
598 0 : Err(Error::new(
599 0 : std::io::ErrorKind::WriteZero,
600 0 : "failed to write whole buffer",
601 0 : )),
602 0 : );
603 : }
604 35260 : Ok(n) => {
605 35260 : buf = buf.slice(n..);
606 35260 : offset += n as u64;
607 35260 : }
608 0 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
609 0 : Err(e) => return (Slice::into_inner(buf), Err(e)),
610 : }
611 : }
612 35260 : (Slice::into_inner(buf), Ok(()))
613 35260 : }
614 :
615 : /// Writes `buf.slice(0..buf.bytes_init())`.
616 : /// Returns the IoBuf underlying the BoundedBuf `buf`.
617 : /// I.e., the returned value's `bytes_init()` differs from the `bytes_init()` of the `buf` that was passed in.
618 : /// It's brittle and easy to misuse, so we also return the written size in the Ok() variant.
619 68397 : pub async fn write_all<B: BoundedBuf>(&mut self, buf: B) -> (B::Buf, Result<usize, Error>) {
620 68397 : let nbytes = buf.bytes_init();
621 68397 : if nbytes == 0 {
622 30 : return (Slice::into_inner(buf.slice_full()), Ok(0));
623 68367 : }
624 68367 : let mut buf = buf.slice(0..nbytes);
625 136732 : while !buf.is_empty() {
626 : // TODO: push `Slice` further down
627 68367 : match self.write(&buf).await {
628 : Ok(0) => {
629 0 : return (
630 0 : Slice::into_inner(buf),
631 0 : Err(Error::new(
632 0 : std::io::ErrorKind::WriteZero,
633 0 : "failed to write whole buffer",
634 0 : )),
635 0 : );
636 : }
637 68365 : Ok(n) => {
638 68365 : buf = buf.slice(n..);
639 68365 : }
640 2 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
641 2 : Err(e) => return (Slice::into_inner(buf), Err(e)),
642 : }
643 : }
644 68365 : (Slice::into_inner(buf), Ok(nbytes))
645 68397 : }
646 :
647 68367 : async fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
648 68367 : let pos = self.pos;
649 68367 : let n = self.write_at(buf, pos).await?;
650 68365 : self.pos += n as u64;
651 68365 : Ok(n)
652 68367 : }
653 :
654 310738 : pub(crate) async fn read_at<B>(&self, buf: B, offset: u64) -> (B, Result<usize, Error>)
655 310738 : where
656 310738 : B: tokio_epoll_uring::BoundedBufMut + Send,
657 310738 : {
658 553009 : let file_guard = match self.lock_file().await {
659 310738 : Ok(file_guard) => file_guard,
660 0 : Err(e) => return (buf, Err(e)),
661 : };
662 :
663 310738 : observe_duration!(StorageIoOperation::Read, {
664 310738 : let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
665 310738 : if let Ok(size) = res {
666 310736 : STORAGE_IO_SIZE
667 310736 : .with_label_values(&[
668 310736 : "read",
669 310736 : &self.tenant_id,
670 310736 : &self.shard_id,
671 310736 : &self.timeline_id,
672 310736 : ])
673 310736 : .add(size as i64);
674 310736 : }
675 310738 : (buf, res)
676 : })
677 310738 : }
678 :
679 103627 : async fn write_at(&self, buf: &[u8], offset: u64) -> Result<usize, Error> {
680 103627 : let result = with_file!(self, StorageIoOperation::Write, |file_guard| {
681 103627 : file_guard.with_std_file(|std_file| std_file.write_at(buf, offset))
682 : });
683 103627 : if let Ok(size) = result {
684 103625 : STORAGE_IO_SIZE
685 103625 : .with_label_values(&["write", &self.tenant_id, &self.shard_id, &self.timeline_id])
686 103625 : .add(size as i64);
687 103625 : }
688 103627 : result
689 103627 : }
690 : }
691 :
692 : // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
693 310304 : pub async fn read_exact_at_impl<B, F, Fut>(
694 310304 : buf: B,
695 310304 : mut offset: u64,
696 310304 : count: Option<usize>,
697 310304 : mut read_at: F,
698 310304 : ) -> (B, std::io::Result<()>)
699 310304 : where
700 310304 : B: IoBufMut + Send,
701 310304 : F: FnMut(tokio_epoll_uring::Slice<B>, u64) -> Fut,
702 310304 : Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<B>, std::io::Result<usize>)>,
703 310304 : {
704 310304 : let mut buf: tokio_epoll_uring::Slice<B> = match count {
705 12 : Some(count) => {
706 12 : assert!(count <= buf.bytes_total());
707 12 : assert!(count > 0);
708 12 : buf.slice(..count) // may include uninitialized memory
709 : }
710 310292 : None => buf.slice_full(), // includes all the uninitialized memory
711 : };
712 :
713 620610 : while buf.bytes_total() != 0 {
714 : let res;
715 708076 : (buf, res) = read_at(buf, offset).await;
716 0 : match res {
717 2 : Ok(0) => break,
718 310306 : Ok(n) => {
719 310306 : buf = buf.slice(n..);
720 310306 : offset += n as u64;
721 310306 : }
722 0 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
723 0 : Err(e) => return (buf.into_inner(), Err(e)),
724 : }
725 : }
726 : // NB: don't use `buf.is_empty()` here; it is from the
727 : // `impl Deref for Slice { Target = [u8] }`; the &[u8]
728 : // returned by it only covers the initialized portion of `buf`.
729 : // Whereas we're interested in ensuring that we filled the entire
730 : // buffer that the user passed in.
731 310304 : if buf.bytes_total() != 0 {
732 2 : (
733 2 : buf.into_inner(),
734 2 : Err(std::io::Error::new(
735 2 : std::io::ErrorKind::UnexpectedEof,
736 2 : "failed to fill whole buffer",
737 2 : )),
738 2 : )
739 : } else {
740 310302 : assert_eq!(buf.len(), buf.bytes_total());
741 310302 : (buf.into_inner(), Ok(()))
742 : }
743 310304 : }
744 :
745 : #[cfg(test)]
746 : mod test_read_exact_at_impl {
747 :
748 : use std::{collections::VecDeque, sync::Arc};
749 :
750 : use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};
751 :
752 : use super::read_exact_at_impl;
753 :
754 : struct Expectation {
755 : offset: u64,
756 : bytes_total: usize,
757 : result: std::io::Result<Vec<u8>>,
758 : }
759 : struct MockReadAt {
760 : expectations: VecDeque<Expectation>,
761 : }
762 :
763 : impl MockReadAt {
764 14 : async fn read_at(
765 14 : &mut self,
766 14 : mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
767 14 : offset: u64,
768 14 : ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
769 14 : let exp = self
770 14 : .expectations
771 14 : .pop_front()
772 14 : .expect("read_at called but we have no expectations left");
773 14 : assert_eq!(exp.offset, offset);
774 14 : assert_eq!(exp.bytes_total, buf.bytes_total());
775 14 : match exp.result {
776 14 : Ok(bytes) => {
777 14 : assert!(bytes.len() <= buf.bytes_total());
778 14 : buf.put_slice(&bytes);
779 14 : (buf, Ok(bytes.len()))
780 : }
781 0 : Err(e) => (buf, Err(e)),
782 : }
783 14 : }
784 : }
785 :
786 : impl Drop for MockReadAt {
787 10 : fn drop(&mut self) {
788 10 : assert_eq!(self.expectations.len(), 0);
789 10 : }
790 : }
791 :
792 2 : #[tokio::test]
793 2 : async fn test_basic() {
794 2 : let buf = Vec::with_capacity(5);
795 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
796 2 : expectations: VecDeque::from(vec![Expectation {
797 2 : offset: 0,
798 2 : bytes_total: 5,
799 2 : result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
800 2 : }]),
801 2 : }));
802 2 : let (buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| {
803 2 : let mock_read_at = Arc::clone(&mock_read_at);
804 2 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
805 2 : })
806 2 : .await;
807 2 : assert!(res.is_ok());
808 2 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
809 2 : }
810 :
811 2 : #[tokio::test]
812 2 : async fn test_with_count() {
813 2 : let buf = Vec::with_capacity(5);
814 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
815 2 : expectations: VecDeque::from(vec![Expectation {
816 2 : offset: 0,
817 2 : bytes_total: 3,
818 2 : result: Ok(vec![b'a', b'b', b'c']),
819 2 : }]),
820 2 : }));
821 2 :
822 2 : let (buf, res) = read_exact_at_impl(buf, 0, Some(3), |buf, offset| {
823 2 : let mock_read_at = Arc::clone(&mock_read_at);
824 2 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
825 2 : })
826 2 : .await;
827 2 : assert!(res.is_ok());
828 2 : assert_eq!(buf, vec![b'a', b'b', b'c']);
829 2 : }
830 :
831 2 : #[tokio::test]
832 2 : async fn test_empty_buf_issues_no_syscall() {
833 2 : let buf = Vec::new();
834 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
835 2 : expectations: VecDeque::new(),
836 2 : }));
837 2 : let (_buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| {
838 0 : let mock_read_at = Arc::clone(&mock_read_at);
839 2 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
840 2 : })
841 2 : .await;
842 2 : assert!(res.is_ok());
843 2 : }
844 :
845 2 : #[tokio::test]
846 2 : async fn test_two_read_at_calls_needed_until_buf_filled() {
847 2 : let buf = Vec::with_capacity(4);
848 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
849 2 : expectations: VecDeque::from(vec![
850 2 : Expectation {
851 2 : offset: 0,
852 2 : bytes_total: 4,
853 2 : result: Ok(vec![b'a', b'b']),
854 2 : },
855 2 : Expectation {
856 2 : offset: 2,
857 2 : bytes_total: 2,
858 2 : result: Ok(vec![b'c', b'd']),
859 2 : },
860 2 : ]),
861 2 : }));
862 4 : let (buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| {
863 4 : let mock_read_at = Arc::clone(&mock_read_at);
864 4 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
865 4 : })
866 2 : .await;
867 2 : assert!(res.is_ok());
868 2 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
869 2 : }
870 :
871 2 : #[tokio::test]
872 2 : async fn test_eof_before_buffer_full() {
873 2 : let buf = Vec::with_capacity(3);
874 2 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
875 2 : expectations: VecDeque::from(vec![
876 2 : Expectation {
877 2 : offset: 0,
878 2 : bytes_total: 3,
879 2 : result: Ok(vec![b'a']),
880 2 : },
881 2 : Expectation {
882 2 : offset: 1,
883 2 : bytes_total: 2,
884 2 : result: Ok(vec![b'b']),
885 2 : },
886 2 : Expectation {
887 2 : offset: 2,
888 2 : bytes_total: 1,
889 2 : result: Ok(vec![]),
890 2 : },
891 2 : ]),
892 2 : }));
893 6 : let (_buf, res) = read_exact_at_impl(buf, 0, None, |buf, offset| {
894 6 : let mock_read_at = Arc::clone(&mock_read_at);
895 6 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
896 6 : })
897 2 : .await;
898 2 : let Err(err) = res else {
899 2 : panic!("should return an error");
900 2 : };
901 2 : assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
902 2 : assert_eq!(format!("{err}"), "failed to fill whole buffer");
903 2 : // buffer contents on error are unspecified
904 2 : }
905 : }
906 :
907 : struct FileGuard {
908 : slot_guard: RwLockReadGuard<'static, SlotInner>,
909 : }
910 :
911 : impl AsRef<OwnedFd> for FileGuard {
912 415477 : fn as_ref(&self) -> &OwnedFd {
913 415477 : // This unwrap is safe because we only create `FileGuard`s
914 415477 : // if we know that the file is Some.
915 415477 : self.slot_guard.file.as_ref().unwrap()
916 415477 : }
917 : }
918 :
919 : impl FileGuard {
920 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
921 260198 : fn with_std_file<F, R>(&self, with: F) -> R
922 260198 : where
923 260198 : F: FnOnce(&File) -> R,
924 260198 : {
925 260198 : // SAFETY:
926 260198 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
927 260198 : // - `&` usage below: `self` is `&`, hence the Rust type system guarantees there is no `&mut`
928 260198 : let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
929 260198 : let res = with(&file);
930 260198 : let _ = file.into_raw_fd();
931 260198 : res
932 260198 : }
933 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
934 4 : fn with_std_file_mut<F, R>(&mut self, with: F) -> R
935 4 : where
936 4 : F: FnOnce(&mut File) -> R,
937 4 : {
938 4 : // SAFETY:
939 4 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
940 4 : // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
941 4 : let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
942 4 : let res = with(&mut file);
943 4 : let _ = file.into_raw_fd();
944 4 : res
945 4 : }
946 : }
947 :
948 : impl tokio_epoll_uring::IoFd for FileGuard {
949 155275 : unsafe fn as_fd(&self) -> RawFd {
950 155275 : let owned_fd: &OwnedFd = self.as_ref();
951 155275 : owned_fd.as_raw_fd()
952 155275 : }
953 : }
954 :
955 : #[cfg(test)]
956 : impl VirtualFile {
957 20200 : pub(crate) async fn read_blk(
958 20200 : &self,
959 20200 : blknum: u32,
960 20200 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
961 20200 : use crate::page_cache::PAGE_SZ;
962 20200 : let buf = vec![0; PAGE_SZ];
963 20200 : let buf = self
964 20200 : .read_exact_at(buf, blknum as u64 * (PAGE_SZ as u64))
965 10256 : .await?;
966 20200 : Ok(crate::tenant::block_io::BlockLease::Vec(buf))
967 20200 : }
968 :
969 224 : async fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<(), Error> {
970 224 : let mut tmp = vec![0; 128];
971 : loop {
972 : let res;
973 444 : (tmp, res) = self.read_at(tmp, self.pos).await;
974 2 : match res {
975 222 : Ok(0) => return Ok(()),
976 220 : Ok(n) => {
977 220 : self.pos += n as u64;
978 220 : buf.extend_from_slice(&tmp[..n]);
979 220 : }
980 2 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
981 2 : Err(e) => return Err(e),
982 : }
983 : }
984 224 : }
985 : }
986 :
987 : impl Drop for VirtualFile {
988 : /// If a VirtualFile is dropped, close the underlying file if it was open.
989 1842 : fn drop(&mut self) {
990 1842 : let handle = self.handle.get_mut();
991 1842 :
992 1842 : fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
993 1842 : if slot_guard.tag == tag {
994 1842 : slot.recently_used.store(false, Ordering::Relaxed);
995 1842 : // there is also operation "close-by-replace" for closes done on eviction for
996 1842 : // comparison.
997 1842 : if let Some(fd) = slot_guard.file.take() {
998 1434 : STORAGE_IO_TIME_METRIC
999 1434 : .get(StorageIoOperation::Close)
1000 1434 : .observe_closure_duration(|| drop(fd));
1001 1434 : }
1002 1842 : }
1003 1842 : }
1004 1842 :
1005 1842 : // We don't have async drop so we cannot directly await the lock here.
1006 1842 : // Instead, first do a best-effort attempt at closing the underlying
1007 1842 : // file descriptor by using `try_write`, and if that fails, spawn
1008 1842 : // a tokio task to do it asynchronously: we just want it to be
1009 1842 : // cleaned up eventually.
1010 1842 : // Most of the time, the `try_lock` should succeed though,
1011 1842 : // as we have `&mut self` access. In other words, if the slot
1012 1842 : // is still occupied by our file, there should be no access from
1013 1842 : // other I/O operations; the only other possible place to lock
1014 1842 : // the slot is the clock algorithm looking for a victim slot.
1015 1842 : let slot = &get_open_files().slots[handle.index];
1016 1842 : if let Ok(slot_guard) = slot.inner.try_write() {
1017 1842 : clean_slot(slot, slot_guard, handle.tag);
1018 1842 : } else {
1019 0 : let tag = handle.tag;
1020 0 : tokio::spawn(async move {
1021 0 : let slot_guard = slot.inner.write().await;
1022 0 : clean_slot(slot, slot_guard, tag);
1023 0 : });
1024 0 : };
1025 1842 : }
1026 : }
1027 :
1028 : impl OpenFiles {
1029 100 : fn new(num_slots: usize) -> OpenFiles {
1030 100 : let mut slots = Box::new(Vec::with_capacity(num_slots));
1031 1000 : for _ in 0..num_slots {
1032 1000 : let slot = Slot {
1033 1000 : recently_used: AtomicBool::new(false),
1034 1000 : inner: RwLock::new(SlotInner { tag: 0, file: None }),
1035 1000 : };
1036 1000 : slots.push(slot);
1037 1000 : }
1038 :
1039 100 : OpenFiles {
1040 100 : next: AtomicUsize::new(0),
1041 100 : slots: Box::leak(slots),
1042 100 : }
1043 100 : }
1044 : }
1045 :
1046 : ///
1047 : /// Initialize the virtual file module. This must be called once at page
1048 : /// server startup.
1049 : ///
1050 : #[cfg(not(test))]
1051 0 : pub fn init(num_slots: usize, engine: IoEngineKind) {
1052 0 : if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
1053 0 : panic!("virtual_file::init called twice");
1054 0 : }
1055 0 : io_engine::init(engine);
1056 0 : crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
1057 0 : }
1058 :
1059 : const TEST_MAX_FILE_DESCRIPTORS: usize = 10;
1060 :
1061 : // Get a handle to the global slots array.
1062 419429 : fn get_open_files() -> &'static OpenFiles {
1063 419429 : //
1064 419429 : // In unit tests, page server startup doesn't happen and no one calls
1065 419429 : // virtual_file::init(). Initialize it here, with a small array.
1066 419429 : //
1067 419429 : // This applies to the virtual file tests below, but all other unit
1068 419429 : // tests too, so the virtual file facility is always usable in
1069 419429 : // unit tests.
1070 419429 : //
1071 419429 : if cfg!(test) {
1072 419429 : OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
1073 : } else {
1074 0 : OPEN_FILES.get().expect("virtual_file::init not called yet")
1075 : }
1076 419429 : }
1077 :
1078 : #[cfg(test)]
1079 : mod tests {
1080 : use super::*;
1081 : use rand::seq::SliceRandom;
1082 : use rand::thread_rng;
1083 : use rand::Rng;
1084 : use std::future::Future;
1085 : use std::io::Write;
1086 : use std::sync::Arc;
1087 :
1088 : enum MaybeVirtualFile {
1089 : VirtualFile(VirtualFile),
1090 : File(File),
1091 : }
1092 :
1093 : impl From<VirtualFile> for MaybeVirtualFile {
1094 6 : fn from(vf: VirtualFile) -> Self {
1095 6 : MaybeVirtualFile::VirtualFile(vf)
1096 6 : }
1097 : }
1098 :
1099 : impl MaybeVirtualFile {
1100 404 : async fn read_exact_at(&self, mut buf: Vec<u8>, offset: u64) -> Result<Vec<u8>, Error> {
1101 404 : match self {
1102 203 : MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(buf, offset).await,
1103 202 : MaybeVirtualFile::File(file) => file.read_exact_at(&mut buf, offset).map(|()| buf),
1104 : }
1105 404 : }
1106 8 : async fn write_all_at<B: BoundedBuf>(&self, buf: B, offset: u64) -> Result<(), Error> {
1107 8 : match self {
1108 4 : MaybeVirtualFile::VirtualFile(file) => {
1109 4 : let (_buf, res) = file.write_all_at(buf, offset).await;
1110 4 : res
1111 : }
1112 4 : MaybeVirtualFile::File(file) => {
1113 4 : let buf_len = buf.bytes_init();
1114 4 : if buf_len == 0 {
1115 0 : return Ok(());
1116 4 : }
1117 4 : file.write_all_at(&buf.slice(0..buf_len), offset)
1118 : }
1119 : }
1120 8 : }
1121 36 : async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
1122 36 : match self {
1123 18 : MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
1124 18 : MaybeVirtualFile::File(file) => file.seek(pos),
1125 : }
1126 36 : }
1127 8 : async fn write_all<B: BoundedBuf>(&mut self, buf: B) -> Result<(), Error> {
1128 8 : match self {
1129 4 : MaybeVirtualFile::VirtualFile(file) => {
1130 4 : let (_buf, res) = file.write_all(buf).await;
1131 4 : res.map(|_| ())
1132 : }
1133 4 : MaybeVirtualFile::File(file) => {
1134 4 : let buf_len = buf.bytes_init();
1135 4 : if buf_len == 0 {
1136 0 : return Ok(());
1137 4 : }
1138 4 : file.write_all(&buf.slice(0..buf_len))
1139 : }
1140 : }
1141 8 : }
1142 :
1143 : // Helper function to slurp contents of a file, starting at the current position,
1144 : // into a string
1145 442 : async fn read_string(&mut self) -> Result<String, Error> {
1146 442 : use std::io::Read;
1147 442 : let mut buf = String::new();
1148 442 : match self {
1149 224 : MaybeVirtualFile::VirtualFile(file) => {
1150 224 : let mut buf = Vec::new();
1151 226 : file.read_to_end(&mut buf).await?;
1152 222 : return Ok(String::from_utf8(buf).unwrap());
1153 : }
1154 218 : MaybeVirtualFile::File(file) => {
1155 218 : file.read_to_string(&mut buf)?;
1156 : }
1157 : }
1158 216 : Ok(buf)
1159 442 : }
1160 :
1161 : // Helper function to slurp a portion of a file into a string
1162 404 : async fn read_string_at(&mut self, pos: u64, len: usize) -> Result<String, Error> {
1163 404 : let buf = vec![0; len];
1164 404 : let buf = self.read_exact_at(buf, pos).await?;
1165 404 : Ok(String::from_utf8(buf).unwrap())
1166 404 : }
1167 : }
1168 :
1169 2 : #[tokio::test]
1170 2 : async fn test_virtual_files() -> anyhow::Result<()> {
1171 2 : // The real work is done in the test_files() helper function. This
1172 2 : // allows us to run the same set of tests against a native File, and
1173 2 : // VirtualFile. We trust the native Files and wouldn't need to test them,
1174 2 : // but this allows us to verify that the operations return the same
1175 2 : // results with VirtualFiles as with native Files. (Except that with
1176 2 : // native files, you will run out of file descriptors if the ulimit
1177 2 : // is low enough.)
1178 206 : test_files("virtual_files", |path, open_options| async move {
1179 206 : let vf = VirtualFile::open_with_options(&path, &open_options).await?;
1180 206 : Ok(MaybeVirtualFile::VirtualFile(vf))
1181 412 : })
1182 527 : .await
1183 2 : }
1184 :
1185 2 : #[tokio::test]
1186 2 : async fn test_physical_files() -> anyhow::Result<()> {
1187 206 : test_files("physical_files", |path, open_options| async move {
1188 206 : Ok(MaybeVirtualFile::File({
1189 206 : let owned_fd = open_options.open(path.as_std_path()).await?;
1190 206 : File::from(owned_fd)
1191 2 : }))
1192 412 : })
1193 104 : .await
1194 2 : }
1195 :
1196 4 : async fn test_files<OF, FT>(testname: &str, openfunc: OF) -> anyhow::Result<()>
1197 4 : where
1198 4 : OF: Fn(Utf8PathBuf, OpenOptions) -> FT,
1199 4 : FT: Future<Output = Result<MaybeVirtualFile, std::io::Error>>,
1200 4 : {
1201 4 : let testdir = crate::config::PageServerConf::test_repo_dir(testname);
1202 4 : std::fs::create_dir_all(&testdir)?;
1203 :
1204 4 : let path_a = testdir.join("file_a");
1205 4 : let mut file_a = openfunc(
1206 4 : path_a.clone(),
1207 4 : OpenOptions::new()
1208 4 : .write(true)
1209 4 : .create(true)
1210 4 : .truncate(true)
1211 4 : .to_owned(),
1212 4 : )
1213 4 : .await?;
1214 4 : file_a.write_all(b"foobar".to_vec()).await?;
1215 :
1216 : // cannot read from a file opened in write-only mode
1217 4 : let _ = file_a.read_string().await.unwrap_err();
1218 :
1219 : // Close the file and re-open for reading
1220 4 : let mut file_a = openfunc(path_a, OpenOptions::new().read(true).to_owned()).await?;
1221 :
1222 : // cannot write to a file opened in read-only mode
1223 4 : let _ = file_a.write_all(b"bar".to_vec()).await.unwrap_err();
1224 4 :
1225 4 : // Try simple read
1226 4 : assert_eq!("foobar", file_a.read_string().await?);
1227 :
1228 : // It's positioned at the EOF now.
1229 4 : assert_eq!("", file_a.read_string().await?);
1230 :
1231 : // Test seeks.
1232 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1233 4 : assert_eq!("oobar", file_a.read_string().await?);
1234 :
1235 4 : assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
1236 4 : assert_eq!("ar", file_a.read_string().await?);
1237 :
1238 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1239 4 : assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
1240 4 : assert_eq!("bar", file_a.read_string().await?);
1241 :
1242 4 : assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
1243 4 : assert_eq!("oobar", file_a.read_string().await?);
1244 :
1245 : // Test erroneous seeks to before byte 0
1246 4 : file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
1247 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1248 4 : file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();
1249 4 :
1250 4 : // the erroneous seek should have left the position unchanged
1251 4 : assert_eq!("oobar", file_a.read_string().await?);
1252 :
1253 : // Create another test file, and try FileExt functions on it.
1254 4 : let path_b = testdir.join("file_b");
1255 4 : let mut file_b = openfunc(
1256 4 : path_b.clone(),
1257 4 : OpenOptions::new()
1258 4 : .read(true)
1259 4 : .write(true)
1260 4 : .create(true)
1261 4 : .truncate(true)
1262 4 : .to_owned(),
1263 4 : )
1264 2 : .await?;
1265 4 : file_b.write_all_at(b"BAR".to_vec(), 3).await?;
1266 4 : file_b.write_all_at(b"FOO".to_vec(), 0).await?;
1267 :
1268 4 : assert_eq!(file_b.read_string_at(2, 3).await?, "OBA");
1269 :
1270 : // Open a lot of files, enough to cause some evictions. (Or to be precise,
1271 : // open the same file many times. The effect is the same.)
1272 : //
1273 : // leave file_a positioned at offset 1 before we start
1274 4 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1275 :
1276 4 : let mut vfiles = Vec::new();
1277 404 : for _ in 0..100 {
1278 400 : let mut vfile =
1279 400 : openfunc(path_b.clone(), OpenOptions::new().read(true).to_owned()).await?;
1280 400 : assert_eq!("FOOBAR", vfile.read_string().await?);
1281 400 : vfiles.push(vfile);
1282 : }
1283 :
1284 : // make sure we opened enough files to definitely cause evictions.
1285 4 : assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);
1286 :
1287 : // The underlying file descriptor for 'file_a' should be closed now. Try to read
1288 : // from it again. We left the file positioned at offset 1 above.
1289 4 : assert_eq!("oobar", file_a.read_string().await?);
1290 :
1291 : // Check that all the other FDs still work too. Use them in random order for
1292 : // good measure.
1293 4 : vfiles.as_mut_slice().shuffle(&mut thread_rng());
1294 400 : for vfile in vfiles.iter_mut() {
1295 400 : assert_eq!("OOBAR", vfile.read_string_at(1, 5).await?);
1296 : }
1297 :
1298 4 : Ok(())
1299 4 : }
1300 :
1301 : /// Test using VirtualFiles from many threads concurrently. This tests both using
1302 : /// a lot of VirtualFiles concurrently, causing evictions, and also using the same
1303 : /// VirtualFile from multiple threads concurrently.
1304 2 : #[tokio::test]
1305 2 : async fn test_vfile_concurrency() -> Result<(), Error> {
1306 2 : const SIZE: usize = 8 * 1024;
1307 2 : const VIRTUAL_FILES: usize = 100;
1308 2 : const THREADS: usize = 100;
1309 2 : const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];
1310 2 :
1311 2 : let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
1312 2 : std::fs::create_dir_all(&testdir)?;
1313 2 :
1314 2 : // Create a test file.
1315 2 : let test_file_path = testdir.join("concurrency_test_file");
1316 2 : {
1317 2 : let file = File::create(&test_file_path)?;
1318 2 : file.write_all_at(&SAMPLE, 0)?;
1319 2 : }
1320 2 :
1321 2 : // Open the file many times.
1322 2 : let mut files = Vec::new();
1323 202 : for _ in 0..VIRTUAL_FILES {
1324 200 : let f = VirtualFile::open_with_options(&test_file_path, OpenOptions::new().read(true))
1325 101 : .await?;
1326 200 : files.push(f);
1327 2 : }
1328 2 : let files = Arc::new(files);
1329 2 :
1330 2 : // Launch many threads, and use the virtual files concurrently in random order.
1331 2 : let rt = tokio::runtime::Builder::new_multi_thread()
1332 2 : .worker_threads(THREADS)
1333 2 : .thread_name("test_vfile_concurrency thread")
1334 2 : .build()
1335 2 : .unwrap();
1336 2 : let mut hdls = Vec::new();
1337 202 : for _threadno in 0..THREADS {
1338 200 : let files = files.clone();
1339 200 : let hdl = rt.spawn(async move {
1340 200 : let mut buf = vec![0u8; SIZE];
1341 200 : let mut rng = rand::rngs::OsRng;
1342 200000 : for _ in 1..1000 {
1343 199800 : let f = &files[rng.gen_range(0..files.len())];
1344 646306 : buf = f.read_exact_at(buf, 0).await.unwrap();
1345 199800 : assert!(buf == SAMPLE);
1346 2 : }
1347 200 : });
1348 200 : hdls.push(hdl);
1349 200 : }
1350 202 : for hdl in hdls {
1351 200 : hdl.await?;
1352 2 : }
1353 2 : std::mem::forget(rt);
1354 2 :
1355 2 : Ok(())
1356 2 : }
1357 :
1358 2 : #[tokio::test]
1359 2 : async fn test_atomic_overwrite_basic() {
1360 2 : let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
1361 2 : std::fs::create_dir_all(&testdir).unwrap();
1362 2 :
1363 2 : let path = testdir.join("myfile");
1364 2 : let tmp_path = testdir.join("myfile.tmp");
1365 2 :
1366 2 : VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1367 2 : .await
1368 2 : .unwrap();
1369 2 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
1370 2 : let post = file.read_string().await.unwrap();
1371 2 : assert_eq!(post, "foo");
1372 2 : assert!(!tmp_path.exists());
1373 2 : drop(file);
1374 2 :
1375 2 : VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"bar".to_vec())
1376 2 : .await
1377 2 : .unwrap();
1378 2 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
1379 2 : let post = file.read_string().await.unwrap();
1380 2 : assert_eq!(post, "bar");
1381 2 : assert!(!tmp_path.exists());
1382 2 : drop(file);
1383 2 : }
1384 :
1385 2 : #[tokio::test]
1386 2 : async fn test_atomic_overwrite_preexisting_tmp() {
1387 2 : let testdir =
1388 2 : crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
1389 2 : std::fs::create_dir_all(&testdir).unwrap();
1390 2 :
1391 2 : let path = testdir.join("myfile");
1392 2 : let tmp_path = testdir.join("myfile.tmp");
1393 2 :
1394 2 : std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
1395 2 : assert!(tmp_path.exists());
1396 2 :
1397 2 : VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1398 2 : .await
1399 2 : .unwrap();
1400 2 :
1401 2 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path).await.unwrap());
1402 2 : let post = file.read_string().await.unwrap();
1403 2 : assert_eq!(post, "foo");
1404 2 : assert!(!tmp_path.exists());
1405 2 : drop(file);
1406 2 : }
1407 : }