Line data Source code
1 : //! VirtualFile is like a normal File, but it's not bound directly to
2 : //! a file descriptor.
3 : //!
4 : //! Instead, the file is opened when it's read from,
5 : //! and if too many files are open globally in the system, least-recently
6 : //! used ones are closed.
7 : //!
8 : //! To track which files have been recently used, we use the clock algorithm
9 : //! with a 'recently_used' flag on each slot.
10 : //!
11 : //! This is similar to PostgreSQL's virtual file descriptor facility in
12 : //! src/backend/storage/file/fd.c
13 : //!
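//! A minimal usage sketch (hypothetical path; `buf` is an `IoBufferMut`; assumes
//! `virtual_file::init()` has already run at startup):
//!
//! ```ignore
//! let file = VirtualFile::open("some/tenant/file", &ctx).await?;
//! let slice = file.read_exact_at(buf.slice_full(), 0, &ctx).await?;
//! ```
//!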
14 : use std::fs::File;
15 : use std::io::{Error, ErrorKind, Seek, SeekFrom};
16 : use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
17 : #[cfg(target_os = "linux")]
18 : use std::os::unix::fs::OpenOptionsExt;
19 : use std::sync::atomic::{AtomicBool, AtomicU8, AtomicUsize, Ordering};
20 :
21 : use camino::{Utf8Path, Utf8PathBuf};
22 : use once_cell::sync::OnceCell;
23 : use owned_buffers_io::aligned_buffer::buffer::AlignedBuffer;
24 : use owned_buffers_io::aligned_buffer::{AlignedBufferMut, AlignedSlice, ConstAlign};
25 : use owned_buffers_io::io_buf_aligned::{IoBufAligned, IoBufAlignedMut};
26 : use owned_buffers_io::io_buf_ext::FullSlice;
27 : use pageserver_api::config::defaults::DEFAULT_IO_BUFFER_ALIGNMENT;
28 : pub use pageserver_api::models::virtual_file as api;
29 : use pageserver_api::shard::TenantShardId;
30 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
31 : use tokio::time::Instant;
32 : use tokio_epoll_uring::{BoundedBuf, IoBuf, IoBufMut, Slice};
33 :
34 : use crate::context::RequestContext;
35 : use crate::metrics::{STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC, StorageIoOperation};
36 : use crate::page_cache::{PAGE_SZ, PageWriteGuard};
37 : use crate::tenant::TENANTS_SEGMENT_NAME;
38 : pub(crate) mod io_engine;
39 : pub use io_engine::{
40 : FeatureTestResult as IoEngineFeatureTestResult, feature_test as io_engine_feature_test,
41 : io_engine_for_bench,
42 : };
43 : mod metadata;
44 : mod open_options;
45 : pub(crate) use api::IoMode;
46 : pub(crate) use io_engine::IoEngineKind;
47 : pub(crate) use metadata::Metadata;
48 : pub(crate) use open_options::*;
49 :
50 : use self::owned_buffers_io::write::OwnedAsyncWriter;
51 :
52 : pub(crate) mod owned_buffers_io {
53 : //! Abstractions for IO with owned buffers.
54 : //!
55 : //! Not actually tied to [`crate::virtual_file`] specifically, but, it's the primary
56 : //! reason we need this abstraction.
57 : //!
58 : //! Over time, this could move into the `tokio-epoll-uring` crate, maybe `uring-common`,
59 : //! but for the time being we're proving out the primitives in the neon.git repo
60 : //! for faster iteration.
61 :
62 : pub(crate) mod aligned_buffer;
63 : pub(crate) mod io_buf_aligned;
64 : pub(crate) mod io_buf_ext;
65 : pub(crate) mod slice;
66 : pub(crate) mod write;
67 : }
68 :
69 : #[derive(Debug)]
70 : pub struct VirtualFile {
71 : inner: VirtualFileInner,
72 : _mode: IoMode,
73 : }
74 :
75 : impl VirtualFile {
76 : /// Open a file in read-only mode. Like File::open.
77 2112 : pub async fn open<P: AsRef<Utf8Path>>(
78 2112 : path: P,
79 2112 : ctx: &RequestContext,
80 2112 : ) -> Result<Self, std::io::Error> {
81 2112 : let inner = VirtualFileInner::open(path, ctx).await?;
82 2112 : Ok(VirtualFile {
83 2112 : inner,
84 2112 : _mode: IoMode::Buffered,
85 2112 : })
86 2112 : }
87 :
88 : /// Open a file in read-only mode. Like File::open.
89 : ///
90 : /// `O_DIRECT` will be enabled based on `virtual_file_io_mode`.
91 2464 : pub async fn open_v2<P: AsRef<Utf8Path>>(
92 2464 : path: P,
93 2464 : ctx: &RequestContext,
94 2464 : ) -> Result<Self, std::io::Error> {
95 2464 : Self::open_with_options_v2(path.as_ref(), OpenOptions::new().read(true), ctx).await
96 2464 : }
97 :
98 3030 : pub async fn create<P: AsRef<Utf8Path>>(
99 3030 : path: P,
100 3030 : ctx: &RequestContext,
101 3030 : ) -> Result<Self, std::io::Error> {
102 3030 : let inner = VirtualFileInner::create(path, ctx).await?;
103 3030 : Ok(VirtualFile {
104 3030 : inner,
105 3030 : _mode: IoMode::Buffered,
106 3030 : })
107 3030 : }
108 :
109 0 : pub async fn create_v2<P: AsRef<Utf8Path>>(
110 0 : path: P,
111 0 : ctx: &RequestContext,
112 0 : ) -> Result<Self, std::io::Error> {
113 0 : VirtualFile::open_with_options_v2(
114 0 : path.as_ref(),
115 0 : OpenOptions::new().write(true).create(true).truncate(true),
116 0 : ctx,
117 0 : )
118 0 : .await
119 0 : }
120 :
121 1616 : pub async fn open_with_options<P: AsRef<Utf8Path>>(
122 1616 : path: P,
123 1616 : open_options: &OpenOptions,
124 1616 : ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
125 1616 : ) -> Result<Self, std::io::Error> {
126 1616 : let inner = VirtualFileInner::open_with_options(path, open_options, ctx).await?;
127 1616 : Ok(VirtualFile {
128 1616 : inner,
129 1616 : _mode: IoMode::Buffered,
130 1616 : })
131 1616 : }
132 :
133 5096 : pub async fn open_with_options_v2<P: AsRef<Utf8Path>>(
134 5096 : path: P,
135 5096 : open_options: &OpenOptions,
136 5096 : ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
137 5096 : ) -> Result<Self, std::io::Error> {
138 5096 : let file = match get_io_mode() {
139 : IoMode::Buffered => {
140 5096 : let inner = VirtualFileInner::open_with_options(path, open_options, ctx).await?;
141 5096 : VirtualFile {
142 5096 : inner,
143 5096 : _mode: IoMode::Buffered,
144 5096 : }
145 : }
146 : #[cfg(target_os = "linux")]
147 : IoMode::Direct => {
148 0 : let inner = VirtualFileInner::open_with_options(
149 0 : path,
150 0 : open_options.clone().custom_flags(nix::libc::O_DIRECT),
151 0 : ctx,
152 0 : )
153 0 : .await?;
154 0 : VirtualFile {
155 0 : inner,
156 0 : _mode: IoMode::Direct,
157 0 : }
158 : }
159 : };
160 5096 : Ok(file)
161 5096 : }
162 :
163 2388 : pub fn path(&self) -> &Utf8Path {
164 2388 : self.inner.path.as_path()
165 2388 : }
166 :
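/// Crash-safe overwrite of `final_path`, staged via `tmp_path`; see
/// [`VirtualFileInner::crashsafe_overwrite`] for details.
///
/// A hedged usage sketch (paths and contents are illustrative):
///
/// ```ignore
/// VirtualFile::crashsafe_overwrite(
///     Utf8PathBuf::from("some_file"),
///     Utf8PathBuf::from("some_file.tmp"),
///     b"new contents".to_vec(),
/// )
/// .await?;
/// ```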
167 44 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
168 44 : final_path: Utf8PathBuf,
169 44 : tmp_path: Utf8PathBuf,
170 44 : content: B,
171 44 : ) -> std::io::Result<()> {
172 44 : VirtualFileInner::crashsafe_overwrite(final_path, tmp_path, content).await
173 44 : }
174 :
175 5650 : pub async fn sync_all(&self) -> Result<(), Error> {
176 5650 : if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 {
177 0 : return Ok(());
178 5650 : }
179 5650 : self.inner.sync_all().await
180 5650 : }
181 :
182 0 : pub async fn sync_data(&self) -> Result<(), Error> {
183 0 : if SYNC_MODE.load(std::sync::atomic::Ordering::Relaxed) == SyncMode::UnsafeNoSync as u8 {
184 0 : return Ok(());
185 0 : }
186 0 : self.inner.sync_data().await
187 0 : }
188 :
189 3620 : pub async fn metadata(&self) -> Result<Metadata, Error> {
190 3620 : self.inner.metadata().await
191 3620 : }
192 :
193 524 : pub fn remove(self) {
194 524 : self.inner.remove();
195 524 : }
196 :
197 11408 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
198 11408 : self.inner.seek(pos).await
199 11408 : }
200 :
201 461652 : pub async fn read_exact_at<Buf>(
202 461652 : &self,
203 461652 : slice: Slice<Buf>,
204 461652 : offset: u64,
205 461652 : ctx: &RequestContext,
206 461652 : ) -> Result<Slice<Buf>, Error>
207 461652 : where
208 461652 : Buf: IoBufAlignedMut + Send,
209 461652 : {
210 461652 : self.inner.read_exact_at(slice, offset, ctx).await
211 461652 : }
212 :
213 63578 : pub async fn read_exact_at_page(
214 63578 : &self,
215 63578 : page: PageWriteGuard<'static>,
216 63578 : offset: u64,
217 63578 : ctx: &RequestContext,
218 63578 : ) -> Result<PageWriteGuard<'static>, Error> {
219 63578 : self.inner.read_exact_at_page(page, offset, ctx).await
220 63578 : }
221 :
222 13222 : pub async fn write_all_at<Buf: IoBufAligned + Send>(
223 13222 : &self,
224 13222 : buf: FullSlice<Buf>,
225 13222 : offset: u64,
226 13222 : ctx: &RequestContext,
227 13222 : ) -> (FullSlice<Buf>, Result<(), Error>) {
228 13222 : self.inner.write_all_at(buf, offset, ctx).await
229 13222 : }
230 :
231 2261038 : pub async fn write_all<Buf: IoBuf + Send>(
232 2261038 : &mut self,
233 2261038 : buf: FullSlice<Buf>,
234 2261038 : ctx: &RequestContext,
235 2261038 : ) -> (FullSlice<Buf>, Result<usize, Error>) {
236 2261038 : self.inner.write_all(buf, ctx).await
237 2261038 : }
238 :
239 448 : async fn read_to_end(&mut self, buf: &mut Vec<u8>, ctx: &RequestContext) -> Result<(), Error> {
240 448 : self.inner.read_to_end(buf, ctx).await
241 448 : }
242 :
243 0 : pub(crate) async fn read_to_string(
244 0 : &mut self,
245 0 : ctx: &RequestContext,
246 0 : ) -> Result<String, anyhow::Error> {
247 0 : let mut buf = Vec::new();
248 0 : self.read_to_end(&mut buf, ctx).await?;
249 0 : Ok(String::from_utf8(buf)?)
250 0 : }
251 : }
252 :
253 : /// Indicates whether to enable fsync, fdatasync, or O_SYNC/O_DSYNC when writing
254 : /// files. Switching this off is unsafe and only used for testing on machines
255 : /// with slow drives.
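///
/// The `u8` discriminant round-trips via the `TryFrom` impl below, e.g.:
///
/// ```ignore
/// assert!(matches!(SyncMode::try_from(SyncMode::Sync as u8), Ok(SyncMode::Sync)));
/// assert!(matches!(SyncMode::try_from(42u8), Err(42)));
/// ```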
256 : #[repr(u8)]
257 : pub enum SyncMode {
258 : Sync,
259 : UnsafeNoSync,
260 : }
261 :
262 : impl TryFrom<u8> for SyncMode {
263 : type Error = u8;
264 :
265 0 : fn try_from(value: u8) -> Result<Self, Self::Error> {
266 0 : Ok(match value {
267 0 : v if v == (SyncMode::Sync as u8) => SyncMode::Sync,
268 0 : v if v == (SyncMode::UnsafeNoSync as u8) => SyncMode::UnsafeNoSync,
269 0 : x => return Err(x),
270 : })
271 0 : }
272 : }
273 :
274 : ///
275 : /// A virtual file descriptor. You can use this just like std::fs::File, but internally
276 : /// the underlying file is closed if the system is low on file descriptors,
277 : /// and re-opened when it's accessed again.
278 : ///
279 : /// Like with std::fs::File, multiple threads can read/write the file concurrently,
280 : /// holding just a shared reference the same VirtualFile, using the read_at() / write_at()
281 : /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
282 : /// require a mutable reference, because they modify the "current position".
283 : ///
284 : /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
285 : /// slot that 'handle points to, if the underlying file is currently open. If it's not
286 : /// currently open, the 'handle' can still point to the slot where it was last kept. The
287 : /// 'tag' field is used to detect whether the handle still is valid or not.
288 : ///
289 : #[derive(Debug)]
290 : pub struct VirtualFileInner {
291 : /// Lazy handle to the global file descriptor cache. The slot that this points to
292 : /// might contain our File, or it may be empty, or it may contain a File that
293 : /// belongs to a different VirtualFile.
294 : handle: RwLock<SlotHandle>,
295 :
296 : /// Current file position
297 : pos: u64,
298 :
299 : /// File path and options to use to open it.
300 : ///
301 : /// Note: this only contains the options needed to re-open it. For example,
302 : /// if a new file is created, we only pass the create flag when it's initially
303 : /// opened, in the VirtualFile::create() function, and strip the flag before
304 : /// storing it here.
305 : pub path: Utf8PathBuf,
306 : open_options: OpenOptions,
307 :
308 : // These are strings because we only use them for metrics, and those expect strings.
309 : // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
310 : // strings.
311 : tenant_id: String,
312 : shard_id: String,
313 : timeline_id: String,
314 : }
315 :
316 : #[derive(Debug, PartialEq, Clone, Copy)]
317 : struct SlotHandle {
318 : /// Index into OPEN_FILES.slots
319 : index: usize,
320 :
321 : /// Value of 'tag' in the slot. If the slot's tag doesn't match, then the slot has
322 : /// been recycled and no longer contains the FD for this virtual file.
323 : tag: u64,
324 : }
325 :
326 : /// OPEN_FILES is the global array that holds the physical file descriptors that
327 : /// are currently open. Each slot in the array is protected by a separate lock,
328 : /// so that different files can be accessed independently. The lock must be held
329 : /// in write mode to replace the slot with a different file, but a read mode
330 : /// is enough to operate on the file, whether you're reading or writing to it.
331 : ///
332 : /// OPEN_FILES starts in uninitialized state, and it's initialized by
333 : /// the virtual_file::init() function. It must be called exactly once at page
334 : /// server startup.
335 : static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();
336 :
337 : struct OpenFiles {
338 : slots: &'static [Slot],
339 :
340 : /// clock arm for the clock algorithm
341 : next: AtomicUsize,
342 : }
343 :
344 : struct Slot {
345 : inner: RwLock<SlotInner>,
346 :
347 : /// has this file been used since last clock sweep?
348 : recently_used: AtomicBool,
349 : }
350 :
351 : struct SlotInner {
352 : /// Counter that's incremented every time a different file is stored here.
353 : /// To avoid the ABA problem.
354 : tag: u64,
355 :
356 : /// the underlying file
357 : file: Option<OwnedFd>,
358 : }
359 :
360 : /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
361 : struct PageWriteGuardBuf {
362 : page: PageWriteGuard<'static>,
363 : }
364 : // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
365 : // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
366 : // Page cache pages are zero-initialized, so, wrt uninitialized memory we're good.
367 : // (Page cache tracks separately whether the contents are valid, see `PageWriteGuard::mark_valid`.)
368 : unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
369 254410 : fn stable_ptr(&self) -> *const u8 {
370 254410 : self.page.as_ptr()
371 254410 : }
372 476982 : fn bytes_init(&self) -> usize {
373 476982 : self.page.len()
374 476982 : }
375 190734 : fn bytes_total(&self) -> usize {
376 190734 : self.page.len()
377 190734 : }
378 : }
379 : // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
380 : // hence it's safe to hand out the `stable_mut_ptr()`.
381 : unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
382 95416 : fn stable_mut_ptr(&mut self) -> *mut u8 {
383 95416 : self.page.as_mut_ptr()
384 95416 : }
385 :
386 63578 : unsafe fn set_init(&mut self, pos: usize) {
387 63578 : // There shouldn't really be any reason to call this API since bytes_init() == bytes_total().
388 63578 : assert!(pos <= self.page.len());
389 63578 : }
390 : }
391 :
392 : impl OpenFiles {
393 : /// Find a slot to use, evicting an existing file descriptor if needed.
394 : ///
395 : /// On return, we hold a lock on the slot, and its 'tag' has been updated
396 : /// recently_used has been set. It's all ready for reuse.
397 387547 : async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
398 387547 : //
399 387547 : // Run the clock algorithm to find a slot to replace.
400 387547 : //
401 387547 : let num_slots = self.slots.len();
402 387547 : let mut retries = 0;
403 : let mut slot;
404 : let mut slot_guard;
405 : let index;
406 : loop {
407 4794069 : let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
408 4794069 : slot = &self.slots[next];
409 4794069 :
410 4794069 : // If the recently_used flag on this slot is set, continue the clock
411 4794069 : // sweep. Otherwise try to use this slot. If we cannot acquire the
412 4794069 : // lock, also continue the clock sweep.
413 4794069 : //
414 4794069 : // We only continue in this manner for a while, though. If we loop
415 4794069 : // through the array twice without finding a victim, just pick the
416 4794069 : // next slot and wait until we can reuse it. This way, we avoid
417 4794069 : // spinning in the extreme case that all the slots are busy with an
418 4794069 : // I/O operation.
419 4794069 : if retries < num_slots * 2 {
420 4608995 : if !slot.recently_used.swap(false, Ordering::Release) {
421 4194077 : if let Ok(guard) = slot.inner.try_write() {
422 202473 : slot_guard = guard;
423 202473 : index = next;
424 202473 : break;
425 3991604 : }
426 414918 : }
427 4406522 : retries += 1;
428 : } else {
429 185074 : slot_guard = slot.inner.write().await;
430 185074 : index = next;
431 185074 : break;
432 : }
433 : }
434 :
435 : //
436 : // We now have the victim slot locked. If it was in use previously, close the
437 : // old file.
438 : //
439 387547 : if let Some(old_file) = slot_guard.file.take() {
440 377533 : // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
441 377533 : // distinguish the two.
442 377533 : STORAGE_IO_TIME_METRIC
443 377533 : .get(StorageIoOperation::CloseByReplace)
444 377533 : .observe_closure_duration(|| drop(old_file));
445 377533 : }
446 :
447 : // Prepare the slot for reuse and return it
448 387547 : slot_guard.tag += 1;
449 387547 : slot.recently_used.store(true, Ordering::Relaxed);
450 387547 : (
451 387547 : SlotHandle {
452 387547 : index,
453 387547 : tag: slot_guard.tag,
454 387547 : },
455 387547 : slot_guard,
456 387547 : )
457 387547 : }
458 : }
459 :
460 : /// Identify error types that should always terminate the process. Other
461 : /// error types may be eligible for retry.
462 8 : pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
463 : use nix::errno::Errno::*;
464 8 : match e.raw_os_error().map(nix::errno::from_i32) {
465 : Some(EIO) => {
466 : // Terminate on EIO because we no longer trust the device to store
467 : // data safely, or to uphold persistence guarantees on fsync.
468 0 : true
469 : }
470 : Some(EROFS) => {
471 : // Terminate on EROFS because a filesystem is usually remounted
472 : // readonly when it has experienced some critical issue, so the same
473 : // logic as EIO applies.
474 0 : true
475 : }
476 : Some(EACCES) => {
477 : // Terminate on EACCES because we should always have permissions
478 : // for our own data dir: if we don't, then we can't do our job and
479 : // need administrative intervention to fix permissions. Terminating
480 : // is the best way to make sure we stop cleanly rather than going
481 : // into infinite retry loops, and will make it clear to the outside
482 : // world that we need help.
483 0 : true
484 : }
485 : _ => {
486 : // Treat all other local file I/O errors as retryable. This includes:
487 : // - ENOSPC: we stay up and wait for eviction to free some space
488 : // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
489 : // - WriteZero, Interrupted: these are used internally by VirtualFile
490 8 : false
491 : }
492 : }
493 8 : }
494 :
495 : /// Call this when the local filesystem gives us an error with an external
496 : /// cause: this includes EIO, EROFS, and EACCESS: all these indicate either
497 : /// bad storage or bad configuration, and we can't fix that from inside
498 : /// a running process.
499 0 : pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
500 0 : let backtrace = std::backtrace::Backtrace::force_capture();
501 0 : tracing::error!("Fatal I/O error: {e}: {context}\n{backtrace}");
502 0 : std::process::abort();
503 : }
504 :
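/// Extension trait on `std::io::Result` that routes fatal errors to
/// [`on_fatal_io_error`]. A hedged usage sketch (path and message are
/// illustrative):
///
/// ```ignore
/// // Aborts on EIO/EROFS/EACCES; ENOSPC and friends pass through to the caller.
/// std::fs::write(&path, b"...").maybe_fatal_err("writing marker file")?;
/// ```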
505 : pub(crate) trait MaybeFatalIo<T> {
506 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
507 : fn fatal_err(self, context: &str) -> T;
508 : }
509 :
510 : impl<T> MaybeFatalIo<T> for std::io::Result<T> {
511 : /// Terminate the process if the result is an error of a fatal type, else pass it through
512 : ///
513 : /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc, but
514 : /// not on ENOSPC.
515 4218692 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
516 4218692 : if let Err(e) = &self {
517 8 : if is_fatal_io_error(e) {
518 0 : on_fatal_io_error(e, context);
519 8 : }
520 4218684 : }
521 4218692 : self
522 4218692 : }
523 :
524 : /// Terminate the process on any I/O error.
525 : ///
526 : /// This is appropriate for reads on files that we know exist: they should always work.
527 4116 : fn fatal_err(self, context: &str) -> T {
528 4116 : match self {
529 4116 : Ok(v) => v,
530 0 : Err(e) => {
531 0 : on_fatal_io_error(&e, context);
532 : }
533 : }
534 4116 : }
535 : }
536 :
537 : /// Observe duration for the given storage I/O operation
538 : ///
539 : /// Unlike `observe_closure_duration`, this supports async,
540 : /// where "support" means that we measure wall clock time.
541 : macro_rules! observe_duration {
542 : ($op:expr, $($body:tt)*) => {{
543 : let instant = Instant::now();
544 : let result = $($body)*;
545 : let elapsed = instant.elapsed().as_secs_f64();
546 : STORAGE_IO_TIME_METRIC
547 : .get($op)
548 : .observe(elapsed);
549 : result
550 : }}
551 : }
552 :
553 : macro_rules! with_file {
554 : ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
555 : let $ident = $this.lock_file().await?;
556 : observe_duration!($op, $($body)*)
557 : }};
558 : ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
559 : let mut $ident = $this.lock_file().await?;
560 : observe_duration!($op, $($body)*)
561 : }};
562 : }
563 :
564 : impl VirtualFileInner {
565 : /// Open a file in read-only mode. Like File::open.
566 2112 : pub async fn open<P: AsRef<Utf8Path>>(
567 2112 : path: P,
568 2112 : ctx: &RequestContext,
569 2112 : ) -> Result<VirtualFileInner, std::io::Error> {
570 2112 : Self::open_with_options(path.as_ref(), OpenOptions::new().read(true), ctx).await
571 2112 : }
572 :
573 : /// Create a new file for writing. If the file exists, it will be truncated.
574 : /// Like File::create.
575 3030 : pub async fn create<P: AsRef<Utf8Path>>(
576 3030 : path: P,
577 3030 : ctx: &RequestContext,
578 3030 : ) -> Result<VirtualFileInner, std::io::Error> {
579 3030 : Self::open_with_options(
580 3030 : path.as_ref(),
581 3030 : OpenOptions::new().write(true).create(true).truncate(true),
582 3030 : ctx,
583 3030 : )
584 3030 : .await
585 3030 : }
586 :
587 : /// Open a file with given options.
588 : ///
589 : /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
590 : /// they will also be applied when the file is subsequently re-opened, not only
591 : /// the first time. Make sure that's sane!
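///
/// A hedged sketch of that caveat (flags are illustrative): a custom flag such
/// as `O_DIRECT` set here is re-applied on every implicit re-open after eviction.
///
/// ```ignore
/// let opts = OpenOptions::new().read(true).custom_flags(nix::libc::O_DIRECT);
/// let file = VirtualFileInner::open_with_options(path, &opts, &ctx).await?;
/// ```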
592 12254 : pub async fn open_with_options<P: AsRef<Utf8Path>>(
593 12254 : path: P,
594 12254 : open_options: &OpenOptions,
595 12254 : _ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
596 12254 : ) -> Result<VirtualFileInner, std::io::Error> {
597 12254 : let path_ref = path.as_ref();
598 12254 : let path_str = path_ref.to_string();
599 12254 : let parts = path_str.split('/').collect::<Vec<&str>>();
600 12254 : let (tenant_id, shard_id, timeline_id) =
601 12254 : if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
602 9250 : let tenant_shard_part = parts[parts.len() - 4];
603 9250 : let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
604 9250 : Ok(tenant_shard_id) => (
605 9250 : tenant_shard_id.tenant_id.to_string(),
606 9250 : format!("{}", tenant_shard_id.shard_slug()),
607 9250 : ),
608 : Err(_) => {
609 : // Malformed path: this ID is just for observability, so tolerate it
610 : // and pass through
611 0 : (tenant_shard_part.to_string(), "*".to_string())
612 : }
613 : };
614 9250 : (tenant_id, shard_id, parts[parts.len() - 2].to_string())
615 : } else {
616 3004 : ("*".to_string(), "*".to_string(), "*".to_string())
617 : };
618 12254 : let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;
619 :
620 : // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
621 : // where our caller doesn't get to use the returned VirtualFile before its
622 : // slot gets re-used by someone else.
623 12254 : let file = observe_duration!(StorageIoOperation::Open, {
624 12254 : open_options.open(path_ref.as_std_path()).await?
625 : });
626 :
627 : // Strip all options other than read and write.
628 : //
629 : // It would perhaps be nicer to check just for the read and write flags
630 : // explicitly, but OpenOptions doesn't contain any functions to read flags,
631 : // only to set them.
632 12254 : let mut reopen_options = open_options.clone();
633 12254 : reopen_options.create(false);
634 12254 : reopen_options.create_new(false);
635 12254 : reopen_options.truncate(false);
636 12254 :
637 12254 : let vfile = VirtualFileInner {
638 12254 : handle: RwLock::new(handle),
639 12254 : pos: 0,
640 12254 : path: path_ref.to_path_buf(),
641 12254 : open_options: reopen_options,
642 12254 : tenant_id,
643 12254 : shard_id,
644 12254 : timeline_id,
645 12254 : };
646 12254 :
647 12254 : // TODO: Under pressure, it's likely the slot will get re-used and
648 12254 : // the underlying file closed before they get around to using it.
649 12254 : // => https://github.com/neondatabase/neon/issues/6065
650 12254 : slot_guard.file.replace(file);
651 12254 :
652 12254 : Ok(vfile)
653 12254 : }
654 :
655 : /// Async version of [`::utils::crashsafe::overwrite`].
656 : ///
657 : /// # NB:
658 : ///
659 : /// Doesn't actually use the [`VirtualFile`] file descriptor cache, but
660 : /// it did at an earlier time.
661 : /// And it will use this module's [`io_engine`] in the near future, so it's left here.
662 56 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
663 56 : final_path: Utf8PathBuf,
664 56 : tmp_path: Utf8PathBuf,
665 56 : content: B,
666 56 : ) -> std::io::Result<()> {
667 56 : // TODO: use tokio_epoll_uring if configured as `io_engine`.
668 56 : // See https://github.com/neondatabase/neon/issues/6663
669 56 :
670 56 : tokio::task::spawn_blocking(move || {
671 56 : let slice_storage;
672 56 : let content_len = content.bytes_init();
673 56 : let content = if content.bytes_init() > 0 {
674 56 : slice_storage = Some(content.slice(0..content_len));
675 56 : slice_storage.as_deref().expect("just set it to Some()")
676 : } else {
677 0 : &[]
678 : };
679 56 : utils::crashsafe::overwrite(&final_path, &tmp_path, content)
680 56 : .maybe_fatal_err("crashsafe_overwrite")
681 56 : })
682 56 : .await
683 56 : .expect("blocking task is never aborted")
684 56 : }
685 :
686 : /// Call File::sync_all() on the underlying File.
687 5650 : pub async fn sync_all(&self) -> Result<(), Error> {
688 5650 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
689 5650 : let (_file_guard, res) = io_engine::get().sync_all(file_guard).await;
690 5650 : res.maybe_fatal_err("sync_all")
691 : })
692 5650 : }
693 :
694 : /// Call File::sync_data() on the underlying File.
695 0 : pub async fn sync_data(&self) -> Result<(), Error> {
696 0 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
697 0 : let (_file_guard, res) = io_engine::get().sync_data(file_guard).await;
698 0 : res.maybe_fatal_err("sync_data")
699 : })
700 0 : }
701 :
702 3620 : pub async fn metadata(&self) -> Result<Metadata, Error> {
703 3620 : with_file!(self, StorageIoOperation::Metadata, |file_guard| {
704 3620 : let (_file_guard, res) = io_engine::get().metadata(file_guard).await;
705 3620 : res
706 : })
707 3620 : }
708 :
709 : /// Helper function internal to `VirtualFile` that looks up the underlying File,
710 : /// opens it and evicts some other File if necessary. The passed parameter is
711 : /// assumed to be a function available for the physical `File`.
712 : ///
713 : /// We are doing it via a macro as Rust doesn't support async closures that
714 : /// take parameters with lifetimes.
715 3251012 : async fn lock_file(&self) -> Result<FileGuard, Error> {
716 3251012 : let open_files = get_open_files();
717 :
718 375293 : let mut handle_guard = {
719 : // Read the cached slot handle, and see if the slot that it points to still
720 : // contains our File.
721 : //
722 : // We only need to hold the handle lock while we read the current handle. If
723 : // another thread closes the file and recycles the slot for a different file,
724 : // we will notice that the handle we read is no longer valid and retry.
725 3251012 : let mut handle = *self.handle.read().await;
726 : loop {
727 : // Check if the slot contains our File
728 : {
729 3447468 : let slot = &open_files.slots[handle.index];
730 3447468 : let slot_guard = slot.inner.read().await;
731 3447468 : if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
732 : // Found a cached file descriptor.
733 2875719 : slot.recently_used.store(true, Ordering::Relaxed);
734 2875719 : return Ok(FileGuard { slot_guard });
735 571749 : }
736 : }
737 :
738 : // The slot didn't contain our File. We will have to open it ourselves,
739 : // but before that, grab a write lock on handle in the VirtualFile, so
740 : // that no other thread will try to concurrently open the same file.
741 571749 : let handle_guard = self.handle.write().await;
742 :
743 : // If another thread changed the handle while we were not holding the lock,
744 : // then the handle might now be valid again. Loop back to retry.
745 571749 : if *handle_guard != handle {
746 196456 : handle = *handle_guard;
747 196456 : continue;
748 375293 : }
749 375293 : break handle_guard;
750 : }
751 : };
752 :
753 : // We need to open the file ourselves. The handle in the VirtualFile is
754 : // now locked in write-mode. Find a free slot to put it in.
755 375293 : let (handle, mut slot_guard) = open_files.find_victim_slot().await;
756 :
757 : // Re-open the physical file.
758 : // NB: we use StorageIoOperation::OpenAfterReplace for this to distinguish this
759 : // case from StorageIoOperation::Open. This helps with identifying thrashing
760 : // of the virtual file descriptor cache.
761 375293 : let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
762 375293 : self.open_options.open(self.path.as_std_path()).await?
763 : });
764 :
765 : // Store the File in the slot and update the handle in the VirtualFile
766 : // to point to it.
767 375293 : slot_guard.file.replace(file);
768 375293 :
769 375293 : *handle_guard = handle;
770 375293 :
771 375293 : Ok(FileGuard {
772 375293 : slot_guard: slot_guard.downgrade(),
773 375293 : })
774 3251012 : }
775 :
776 524 : pub fn remove(self) {
777 524 : let path = self.path.clone();
778 524 : drop(self);
779 524 : std::fs::remove_file(path).expect("failed to remove the virtual file");
780 524 : }
781 :
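/// Change the current file position, following `std::io::Seek` semantics.
///
/// A hedged sketch (offsets are illustrative):
///
/// ```ignore
/// let size = file.seek(SeekFrom::End(0)).await?;  // position = file size
/// file.seek(SeekFrom::Start(0)).await?;           // back to the start
/// ```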
782 11408 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
783 11408 : match pos {
784 11388 : SeekFrom::Start(offset) => {
785 11388 : self.pos = offset;
786 11388 : }
787 8 : SeekFrom::End(offset) => {
788 8 : self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
789 8 : .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
790 : }
791 12 : SeekFrom::Current(offset) => {
792 12 : let pos = self.pos as i128 + offset as i128;
793 12 : if pos < 0 {
794 4 : return Err(Error::new(
795 4 : ErrorKind::InvalidInput,
796 4 : "offset would be negative",
797 4 : ));
798 8 : }
799 8 : if pos > u64::MAX as i128 {
800 0 : return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
801 8 : }
802 8 : self.pos = pos as u64;
803 : }
804 : }
805 11400 : Ok(self.pos)
806 11408 : }
807 :
808 : /// Read the file contents in range `offset..(offset + slice.bytes_total())` into `slice[0..slice.bytes_total()]`.
809 : ///
810 : /// The returned `Slice<Buf>` is equivalent to the input `slice`, i.e., it's the same view into the same buffer.
811 966662 : pub async fn read_exact_at<Buf>(
812 966662 : &self,
813 966662 : slice: Slice<Buf>,
814 966662 : offset: u64,
815 966662 : ctx: &RequestContext,
816 966662 : ) -> Result<Slice<Buf>, Error>
817 966662 : where
818 966662 : Buf: IoBufAlignedMut + Send,
819 966662 : {
820 966662 : let assert_we_return_original_bounds = if cfg!(debug_assertions) {
821 966662 : Some((slice.stable_ptr() as usize, slice.bytes_total()))
822 : } else {
823 0 : None
824 : };
825 :
826 966662 : let original_bounds = slice.bounds();
827 966662 : let (buf, res) =
828 966662 : read_exact_at_impl(slice, offset, |buf, offset| self.read_at(buf, offset, ctx)).await;
829 966662 : let res = res.map(|_| buf.slice(original_bounds));
830 :
831 966662 : if let Some(original_bounds) = assert_we_return_original_bounds {
832 966662 : if let Ok(slice) = &res {
833 966662 : let returned_bounds = (slice.stable_ptr() as usize, slice.bytes_total());
834 966662 : assert_eq!(original_bounds, returned_bounds);
835 0 : }
836 0 : }
837 :
838 966662 : res
839 966662 : }
840 :
841 : /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
842 63578 : pub async fn read_exact_at_page(
843 63578 : &self,
844 63578 : page: PageWriteGuard<'static>,
845 63578 : offset: u64,
846 63578 : ctx: &RequestContext,
847 63578 : ) -> Result<PageWriteGuard<'static>, Error> {
848 63578 : let buf = PageWriteGuardBuf { page }.slice_full();
849 63578 : debug_assert_eq!(buf.bytes_total(), PAGE_SZ);
850 63578 : self.read_exact_at(buf, offset, ctx)
851 63578 : .await
852 63578 : .map(|slice| slice.into_inner().page)
853 63578 : }
854 :
855 : // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
856 13222 : pub async fn write_all_at<Buf: IoBuf + Send>(
857 13222 : &self,
858 13222 : buf: FullSlice<Buf>,
859 13222 : mut offset: u64,
860 13222 : ctx: &RequestContext,
861 13222 : ) -> (FullSlice<Buf>, Result<(), Error>) {
862 13222 : let buf = buf.into_raw_slice();
863 13222 : let bounds = buf.bounds();
864 13222 : let restore =
865 13222 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
866 13222 : let mut buf = buf;
867 26444 : while !buf.is_empty() {
868 13222 : let (tmp, res) = self.write_at(FullSlice::must_new(buf), offset, ctx).await;
869 13222 : buf = tmp.into_raw_slice();
870 0 : match res {
871 : Ok(0) => {
872 0 : return (
873 0 : restore(buf),
874 0 : Err(Error::new(
875 0 : std::io::ErrorKind::WriteZero,
876 0 : "failed to write whole buffer",
877 0 : )),
878 0 : );
879 : }
880 13222 : Ok(n) => {
881 13222 : buf = buf.slice(n..);
882 13222 : offset += n as u64;
883 13222 : }
884 0 : Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {}
885 0 : Err(e) => return (restore(buf), Err(e)),
886 : }
887 : }
888 13222 : (restore(buf), Ok(()))
889 13222 : }
890 :
891 : /// Writes `buf` to the file at the current offset.
892 : ///
893 : /// Panics if there is an uninitialized range in `buf`, as that is most likely a bug in the caller.
894 2261038 : pub async fn write_all<Buf: IoBuf + Send>(
895 2261038 : &mut self,
896 2261038 : buf: FullSlice<Buf>,
897 2261038 : ctx: &RequestContext,
898 2261038 : ) -> (FullSlice<Buf>, Result<usize, Error>) {
899 2261038 : let buf = buf.into_raw_slice();
900 2261038 : let bounds = buf.bounds();
901 2261038 : let restore =
902 2261038 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
903 2261038 : let nbytes = buf.len();
904 2261038 : let mut buf = buf;
905 4521996 : while !buf.is_empty() {
906 2260962 : let (tmp, res) = self.write(FullSlice::must_new(buf), ctx).await;
907 2260962 : buf = tmp.into_raw_slice();
908 4 : match res {
909 : Ok(0) => {
910 0 : return (
911 0 : restore(buf),
912 0 : Err(Error::new(
913 0 : std::io::ErrorKind::WriteZero,
914 0 : "failed to write whole buffer",
915 0 : )),
916 0 : );
917 : }
918 2260958 : Ok(n) => {
919 2260958 : buf = buf.slice(n..);
920 2260958 : }
921 4 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
922 4 : Err(e) => return (restore(buf), Err(e)),
923 : }
924 : }
925 2261034 : (restore(buf), Ok(nbytes))
926 2261038 : }
927 :
928 2260962 : async fn write<B: IoBuf + Send>(
929 2260962 : &mut self,
930 2260962 : buf: FullSlice<B>,
931 2260962 : ctx: &RequestContext,
932 2260962 : ) -> (FullSlice<B>, Result<usize, std::io::Error>) {
933 2260962 : let pos = self.pos;
934 2260962 : let (buf, res) = self.write_at(buf, pos, ctx).await;
935 2260962 : let n = match res {
936 2260958 : Ok(n) => n,
937 4 : Err(e) => return (buf, Err(e)),
938 : };
939 2260958 : self.pos += n as u64;
940 2260958 : (buf, Ok(n))
941 2260962 : }
942 :
943 967550 : pub(crate) async fn read_at<Buf>(
944 967550 : &self,
945 967550 : buf: tokio_epoll_uring::Slice<Buf>,
946 967550 : offset: u64,
947 967550 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
948 967550 : ) -> (tokio_epoll_uring::Slice<Buf>, Result<usize, Error>)
949 967550 : where
950 967550 : Buf: tokio_epoll_uring::IoBufMut + Send,
951 967550 : {
952 967550 : let file_guard = match self
953 967550 : .lock_file()
954 967550 : .await
955 967550 : .maybe_fatal_err("lock_file inside VirtualFileInner::read_at")
956 : {
957 967550 : Ok(file_guard) => file_guard,
958 0 : Err(e) => return (buf, Err(e)),
959 : };
960 :
961 967550 : observe_duration!(StorageIoOperation::Read, {
962 967550 : let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
963 967550 : let res = res.maybe_fatal_err("io_engine read_at inside VirtualFileInner::read_at");
964 967550 : if let Ok(size) = res {
965 967546 : STORAGE_IO_SIZE
966 967546 : .with_label_values(&[
967 967546 : "read",
968 967546 : &self.tenant_id,
969 967546 : &self.shard_id,
970 967546 : &self.timeline_id,
971 967546 : ])
972 967546 : .add(size as i64);
973 967546 : }
974 967550 : (buf, res)
975 : })
976 967550 : }
977 :
978 : /// The function aborts the process if the error is fatal.
979 2274184 : async fn write_at<B: IoBuf + Send>(
980 2274184 : &self,
981 2274184 : buf: FullSlice<B>,
982 2274184 : offset: u64,
983 2274184 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
984 2274184 : ) -> (FullSlice<B>, Result<usize, Error>) {
985 2274184 : let (slice, result) = self.write_at_inner(buf, offset, _ctx).await;
986 2274184 : let result = result.maybe_fatal_err("write_at");
987 2274184 : (slice, result)
988 2274184 : }
989 :
990 2274184 : async fn write_at_inner<B: IoBuf + Send>(
991 2274184 : &self,
992 2274184 : buf: FullSlice<B>,
993 2274184 : offset: u64,
994 2274184 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
995 2274184 : ) -> (FullSlice<B>, Result<usize, Error>) {
996 2274184 : let file_guard = match self.lock_file().await {
997 2274184 : Ok(file_guard) => file_guard,
998 0 : Err(e) => return (buf, Err(e)),
999 : };
1000 2274184 : observe_duration!(StorageIoOperation::Write, {
1001 2274184 : let ((_file_guard, buf), result) =
1002 2274184 : io_engine::get().write_at(file_guard, offset, buf).await;
1003 2274184 : if let Ok(size) = result {
1004 2274180 : STORAGE_IO_SIZE
1005 2274180 : .with_label_values(&[
1006 2274180 : "write",
1007 2274180 : &self.tenant_id,
1008 2274180 : &self.shard_id,
1009 2274180 : &self.timeline_id,
1010 2274180 : ])
1011 2274180 : .add(size as i64);
1012 2274180 : }
1013 2274184 : (buf, result)
1014 : })
1015 2274184 : }
1016 :
1017 448 : async fn read_to_end(&mut self, buf: &mut Vec<u8>, ctx: &RequestContext) -> Result<(), Error> {
1018 448 : let mut tmp = vec![0; 128];
1019 : loop {
1020 888 : let slice = tmp.slice(..128);
1021 888 : let (slice, res) = self.read_at(slice, self.pos, ctx).await;
1022 4 : match res {
1023 444 : Ok(0) => return Ok(()),
1024 440 : Ok(n) => {
1025 440 : self.pos += n as u64;
1026 440 : buf.extend_from_slice(&slice[..n]);
1027 440 : }
1028 4 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
1029 4 : Err(e) => return Err(e),
1030 : }
1031 440 : tmp = slice.into_inner();
1032 : }
1033 448 : }
1034 : }
1035 :
1036 : // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
1037 966678 : pub async fn read_exact_at_impl<Buf, F, Fut>(
1038 966678 : mut buf: tokio_epoll_uring::Slice<Buf>,
1039 966678 : mut offset: u64,
1040 966678 : mut read_at: F,
1041 966678 : ) -> (Buf, std::io::Result<()>)
1042 966678 : where
1043 966678 : Buf: IoBufMut + Send,
1044 966678 : F: FnMut(tokio_epoll_uring::Slice<Buf>, u64) -> Fut,
1045 966678 : Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<Buf>, std::io::Result<usize>)>,
1046 966678 : {
1047 1933360 : while buf.bytes_total() != 0 {
1048 : let res;
1049 966686 : (buf, res) = read_at(buf, offset).await;
1050 0 : match res {
1051 4 : Ok(0) => break,
1052 966682 : Ok(n) => {
1053 966682 : buf = buf.slice(n..);
1054 966682 : offset += n as u64;
1055 966682 : }
1056 0 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
1057 0 : Err(e) => return (buf.into_inner(), Err(e)),
1058 : }
1059 : }
1060 : // NB: don't use `buf.is_empty()` here; it is from the
1061 : // `impl Deref for Slice { Target = [u8] }`; the &[u8]
1062 : // returned by it only covers the initialized portion of `buf`.
1063 : // Whereas we're interested in ensuring that we filled the entire
1064 : // buffer that the user passed in.
1065 966678 : if buf.bytes_total() != 0 {
1066 4 : (
1067 4 : buf.into_inner(),
1068 4 : Err(std::io::Error::new(
1069 4 : std::io::ErrorKind::UnexpectedEof,
1070 4 : "failed to fill whole buffer",
1071 4 : )),
1072 4 : )
1073 : } else {
1074 966674 : assert_eq!(buf.len(), buf.bytes_total());
1075 966674 : (buf.into_inner(), Ok(()))
1076 : }
1077 966678 : }
1078 :
1079 : #[cfg(test)]
1080 : mod test_read_exact_at_impl {
1081 :
1082 : use std::collections::VecDeque;
1083 : use std::sync::Arc;
1084 :
1085 : use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};
1086 :
1087 : use super::read_exact_at_impl;
1088 :
1089 : struct Expectation {
1090 : offset: u64,
1091 : bytes_total: usize,
1092 : result: std::io::Result<Vec<u8>>,
1093 : }
1094 : struct MockReadAt {
1095 : expectations: VecDeque<Expectation>,
1096 : }
1097 :
1098 : impl MockReadAt {
1099 24 : async fn read_at(
1100 24 : &mut self,
1101 24 : mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
1102 24 : offset: u64,
1103 24 : ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
1104 24 : let exp = self
1105 24 : .expectations
1106 24 : .pop_front()
1107 24 : .expect("read_at called but we have no expectations left");
1108 24 : assert_eq!(exp.offset, offset);
1109 24 : assert_eq!(exp.bytes_total, buf.bytes_total());
1110 24 : match exp.result {
1111 24 : Ok(bytes) => {
1112 24 : assert!(bytes.len() <= buf.bytes_total());
1113 24 : buf.put_slice(&bytes);
1114 24 : (buf, Ok(bytes.len()))
1115 : }
1116 0 : Err(e) => (buf, Err(e)),
1117 : }
1118 24 : }
1119 : }
1120 :
1121 : impl Drop for MockReadAt {
1122 16 : fn drop(&mut self) {
1123 16 : assert_eq!(self.expectations.len(), 0);
1124 16 : }
1125 : }
1126 :
1127 : #[tokio::test]
1128 4 : async fn test_basic() {
1129 4 : let buf = Vec::with_capacity(5).slice_full();
1130 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1131 4 : expectations: VecDeque::from(vec![Expectation {
1132 4 : offset: 0,
1133 4 : bytes_total: 5,
1134 4 : result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
1135 4 : }]),
1136 4 : }));
1137 4 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1138 4 : let mock_read_at = Arc::clone(&mock_read_at);
1139 4 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1140 4 : })
1141 4 : .await;
1142 4 : assert!(res.is_ok());
1143 4 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
1144 4 : }
1145 :
1146 : #[tokio::test]
1147 4 : async fn test_empty_buf_issues_no_syscall() {
1148 4 : let buf = Vec::new().slice_full();
1149 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1150 4 : expectations: VecDeque::new(),
1151 4 : }));
1152 4 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1153 0 : let mock_read_at = Arc::clone(&mock_read_at);
1154 4 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1155 4 : })
1156 4 : .await;
1157 4 : assert!(res.is_ok());
1158 4 : }
1159 :
1160 : #[tokio::test]
1161 4 : async fn test_two_read_at_calls_needed_until_buf_filled() {
1162 4 : let buf = Vec::with_capacity(4).slice_full();
1163 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1164 4 : expectations: VecDeque::from(vec![
1165 4 : Expectation {
1166 4 : offset: 0,
1167 4 : bytes_total: 4,
1168 4 : result: Ok(vec![b'a', b'b']),
1169 4 : },
1170 4 : Expectation {
1171 4 : offset: 2,
1172 4 : bytes_total: 2,
1173 4 : result: Ok(vec![b'c', b'd']),
1174 4 : },
1175 4 : ]),
1176 4 : }));
1177 8 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1178 8 : let mock_read_at = Arc::clone(&mock_read_at);
1179 8 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1180 8 : })
1181 4 : .await;
1182 4 : assert!(res.is_ok());
1183 4 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
1184 4 : }
1185 :
1186 : #[tokio::test]
1187 4 : async fn test_eof_before_buffer_full() {
1188 4 : let buf = Vec::with_capacity(3).slice_full();
1189 4 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
1190 4 : expectations: VecDeque::from(vec![
1191 4 : Expectation {
1192 4 : offset: 0,
1193 4 : bytes_total: 3,
1194 4 : result: Ok(vec![b'a']),
1195 4 : },
1196 4 : Expectation {
1197 4 : offset: 1,
1198 4 : bytes_total: 2,
1199 4 : result: Ok(vec![b'b']),
1200 4 : },
1201 4 : Expectation {
1202 4 : offset: 2,
1203 4 : bytes_total: 1,
1204 4 : result: Ok(vec![]),
1205 4 : },
1206 4 : ]),
1207 4 : }));
1208 12 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
1209 12 : let mock_read_at = Arc::clone(&mock_read_at);
1210 12 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
1211 12 : })
1212 4 : .await;
1213 4 : let Err(err) = res else {
1214 4 : panic!("should return an error");
1215 4 : };
1216 4 : assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
1217 4 : assert_eq!(format!("{err}"), "failed to fill whole buffer");
1218 4 : // buffer contents on error are unspecified
1219 4 : }
1220 : }
1221 :
1222 : struct FileGuard {
1223 : slot_guard: RwLockReadGuard<'static, SlotInner>,
1224 : }
1225 :
1226 : impl AsRef<OwnedFd> for FileGuard {
1227 3251012 : fn as_ref(&self) -> &OwnedFd {
1228 3251012 : // This unwrap is safe because we only create `FileGuard`s
1229 3251012 : // if we know that the file is Some.
1230 3251012 : self.slot_guard.file.as_ref().unwrap()
1231 3251012 : }
1232 : }
1233 :
1234 : impl FileGuard {
1235 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
1236 1625629 : fn with_std_file<F, R>(&self, with: F) -> R
1237 1625629 : where
1238 1625629 : F: FnOnce(&File) -> R,
1239 1625629 : {
1240 1625629 : // SAFETY:
1241 1625629 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1242 1625629 : // - `&` usage below: `self` is `&`, hence the Rust type system guarantees there is no `&mut`
1243 1625629 : let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1244 1625629 : let res = with(&file);
1245 1625629 : let _ = file.into_raw_fd();
1246 1625629 : res
1247 1625629 : }
1248 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
1249 8 : fn with_std_file_mut<F, R>(&mut self, with: F) -> R
1250 8 : where
1251 8 : F: FnOnce(&mut File) -> R,
1252 8 : {
1253 8 : // SAFETY:
1254 8 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1255 8 : // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
1256 8 : let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1257 8 : let res = with(&mut file);
1258 8 : let _ = file.into_raw_fd();
1259 8 : res
1260 8 : }
1261 : }
1262 :
1263 : impl tokio_epoll_uring::IoFd for FileGuard {
1264 1625375 : unsafe fn as_fd(&self) -> RawFd {
1265 1625375 : let owned_fd: &OwnedFd = self.as_ref();
1266 1625375 : owned_fd.as_raw_fd()
1267 1625375 : }
1268 : }
1269 :
1270 : #[cfg(test)]
1271 : impl VirtualFile {
1272 41832 : pub(crate) async fn read_blk(
1273 41832 : &self,
1274 41832 : blknum: u32,
1275 41832 : ctx: &RequestContext,
1276 41832 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
1277 41832 : self.inner.read_blk(blknum, ctx).await
1278 41832 : }
1279 : }
1280 :
1281 : #[cfg(test)]
1282 : impl VirtualFileInner {
1283 41832 : pub(crate) async fn read_blk(
1284 41832 : &self,
1285 41832 : blknum: u32,
1286 41832 : ctx: &RequestContext,
1287 41832 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
1288 : use crate::page_cache::PAGE_SZ;
1289 41832 : let slice = IoBufferMut::with_capacity(PAGE_SZ).slice_full();
1290 41832 : assert_eq!(slice.bytes_total(), PAGE_SZ);
1291 41832 : let slice = self
1292 41832 : .read_exact_at(slice, blknum as u64 * (PAGE_SZ as u64), ctx)
1293 41832 : .await?;
1294 41832 : Ok(crate::tenant::block_io::BlockLease::IoBufferMut(
1295 41832 : slice.into_inner(),
1296 41832 : ))
1297 41832 : }
1298 : }
1299 :
1300 : impl Drop for VirtualFileInner {
1301 : /// If a VirtualFile is dropped, close the underlying file if it was open.
1302 10627 : fn drop(&mut self) {
1303 10627 : let handle = self.handle.get_mut();
1304 :
1305 10627 : fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
1306 10627 : if slot_guard.tag == tag {
1307 9457 : slot.recently_used.store(false, Ordering::Relaxed);
1308 : // there is also operation "close-by-replace" for closes done on eviction for
1309 : // comparison.
1310 9457 : if let Some(fd) = slot_guard.file.take() {
1311 9457 : STORAGE_IO_TIME_METRIC
1312 9457 : .get(StorageIoOperation::Close)
1313 9457 : .observe_closure_duration(|| drop(fd));
1314 9457 : }
1315 1170 : }
1316 10627 : }
1317 :
1318 : // We don't have async drop so we cannot directly await the lock here.
1319 : // Instead, first do a best-effort attempt at closing the underlying
1320 : // file descriptor by using `try_write`, and if that fails, spawn
1321 : // a tokio task to do it asynchronously: we just want it to be
1322 : // cleaned up eventually.
1323 : // Most of the time, the `try_write` should succeed though,
1324 : // as we have `&mut self` access. In other words, if the slot
1325 : // is still occupied by our file, there should be no access from
1326 : // other I/O operations; the only other possible place to lock
1327 : // the slot is the clock algorithm looking for a victim slot.
1328 10627 : let slot = &get_open_files().slots[handle.index];
1329 10627 : if let Ok(slot_guard) = slot.inner.try_write() {
1330 10627 : clean_slot(slot, slot_guard, handle.tag);
1331 10627 : } else {
1332 0 : let tag = handle.tag;
1333 0 : tokio::spawn(async move {
1334 0 : let slot_guard = slot.inner.write().await;
1335 0 : clean_slot(slot, slot_guard, tag);
1336 0 : });
1337 0 : };
1338 10627 : }
1339 : }
1340 :
1341 : impl OwnedAsyncWriter for VirtualFile {
1342 13214 : async fn write_all_at<Buf: IoBufAligned + Send>(
1343 13214 : &self,
1344 13214 : buf: FullSlice<Buf>,
1345 13214 : offset: u64,
1346 13214 : ctx: &RequestContext,
1347 13214 : ) -> std::io::Result<FullSlice<Buf>> {
1348 13214 : let (buf, res) = VirtualFile::write_all_at(self, buf, offset, ctx).await;
1349 13214 : res.map(|_| buf)
1350 13214 : }
1351 : }
1352 :
1353 : impl OpenFiles {
1354 472 : fn new(num_slots: usize) -> OpenFiles {
1355 472 : let mut slots = Box::new(Vec::with_capacity(num_slots));
1356 4720 : for _ in 0..num_slots {
1357 4720 : let slot = Slot {
1358 4720 : recently_used: AtomicBool::new(false),
1359 4720 : inner: RwLock::new(SlotInner { tag: 0, file: None }),
1360 4720 : };
1361 4720 : slots.push(slot);
1362 4720 : }
1363 :
1364 472 : OpenFiles {
1365 472 : next: AtomicUsize::new(0),
1366 472 : slots: Box::leak(slots),
1367 472 : }
1368 472 : }
1369 : }
1370 :
1371 : ///
1372 : /// Initialize the virtual file module. This must be called once at page
1373 : /// server startup.
1374 : ///
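/// A hedged sketch of the startup call (all values illustrative):
///
/// ```ignore
/// virtual_file::init(
///     100,                        // fd cache slots
///     IoEngineKind::StdFs,        // or tokio-epoll-uring where available
///     IoMode::Buffered,
///     SyncMode::Sync,
/// );
/// ```
///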
1375 : #[cfg(not(test))]
1376 0 : pub fn init(num_slots: usize, engine: IoEngineKind, mode: IoMode, sync_mode: SyncMode) {
1377 0 : if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
1378 0 : panic!("virtual_file::init called twice");
1379 0 : }
1380 0 : set_io_mode(mode);
1381 0 : io_engine::init(engine);
1382 0 : SYNC_MODE.store(sync_mode as u8, std::sync::atomic::Ordering::Relaxed);
1383 0 : crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
1384 0 : }
1385 :
1386 : const TEST_MAX_FILE_DESCRIPTORS: usize = 10;
1387 :
1388 : // Get a handle to the global slots array.
1389 3273893 : fn get_open_files() -> &'static OpenFiles {
1390 3273893 : //
1391 3273893 : // In unit tests, page server startup doesn't happen and no one calls
1392 3273893 : // virtual_file::init(). Initialize it here, with a small array.
1393 3273893 : //
1394 3273893 : // This applies to the virtual file tests below, but all other unit
1395 3273893 : // tests too, so the virtual file facility is always usable in
1396 3273893 : // unit tests.
1397 3273893 : //
1398 3273893 : if cfg!(test) {
1399 3273893 : OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
1400 : } else {
1401 0 : OPEN_FILES.get().expect("virtual_file::init not called yet")
1402 : }
1403 3273893 : }
1404 :
1405 : /// Gets the io buffer alignment.
1406 0 : pub(crate) const fn get_io_buffer_alignment() -> usize {
1407 0 : DEFAULT_IO_BUFFER_ALIGNMENT
1408 0 : }
1409 :
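/// Mutable I/O buffer with the alignment required for direct I/O.
///
/// A minimal allocate-and-read sketch (mirrors the tests below; `file` and `ctx`
/// are assumed to exist):
///
/// ```ignore
/// let slice = IoBufferMut::with_capacity(PAGE_SZ).slice_full();
/// let slice = file.read_exact_at(slice, 0, &ctx).await?;
/// let buf: IoBufferMut = slice.into_inner();
/// ```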
1410 : pub(crate) type IoBufferMut = AlignedBufferMut<ConstAlign<{ get_io_buffer_alignment() }>>;
1411 : pub(crate) type IoBuffer = AlignedBuffer<ConstAlign<{ get_io_buffer_alignment() }>>;
1412 : pub(crate) type IoPageSlice<'a> =
1413 : AlignedSlice<'a, PAGE_SZ, ConstAlign<{ get_io_buffer_alignment() }>>;
1414 :
1415 : static IO_MODE: AtomicU8 = AtomicU8::new(IoMode::preferred() as u8);
1416 :
1417 0 : pub(crate) fn set_io_mode(mode: IoMode) {
1418 0 : IO_MODE.store(mode as u8, std::sync::atomic::Ordering::Relaxed);
1419 0 : }
1420 :
1421 5096 : pub(crate) fn get_io_mode() -> IoMode {
1422 5096 : IoMode::try_from(IO_MODE.load(Ordering::Relaxed)).unwrap()
1423 5096 : }
1424 :
1425 : static SYNC_MODE: AtomicU8 = AtomicU8::new(SyncMode::Sync as u8);
1426 :
1427 : #[cfg(test)]
1428 : mod tests {
1429 : use std::io::Write;
1430 : use std::os::unix::fs::FileExt;
1431 : use std::sync::Arc;
1432 :
1433 : use owned_buffers_io::io_buf_ext::IoBufExt;
1434 : use owned_buffers_io::slice::SliceMutExt;
1435 : use rand::seq::SliceRandom;
1436 : use rand::{Rng, thread_rng};
1437 :
1438 : use super::*;
1439 : use crate::context::DownloadBehavior;
1440 : use crate::task_mgr::TaskKind;
1441 :
1442 : enum MaybeVirtualFile {
1443 : VirtualFile(VirtualFile),
1444 : File(File),
1445 : }
1446 :
1447 : impl From<VirtualFile> for MaybeVirtualFile {
1448 12 : fn from(vf: VirtualFile) -> Self {
1449 12 : MaybeVirtualFile::VirtualFile(vf)
1450 12 : }
1451 : }
1452 :
1453 : impl MaybeVirtualFile {
1454 808 : async fn read_exact_at(
1455 808 : &self,
1456 808 : mut slice: tokio_epoll_uring::Slice<IoBufferMut>,
1457 808 : offset: u64,
1458 808 : ctx: &RequestContext,
1459 808 : ) -> Result<tokio_epoll_uring::Slice<IoBufferMut>, Error> {
1460 808 : match self {
1461 404 : MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(slice, offset, ctx).await,
1462 404 : MaybeVirtualFile::File(file) => {
1463 404 : let rust_slice: &mut [u8] = slice.as_mut_rust_slice_full_zeroed();
1464 404 : file.read_exact_at(rust_slice, offset).map(|()| slice)
1465 : }
1466 : }
1467 808 : }
1468 16 : async fn write_all_at<Buf: IoBufAligned + Send>(
1469 16 : &self,
1470 16 : buf: FullSlice<Buf>,
1471 16 : offset: u64,
1472 16 : ctx: &RequestContext,
1473 16 : ) -> Result<(), Error> {
1474 16 : match self {
1475 8 : MaybeVirtualFile::VirtualFile(file) => {
1476 8 : let (_buf, res) = file.write_all_at(buf, offset, ctx).await;
1477 8 : res
1478 : }
1479 8 : MaybeVirtualFile::File(file) => file.write_all_at(&buf[..], offset),
1480 : }
1481 16 : }
1482 72 : async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
1483 72 : match self {
1484 36 : MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
1485 36 : MaybeVirtualFile::File(file) => file.seek(pos),
1486 : }
1487 72 : }
1488 16 : async fn write_all<Buf: IoBuf + Send>(
1489 16 : &mut self,
1490 16 : buf: FullSlice<Buf>,
1491 16 : ctx: &RequestContext,
1492 16 : ) -> Result<(), Error> {
1493 16 : match self {
1494 8 : MaybeVirtualFile::VirtualFile(file) => {
1495 8 : let (_buf, res) = file.write_all(buf, ctx).await;
1496 8 : res.map(|_| ())
1497 : }
1498 8 : MaybeVirtualFile::File(file) => file.write_all(&buf[..]),
1499 : }
1500 16 : }
1501 :
1502 : // Helper function to slurp contents of a file, starting at the current position,
1503 : // into a string
1504 884 : async fn read_string(&mut self, ctx: &RequestContext) -> Result<String, Error> {
1505 : use std::io::Read;
1506 884 : let mut buf = String::new();
1507 884 : match self {
1508 448 : MaybeVirtualFile::VirtualFile(file) => {
1509 448 : let mut buf = Vec::new();
1510 448 : file.read_to_end(&mut buf, ctx).await?;
1511 444 : return Ok(String::from_utf8(buf).unwrap());
1512 : }
1513 436 : MaybeVirtualFile::File(file) => {
1514 436 : file.read_to_string(&mut buf)?;
1515 : }
1516 : }
1517 432 : Ok(buf)
1518 884 : }
1519 :
1520 : // Helper function to slurp a portion of a file into a string
1521 808 : async fn read_string_at(
1522 808 : &mut self,
1523 808 : pos: u64,
1524 808 : len: usize,
1525 808 : ctx: &RequestContext,
1526 808 : ) -> Result<String, Error> {
1527 808 : let slice = IoBufferMut::with_capacity(len).slice_full();
1528 808 : assert_eq!(slice.bytes_total(), len);
1529 808 : let slice = self.read_exact_at(slice, pos, ctx).await?;
1530 808 : let buf = slice.into_inner();
1531 808 : assert_eq!(buf.len(), len);
1532 :
1533 808 : Ok(String::from_utf8(buf.to_vec()).unwrap())
1534 808 : }
1535 : }
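     : // (added commentary) The helpers above follow the owned-buffers IO convention
     : // used throughout this module: the buffer is moved *into* the call and handed
     : // back on completion (`read_exact_at` returns the `Slice`, and the VirtualFile
     : // arm of `write_all_at` returns `(buf, res)`). Completion-based backends like
     : // tokio-epoll-uring require this, because the kernel owns the buffer while an
     : // operation is in flight.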
1536 :
1537 : #[tokio::test]
1538 4 : async fn test_virtual_files() -> anyhow::Result<()> {
1539 4 : // The real work is done in the test_files() helper function. This
1540 4 : // allows us to run the same set of tests against a native File and a
1541 4 : // VirtualFile. We trust native Files and wouldn't need to test them,
1542 4 : // but this allows us to verify that the operations return the same
1543 4 : // results with VirtualFiles as with native Files. (Except that with
1544 4 : // native files, you will run out of file descriptors if the ulimit
1545 4 : // is low enough.)
1546 4 : struct A;
1547 4 :
1548 4 : impl Adapter for A {
1549 412 : async fn open(
1550 412 : path: Utf8PathBuf,
1551 412 : opts: OpenOptions,
1552 412 : ctx: &RequestContext,
1553 412 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1554 412 : let vf = VirtualFile::open_with_options(&path, &opts, ctx).await?;
1555 412 : Ok(MaybeVirtualFile::VirtualFile(vf))
1556 412 : }
1557 4 : }
1558 4 : test_files::<A>("virtual_files").await
1559 4 : }
1560 :
1561 : #[tokio::test]
1562 4 : async fn test_physical_files() -> anyhow::Result<()> {
1563 4 : struct B;
1564 4 :
1565 4 : impl Adapter for B {
1566 412 : async fn open(
1567 412 : path: Utf8PathBuf,
1568 412 : opts: OpenOptions,
1569 412 : _ctx: &RequestContext,
1570 412 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1571 4 : Ok(MaybeVirtualFile::File({
1572 412 : let owned_fd = opts.open(path.as_std_path()).await?;
1573 412 : File::from(owned_fd)
1574 4 : }))
1575 412 : }
1576 4 : }
1577 4 :
1578 4 : test_files::<B>("physical_files").await
1579 4 : }
1580 :
1581 : /// This is essentially a closure which returns a MaybeVirtualFile, but because Rust edition
1582 : /// 2024, with its new lifetime capture and outlives rules, is not out yet, this is an async
1583 : /// function in a trait, which already benefits from the new lifetime capture rules.
1584 : trait Adapter {
1585 : async fn open(
1586 : path: Utf8PathBuf,
1587 : opts: OpenOptions,
1588 : ctx: &RequestContext,
1589 : ) -> Result<MaybeVirtualFile, anyhow::Error>;
1590 : }
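     : // (added commentary) A hypothetical closure-based equivalent, once the
     : // edition-2024 capture rules and the `AsyncFn` traits are available, might look
     : // roughly like this instead of the unit-struct adapters:
     : //
     : //     async fn test_files_with(
     : //         open: impl AsyncFn(Utf8PathBuf, OpenOptions, &RequestContext)
     : //             -> Result<MaybeVirtualFile, anyhow::Error>,
     : //     ) -> anyhow::Result<()> { /* ... */ }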
1591 :
1592 8 : async fn test_files<A>(testname: &str) -> anyhow::Result<()>
1593 8 : where
1594 8 : A: Adapter,
1595 8 : {
1596 8 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1597 8 : let testdir = crate::config::PageServerConf::test_repo_dir(testname);
1598 8 : std::fs::create_dir_all(&testdir)?;
1599 :
1600 8 : let path_a = testdir.join("file_a");
1601 8 : let mut file_a = A::open(
1602 8 : path_a.clone(),
1603 8 : OpenOptions::new()
1604 8 : .write(true)
1605 8 : .create(true)
1606 8 : .truncate(true)
1607 8 : .to_owned(),
1608 8 : &ctx,
1609 8 : )
1610 8 : .await?;
1611 :
1612 8 : file_a
1613 8 : .write_all(b"foobar".to_vec().slice_len(), &ctx)
1614 8 : .await?;
1615 :
1616 : // cannot read from a file opened in write-only mode
1617 8 : let _ = file_a.read_string(&ctx).await.unwrap_err();
1618 :
1619 : // Close the file and re-open for reading
1620 8 : let mut file_a = A::open(path_a, OpenOptions::new().read(true).to_owned(), &ctx).await?;
1621 :
1622 : // cannot write to a file opened in read-only mode
1623 8 : let _ = file_a
1624 8 : .write_all(b"bar".to_vec().slice_len(), &ctx)
1625 8 : .await
1626 8 : .unwrap_err();
1627 8 :
1628 8 : // Try simple read
1629 8 : assert_eq!("foobar", file_a.read_string(&ctx).await?);
1630 :
1631 : // It's positioned at the EOF now.
1632 8 : assert_eq!("", file_a.read_string(&ctx).await?);
1633 :
1634 : // Test seeks.
1635 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1636 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1637 :
1638 8 : assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
1639 8 : assert_eq!("ar", file_a.read_string(&ctx).await?);
1640 :
1641 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1642 8 : assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
1643 8 : assert_eq!("bar", file_a.read_string(&ctx).await?);
1644 :
1645 8 : assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
1646 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1647 :
1648 : // Test erroneous seeks to before byte 0
1649 8 : file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
1650 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1651 8 : file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();
1652 8 :
1653 8 : // the erroneous seek should have left the position unchanged
1654 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1655 :
1656 : // Create another test file, and try FileExt functions on it.
1657 8 : let path_b = testdir.join("file_b");
1658 8 : let mut file_b = A::open(
1659 8 : path_b.clone(),
1660 8 : OpenOptions::new()
1661 8 : .read(true)
1662 8 : .write(true)
1663 8 : .create(true)
1664 8 : .truncate(true)
1665 8 : .to_owned(),
1666 8 : &ctx,
1667 8 : )
1668 8 : .await?;
1669 8 : file_b
1670 8 : .write_all_at(IoBuffer::from(b"BAR").slice_len(), 3, &ctx)
1671 8 : .await?;
1672 8 : file_b
1673 8 : .write_all_at(IoBuffer::from(b"FOO").slice_len(), 0, &ctx)
1674 8 : .await?;
1675 :
1676 8 : assert_eq!(file_b.read_string_at(2, 3, &ctx).await?, "OBA");
1677 :
1678 : // Open a lot of files, enough to cause some evictions. (Or to be precise,
1679 : // open the same file many times. The effect is the same.)
1680 : //
1681 : // leave file_a positioned at offset 1 before we start
1682 8 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1683 :
1684 8 : let mut vfiles = Vec::new();
1685 808 : for _ in 0..100 {
1686 800 : let mut vfile = A::open(
1687 800 : path_b.clone(),
1688 800 : OpenOptions::new().read(true).to_owned(),
1689 800 : &ctx,
1690 800 : )
1691 800 : .await?;
1692 800 : assert_eq!("FOOBAR", vfile.read_string(&ctx).await?);
1693 800 : vfiles.push(vfile);
1694 : }
1695 :
1696 : // make sure we opened enough files to definitely cause evictions.
1697 8 : assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);
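     : // (added commentary) TEST_MAX_FILE_DESCRIPTORS is presumably the size of the
     : // global fd cache in test builds (it comes in via `super::*`); overshooting it
     : // by 2x guarantees the clock algorithm has evicted file_a's descriptor.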
1698 :
1699 : // The underlying file descriptor for 'file_a' should be closed now. Try to read
1700 : // from it again. We left the file positioned at offset 1 above.
1701 8 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1702 :
1703 : // Check that all the other FDs still work too. Use them in random order for
1704 : // good measure.
1705 8 : vfiles.as_mut_slice().shuffle(&mut thread_rng());
1706 800 : for vfile in vfiles.iter_mut() {
1707 800 : assert_eq!("OOBAR", vfile.read_string_at(1, 5, &ctx).await?);
1708 : }
1709 :
1710 8 : Ok(())
1711 8 : }
1712 :
1713 : /// Test using VirtualFiles from many threads concurrently. This exercises both
1714 : /// using many VirtualFiles at once (causing evictions) and sharing the same
1715 : /// VirtualFile across multiple threads.
1716 : #[tokio::test]
1717 4 : async fn test_vfile_concurrency() -> Result<(), Error> {
1718 4 : const SIZE: usize = 8 * 1024;
1719 4 : const VIRTUAL_FILES: usize = 100;
1720 4 : const THREADS: usize = 100;
1721 4 : const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];
1722 4 :
1723 4 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1724 4 : let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
1725 4 : std::fs::create_dir_all(&testdir)?;
1726 4 :
1727 4 : // Create a test file.
1728 4 : let test_file_path = testdir.join("concurrency_test_file");
1729 4 : {
1730 4 : let file = File::create(&test_file_path)?;
1731 4 : file.write_all_at(&SAMPLE, 0)?;
1732 4 : }
1733 4 :
1734 4 : // Open the file many times.
1735 4 : let mut files = Vec::new();
1736 404 : for _ in 0..VIRTUAL_FILES {
1737 400 : let f = VirtualFileInner::open_with_options(
1738 400 : &test_file_path,
1739 400 : OpenOptions::new().read(true),
1740 400 : &ctx,
1741 400 : )
1742 400 : .await?;
1743 400 : files.push(f);
1744 4 : }
1745 4 : let files = Arc::new(files);
1746 4 :
1747 4 : // Launch many threads, and use the virtual files concurrently in random order.
1748 4 : let rt = tokio::runtime::Builder::new_multi_thread()
1749 4 : .worker_threads(THREADS)
1750 4 : .thread_name("test_vfile_concurrency thread")
1751 4 : .build()
1752 4 : .unwrap();
1753 4 : let mut hdls = Vec::new();
1754 404 : for _threadno in 0..THREADS {
1755 400 : let files = files.clone();
1756 400 : let ctx = ctx.detached_child(TaskKind::UnitTest, DownloadBehavior::Error);
1757 400 : let hdl = rt.spawn(async move {
1758 400 : let mut buf = IoBufferMut::with_capacity_zeroed(SIZE);
1759 400 : let mut rng = rand::rngs::OsRng;
1760 400000 : for _ in 1..1000 {
1761 399600 : let f = &files[rng.gen_range(0..files.len())];
1762 399600 : buf = f
1763 399600 : .read_exact_at(buf.slice_full(), 0, &ctx)
1764 399600 : .await
1765 399600 : .unwrap()
1766 399600 : .into_inner();
1767 399600 : assert!(buf[..] == SAMPLE);
1768 4 : }
1769 400 : });
1770 400 : hdls.push(hdl);
1771 400 : }
1772 404 : for hdl in hdls {
1773 400 : hdl.await?;
1774 4 : }
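     : // (added commentary) Dropping a multi-threaded tokio Runtime from within an
     : // async context panics, and this #[tokio::test] body is such a context, so the
     : // nested runtime is deliberately leaked instead of being dropped here.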
1775 4 : std::mem::forget(rt);
1776 4 :
1777 4 : Ok(())
1778 4 : }
1779 :
1780 : #[tokio::test]
1781 4 : async fn test_atomic_overwrite_basic() {
1782 4 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1783 4 : let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
1784 4 : std::fs::create_dir_all(&testdir).unwrap();
1785 4 :
1786 4 : let path = testdir.join("myfile");
1787 4 : let tmp_path = testdir.join("myfile.tmp");
1788 4 :
1789 4 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1790 4 : .await
1791 4 : .unwrap();
1792 4 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1793 4 : let post = file.read_string(&ctx).await.unwrap();
1794 4 : assert_eq!(post, "foo");
1795 4 : assert!(!tmp_path.exists());
1796 4 : drop(file);
1797 4 :
1798 4 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"bar".to_vec())
1799 4 : .await
1800 4 : .unwrap();
1801 4 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1802 4 : let post = file.read_string(&ctx).await.unwrap();
1803 4 : assert_eq!(post, "bar");
1804 4 : assert!(!tmp_path.exists());
1805 4 : drop(file);
1806 4 : }
1807 :
1808 : #[tokio::test]
1809 4 : async fn test_atomic_overwrite_preexisting_tmp() {
1810 4 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1811 4 : let testdir =
1812 4 : crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
1813 4 : std::fs::create_dir_all(&testdir).unwrap();
1814 4 :
1815 4 : let path = testdir.join("myfile");
1816 4 : let tmp_path = testdir.join("myfile.tmp");
1817 4 :
1818 4 : std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
1819 4 : assert!(tmp_path.exists());
1820 4 :
1821 4 : VirtualFileInner::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1822 4 : .await
1823 4 : .unwrap();
1824 4 :
1825 4 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1826 4 : let post = file.read_string(&ctx).await.unwrap();
1827 4 : assert_eq!(post, "foo");
1828 4 : assert!(!tmp_path.exists());
1829 4 : drop(file);
1830 4 : }
1831 : }