Line data Source code
1 : //!
2 : //! VirtualFile is like a normal File, but it's not bound directly to
3 : //! a file descriptor. Instead, the file is opened when it's read from,
4 : //! and if too many files are open globally in the system, least-recently
5 : //! used ones are closed.
6 : //!
7 : //! To track which files have been recently used, we use the clock algorithm
8 : //! with a 'recently_used' flag on each slot.
9 : //!
10 : //! This is similar to PostgreSQL's virtual file descriptor facility in
11 : //! src/backend/storage/file/fd.c
12 : //!
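//!
//! A minimal usage sketch (hypothetical: assumes `virtual_file::init()` already ran
//! at startup and a `RequestContext` is in scope; see the tests at the bottom of
//! this file for complete, working versions):
//!
//! ```ignore
//! let file = VirtualFile::open("path/to/some/file", ctx).await?;
//! let slice = Vec::with_capacity(PAGE_SZ).slice_full();
//! let slice = file.read_exact_at(slice, 0, ctx).await?;
//! // Dropping `file` releases its slot in the global descriptor cache.
//! drop(file);
//! ```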
13 : use crate::config::defaults::DEFAULT_IO_BUFFER_ALIGNMENT;
14 : use crate::context::RequestContext;
15 : use crate::metrics::{StorageIoOperation, STORAGE_IO_SIZE, STORAGE_IO_TIME_METRIC};
16 :
17 : use crate::page_cache::{PageWriteGuard, PAGE_SZ};
18 : use crate::tenant::TENANTS_SEGMENT_NAME;
19 : use camino::{Utf8Path, Utf8PathBuf};
20 : use once_cell::sync::OnceCell;
21 : use owned_buffers_io::io_buf_ext::FullSlice;
22 : use pageserver_api::shard::TenantShardId;
23 : use std::fs::File;
24 : use std::io::{Error, ErrorKind, Seek, SeekFrom};
25 : use tokio_epoll_uring::{BoundedBuf, IoBuf, IoBufMut, Slice};
26 :
27 : use std::os::fd::{AsRawFd, FromRawFd, IntoRawFd, OwnedFd, RawFd};
28 : use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
29 : use tokio::sync::{RwLock, RwLockReadGuard, RwLockWriteGuard};
30 : use tokio::time::Instant;
31 :
32 : pub use pageserver_api::models::virtual_file as api;
33 : pub(crate) mod io_engine;
34 : pub use io_engine::feature_test as io_engine_feature_test;
35 : pub use io_engine::io_engine_for_bench;
36 : pub use io_engine::FeatureTestResult as IoEngineFeatureTestResult;
37 : mod metadata;
38 : mod open_options;
39 : use self::owned_buffers_io::write::OwnedAsyncWriter;
40 : pub(crate) use api::DirectIoMode;
41 : pub(crate) use io_engine::IoEngineKind;
42 : pub(crate) use metadata::Metadata;
43 : pub(crate) use open_options::*;
44 :
45 : pub(crate) mod owned_buffers_io {
46 : //! Abstractions for IO with owned buffers.
47 : //!
48 : //! Not actually tied to [`crate::virtual_file`] specifically, but it's the primary
49 : //! reason we need this abstraction.
50 : //!
51 : //! Over time, this could move into the `tokio-epoll-uring` crate, maybe `uring-common`,
52 : //! but for the time being we're proving out the primitives in the neon.git repo
53 : //! for faster iteration.
54 :
55 : pub(crate) mod io_buf_ext;
56 : pub(crate) mod slice;
57 : pub(crate) mod write;
58 : pub(crate) mod util {
59 : pub(crate) mod size_tracking_writer;
60 : }
61 : }
62 :
63 : ///
64 : /// A virtual file descriptor. You can use this just like std::fs::File, but internally
65 : /// the underlying file is closed if the system is low on file descriptors,
66 : /// and re-opened when it's accessed again.
67 : ///
68 : /// Like with std::fs::File, multiple threads can read/write the file concurrently,
69 : /// holding just a shared reference to the same VirtualFile, using the read_at() / write_at()
70 : /// functions from the FileExt trait. But the functions from the Read/Write/Seek traits
71 : /// require a mutable reference, because they modify the "current position".
72 : ///
73 : /// Each VirtualFile has a physical file descriptor in the global OPEN_FILES array, at the
74 : /// slot that 'handle' points to, if the underlying file is currently open. If it's not
75 : /// currently open, the 'handle' can still point to the slot where it was last kept. The
76 : /// 'tag' field is used to detect whether the handle still is valid or not.
77 : ///
78 : #[derive(Debug)]
79 : pub struct VirtualFile {
80 : /// Lazy handle to the global file descriptor cache. The slot that this points to
81 : /// might contain our File, or it may be empty, or it may contain a File that
82 : /// belongs to a different VirtualFile.
83 : handle: RwLock<SlotHandle>,
84 :
85 : /// Current file position
86 : pos: u64,
87 :
88 : /// File path and options to use to open it.
89 : ///
90 : /// Note: this only contains the options needed to re-open it. For example,
91 : /// if a new file is created, we only pass the create flag when it's initially
92 : /// opened, in the VirtualFile::create() function, and strip the flag before
93 : /// storing it here.
94 : pub path: Utf8PathBuf,
95 : open_options: OpenOptions,
96 :
97 : // These are strings because we only use them for metrics, and those expect strings.
98 : // It makes no sense for us to constantly turn the `TimelineId` and `TenantId` into
99 : // strings.
100 : tenant_id: String,
101 : shard_id: String,
102 : timeline_id: String,
103 : }
104 :
105 : #[derive(Debug, PartialEq, Clone, Copy)]
106 : struct SlotHandle {
107 : /// Index into OPEN_FILES.slots
108 : index: usize,
109 :
110 : /// Value of 'tag' in the slot. If slot's tag doesn't match, then the slot has
111 : /// been recycled and no longer contains the FD for this virtual file.
112 : tag: u64,
113 : }
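// Sketch of how a handle is validated (the real check lives in
// `VirtualFile::lock_file` below); the tag comparison is what detects that a
// slot was recycled for some other file:
//
//     let slot = &open_files.slots[handle.index];
//     let guard = slot.inner.read().await;
//     let still_ours = guard.tag == handle.tag && guard.file.is_some();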
114 :
115 : /// OPEN_FILES is the global array that holds the physical file descriptors that
116 : /// are currently open. Each slot in the array is protected by a separate lock,
117 : /// so that different files can be accessed independently. The lock must be held
118 : /// in write mode to replace the slot with a different file, but a read mode
119 : /// is enough to operate on the file, whether you're reading or writing to it.
120 : ///
121 : /// OPEN_FILES starts in uninitialized state, and it's initialized by
122 : /// the virtual_file::init() function. It must be called exactly once at page
123 : /// server startup.
124 : static OPEN_FILES: OnceCell<OpenFiles> = OnceCell::new();
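// Startup sketch (hypothetical variable names; the real entry point is the
// `init` function near the bottom of this file):
//
//     virtual_file::init(num_slots, engine, io_buffer_alignment);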
125 :
126 : struct OpenFiles {
127 : slots: &'static [Slot],
128 :
129 : /// clock arm for the clock algorithm
130 : next: AtomicUsize,
131 : }
132 :
133 : struct Slot {
134 : inner: RwLock<SlotInner>,
135 :
136 : /// has this file been used since last clock sweep?
137 : recently_used: AtomicBool,
138 : }
139 :
140 : struct SlotInner {
141 : /// Counter that's incremented every time a different file is stored here.
142 : /// To avoid the ABA problem.
143 : tag: u64,
144 :
145 : /// the underlying file
146 : file: Option<OwnedFd>,
147 : }
148 :
149 : /// Impl of [`tokio_epoll_uring::IoBuf`] and [`tokio_epoll_uring::IoBufMut`] for [`PageWriteGuard`].
150 : struct PageWriteGuardBuf {
151 : page: PageWriteGuard<'static>,
152 : }
153 : // Safety: the [`PageWriteGuard`] gives us exclusive ownership of the page cache slot,
154 : // and the location remains stable even if [`Self`] or the [`PageWriteGuard`] is moved.
155 : // Page cache pages are zero-initialized, so, wrt uninitialized memory we're good.
156 : // (Page cache tracks separately whether the contents are valid, see `PageWriteGuard::mark_valid`.)
157 : unsafe impl tokio_epoll_uring::IoBuf for PageWriteGuardBuf {
158 621692 : fn stable_ptr(&self) -> *const u8 {
159 621692 : self.page.as_ptr()
160 621692 : }
161 1165635 : fn bytes_init(&self) -> usize {
162 1165635 : self.page.len()
163 1165635 : }
164 466194 : fn bytes_total(&self) -> usize {
165 466194 : self.page.len()
166 466194 : }
167 : }
168 : // Safety: see above, plus: the ownership of [`PageWriteGuard`] means exclusive access,
169 : // hence it's safe to hand out the `stable_mut_ptr()`.
170 : unsafe impl tokio_epoll_uring::IoBufMut for PageWriteGuardBuf {
171 233147 : fn stable_mut_ptr(&mut self) -> *mut u8 {
172 233147 : self.page.as_mut_ptr()
173 233147 : }
174 :
175 155398 : unsafe fn set_init(&mut self, pos: usize) {
176 155398 : // There shouldn't really be any reason to call this API since bytes_init() == bytes_total().
177 155398 : assert!(pos <= self.page.len());
178 155398 : }
179 : }
180 :
181 : impl OpenFiles {
182 : /// Find a slot to use, evicting an existing file descriptor if needed.
183 : ///
184 : /// On return, we hold a lock on the slot, its 'tag' has been updated, and
185 : /// 'recently_used' has been set. It's all ready for reuse.
186 583169 : async fn find_victim_slot(&self) -> (SlotHandle, RwLockWriteGuard<SlotInner>) {
187 583169 : //
188 583169 : // Run the clock algorithm to find a slot to replace.
189 583169 : //
190 583169 : let num_slots = self.slots.len();
191 583169 : let mut retries = 0;
192 : let mut slot;
193 : let mut slot_guard;
194 : let index;
195 7038115 : loop {
196 7038115 : let next = self.next.fetch_add(1, Ordering::AcqRel) % num_slots;
197 7038115 : slot = &self.slots[next];
198 7038115 :
199 7038115 : // If the recently_used flag on this slot is set, continue the clock
200 7038115 : // sweep. Otherwise try to use this slot. If we cannot acquire the
201 7038115 : // lock, also continue the clock sweep.
202 7038115 : //
203 7038115 : // We only continue in this manner for a while, though. If we loop
204 7038115 : // through the array twice without finding a victim, just pick the
205 7038115 : // next slot and wait until we can reuse it. This way, we avoid
206 7038115 : // spinning in the extreme case that all the slots are busy with an
207 7038115 : // I/O operation.
208 7038115 : if retries < num_slots * 2 {
209 6771938 : if !slot.recently_used.swap(false, Ordering::Release) {
210 6146799 : if let Ok(guard) = slot.inner.try_write() {
211 316992 : slot_guard = guard;
212 316992 : index = next;
213 316992 : break;
214 5829807 : }
215 625139 : }
216 6454946 : retries += 1;
217 : } else {
218 266177 : slot_guard = slot.inner.write().await;
219 266177 : index = next;
220 266177 : break;
221 : }
222 : }
223 :
224 : //
225 : // We now have the victim slot locked. If it was in use previously, close the
226 : // old file.
227 : //
228 583169 : if let Some(old_file) = slot_guard.file.take() {
229 568966 : // the normal path of dropping VirtualFile uses "close", use "close-by-replace" here to
230 568966 : // distinguish the two.
231 568966 : STORAGE_IO_TIME_METRIC
232 568966 : .get(StorageIoOperation::CloseByReplace)
233 568966 : .observe_closure_duration(|| drop(old_file));
234 568966 : }
235 :
236 : // Prepare the slot for reuse and return it
237 583169 : slot_guard.tag += 1;
238 583169 : slot.recently_used.store(true, Ordering::Relaxed);
239 583169 : (
240 583169 : SlotHandle {
241 583169 : index,
242 583169 : tag: slot_guard.tag,
243 583169 : },
244 583169 : slot_guard,
245 583169 : )
246 583169 : }
247 : }
248 :
249 : /// Identify error types that should always terminate the process. Other
250 : /// error types may be eligible for retry.
251 6 : pub(crate) fn is_fatal_io_error(e: &std::io::Error) -> bool {
252 6 : use nix::errno::Errno::*;
253 6 : match e.raw_os_error().map(nix::errno::from_i32) {
254 : Some(EIO) => {
255 : // Terminate on EIO because we no longer trust the device to store
256 : // data safely, or to uphold persistence guarantees on fsync.
257 0 : true
258 : }
259 : Some(EROFS) => {
260 : // Terminate on EROFS because a filesystem is usually remounted
261 : // readonly when it has experienced some critical issue, so the same
262 : // logic as EIO applies.
263 0 : true
264 : }
265 : Some(EACCES) => {
266 : // Terminate on EACCES because we should always have permissions
267 : // for our own data dir: if we don't, then we can't do our job and
268 : // need administrative intervention to fix permissions. Terminating
269 : // is the best way to make sure we stop cleanly rather than going
270 : // into infinite retry loops, and will make it clear to the outside
271 : // world that we need help.
272 0 : true
273 : }
274 : _ => {
275 : // Treat all other local file I/O errors as retryable. This includes:
276 : // - ENOSPC: we stay up and wait for eviction to free some space
277 : // - EINVAL, EBADF, EBADFD: this is a code bug, not a filesystem/hardware issue
278 : // - WriteZero, Interrupted: these are used internally by VirtualFile
279 6 : false
280 : }
281 : }
282 6 : }
283 :
284 : /// Call this when the local filesystem gives us an error with an external
285 : /// cause: this includes EIO, EROFS, and EACCES: all these indicate either
286 : /// bad storage or bad configuration, and we can't fix that from inside
287 : /// a running process.
288 0 : pub(crate) fn on_fatal_io_error(e: &std::io::Error, context: &str) -> ! {
289 0 : tracing::error!("Fatal I/O error: {e}: {context}");
290 0 : std::process::abort();
291 : }
292 :
293 : pub(crate) trait MaybeFatalIo<T> {
294 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T>;
295 : fn fatal_err(self, context: &str) -> T;
296 : }
297 :
298 : impl<T> MaybeFatalIo<T> for std::io::Result<T> {
299 : /// Terminate the process if the result is an error of a fatal type, else pass it through
300 : ///
301 : /// This is appropriate for writes, where we typically want to die on EIO/EACCES etc, but
302 : /// not on ENOSPC.
303 3407298 : fn maybe_fatal_err(self, context: &str) -> std::io::Result<T> {
304 3407298 : if let Err(e) = &self {
305 6 : if is_fatal_io_error(e) {
306 0 : on_fatal_io_error(e, context);
307 6 : }
308 3407292 : }
309 3407298 : self
310 3407298 : }
311 :
312 : /// Terminate the process on any I/O error.
313 : ///
314 : /// This is appropriate for reads on files that we know exist: they should always work.
315 6114 : fn fatal_err(self, context: &str) -> T {
316 6114 : match self {
317 6114 : Ok(v) => v,
318 0 : Err(e) => {
319 0 : on_fatal_io_error(&e, context);
320 : }
321 : }
322 6114 : }
323 : }
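// Usage sketch for the two flavors (hypothetical call sites; the methods are
// the ones defined above):
//
//     // Writes: abort on EIO/EROFS/EACCES, but let e.g. ENOSPC bubble up.
//     std::fs::write(&path, &bytes).maybe_fatal_err("write layer file")?;
//
//     // Reads of files we know exist: any error means we can't do our job.
//     let meta = std::fs::metadata(&path).fatal_err("stat our own data dir");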
324 :
325 : /// Observe duration for the given storage I/O operation
326 : ///
327 : /// Unlike `observe_closure_duration`, this supports async,
328 : /// where "support" means that we measure wall clock time.
329 : macro_rules! observe_duration {
330 : ($op:expr, $($body:tt)*) => {{
331 : let instant = Instant::now();
332 : let result = $($body)*;
333 : let elapsed = instant.elapsed().as_secs_f64();
334 : STORAGE_IO_TIME_METRIC
335 : .get($op)
336 : .observe(elapsed);
337 : result
338 : }}
339 : }
340 :
341 : macro_rules! with_file {
342 : ($this:expr, $op:expr, | $ident:ident | $($body:tt)*) => {{
343 : let $ident = $this.lock_file().await?;
344 : observe_duration!($op, $($body)*)
345 : }};
346 : ($this:expr, $op:expr, | mut $ident:ident | $($body:tt)*) => {{
347 : let mut $ident = $this.lock_file().await?;
348 : observe_duration!($op, $($body)*)
349 : }};
350 : }
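// For example, `with_file!(self, StorageIoOperation::Fsync, |file_guard| body)`
// expands to:
//
//     let file_guard = self.lock_file().await?;
//     observe_duration!(StorageIoOperation::Fsync, body)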
351 :
352 : impl VirtualFile {
353 : /// Open a file in read-only mode. Like File::open.
354 6576 : pub async fn open<P: AsRef<Utf8Path>>(
355 6576 : path: P,
356 6576 : ctx: &RequestContext,
357 6576 : ) -> Result<VirtualFile, std::io::Error> {
358 6576 : Self::open_with_options(path.as_ref(), OpenOptions::new().read(true), ctx).await
359 6576 : }
360 :
361 : /// Create a new file for writing. If the file exists, it will be truncated.
362 : /// Like File::create.
363 4377 : pub async fn create<P: AsRef<Utf8Path>>(
364 4377 : path: P,
365 4377 : ctx: &RequestContext,
366 4377 : ) -> Result<VirtualFile, std::io::Error> {
367 4377 : Self::open_with_options(
368 4377 : path.as_ref(),
369 4377 : OpenOptions::new().write(true).create(true).truncate(true),
370 4377 : ctx,
371 4377 : )
372 2252 : .await
373 4377 : }
374 :
375 : /// Open a file with given options.
376 : ///
377 : /// Note: If any custom flags were set in 'open_options' through OpenOptionsExt,
378 : /// they will be applied also when the file is subsequently re-opened, not only
379 : /// on the first time. Make sure that's sane!
380 17475 : pub async fn open_with_options<P: AsRef<Utf8Path>>(
381 17475 : path: P,
382 17475 : open_options: &OpenOptions,
383 17475 : _ctx: &RequestContext, /* TODO: carry a pointer to the metrics in the RequestContext instead of the parsing https://github.com/neondatabase/neon/issues/6107 */
384 17475 : ) -> Result<VirtualFile, std::io::Error> {
385 17475 : let path_ref = path.as_ref();
386 17475 : let path_str = path_ref.to_string();
387 17475 : let parts = path_str.split('/').collect::<Vec<&str>>();
388 17475 : let (tenant_id, shard_id, timeline_id) =
389 17475 : if parts.len() > 5 && parts[parts.len() - 5] == TENANTS_SEGMENT_NAME {
390 12993 : let tenant_shard_part = parts[parts.len() - 4];
391 12993 : let (tenant_id, shard_id) = match tenant_shard_part.parse::<TenantShardId>() {
392 12993 : Ok(tenant_shard_id) => (
393 12993 : tenant_shard_id.tenant_id.to_string(),
394 12993 : format!("{}", tenant_shard_id.shard_slug()),
395 12993 : ),
396 : Err(_) => {
397 : // Malformed path: this ID is just for observability, so tolerate it
398 : // and pass through
399 0 : (tenant_shard_part.to_string(), "*".to_string())
400 : }
401 : };
402 12993 : (tenant_id, shard_id, parts[parts.len() - 2].to_string())
403 : } else {
404 4482 : ("*".to_string(), "*".to_string(), "*".to_string())
405 : };
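// For illustration, a path of the (hypothetical) shape
// ".../tenants/<tenant_shard_id>/timelines/<timeline_id>/<layer file>"
// decomposes as: parts[len - 5] == "tenants", parts[len - 4] is the tenant
// shard id, and parts[len - 2] is the timeline id; any other shape falls back
// to ("*", "*", "*").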
406 17475 : let (handle, mut slot_guard) = get_open_files().find_victim_slot().await;
407 :
408 : // NB: there is also StorageIoOperation::OpenAfterReplace which is for the case
409 : // where our caller doesn't get to use the returned VirtualFile before its
410 : // slot gets re-used by someone else.
411 17475 : let file = observe_duration!(StorageIoOperation::Open, {
412 17475 : open_options.open(path_ref.as_std_path()).await?
413 : });
414 :
415 : // Strip all options other than read and write.
416 : //
417 : // It would perhaps be nicer to check just for the read and write flags
418 : // explicitly, but OpenOptions doesn't contain any functions to read flags,
419 : // only to set them.
420 17475 : let mut reopen_options = open_options.clone();
421 17475 : reopen_options.create(false);
422 17475 : reopen_options.create_new(false);
423 17475 : reopen_options.truncate(false);
424 17475 :
425 17475 : let vfile = VirtualFile {
426 17475 : handle: RwLock::new(handle),
427 17475 : pos: 0,
428 17475 : path: path_ref.to_path_buf(),
429 17475 : open_options: reopen_options,
430 17475 : tenant_id,
431 17475 : shard_id,
432 17475 : timeline_id,
433 17475 : };
434 17475 :
435 17475 : // TODO: Under pressure, it's likely the slot will get re-used and
436 17475 : // the underlying file closed before they get around to using it.
437 17475 : // => https://github.com/neondatabase/neon/issues/6065
438 17475 : slot_guard.file.replace(file);
439 17475 :
440 17475 : Ok(vfile)
441 17475 : }
442 :
443 : /// Async version of [`::utils::crashsafe::overwrite`].
444 : ///
445 : /// # NB:
446 : ///
447 : /// Doesn't actually use the [`VirtualFile`] file descriptor cache, but
448 : /// it did at an earlier time.
449 : /// And it will use this module's [`io_engine`] in the near future, so it's left here.
450 84 : pub async fn crashsafe_overwrite<B: BoundedBuf<Buf = Buf> + Send, Buf: IoBuf + Send>(
451 84 : final_path: Utf8PathBuf,
452 84 : tmp_path: Utf8PathBuf,
453 84 : content: B,
454 84 : ) -> std::io::Result<()> {
455 84 : // TODO: use tokio_epoll_uring if configured as `io_engine`.
456 84 : // See https://github.com/neondatabase/neon/issues/6663
457 84 :
458 84 : tokio::task::spawn_blocking(move || {
459 84 : let slice_storage;
460 84 : let content_len = content.bytes_init();
461 84 : let content = if content.bytes_init() > 0 {
462 84 : slice_storage = Some(content.slice(0..content_len));
463 84 : slice_storage.as_deref().expect("just set it to Some()")
464 : } else {
465 0 : &[]
466 : };
467 84 : utils::crashsafe::overwrite(&final_path, &tmp_path, content)
468 84 : })
469 84 : .await
470 84 : .expect("blocking task is never aborted")
471 84 : }
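// Usage sketch (mirrors `test_atomic_overwrite_basic` at the bottom of this
// file); the content goes to `tmp_path` first and is then renamed over
// `final_path`, so a crash leaves either the old or the new file, never a
// torn mix:
//
//     VirtualFile::crashsafe_overwrite(final_path, tmp_path, b"contents".to_vec())
//         .await?;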
472 :
473 : /// Call File::sync_all() on the underlying File.
474 8115 : pub async fn sync_all(&self) -> Result<(), Error> {
475 8115 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
476 8115 : let (_file_guard, res) = io_engine::get().sync_all(file_guard).await;
477 8115 : res
478 : })
479 8115 : }
480 :
481 : /// Call File::sync_data() on the underlying File.
482 0 : pub async fn sync_data(&self) -> Result<(), Error> {
483 0 : with_file!(self, StorageIoOperation::Fsync, |file_guard| {
484 0 : let (_file_guard, res) = io_engine::get().sync_data(file_guard).await;
485 0 : res
486 : })
487 0 : }
488 :
489 5106 : pub async fn metadata(&self) -> Result<Metadata, Error> {
490 5106 : with_file!(self, StorageIoOperation::Metadata, |file_guard| {
491 5106 : let (_file_guard, res) = io_engine::get().metadata(file_guard).await;
492 5106 : res
493 : })
494 5106 : }
495 :
496 : /// Helper function internal to `VirtualFile` that looks up the underlying File,
497 : /// opens it and evicts some other File if necessary. The passed parameter is
498 : /// assumed to be a function available for the physical `File`.
499 : ///
500 : /// We are doing it via a macro as Rust doesn't support async closures that
501 : /// take on parameters with lifetimes.
502 5779146 : async fn lock_file(&self) -> Result<FileGuard, Error> {
503 5779146 : let open_files = get_open_files();
504 :
505 565694 : let mut handle_guard = {
506 : // Read the cached slot handle, and see if the slot that it points to still
507 : // contains our File.
508 : //
509 : // We only need to hold the handle lock while we read the current handle. If
510 : // another thread closes the file and recycles the slot for a different file,
511 : // we will notice that the handle we read is no longer valid and retry.
512 5779146 : let mut handle = *self.handle.read().await;
513 6071249 : loop {
514 6071249 : // Check if the slot contains our File
515 6071249 : {
516 6071249 : let slot = &open_files.slots[handle.index];
517 6071249 : let slot_guard = slot.inner.read().await;
518 6071249 : if slot_guard.tag == handle.tag && slot_guard.file.is_some() {
519 : // Found a cached file descriptor.
520 5213452 : slot.recently_used.store(true, Ordering::Relaxed);
521 5213452 : return Ok(FileGuard { slot_guard });
522 857797 : }
523 : }
524 :
525 : // The slot didn't contain our File. We will have to open it ourselves,
526 : // but before that, grab a write lock on handle in the VirtualFile, so
527 : // that no other thread will try to concurrently open the same file.
528 857797 : let handle_guard = self.handle.write().await;
529 :
530 : // If another thread changed the handle while we were not holding the lock,
531 : // then the handle might now be valid again. Loop back to retry.
532 857797 : if *handle_guard != handle {
533 292103 : handle = *handle_guard;
534 292103 : continue;
535 565694 : }
536 565694 : break handle_guard;
537 : }
538 : };
539 :
540 : // We need to open the file ourselves. The handle in the VirtualFile is
541 : // now locked in write-mode. Find a free slot to put it in.
542 565694 : let (handle, mut slot_guard) = open_files.find_victim_slot().await;
543 :
544 : // Re-open the physical file.
545 : // NB: we use StorageIoOperation::OpenAfterReplace for this to distinguish this
546 : // case from StorageIoOperation::Open. This helps with identifying thrashing
547 : // of the virtual file descriptor cache.
548 565694 : let file = observe_duration!(StorageIoOperation::OpenAfterReplace, {
549 565694 : self.open_options.open(self.path.as_std_path()).await?
550 : });
551 :
552 : // Store the File in the slot and update the handle in the VirtualFile
553 : // to point to it.
554 565694 : slot_guard.file.replace(file);
555 565694 :
556 565694 : *handle_guard = handle;
557 565694 :
558 565694 : return Ok(FileGuard {
559 565694 : slot_guard: slot_guard.downgrade(),
560 565694 : });
561 5779146 : }
562 :
563 618 : pub fn remove(self) {
564 618 : let path = self.path.clone();
565 618 : drop(self);
566 618 : std::fs::remove_file(path).expect("failed to remove the virtual file");
567 618 : }
568 :
569 15972 : pub async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
570 15972 : match pos {
571 15942 : SeekFrom::Start(offset) => {
572 15942 : self.pos = offset;
573 15942 : }
574 12 : SeekFrom::End(offset) => {
575 12 : self.pos = with_file!(self, StorageIoOperation::Seek, |mut file_guard| file_guard
576 12 : .with_std_file_mut(|std_file| std_file.seek(SeekFrom::End(offset))))?
577 : }
578 18 : SeekFrom::Current(offset) => {
579 18 : let pos = self.pos as i128 + offset as i128;
580 18 : if pos < 0 {
581 6 : return Err(Error::new(
582 6 : ErrorKind::InvalidInput,
583 6 : "offset would be negative",
584 6 : ));
585 12 : }
586 12 : if pos > u64::MAX as i128 {
587 0 : return Err(Error::new(ErrorKind::InvalidInput, "offset overflow"));
588 12 : }
589 12 : self.pos = pos as u64;
590 : }
591 : }
592 15960 : Ok(self.pos)
593 15972 : }
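// Example (sketch, as exercised by `test_files` below): for a 6-byte file
// containing "foobar", `file.seek(SeekFrom::End(-2)).await?` returns 4, and a
// subsequent read from the current position yields "ar".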
594 :
595 : /// Read the file contents in range `offset..(offset + slice.bytes_total())` into `slice[0..slice.bytes_total()]`.
596 : ///
597 : /// The returned `Slice<Buf>` is equivalent to the input `slice`, i.e., it's the same view into the same buffer.
598 2357349 : pub async fn read_exact_at<Buf>(
599 2357349 : &self,
600 2357349 : slice: Slice<Buf>,
601 2357349 : offset: u64,
602 2357349 : ctx: &RequestContext,
603 2357349 : ) -> Result<Slice<Buf>, Error>
604 2357349 : where
605 2357349 : Buf: IoBufMut + Send,
606 2357349 : {
607 2357349 : let assert_we_return_original_bounds = if cfg!(debug_assertions) {
608 2357349 : Some((slice.stable_ptr() as usize, slice.bytes_total()))
609 : } else {
610 0 : None
611 : };
612 :
613 2357349 : let original_bounds = slice.bounds();
614 2357349 : let (buf, res) =
615 2694849 : read_exact_at_impl(slice, offset, |buf, offset| self.read_at(buf, offset, ctx)).await;
616 2357349 : let res = res.map(|_| buf.slice(original_bounds));
617 :
618 2357349 : if let Some(original_bounds) = assert_we_return_original_bounds {
619 2357349 : if let Ok(slice) = &res {
620 2357349 : let returned_bounds = (slice.stable_ptr() as usize, slice.bytes_total());
621 2357349 : assert_eq!(original_bounds, returned_bounds);
622 0 : }
623 0 : }
624 :
625 2357349 : res
626 2357349 : }
627 :
628 : /// Like [`Self::read_exact_at`] but for [`PageWriteGuard`].
629 155398 : pub async fn read_exact_at_page(
630 155398 : &self,
631 155398 : page: PageWriteGuard<'static>,
632 155398 : offset: u64,
633 155398 : ctx: &RequestContext,
634 155398 : ) -> Result<PageWriteGuard<'static>, Error> {
635 155398 : let buf = PageWriteGuardBuf { page }.slice_full();
636 155398 : debug_assert_eq!(buf.bytes_total(), PAGE_SZ);
637 155398 : self.read_exact_at(buf, offset, ctx)
638 96205 : .await
639 155398 : .map(|slice| slice.into_inner().page)
640 155398 : }
641 :
642 : // Copied from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#219-235
643 12 : pub async fn write_all_at<Buf: IoBuf + Send>(
644 12 : &self,
645 12 : buf: FullSlice<Buf>,
646 12 : mut offset: u64,
647 12 : ctx: &RequestContext,
648 12 : ) -> (FullSlice<Buf>, Result<(), Error>) {
649 12 : let buf = buf.into_raw_slice();
650 12 : let bounds = buf.bounds();
651 12 : let restore =
652 12 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
653 12 : let mut buf = buf;
654 24 : while !buf.is_empty() {
655 12 : let (tmp, res) = self.write_at(FullSlice::must_new(buf), offset, ctx).await;
656 12 : buf = tmp.into_raw_slice();
657 0 : match res {
658 : Ok(0) => {
659 0 : return (
660 0 : restore(buf),
661 0 : Err(Error::new(
662 0 : std::io::ErrorKind::WriteZero,
663 0 : "failed to write whole buffer",
664 0 : )),
665 0 : );
666 : }
667 12 : Ok(n) => {
668 12 : buf = buf.slice(n..);
669 12 : offset += n as u64;
670 12 : }
671 0 : Err(e) if e.kind() == std::io::ErrorKind::Interrupted => {}
672 0 : Err(e) => return (restore(buf), Err(e)),
673 : }
674 : }
675 12 : (restore(buf), Ok(()))
676 12 : }
677 :
678 : /// Writes `buf` to the file at the current offset.
679 : ///
680 : /// Panics if there is an uninitialized range in `buf`, as that is most likely a bug in the caller.
681 3407334 : pub async fn write_all<Buf: IoBuf + Send>(
682 3407334 : &mut self,
683 3407334 : buf: FullSlice<Buf>,
684 3407334 : ctx: &RequestContext,
685 3407334 : ) -> (FullSlice<Buf>, Result<usize, Error>) {
686 3407334 : let buf = buf.into_raw_slice();
687 3407334 : let bounds = buf.bounds();
688 3407334 : let restore =
689 3407334 : |buf: Slice<_>| FullSlice::must_new(Slice::from_buf_bounds(buf.into_inner(), bounds));
690 3407334 : let nbytes = buf.len();
691 3407334 : let mut buf = buf;
692 6814548 : while !buf.is_empty() {
693 3407220 : let (tmp, res) = self.write(FullSlice::must_new(buf), ctx).await;
694 3407220 : buf = tmp.into_raw_slice();
695 6 : match res {
696 : Ok(0) => {
697 0 : return (
698 0 : restore(buf),
699 0 : Err(Error::new(
700 0 : std::io::ErrorKind::WriteZero,
701 0 : "failed to write whole buffer",
702 0 : )),
703 0 : );
704 : }
705 3407214 : Ok(n) => {
706 3407214 : buf = buf.slice(n..);
707 3407214 : }
708 6 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
709 6 : Err(e) => return (restore(buf), Err(e)),
710 : }
711 : }
712 3407328 : (restore(buf), Ok(nbytes))
713 3407334 : }
714 :
715 3407220 : async fn write<B: IoBuf + Send>(
716 3407220 : &mut self,
717 3407220 : buf: FullSlice<B>,
718 3407220 : ctx: &RequestContext,
719 3407220 : ) -> (FullSlice<B>, Result<usize, std::io::Error>) {
720 3407220 : let pos = self.pos;
721 3407220 : let (buf, res) = self.write_at(buf, pos, ctx).await;
722 3407220 : let n = match res {
723 3407214 : Ok(n) => n,
724 6 : Err(e) => return (buf, Err(e)),
725 : };
726 3407214 : self.pos += n as u64;
727 3407214 : (buf, Ok(n))
728 3407220 : }
729 :
730 2358681 : pub(crate) async fn read_at<Buf>(
731 2358681 : &self,
732 2358681 : buf: tokio_epoll_uring::Slice<Buf>,
733 2358681 : offset: u64,
734 2358681 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
735 2358681 : ) -> (tokio_epoll_uring::Slice<Buf>, Result<usize, Error>)
736 2358681 : where
737 2358681 : Buf: tokio_epoll_uring::IoBufMut + Send,
738 2358681 : {
739 2358681 : let file_guard = match self.lock_file().await {
740 2358681 : Ok(file_guard) => file_guard,
741 0 : Err(e) => return (buf, Err(e)),
742 : };
743 :
744 2358681 : observe_duration!(StorageIoOperation::Read, {
745 2358681 : let ((_file_guard, buf), res) = io_engine::get().read_at(file_guard, offset, buf).await;
746 2358681 : if let Ok(size) = res {
747 2358675 : STORAGE_IO_SIZE
748 2358675 : .with_label_values(&[
749 2358675 : "read",
750 2358675 : &self.tenant_id,
751 2358675 : &self.shard_id,
752 2358675 : &self.timeline_id,
753 2358675 : ])
754 2358675 : .add(size as i64);
755 2358675 : }
756 2358681 : (buf, res)
757 : })
758 2358681 : }
759 :
760 : /// The function aborts the process if the error is fatal.
761 3407232 : async fn write_at<B: IoBuf + Send>(
762 3407232 : &self,
763 3407232 : buf: FullSlice<B>,
764 3407232 : offset: u64,
765 3407232 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
766 3407232 : ) -> (FullSlice<B>, Result<usize, Error>) {
767 3407232 : let (slice, result) = self.write_at_inner(buf, offset, _ctx).await;
768 3407232 : let result = result.maybe_fatal_err("write_at");
769 3407232 : (slice, result)
770 3407232 : }
771 :
772 3407232 : async fn write_at_inner<B: IoBuf + Send>(
773 3407232 : &self,
774 3407232 : buf: FullSlice<B>,
775 3407232 : offset: u64,
776 3407232 : _ctx: &RequestContext, /* TODO: use for metrics: https://github.com/neondatabase/neon/issues/6107 */
777 3407232 : ) -> (FullSlice<B>, Result<usize, Error>) {
778 3407232 : let file_guard = match self.lock_file().await {
779 3407232 : Ok(file_guard) => file_guard,
780 0 : Err(e) => return (buf, Err(e)),
781 : };
782 3407232 : observe_duration!(StorageIoOperation::Write, {
783 3407232 : let ((_file_guard, buf), result) =
784 3407232 : io_engine::get().write_at(file_guard, offset, buf).await;
785 3407232 : if let Ok(size) = result {
786 3407226 : STORAGE_IO_SIZE
787 3407226 : .with_label_values(&[
788 3407226 : "write",
789 3407226 : &self.tenant_id,
790 3407226 : &self.shard_id,
791 3407226 : &self.timeline_id,
792 3407226 : ])
793 3407226 : .add(size as i64);
794 3407226 : }
795 3407232 : (buf, result)
796 : })
797 3407232 : }
798 : }
799 :
800 : // Adapted from https://doc.rust-lang.org/1.72.0/src/std/os/unix/fs.rs.html#117-135
801 2357373 : pub async fn read_exact_at_impl<Buf, F, Fut>(
802 2357373 : mut buf: tokio_epoll_uring::Slice<Buf>,
803 2357373 : mut offset: u64,
804 2357373 : mut read_at: F,
805 2357373 : ) -> (Buf, std::io::Result<()>)
806 2357373 : where
807 2357373 : Buf: IoBufMut + Send,
808 2357373 : F: FnMut(tokio_epoll_uring::Slice<Buf>, u64) -> Fut,
809 2357373 : Fut: std::future::Future<Output = (tokio_epoll_uring::Slice<Buf>, std::io::Result<usize>)>,
810 2357373 : {
811 4714752 : while buf.bytes_total() != 0 {
812 : let res;
813 2694849 : (buf, res) = read_at(buf, offset).await;
814 0 : match res {
815 6 : Ok(0) => break,
816 2357379 : Ok(n) => {
817 2357379 : buf = buf.slice(n..);
818 2357379 : offset += n as u64;
819 2357379 : }
820 0 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
821 0 : Err(e) => return (buf.into_inner(), Err(e)),
822 : }
823 : }
824 : // NB: don't use `buf.is_empty()` here; it is from the
825 : // `impl Deref for Slice { Target = [u8] }`; the &[u8]
826 : // returned by it only covers the initialized portion of `buf`.
827 : // Whereas we're interested in ensuring that we filled the entire
828 : // buffer that the user passed in.
829 2357373 : if buf.bytes_total() != 0 {
830 6 : (
831 6 : buf.into_inner(),
832 6 : Err(std::io::Error::new(
833 6 : std::io::ErrorKind::UnexpectedEof,
834 6 : "failed to fill whole buffer",
835 6 : )),
836 6 : )
837 : } else {
838 2357367 : assert_eq!(buf.len(), buf.bytes_total());
839 2357367 : (buf.into_inner(), Ok(()))
840 : }
841 2357373 : }
842 :
843 : #[cfg(test)]
844 : mod test_read_exact_at_impl {
845 :
846 : use std::{collections::VecDeque, sync::Arc};
847 :
848 : use tokio_epoll_uring::{BoundedBuf, BoundedBufMut};
849 :
850 : use super::read_exact_at_impl;
851 :
852 : struct Expectation {
853 : offset: u64,
854 : bytes_total: usize,
855 : result: std::io::Result<Vec<u8>>,
856 : }
857 : struct MockReadAt {
858 : expectations: VecDeque<Expectation>,
859 : }
860 :
861 : impl MockReadAt {
862 36 : async fn read_at(
863 36 : &mut self,
864 36 : mut buf: tokio_epoll_uring::Slice<Vec<u8>>,
865 36 : offset: u64,
866 36 : ) -> (tokio_epoll_uring::Slice<Vec<u8>>, std::io::Result<usize>) {
867 36 : let exp = self
868 36 : .expectations
869 36 : .pop_front()
870 36 : .expect("read_at called but we have no expectations left");
871 36 : assert_eq!(exp.offset, offset);
872 36 : assert_eq!(exp.bytes_total, buf.bytes_total());
873 36 : match exp.result {
874 36 : Ok(bytes) => {
875 36 : assert!(bytes.len() <= buf.bytes_total());
876 36 : buf.put_slice(&bytes);
877 36 : (buf, Ok(bytes.len()))
878 : }
879 0 : Err(e) => (buf, Err(e)),
880 : }
881 36 : }
882 : }
883 :
884 : impl Drop for MockReadAt {
885 24 : fn drop(&mut self) {
886 24 : assert_eq!(self.expectations.len(), 0);
887 24 : }
888 : }
889 :
890 : #[tokio::test]
891 6 : async fn test_basic() {
892 6 : let buf = Vec::with_capacity(5).slice_full();
893 6 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
894 6 : expectations: VecDeque::from(vec![Expectation {
895 6 : offset: 0,
896 6 : bytes_total: 5,
897 6 : result: Ok(vec![b'a', b'b', b'c', b'd', b'e']),
898 6 : }]),
899 6 : }));
900 6 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
901 6 : let mock_read_at = Arc::clone(&mock_read_at);
902 6 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
903 6 : })
904 6 : .await;
905 6 : assert!(res.is_ok());
906 6 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd', b'e']);
907 6 : }
908 :
909 : #[tokio::test]
910 6 : async fn test_empty_buf_issues_no_syscall() {
911 6 : let buf = Vec::new().slice_full();
912 6 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
913 6 : expectations: VecDeque::new(),
914 6 : }));
915 6 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
916 0 : let mock_read_at = Arc::clone(&mock_read_at);
917 6 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
918 6 : })
919 6 : .await;
920 6 : assert!(res.is_ok());
921 6 : }
922 :
923 : #[tokio::test]
924 6 : async fn test_two_read_at_calls_needed_until_buf_filled() {
925 6 : let buf = Vec::with_capacity(4).slice_full();
926 6 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
927 6 : expectations: VecDeque::from(vec![
928 6 : Expectation {
929 6 : offset: 0,
930 6 : bytes_total: 4,
931 6 : result: Ok(vec![b'a', b'b']),
932 6 : },
933 6 : Expectation {
934 6 : offset: 2,
935 6 : bytes_total: 2,
936 6 : result: Ok(vec![b'c', b'd']),
937 6 : },
938 6 : ]),
939 6 : }));
940 12 : let (buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
941 12 : let mock_read_at = Arc::clone(&mock_read_at);
942 12 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
943 12 : })
944 6 : .await;
945 6 : assert!(res.is_ok());
946 6 : assert_eq!(buf, vec![b'a', b'b', b'c', b'd']);
947 6 : }
948 :
949 : #[tokio::test]
950 6 : async fn test_eof_before_buffer_full() {
951 6 : let buf = Vec::with_capacity(3).slice_full();
952 6 : let mock_read_at = Arc::new(tokio::sync::Mutex::new(MockReadAt {
953 6 : expectations: VecDeque::from(vec![
954 6 : Expectation {
955 6 : offset: 0,
956 6 : bytes_total: 3,
957 6 : result: Ok(vec![b'a']),
958 6 : },
959 6 : Expectation {
960 6 : offset: 1,
961 6 : bytes_total: 2,
962 6 : result: Ok(vec![b'b']),
963 6 : },
964 6 : Expectation {
965 6 : offset: 2,
966 6 : bytes_total: 1,
967 6 : result: Ok(vec![]),
968 6 : },
969 6 : ]),
970 6 : }));
971 18 : let (_buf, res) = read_exact_at_impl(buf, 0, |buf, offset| {
972 18 : let mock_read_at = Arc::clone(&mock_read_at);
973 18 : async move { mock_read_at.lock().await.read_at(buf, offset).await }
974 18 : })
975 6 : .await;
976 6 : let Err(err) = res else {
977 6 : panic!("should return an error");
978 6 : };
979 6 : assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
980 6 : assert_eq!(format!("{err}"), "failed to fill whole buffer");
981 6 : // buffer contents on error are unspecified
982 6 : }
983 : }
984 :
985 : struct FileGuard {
986 : slot_guard: RwLockReadGuard<'static, SlotInner>,
987 : }
988 :
989 : impl AsRef<OwnedFd> for FileGuard {
990 5779146 : fn as_ref(&self) -> &OwnedFd {
991 5779146 : // This unwrap is safe because we only create `FileGuard`s
992 5779146 : // if we know that the file is Some.
993 5779146 : self.slot_guard.file.as_ref().unwrap()
994 5779146 : }
995 : }
996 :
997 : impl FileGuard {
998 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
999 2890804 : fn with_std_file<F, R>(&self, with: F) -> R
1000 2890804 : where
1001 2890804 : F: FnOnce(&File) -> R,
1002 2890804 : {
1003 2890804 : // SAFETY:
1004 2890804 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1005 2890804 : // - `&` usage below: `self` is `&`, hence Rust typesystem guarantees there is no `&mut`
1006 2890804 : let file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1007 2890804 : let res = with(&file);
1008 2890804 : let _ = file.into_raw_fd();
1009 2890804 : res
1010 2890804 : }
1011 : /// Soft deprecation: we'll move VirtualFile to async APIs and remove this function eventually.
1012 12 : fn with_std_file_mut<F, R>(&mut self, with: F) -> R
1013 12 : where
1014 12 : F: FnOnce(&mut File) -> R,
1015 12 : {
1016 12 : // SAFETY:
1017 12 : // - lifetime of the fd: `file` doesn't outlive the OwnedFd stored in `self`.
1018 12 : // - &mut usage below: `self` is `&mut`, hence this call is the only task/thread that has control over the underlying fd
1019 12 : let mut file = unsafe { File::from_raw_fd(self.as_ref().as_raw_fd()) };
1020 12 : let res = with(&mut file);
1021 12 : let _ = file.into_raw_fd();
1022 12 : res
1023 12 : }
1024 : }
1025 :
1026 : impl tokio_epoll_uring::IoFd for FileGuard {
1027 2888330 : unsafe fn as_fd(&self) -> RawFd {
1028 2888330 : let owned_fd: &OwnedFd = self.as_ref();
1029 2888330 : owned_fd.as_raw_fd()
1030 2888330 : }
1031 : }
1032 :
1033 : #[cfg(test)]
1034 : impl VirtualFile {
1035 62748 : pub(crate) async fn read_blk(
1036 62748 : &self,
1037 62748 : blknum: u32,
1038 62748 : ctx: &RequestContext,
1039 62748 : ) -> Result<crate::tenant::block_io::BlockLease<'_>, std::io::Error> {
1040 62748 : use crate::page_cache::PAGE_SZ;
1041 62748 : let slice = Vec::with_capacity(PAGE_SZ).slice_full();
1042 62748 : assert_eq!(slice.bytes_total(), PAGE_SZ);
1043 62748 : let slice = self
1044 62748 : .read_exact_at(slice, blknum as u64 * (PAGE_SZ as u64), ctx)
1045 31857 : .await?;
1046 62748 : Ok(crate::tenant::block_io::BlockLease::Vec(slice.into_inner()))
1047 62748 : }
1048 :
1049 672 : async fn read_to_end(&mut self, buf: &mut Vec<u8>, ctx: &RequestContext) -> Result<(), Error> {
1050 672 : let mut tmp = vec![0; 128];
1051 : loop {
1052 1332 : let slice = tmp.slice(..128);
1053 1332 : let (slice, res) = self.read_at(slice, self.pos, ctx).await;
1054 6 : match res {
1055 666 : Ok(0) => return Ok(()),
1056 660 : Ok(n) => {
1057 660 : self.pos += n as u64;
1058 660 : buf.extend_from_slice(&slice[..n]);
1059 660 : }
1060 6 : Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => {}
1061 6 : Err(e) => return Err(e),
1062 : }
1063 660 : tmp = slice.into_inner();
1064 : }
1065 672 : }
1066 : }
1067 :
1068 : impl Drop for VirtualFile {
1069 : /// If a VirtualFile is dropped, close the underlying file if it was open.
1070 15138 : fn drop(&mut self) {
1071 15138 : let handle = self.handle.get_mut();
1072 15138 :
1073 15138 : fn clean_slot(slot: &Slot, mut slot_guard: RwLockWriteGuard<'_, SlotInner>, tag: u64) {
1074 15138 : if slot_guard.tag == tag {
1075 15138 : slot.recently_used.store(false, Ordering::Relaxed);
1076 15138 : // there is also operation "close-by-replace" for closes done on eviction for
1077 15138 : // comparison.
1078 15138 : if let Some(fd) = slot_guard.file.take() {
1079 13484 : STORAGE_IO_TIME_METRIC
1080 13484 : .get(StorageIoOperation::Close)
1081 13484 : .observe_closure_duration(|| drop(fd));
1082 13484 : }
1083 15138 : }
1084 15138 : }
1085 15138 :
1086 15138 : // We don't have async drop so we cannot directly await the lock here.
1087 15138 : // Instead, first do a best-effort attempt at closing the underlying
1088 15138 : // file descriptor by using `try_write`, and if that fails, spawn
1089 15138 : // a tokio task to do it asynchronously: we just want it to be
1090 15138 : // cleaned up eventually.
1091 15138 : // Most of the time, the `try_lock` should succeed though,
1092 15138 : // as we have `&mut self` access. In other words, if the slot
1093 15138 : // is still occupied by our file, there should be no access from
1094 15138 : // other I/O operations; the only other possible place to lock
1095 15138 : // the slot is the clock algorithm looking for free slots.
1096 15138 : let slot = &get_open_files().slots[handle.index];
1097 15138 : if let Ok(slot_guard) = slot.inner.try_write() {
1098 15138 : clean_slot(slot, slot_guard, handle.tag);
1099 15138 : } else {
1100 0 : let tag = handle.tag;
1101 0 : tokio::spawn(async move {
1102 0 : let slot_guard = slot.inner.write().await;
1103 0 : clean_slot(slot, slot_guard, tag);
1104 0 : });
1105 0 : };
1106 15138 : }
1107 : }
1108 :
1109 : impl OwnedAsyncWriter for VirtualFile {
1110 : #[inline(always)]
1111 19791 : async fn write_all<Buf: IoBuf + Send>(
1112 19791 : &mut self,
1113 19791 : buf: FullSlice<Buf>,
1114 19791 : ctx: &RequestContext,
1115 19791 : ) -> std::io::Result<(usize, FullSlice<Buf>)> {
1116 19791 : let (buf, res) = VirtualFile::write_all(self, buf, ctx).await;
1117 19791 : res.map(move |v| (v, buf))
1118 19791 : }
1119 : }
1120 :
1121 : impl OpenFiles {
1122 594 : fn new(num_slots: usize) -> OpenFiles {
1123 594 : let mut slots = Box::new(Vec::with_capacity(num_slots));
1124 5940 : for _ in 0..num_slots {
1125 5940 : let slot = Slot {
1126 5940 : recently_used: AtomicBool::new(false),
1127 5940 : inner: RwLock::new(SlotInner { tag: 0, file: None }),
1128 5940 : };
1129 5940 : slots.push(slot);
1130 5940 : }
1131 :
1132 594 : OpenFiles {
1133 594 : next: AtomicUsize::new(0),
1134 594 : slots: Box::leak(slots),
1135 594 : }
1136 594 : }
1137 : }
1138 :
1139 : ///
1140 : /// Initialize the virtual file module. This must be called once at page
1141 : /// server startup.
1142 : ///
1143 : #[cfg(not(test))]
1144 0 : pub fn init(num_slots: usize, engine: IoEngineKind, io_buffer_alignment: usize) {
1145 0 : if OPEN_FILES.set(OpenFiles::new(num_slots)).is_err() {
1146 0 : panic!("virtual_file::init called twice");
1147 0 : }
1148 0 : if set_io_buffer_alignment(io_buffer_alignment).is_err() {
1149 0 : panic!("IO buffer alignment ({io_buffer_alignment}) is not a power of two");
1150 0 : }
1151 0 : io_engine::init(engine);
1152 0 : crate::metrics::virtual_file_descriptor_cache::SIZE_MAX.set(num_slots as u64);
1153 0 : }
1154 :
1155 : const TEST_MAX_FILE_DESCRIPTORS: usize = 10;
1156 :
1157 : // Get a handle to the global slots array.
1158 5811759 : fn get_open_files() -> &'static OpenFiles {
1159 5811759 : //
1160 5811759 : // In unit tests, page server startup doesn't happen and no one calls
1161 5811759 : // virtual_file::init(). Initialize it here, with a small array.
1162 5811759 : //
1163 5811759 : // This applies to the virtual file tests below, but all other unit
1164 5811759 : // tests too, so the virtual file facility is always usable in
1165 5811759 : // unit tests.
1166 5811759 : //
1167 5811759 : if cfg!(test) {
1168 5811759 : OPEN_FILES.get_or_init(|| OpenFiles::new(TEST_MAX_FILE_DESCRIPTORS))
1169 : } else {
1170 0 : OPEN_FILES.get().expect("virtual_file::init not called yet")
1171 : }
1172 5811759 : }
1173 :
1174 : static IO_BUFFER_ALIGNMENT: AtomicUsize = AtomicUsize::new(DEFAULT_IO_BUFFER_ALIGNMENT);
1175 :
1176 : /// Returns true if `x` is zero or a power of two.
1177 1265820 : fn is_zero_or_power_of_two(x: usize) -> bool {
1178 1265820 : (x == 0) || ((x & (x - 1)) == 0)
1179 1265820 : }
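// Why `(x & (x - 1)) == 0` detects powers of two: a power of two has exactly
// one bit set, and subtracting 1 clears that bit while setting all lower bits,
// so the AND is zero. E.g. 0b1000 & 0b0111 == 0, while for a non-power like
// 0b1010, 0b1010 & 0b1001 == 0b1000 != 0. The `x == 0` arm admits zero, which
// callers treat as "no alignment requirement".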
1180 :
1181 : #[allow(unused)]
1182 0 : pub(crate) fn set_io_buffer_alignment(align: usize) -> Result<(), usize> {
1183 0 : if is_zero_or_power_of_two(align) {
1184 0 : IO_BUFFER_ALIGNMENT.store(align, std::sync::atomic::Ordering::Relaxed);
1185 0 : Ok(())
1186 : } else {
1187 0 : Err(align)
1188 : }
1189 0 : }
1190 :
1191 : /// Gets the io buffer alignment requirement. Returns 0 if there is no requirement specified.
1192 : ///
1193 : /// This function should be used to check the raw config value.
1194 1265820 : pub(crate) fn get_io_buffer_alignment_raw() -> usize {
1195 1265820 : let align = IO_BUFFER_ALIGNMENT.load(std::sync::atomic::Ordering::Relaxed);
1196 1265820 :
1197 1265820 : if cfg!(test) {
1198 1265820 : let env_var_name = "NEON_PAGESERVER_UNIT_TEST_IO_BUFFER_ALIGNMENT";
1199 1265820 : if let Some(test_align) = utils::env::var(env_var_name) {
1200 1265820 : if is_zero_or_power_of_two(test_align) {
1201 1265820 : test_align
1202 : } else {
1203 0 : panic!("IO buffer alignment ({test_align}) is not a power of two");
1204 : }
1205 : } else {
1206 0 : align
1207 : }
1208 : } else {
1209 0 : align
1210 : }
1211 1265820 : }
1212 :
1213 : /// Gets the io buffer alignment requirement. Returns 1 if the alignment config is set to zero.
1214 : ///
1215 : /// This function should be used for getting the actual alignment value to use.
1216 624087 : pub(crate) fn get_io_buffer_alignment() -> usize {
1217 624087 : let align = get_io_buffer_alignment_raw();
1218 624087 : align.max(1)
1219 624087 : }
1220 :
1221 : #[cfg(test)]
1222 : mod tests {
1223 : use crate::context::DownloadBehavior;
1224 : use crate::task_mgr::TaskKind;
1225 :
1226 : use super::*;
1227 : use owned_buffers_io::io_buf_ext::IoBufExt;
1228 : use owned_buffers_io::slice::SliceMutExt;
1229 : use rand::seq::SliceRandom;
1230 : use rand::thread_rng;
1231 : use rand::Rng;
1232 : use std::io::Write;
1233 : use std::os::unix::fs::FileExt;
1234 : use std::sync::Arc;
1235 :
1236 : enum MaybeVirtualFile {
1237 : VirtualFile(VirtualFile),
1238 : File(File),
1239 : }
1240 :
1241 : impl From<VirtualFile> for MaybeVirtualFile {
1242 18 : fn from(vf: VirtualFile) -> Self {
1243 18 : MaybeVirtualFile::VirtualFile(vf)
1244 18 : }
1245 : }
1246 :
1247 : impl MaybeVirtualFile {
1248 1212 : async fn read_exact_at(
1249 1212 : &self,
1250 1212 : mut slice: tokio_epoll_uring::Slice<Vec<u8>>,
1251 1212 : offset: u64,
1252 1212 : ctx: &RequestContext,
1253 1212 : ) -> Result<tokio_epoll_uring::Slice<Vec<u8>>, Error> {
1254 1212 : match self {
1255 608 : MaybeVirtualFile::VirtualFile(file) => file.read_exact_at(slice, offset, ctx).await,
1256 606 : MaybeVirtualFile::File(file) => {
1257 606 : let rust_slice: &mut [u8] = slice.as_mut_rust_slice_full_zeroed();
1258 606 : file.read_exact_at(rust_slice, offset).map(|()| slice)
1259 : }
1260 : }
1261 1212 : }
1262 24 : async fn write_all_at<Buf: IoBuf + Send>(
1263 24 : &self,
1264 24 : buf: FullSlice<Buf>,
1265 24 : offset: u64,
1266 24 : ctx: &RequestContext,
1267 24 : ) -> Result<(), Error> {
1268 24 : match self {
1269 12 : MaybeVirtualFile::VirtualFile(file) => {
1270 12 : let (_buf, res) = file.write_all_at(buf, offset, ctx).await;
1271 12 : res
1272 : }
1273 12 : MaybeVirtualFile::File(file) => file.write_all_at(&buf[..], offset),
1274 : }
1275 24 : }
1276 108 : async fn seek(&mut self, pos: SeekFrom) -> Result<u64, Error> {
1277 108 : match self {
1278 54 : MaybeVirtualFile::VirtualFile(file) => file.seek(pos).await,
1279 54 : MaybeVirtualFile::File(file) => file.seek(pos),
1280 : }
1281 108 : }
1282 24 : async fn write_all<Buf: IoBuf + Send>(
1283 24 : &mut self,
1284 24 : buf: FullSlice<Buf>,
1285 24 : ctx: &RequestContext,
1286 24 : ) -> Result<(), Error> {
1287 24 : match self {
1288 12 : MaybeVirtualFile::VirtualFile(file) => {
1289 12 : let (_buf, res) = file.write_all(buf, ctx).await;
1290 12 : res.map(|_| ())
1291 : }
1292 12 : MaybeVirtualFile::File(file) => file.write_all(&buf[..]),
1293 : }
1294 24 : }
1295 :
1296 : // Helper function to slurp contents of a file, starting at the current position,
1297 : // into a string
1298 1326 : async fn read_string(&mut self, ctx: &RequestContext) -> Result<String, Error> {
1299 1326 : use std::io::Read;
1300 1326 : let mut buf = String::new();
1301 1326 : match self {
1302 672 : MaybeVirtualFile::VirtualFile(file) => {
1303 672 : let mut buf = Vec::new();
1304 678 : file.read_to_end(&mut buf, ctx).await?;
1305 666 : return Ok(String::from_utf8(buf).unwrap());
1306 : }
1307 654 : MaybeVirtualFile::File(file) => {
1308 654 : file.read_to_string(&mut buf)?;
1309 : }
1310 : }
1311 648 : Ok(buf)
1312 1326 : }
1313 :
1314 : // Helper function to slurp a portion of a file into a string
1315 1212 : async fn read_string_at(
1316 1212 : &mut self,
1317 1212 : pos: u64,
1318 1212 : len: usize,
1319 1212 : ctx: &RequestContext,
1320 1212 : ) -> Result<String, Error> {
1321 1212 : let slice = Vec::with_capacity(len).slice_full();
1322 1212 : assert_eq!(slice.bytes_total(), len);
1323 1212 : let slice = self.read_exact_at(slice, pos, ctx).await?;
1324 1212 : let vec = slice.into_inner();
1325 1212 : assert_eq!(vec.len(), len);
1326 1212 : Ok(String::from_utf8(vec).unwrap())
1327 1212 : }
1328 : }
1329 :
1330 : #[tokio::test]
1331 6 : async fn test_virtual_files() -> anyhow::Result<()> {
1332 6 : // The real work is done in the test_files() helper function. This
1333 6 : // allows us to run the same set of tests against a native File, and
1334 6 : // VirtualFile. We trust the native Files and wouldn't need to test them,
1335 6 : // but this allows us to verify that the operations return the same
1336 6 : // results with VirtualFiles as with native Files. (Except that with
1337 6 : // native files, you will run out of file descriptors if the ulimit
1338 6 : // is low enough.)
1339 6 : struct A;
1340 6 :
1341 6 : impl Adapter for A {
1342 618 : async fn open(
1343 618 : path: Utf8PathBuf,
1344 618 : opts: OpenOptions,
1345 618 : ctx: &RequestContext,
1346 618 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1347 618 : let vf = VirtualFile::open_with_options(&path, &opts, ctx).await?;
1348 618 : Ok(MaybeVirtualFile::VirtualFile(vf))
1349 618 : }
1350 6 : }
1351 1592 : test_files::<A>("virtual_files").await
1352 6 : }
1353 :
1354 : #[tokio::test]
1355 6 : async fn test_physical_files() -> anyhow::Result<()> {
1356 6 : struct B;
1357 6 :
1358 6 : impl Adapter for B {
1359 618 : async fn open(
1360 618 : path: Utf8PathBuf,
1361 618 : opts: OpenOptions,
1362 618 : _ctx: &RequestContext,
1363 618 : ) -> Result<MaybeVirtualFile, anyhow::Error> {
1364 618 : Ok(MaybeVirtualFile::File({
1365 618 : let owned_fd = opts.open(path.as_std_path()).await?;
1366 618 : File::from(owned_fd)
1367 6 : }))
1368 618 : }
1369 6 : }
1370 6 :
1371 312 : test_files::<B>("physical_files").await
1372 6 : }
1373 :
1374 : /// This is essentially a closure which returns a MaybeVirtualFile, but because Rust edition
1375 : /// 2024 is not yet out with new lifetime capture or outlives rules, this is an async function
1376 : /// in a trait, which benefits from the new lifetime capture rules already.
1377 : trait Adapter {
1378 : async fn open(
1379 : path: Utf8PathBuf,
1380 : opts: OpenOptions,
1381 : ctx: &RequestContext,
1382 : ) -> Result<MaybeVirtualFile, anyhow::Error>;
1383 : }
1384 :
1385 12 : async fn test_files<A>(testname: &str) -> anyhow::Result<()>
1386 12 : where
1387 12 : A: Adapter,
1388 12 : {
1389 12 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1390 12 : let testdir = crate::config::PageServerConf::test_repo_dir(testname);
1391 12 : std::fs::create_dir_all(&testdir)?;
1392 :
1393 12 : let path_a = testdir.join("file_a");
1394 12 : let mut file_a = A::open(
1395 12 : path_a.clone(),
1396 12 : OpenOptions::new()
1397 12 : .write(true)
1398 12 : .create(true)
1399 12 : .truncate(true)
1400 12 : .to_owned(),
1401 12 : &ctx,
1402 12 : )
1403 12 : .await?;
1404 12 : file_a
1405 12 : .write_all(b"foobar".to_vec().slice_len(), &ctx)
1406 3 : .await?;
1407 :
1408 : // cannot read from a file opened in write-only mode
1409 12 : let _ = file_a.read_string(&ctx).await.unwrap_err();
1410 :
1411 : // Close the file and re-open for reading
1412 12 : let mut file_a = A::open(path_a, OpenOptions::new().read(true).to_owned(), &ctx).await?;
1413 :
1414 : // cannot write to a file opened in read-only mode
1415 12 : let _ = file_a
1416 12 : .write_all(b"bar".to_vec().slice_len(), &ctx)
1417 3 : .await
1418 12 : .unwrap_err();
1419 12 :
1420 12 : // Try simple read
1421 12 : assert_eq!("foobar", file_a.read_string(&ctx).await?);
1422 :
1423 : // It's positioned at the EOF now.
1424 12 : assert_eq!("", file_a.read_string(&ctx).await?);
1425 :
1426 : // Test seeks.
1427 12 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1428 12 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1429 :
1430 12 : assert_eq!(file_a.seek(SeekFrom::End(-2)).await?, 4);
1431 12 : assert_eq!("ar", file_a.read_string(&ctx).await?);
1432 :
1433 12 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1434 12 : assert_eq!(file_a.seek(SeekFrom::Current(2)).await?, 3);
1435 12 : assert_eq!("bar", file_a.read_string(&ctx).await?);
1436 :
1437 12 : assert_eq!(file_a.seek(SeekFrom::Current(-5)).await?, 1);
1438 12 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1439 :
1440 : // Test erroneous seeks to before byte 0
1441 12 : file_a.seek(SeekFrom::End(-7)).await.unwrap_err();
1442 12 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1443 12 : file_a.seek(SeekFrom::Current(-2)).await.unwrap_err();
1444 12 :
1445 12 : // the erroneous seek should have left the position unchanged
1446 12 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1447 :
1448 : // Create another test file, and try FileExt functions on it.
1449 12 : let path_b = testdir.join("file_b");
1450 12 : let mut file_b = A::open(
1451 12 : path_b.clone(),
1452 12 : OpenOptions::new()
1453 12 : .read(true)
1454 12 : .write(true)
1455 12 : .create(true)
1456 12 : .truncate(true)
1457 12 : .to_owned(),
1458 12 : &ctx,
1459 12 : )
1460 6 : .await?;
1461 12 : file_b
1462 12 : .write_all_at(b"BAR".to_vec().slice_len(), 3, &ctx)
1463 3 : .await?;
1464 12 : file_b
1465 12 : .write_all_at(b"FOO".to_vec().slice_len(), 0, &ctx)
1466 3 : .await?;
1467 :
1468 12 : assert_eq!(file_b.read_string_at(2, 3, &ctx).await?, "OBA");
1469 :
1470 : // Open a lot of files, enough to cause some evictions. (Or to be precise,
1471 : // open the same file many times. The effect is the same.)
1472 : //
1473 : // leave file_a positioned at offset 1 before we start
1474 12 : assert_eq!(file_a.seek(SeekFrom::Start(1)).await?, 1);
1475 :
1476 12 : let mut vfiles = Vec::new();
1477 1212 : for _ in 0..100 {
1478 1200 : let mut vfile = A::open(
1479 1200 : path_b.clone(),
1480 1200 : OpenOptions::new().read(true).to_owned(),
1481 1200 : &ctx,
1482 1200 : )
1483 600 : .await?;
1484 1200 : assert_eq!("FOOBAR", vfile.read_string(&ctx).await?);
1485 1200 : vfiles.push(vfile);
1486 : }
1487 :
1488 : // make sure we opened enough files to definitely cause evictions.
1489 12 : assert!(vfiles.len() > TEST_MAX_FILE_DESCRIPTORS * 2);
1490 :
1491 : // The underlying file descriptor for 'file_a' should be closed now. Try to read
1492 : // from it again. We left the file positioned at offset 1 above.
1493 12 : assert_eq!("oobar", file_a.read_string(&ctx).await?);
1494 :
1495 : // Check that all the other FDs still work too. Use them in random order for
1496 : // good measure.
1497 12 : vfiles.as_mut_slice().shuffle(&mut thread_rng());
1498 1200 : for vfile in vfiles.iter_mut() {
1499 1200 : assert_eq!("OOBAR", vfile.read_string_at(1, 5, &ctx).await?);
1500 : }
1501 :
1502 12 : Ok(())
1503 12 : }
1504 :
1505 : /// Test using VirtualFiles from many threads concurrently. This tests both using
1506 : /// a lot of VirtualFiles concurrently, causing evictions, and also using the same
1507 : /// VirtualFile from multiple threads concurrently.
1508 : #[tokio::test]
1509 6 : async fn test_vfile_concurrency() -> Result<(), Error> {
1510 6 : const SIZE: usize = 8 * 1024;
1511 6 : const VIRTUAL_FILES: usize = 100;
1512 6 : const THREADS: usize = 100;
1513 6 : const SAMPLE: [u8; SIZE] = [0xADu8; SIZE];
1514 6 :
1515 6 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1516 6 : let testdir = crate::config::PageServerConf::test_repo_dir("vfile_concurrency");
1517 6 : std::fs::create_dir_all(&testdir)?;
1518 6 :
1519 6 : // Create a test file.
1520 6 : let test_file_path = testdir.join("concurrency_test_file");
1521 6 : {
1522 6 : let file = File::create(&test_file_path)?;
1523 6 : file.write_all_at(&SAMPLE, 0)?;
1524 6 : }
1525 6 :
1526 6 : // Open the file many times.
1527 6 : let mut files = Vec::new();
1528 606 : for _ in 0..VIRTUAL_FILES {
1529 600 : let f = VirtualFile::open_with_options(
1530 600 : &test_file_path,
1531 600 : OpenOptions::new().read(true),
1532 600 : &ctx,
1533 600 : )
1534 303 : .await?;
1535 600 : files.push(f);
1536 6 : }
1537 6 : let files = Arc::new(files);
1538 6 :
1539 6 : // Launch many threads, and use the virtual files concurrently in random order.
1540 6 : let rt = tokio::runtime::Builder::new_multi_thread()
1541 6 : .worker_threads(THREADS)
1542 6 : .thread_name("test_vfile_concurrency thread")
1543 6 : .build()
1544 6 : .unwrap();
1545 6 : let mut hdls = Vec::new();
1546 606 : for _threadno in 0..THREADS {
1547 600 : let files = files.clone();
1548 600 : let ctx = ctx.detached_child(TaskKind::UnitTest, DownloadBehavior::Error);
1549 600 : let hdl = rt.spawn(async move {
1550 600 : let mut buf = vec![0u8; SIZE];
1551 600 : let mut rng = rand::rngs::OsRng;
1552 600000 : for _ in 1..1000 {
1553 599400 : let f = &files[rng.gen_range(0..files.len())];
1554 599400 : buf = f
1555 599400 : .read_exact_at(buf.slice_full(), 0, &ctx)
1556 1785119 : .await
1557 599400 : .unwrap()
1558 599400 : .into_inner();
1559 599400 : assert!(buf == SAMPLE);
1560 6 : }
1561 600 : });
1562 600 : hdls.push(hdl);
1563 600 : }
1564 606 : for hdl in hdls {
1565 600 : hdl.await?;
1566 6 : }
1567 6 : std::mem::forget(rt);
1568 6 :
1569 6 : Ok(())
1570 6 : }
1571 :
1572 : #[tokio::test]
1573 6 : async fn test_atomic_overwrite_basic() {
1574 6 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1575 6 : let testdir = crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_basic");
1576 6 : std::fs::create_dir_all(&testdir).unwrap();
1577 6 :
1578 6 : let path = testdir.join("myfile");
1579 6 : let tmp_path = testdir.join("myfile.tmp");
1580 6 :
1581 6 : VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1582 6 : .await
1583 6 : .unwrap();
1584 6 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1585 6 : let post = file.read_string(&ctx).await.unwrap();
1586 6 : assert_eq!(post, "foo");
1587 6 : assert!(!tmp_path.exists());
1588 6 : drop(file);
1589 6 :
1590 6 : VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"bar".to_vec())
1591 6 : .await
1592 6 : .unwrap();
1593 6 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1594 6 : let post = file.read_string(&ctx).await.unwrap();
1595 6 : assert_eq!(post, "bar");
1596 6 : assert!(!tmp_path.exists());
1597 6 : drop(file);
1598 6 : }
1599 :
1600 : #[tokio::test]
1601 6 : async fn test_atomic_overwrite_preexisting_tmp() {
1602 6 : let ctx = RequestContext::new(TaskKind::UnitTest, DownloadBehavior::Error);
1603 6 : let testdir =
1604 6 : crate::config::PageServerConf::test_repo_dir("test_atomic_overwrite_preexisting_tmp");
1605 6 : std::fs::create_dir_all(&testdir).unwrap();
1606 6 :
1607 6 : let path = testdir.join("myfile");
1608 6 : let tmp_path = testdir.join("myfile.tmp");
1609 6 :
1610 6 : std::fs::write(&tmp_path, "some preexisting junk that should be removed").unwrap();
1611 6 : assert!(tmp_path.exists());
1612 6 :
1613 6 : VirtualFile::crashsafe_overwrite(path.clone(), tmp_path.clone(), b"foo".to_vec())
1614 6 : .await
1615 6 : .unwrap();
1616 6 :
1617 6 : let mut file = MaybeVirtualFile::from(VirtualFile::open(&path, &ctx).await.unwrap());
1618 6 : let post = file.read_string(&ctx).await.unwrap();
1619 6 : assert_eq!(post, "foo");
1620 6 : assert!(!tmp_path.exists());
1621 6 : drop(file);
1622 6 : }
1623 : }
|