Line data Source code
1 : //! Common traits and structs for layers
2 :
3 : pub mod batch_split_writer;
4 : pub mod delta_layer;
5 : pub mod errors;
6 : pub mod filter_iterator;
7 : pub mod image_layer;
8 : pub mod inmemory_layer;
9 : pub(crate) mod layer;
10 : mod layer_desc;
11 : mod layer_name;
12 : pub mod merge_iterator;
13 :
14 : use std::cmp::Ordering;
15 : use std::collections::hash_map::Entry;
16 : use std::collections::{BinaryHeap, HashMap};
17 : use std::ops::Range;
18 : use std::pin::Pin;
19 : use std::sync::Arc;
20 : use std::sync::atomic::AtomicUsize;
21 : use std::time::{Duration, SystemTime, UNIX_EPOCH};
22 :
23 : use crate::PERF_TRACE_TARGET;
24 : pub use batch_split_writer::{BatchLayerWriter, SplitDeltaLayerWriter, SplitImageLayerWriter};
25 : use bytes::Bytes;
26 : pub use delta_layer::{DeltaLayer, DeltaLayerWriter, ValueRef};
27 : use futures::StreamExt;
28 : use futures::stream::FuturesUnordered;
29 : pub use image_layer::{ImageLayer, ImageLayerWriter};
30 : pub use inmemory_layer::InMemoryLayer;
31 : pub(crate) use layer::{EvictionError, Layer, ResidentLayer};
32 : pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey};
33 : pub use layer_name::{DeltaLayerName, ImageLayerName, LayerName};
34 : use pageserver_api::config::GetVectoredConcurrentIo;
35 : use pageserver_api::key::Key;
36 : use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum};
37 : use tracing::{Instrument, info_span, trace};
38 : use utils::lsn::Lsn;
39 : use utils::sync::gate::GateGuard;
40 : use wal_decoder::models::record::NeonWalRecord;
41 : use wal_decoder::models::value::Value;
42 :
43 : use self::inmemory_layer::InMemoryLayerFileId;
44 : use super::PageReconstructError;
45 : use super::layer_map::InMemoryLayerDesc;
46 : use super::timeline::{GetVectoredError, ReadPath};
47 : use crate::context::{
48 : AccessStatsBehavior, PerfInstrumentFutureExt, RequestContext, RequestContextBuilder,
49 : };
50 :
51 0 : pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
52 0 : where
53 0 : T: PartialOrd<T>,
54 : {
55 0 : if a.start < b.start {
56 0 : a.end > b.start
57 : } else {
58 0 : b.end > a.start
59 : }
60 0 : }
61 :
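// A minimal illustrative test of the half-open overlap semantics above (names and values
// are editorial, not from the original source): ranges that merely touch at an endpoint do
// not overlap, while partial overlap and containment do.
#[cfg(test)]
#[test]
fn range_overlaps_is_half_open() {
    assert!(range_overlaps(&(1..5), &(4..8))); // partial overlap
    assert!(!range_overlaps(&(1..4), &(4..8))); // only touching at 4: no overlap
    assert!(range_overlaps(&(0..10), &(3..4))); // full containment
}
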
62 : /// Struct used to communicate across calls to 'get_value_reconstruct_data'.
63 : ///
64 : /// Before the first call, you can fill in 'img' if you have an older cached
65 : /// version of the page available. That can save work in
66 : /// 'get_value_reconstruct_data', as it can stop searching for page versions
67 : /// when all the WAL records going back to the cached image have been collected.
68 : ///
69 : /// When get_value_reconstruct_data returns Complete, 'img' is set to an image
70 : /// of the page, or the oldest WAL record in 'records' is a will_init-type
71 : /// record that initializes the page without requiring a previous image.
72 : ///
73 : /// If 'get_value_reconstruct_data' returns Continue, some 'records' may have
74 : /// been collected, but there are more records outside the current layer. Pass
75 : /// the same ValueReconstructState struct in the next 'get_value_reconstruct_data'
76 : /// call, to collect more records.
77 : ///
78 : #[derive(Debug, Default)]
79 : pub(crate) struct ValueReconstructState {
80 : pub(crate) records: Vec<(Lsn, NeonWalRecord)>,
81 : pub(crate) img: Option<(Lsn, Bytes)>,
82 : }
83 :
84 : impl ValueReconstructState {
85 : /// Returns the number of delta records applied on top of the base image (or will_init record).
86 727154 : pub fn num_deltas(&self) -> usize {
87 727154 : match self.img {
88 699686 : Some(_) => self.records.len(),
89 27468 : None => self.records.len() - 1, // omit will_init record
90 : }
91 727154 : }
92 : }
93 :
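// Illustration of `num_deltas` (a sketch; `will_init_rec` and `delta_rec` stand in for
// actual NeonWalRecord values): with no cached image, the oldest record must be a
// will_init-type record, so it is not counted as a delta.
//
//     let state = ValueReconstructState {
//         records: vec![(Lsn(0x10), will_init_rec), (Lsn(0x20), delta_rec)],
//         img: None,
//     };
//     assert_eq!(state.num_deltas(), 1);
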
94 : #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
95 : pub(crate) enum ValueReconstructSituation {
96 : Complete,
97 : #[default]
98 : Continue,
99 : }
100 :
101 : /// On disk representation of a value loaded in a buffer
102 : #[derive(Debug)]
103 : pub(crate) enum OnDiskValue {
104 : /// Unencoded [`Value::Image`]
105 : RawImage(Bytes),
106 : /// Encoded [`Value`]. Can deserialize into an image or a WAL record
107 : WalRecordOrImage(Bytes),
108 : }
109 :
110 : /// Reconstruct data accumulated for a single key during a vectored get
111 : #[derive(Debug, Default)]
112 : pub struct VectoredValueReconstructState {
113 : pub(crate) on_disk_values: Vec<(Lsn, OnDiskValueIoWaiter)>,
114 :
115 : pub(crate) situation: ValueReconstructSituation,
116 : }
117 :
118 : #[derive(Debug)]
119 : pub(crate) struct OnDiskValueIoWaiter {
120 : rx: tokio::sync::oneshot::Receiver<OnDiskValueIoResult>,
121 : }
122 :
123 : #[derive(Debug)]
124 : #[must_use]
125 : pub(crate) enum OnDiskValueIo {
126 : /// Traversal identified this IO as required to complete the vectored get.
127 : Required {
128 : num_active_ios: Arc<AtomicUsize>,
129 : tx: tokio::sync::oneshot::Sender<OnDiskValueIoResult>,
130 : },
131 : /// Sparse keyspace reads always read all the values for a given key,
132 : /// even though only the first value is needed.
133 : ///
134 : /// This variant represents the unnecessary IOs for those values at lower LSNs
135 : /// that aren't needed, but are currently still being done.
136 : ///
137 : /// The execution of unnecessary IOs was a pre-existing behavior before concurrent IO.
138 : /// We added this explicit representation here so that we can drop
139 : /// unnecessary IO results immediately, instead of buffering them in
140 : /// `oneshot` channels inside [`VectoredValueReconstructState`] until
141 : /// [`VectoredValueReconstructState::collect_pending_ios`] gets called.
142 : Unnecessary,
143 : }
144 :
145 : type OnDiskValueIoResult = Result<OnDiskValue, std::io::Error>;
146 :
147 : impl OnDiskValueIo {
148 1797620 : pub(crate) fn complete(self, res: OnDiskValueIoResult) {
149 1797620 : match self {
150 1753133 : OnDiskValueIo::Required { num_active_ios, tx } => {
151 1753133 : num_active_ios.fetch_sub(1, std::sync::atomic::Ordering::Release);
152 1753133 : let _ = tx.send(res);
153 1753133 : }
154 44487 : OnDiskValueIo::Unnecessary => {
155 44487 : // Nobody cared, see variant doc comment.
156 44487 : }
157 : }
158 1797620 : }
159 : }
160 :
161 : #[derive(Debug, thiserror::Error)]
162 : pub(crate) enum WaitCompletionError {
163 : #[error("OnDiskValueIo was dropped without completing, likely the sidecar task panicked")]
164 : IoDropped,
165 : }
166 :
167 : impl OnDiskValueIoWaiter {
168 1753132 : pub(crate) async fn wait_completion(self) -> Result<OnDiskValueIoResult, WaitCompletionError> {
169 : // NB: for Unnecessary IOs, this method never gets called because we don't add them to `on_disk_values`.
170 1753132 : self.rx.await.map_err(|_| WaitCompletionError::IoDropped)
171 1753132 : }
172 : }
173 :
174 : impl VectoredValueReconstructState {
175 : /// # Cancel-Safety
176 : ///
177 : /// Technically fine to stop polling this future, but, the IOs will still
178 : /// be executed to completion by the sidecar task and hold on to / consume resources.
179 : /// Better not do it to make reasonsing about the system easier.
180 363612 : pub(crate) async fn collect_pending_ios(
181 363612 : self,
182 363612 : ) -> Result<ValueReconstructState, PageReconstructError> {
183 : use utils::bin_ser::BeSer;
184 :
185 363612 : let mut res = Ok(ValueReconstructState::default());
186 :
187 : // We should try hard not to bail early, so that by the time we return from this
188 : // function, all IO for this value is done. It's not required -- we could totally
189 : // stop polling the IO futures in the sidecar task, they need to support that,
190 : // but just stopping to poll doesn't reduce the IO load on the disk. It's easier
191 : // to reason about the system if we just wait for all IO to complete, even if
192 : // we're no longer interested in the result.
193 : //
194 : // Revisit this when IO futures are replaced with a more sophisticated IO system
195 : // and an IO scheduler, where we know which IOs were submitted and which ones
196 : // just queued. Cf the comment on IoConcurrency::spawn_io.
197 2116744 : for (lsn, waiter) in self.on_disk_values {
198 1753132 : let value_recv_res = waiter
199 1753132 : .wait_completion()
200 1753132 : // we rely on the caller to poll us to completion, so this is not a bail point
201 1753132 : .await;
202 : // Force not bailing early by wrapping the code into a closure.
203 : #[allow(clippy::redundant_closure_call)]
204 1753132 : let _: () = (|| {
205 1753132 : match (&mut res, value_recv_res) {
206 0 : (Err(_), _) => {
207 0 : // We've already failed, no need to process more.
208 0 : }
209 0 : (Ok(_), Err(wait_err)) => {
210 0 : // This shouldn't happen - likely the sidecar task panicked.
211 0 : res = Err(PageReconstructError::Other(wait_err.into()));
212 0 : }
213 0 : (Ok(_), Ok(Err(err))) => {
214 0 : let err: std::io::Error = err;
215 0 : // TODO: returning IO error here will fail a compute query.
216 0 : // Probably not what we want, we're not doing `maybe_fatal_err`
217 0 : // in the IO futures.
218 0 : // But it's been like that for a long time, not changing it
219 0 : // as part of concurrent IO.
220 0 : // => https://github.com/neondatabase/neon/issues/10454
221 0 : res = Err(PageReconstructError::Other(err.into()));
222 0 : }
223 24027 : (Ok(ok), Ok(Ok(OnDiskValue::RawImage(img)))) => {
224 24027 : assert!(ok.img.is_none());
225 24027 : ok.img = Some((lsn, img));
226 : }
227 1729105 : (Ok(ok), Ok(Ok(OnDiskValue::WalRecordOrImage(buf)))) => {
228 1729105 : match Value::des(&buf) {
229 1403254 : Ok(Value::WalRecord(rec)) => {
230 1403254 : ok.records.push((lsn, rec));
231 1403254 : }
232 325851 : Ok(Value::Image(img)) => {
233 325851 : assert!(ok.img.is_none());
234 325851 : ok.img = Some((lsn, img));
235 : }
236 0 : Err(err) => {
237 0 : res = Err(PageReconstructError::Other(err.into()));
238 0 : }
239 : }
240 : }
241 : }
242 : })();
243 : }
244 :
245 363612 : res
246 363612 : }
247 :
248 : /// Benchmarking utility to await for the completion of all pending ios
249 : ///
250 : /// # Cancel-Safety
251 : ///
252 : /// Technically fine to stop polling this future, but, the IOs will still
253 : /// be executed to completion by the sidecar task and hold on to / consume resources.
254 : /// Better not do it to make reasonsing about the system easier.
255 : #[cfg(feature = "benchmarking")]
256 : pub async fn sink_pending_ios(self) -> Result<(), std::io::Error> {
257 : let mut res = Ok(());
258 :
259 : // We should try hard not to bail early, so that by the time we return from this
260 : // function, all IO for this value is done. It's not required -- we could totally
261 : // stop polling the IO futures in the sidecar task, they need to support that,
262 : // but just stopping to poll doesn't reduce the IO load on the disk. It's easier
263 : // to reason about the system if we just wait for all IO to complete, even if
264 : // we're no longer interested in the result.
265 : //
266 : // Revisit this when IO futures are replaced with a more sophisticated IO system
267 : // and an IO scheduler, where we know which IOs were submitted and which ones
268 : // just queued. Cf the comment on IoConcurrency::spawn_io.
269 : for (_lsn, waiter) in self.on_disk_values {
270 : let value_recv_res = waiter
271 : .wait_completion()
272 : // we rely on the caller to poll us to completion, so this is not a bail point
273 : .await;
274 :
275 : match (&mut res, value_recv_res) {
276 : (Err(_), _) => {
277 : // We've already failed, no need to process more.
278 : }
279 : (Ok(_), Err(_wait_err)) => {
280 : // This shouldn't happen - likely the sidecar task panicked.
281 : unreachable!();
282 : }
283 : (Ok(_), Ok(Err(err))) => {
284 : let err: std::io::Error = err;
285 : res = Err(err);
286 : }
287 : (Ok(_ok), Ok(Ok(OnDiskValue::RawImage(_img)))) => {}
288 : (Ok(_ok), Ok(Ok(OnDiskValue::WalRecordOrImage(_buf)))) => {}
289 : }
290 : }
291 :
292 : res
293 : }
294 : }
295 :
296 : /// Bag of data accumulated during a vectored get.
297 : pub struct ValuesReconstructState {
298 : /// The keys will be removed after `get_vectored` completes. The caller outside `Timeline`
299 : /// should not expect to get anything from this hashmap.
300 : pub keys: HashMap<Key, VectoredValueReconstructState>,
301 : /// The keys which are already retrieved
302 : keys_done: KeySpaceRandomAccum,
303 :
304 : /// The keys covered by the image layers
305 : keys_with_image_coverage: Option<Range<Key>>,
306 :
307 : // Statistics that are still accessible as a caller of `get_vectored_impl`.
308 : layers_visited: u32,
309 : delta_layers_visited: u32,
310 :
311 : pub(crate) io_concurrency: IoConcurrency,
312 : num_active_ios: Arc<AtomicUsize>,
313 :
314 : pub(crate) read_path: Option<ReadPath>,
315 : }
316 :
317 : /// The level of IO concurrency to be used on the read path
318 : ///
319 : /// The desired end state is that we always do parallel IO.
320 : /// This struct and the dispatching in the impl will be removed once
321 : /// we've built enough confidence.
322 : pub enum IoConcurrency {
323 : Sequential,
324 : SidecarTask {
325 : task_id: usize,
326 : ios_tx: tokio::sync::mpsc::UnboundedSender<IoFuture>,
327 : },
328 : }
329 :
330 : type IoFuture = Pin<Box<dyn Send + Future<Output = ()>>>;
331 :
332 : pub(crate) enum SelectedIoConcurrency {
333 : Sequential,
334 : SidecarTask(GateGuard),
335 : }
336 :
337 : impl std::fmt::Debug for IoConcurrency {
338 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
339 0 : match self {
340 0 : IoConcurrency::Sequential => write!(f, "Sequential"),
341 0 : IoConcurrency::SidecarTask { .. } => write!(f, "SidecarTask"),
342 : }
343 0 : }
344 : }
345 :
346 : impl std::fmt::Debug for SelectedIoConcurrency {
347 17 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
348 17 : match self {
349 0 : SelectedIoConcurrency::Sequential => write!(f, "Sequential"),
350 17 : SelectedIoConcurrency::SidecarTask(_) => write!(f, "SidecarTask"),
351 : }
352 17 : }
353 : }
354 :
355 : impl IoConcurrency {
356 : /// Force sequential IO. This is a temporary workaround until we have
357 : /// finished plumbing IoConcurrency through the call stack
358 : /// and into `RequestContext`.
359 : ///
360 : /// DO NOT USE for new code.
361 : ///
362 : /// Tracking issue: <https://github.com/neondatabase/neon/issues/10460>.
363 301320 : pub(crate) fn sequential() -> Self {
364 301320 : Self::spawn(SelectedIoConcurrency::Sequential)
365 301320 : }
366 :
367 321 : pub fn spawn_from_conf(conf: GetVectoredConcurrentIo, gate_guard: GateGuard) -> IoConcurrency {
368 321 : let selected = match conf {
369 0 : GetVectoredConcurrentIo::Sequential => SelectedIoConcurrency::Sequential,
370 321 : GetVectoredConcurrentIo::SidecarTask => SelectedIoConcurrency::SidecarTask(gate_guard),
371 : };
372 321 : Self::spawn(selected)
373 321 : }
374 :
375 301658 : pub(crate) fn spawn(io_concurrency: SelectedIoConcurrency) -> Self {
376 301658 : match io_concurrency {
377 301320 : SelectedIoConcurrency::Sequential => IoConcurrency::Sequential,
378 338 : SelectedIoConcurrency::SidecarTask(gate_guard) => {
379 338 : let (ios_tx, ios_rx) = tokio::sync::mpsc::unbounded_channel();
380 : static TASK_ID: AtomicUsize = AtomicUsize::new(0);
381 338 : let task_id = TASK_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
382 : // TODO: enrich the span with more context (tenant,shard,timeline) + (basebackup|pagestream|...)
383 338 : let span =
384 338 : tracing::info_span!(parent: None, "IoConcurrency_sidecar", task_id = task_id);
385 338 : trace!(task_id, "spawning sidecar task");
386 338 : tokio::spawn(async move {
387 337 : trace!("start");
388 337 : scopeguard::defer!{ trace!("end") };
389 : type IosRx = tokio::sync::mpsc::UnboundedReceiver<IoFuture>;
390 : enum State {
391 : Waiting {
392 : // invariant: is_empty(), but we recycle the allocation
393 : empty_futures: FuturesUnordered<IoFuture>,
394 : ios_rx: IosRx,
395 : },
396 : Executing {
397 : futures: FuturesUnordered<IoFuture>,
398 : ios_rx: IosRx,
399 : },
400 : ShuttingDown {
401 : futures: FuturesUnordered<IoFuture>,
402 : },
403 : }
404 337 : let mut state = State::Waiting {
405 337 : empty_futures: FuturesUnordered::new(),
406 337 : ios_rx,
407 337 : };
408 : loop {
409 23017 : match state {
410 : State::Waiting {
411 10407 : empty_futures,
412 10407 : mut ios_rx,
413 : } => {
414 10407 : assert!(empty_futures.is_empty());
415 10407 : tokio::select! {
416 10407 : fut = ios_rx.recv() => {
417 10388 : if let Some(fut) = fut {
418 10148 : trace!("received new io future");
419 10148 : empty_futures.push(fut);
420 10148 : state = State::Executing { futures: empty_futures, ios_rx };
421 : } else {
422 240 : state = State::ShuttingDown { futures: empty_futures }
423 : }
424 : }
425 : }
426 : }
427 : State::Executing {
428 12292 : mut futures,
429 12292 : mut ios_rx,
430 : } => {
431 12292 : tokio::select! {
432 12292 : res = futures.next() => {
433 11142 : trace!("io future completed");
434 11142 : assert!(res.is_some());
435 11142 : if futures.is_empty() {
436 10070 : state = State::Waiting { empty_futures: futures, ios_rx};
437 10070 : } else {
438 1072 : state = State::Executing { futures, ios_rx };
439 1072 : }
440 : }
441 12292 : fut = ios_rx.recv() => {
442 1150 : if let Some(fut) = fut {
443 1072 : trace!("received new io future");
444 1072 : futures.push(fut);
445 1072 : state = State::Executing { futures, ios_rx};
446 78 : } else {
447 78 : state = State::ShuttingDown { futures };
448 78 : }
449 : }
450 : }
451 : }
452 : State::ShuttingDown {
453 318 : mut futures,
454 : } => {
455 318 : trace!("shutting down");
456 396 : while let Some(()) = futures.next().await {
457 78 : trace!("io future completed (shutdown)");
458 : // drain
459 : }
460 318 : trace!("shutdown complete");
461 318 : break;
462 : }
463 : }
464 : }
465 318 : drop(gate_guard); // drop it right before we exit
466 338 : }.instrument(span));
467 338 : IoConcurrency::SidecarTask { task_id, ios_tx }
468 : }
469 : }
470 301658 : }
471 :
472 : /// Submit an IO to be executed in the background. DEADLOCK RISK, read the full doc string.
473 : ///
474 : /// The IO is represented as an opaque future.
475 : /// IO completion must be handled inside the future, e.g., through a oneshot channel.
476 : ///
477 : /// The API seems simple but there are multiple **pitfalls** involving
478 : /// DEADLOCK RISK.
479 : ///
480 : /// First, there are no guarantees about the execution of the IO.
481 : /// It may be `await`ed in-place before this function returns.
482 : /// It may be polled partially by this task and handed off to another task to be finished.
483 : /// It may be polled and then dropped before returning ready.
484 : ///
485 : /// This means that submitted IOs must not be interdependent.
486 : /// Interdependence may be through shared limited resources, e.g.,
487 : /// - VirtualFile file descriptor cache slot acquisition
488 : /// - tokio-epoll-uring slot
489 : ///
490 : /// # Why current usage is safe from deadlocks
491 : ///
492 : /// Textbook condition for a deadlock is that _all_ of the following be given
493 : /// - Mutual exclusion
494 : /// - Hold and wait
495 : /// - No preemption
496 : /// - Circular wait
497 : ///
498 : /// The current usage is safe because:
499 : /// - Mutual exclusion: IO futures definitely use mutexes, no way around that for now
500 : /// - Hold and wait: IO futures currently hold two kinds of locks/resources while waiting
501 : /// for acquisition of other resources:
502 : /// - VirtualFile file descriptor cache slot tokio mutex
503 : /// - tokio-epoll-uring slot (uses tokio notify => wait queue, much like mutex)
504 : /// - No preemption: there's no taking-away of acquired locks/resources => given
505 : /// - Circular wait: this is the part of the condition that isn't met: all IO futures
506 : /// first acquire VirtualFile mutex, then tokio-epoll-uring slot.
507 : /// There is no IO future that acquires slot before VirtualFile.
508 : /// Hence there can be no circular waiting.
509 : /// Hence there cannot be a deadlock.
510 : ///
511 : /// This is a very fragile situation and must be revisited whenever any code called from
512 : /// inside the IO futures is changed.
513 : ///
514 : /// We will move away from opaque IO futures towards well-defined IOs at some point in
515 : /// the future when we have shipped this first version of concurrent IO to production
516 : /// and are ready to retire the Sequential mode which runs the futures in place.
517 : /// Right now, while brittle, the opaque IO approach allows us to ship the feature
518 : /// with minimal changes to the code and minimal changes to existing behavior in Sequential mode.
519 : ///
520 : /// Also read the comment in `collect_pending_ios`.
521 412294 : pub(crate) async fn spawn_io<F>(&mut self, fut: F)
522 412294 : where
523 412294 : F: std::future::Future<Output = ()> + Send + 'static,
524 412294 : {
525 412294 : match self {
526 401074 : IoConcurrency::Sequential => fut.await,
527 11220 : IoConcurrency::SidecarTask { ios_tx, .. } => {
528 11220 : let fut = Box::pin(fut);
529 : // NB: experiments showed that doing an opportunistic poll of `fut` here was bad for throughput
530 : // while insignificant for latency.
531 : // It would make sense to revisit the tokio-epoll-uring API in the future such that we can try
532 : /// a submission here, but never poll the future. That way, io_uring can make progress while
533 : // the future sits in the ios_tx queue.
534 11220 : match ios_tx.send(fut) {
535 11220 : Ok(()) => {}
536 : Err(_) => {
537 0 : unreachable!("the io task must have exited, likely it panicked")
538 : }
539 : }
540 : }
541 : }
542 412294 : }
543 :
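    // Usage sketch (hypothetical `read_blob` stands in for the actual layer read; see the
    // test in `tests2` below for a runnable variant): the caller obtains an `OnDiskValueIo`
    // handle up front and completes it from inside the submitted future, which resolves the
    // oneshot waiter that `collect_pending_ios` later awaits.
    //
    //     let io = reconstruct_state.update_key(&key, lsn, /* completes */ true);
    //     reconstruct_state
    //         .spawn_io(async move {
    //             let res: OnDiskValueIoResult = read_blob().await;
    //             io.complete(res);
    //         })
    //         .await;
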
544 : #[cfg(test)]
545 17 : pub(crate) fn spawn_for_test() -> impl std::ops::DerefMut<Target = Self> {
546 : use std::ops::{Deref, DerefMut};
547 :
548 : use tracing::info;
549 : use utils::sync::gate::Gate;
550 :
551 : // Spawn needs a Gate, give it one.
552 : struct Wrapper {
553 : inner: IoConcurrency,
554 : #[allow(dead_code)]
555 : gate: Box<Gate>,
556 : }
557 : impl Deref for Wrapper {
558 : type Target = IoConcurrency;
559 :
560 9246 : fn deref(&self) -> &Self::Target {
561 9246 : &self.inner
562 9246 : }
563 : }
564 : impl DerefMut for Wrapper {
565 0 : fn deref_mut(&mut self) -> &mut Self::Target {
566 0 : &mut self.inner
567 0 : }
568 : }
569 17 : let gate = Box::new(Gate::default());
570 :
571 : // The default behavior when running Rust unit tests without any further
572 : // flags is to use the new behavior.
573 : // The CI uses the following environment variable to unit test both old
574 : // and new behavior.
575 : // NB: the Python regression & perf tests take the `else` branch
576 : // below and have their own defaults management.
577 17 : let selected = {
578 : // The pageserver_api::config type is unsuitable because it's internally tagged.
579 0 : #[derive(serde::Deserialize)]
580 : #[serde(rename_all = "kebab-case")]
581 : enum TestOverride {
582 : Sequential,
583 : SidecarTask,
584 : }
585 : use once_cell::sync::Lazy;
586 17 : static TEST_OVERRIDE: Lazy<TestOverride> = Lazy::new(|| {
587 17 : utils::env::var_serde_json_string(
588 17 : "NEON_PAGESERVER_UNIT_TEST_GET_VECTORED_CONCURRENT_IO",
589 : )
590 17 : .unwrap_or(TestOverride::SidecarTask)
591 17 : });
592 :
593 17 : match *TEST_OVERRIDE {
594 0 : TestOverride::Sequential => SelectedIoConcurrency::Sequential,
595 : TestOverride::SidecarTask => {
596 17 : SelectedIoConcurrency::SidecarTask(gate.enter().expect("just created it"))
597 : }
598 : }
599 : };
600 :
601 17 : info!(?selected, "get_vectored_concurrent_io test");
602 :
603 17 : Wrapper {
604 17 : inner: Self::spawn(selected),
605 17 : gate,
606 17 : }
607 17 : }
608 : }
609 :
610 : impl Clone for IoConcurrency {
611 19141 : fn clone(&self) -> Self {
612 19141 : match self {
613 0 : IoConcurrency::Sequential => IoConcurrency::Sequential,
614 19141 : IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask {
615 19141 : task_id: *task_id,
616 19141 : ios_tx: ios_tx.clone(),
617 19141 : },
618 : }
619 19141 : }
620 : }
621 :
622 : /// Make noise in case the [`ValuesReconstructState`] gets dropped while
623 : /// there are still IOs in flight.
624 : /// Refer to `collect_pending_ios` for why we prefer not to do that.
625 : ///
626 : /// We log from here instead of from the sidecar task because the [`ValuesReconstructState`]
627 : /// gets dropped in a tracing span with more context.
628 : /// We repeat the sidecar task's `task_id` so we can correlate what we emit here with
629 : /// the logs / panic handler logs from the sidecar task, which also logs the `task_id`.
630 : impl Drop for ValuesReconstructState {
631 312463 : fn drop(&mut self) {
632 312463 : let num_active_ios = self
633 312463 : .num_active_ios
634 312463 : .load(std::sync::atomic::Ordering::Acquire);
635 312463 : if num_active_ios == 0 {
636 312462 : return;
637 1 : }
638 1 : let sidecar_task_id = match &self.io_concurrency {
639 0 : IoConcurrency::Sequential => None,
640 1 : IoConcurrency::SidecarTask { task_id, .. } => Some(*task_id),
641 : };
642 1 : tracing::warn!(
643 : num_active_ios,
644 : ?sidecar_task_id,
645 0 : backtrace=%std::backtrace::Backtrace::force_capture(),
646 0 : "dropping ValuesReconstructState while some IOs have not been completed",
647 : );
648 312463 : }
649 : }
650 :
651 : impl ValuesReconstructState {
652 312463 : pub fn new(io_concurrency: IoConcurrency) -> Self {
653 312463 : Self {
654 312463 : keys: HashMap::new(),
655 312463 : keys_done: KeySpaceRandomAccum::new(),
656 312463 : keys_with_image_coverage: None,
657 312463 : layers_visited: 0,
658 312463 : delta_layers_visited: 0,
659 312463 : io_concurrency,
660 312463 : num_active_ios: Arc::new(AtomicUsize::new(0)),
661 312463 : read_path: None,
662 312463 : }
663 312463 : }
664 :
665 : /// Absolutely read [`IoConcurrency::spawn_io`] to learn about assumptions & pitfalls.
666 412294 : pub(crate) async fn spawn_io<F>(&mut self, fut: F)
667 412294 : where
668 412294 : F: std::future::Future<Output = ()> + Send + 'static,
669 412294 : {
670 412294 : self.io_concurrency.spawn_io(fut).await;
671 412294 : }
672 :
673 446449 : pub(crate) fn on_layer_visited(&mut self, layer: &ReadableLayer) {
674 446449 : self.layers_visited += 1;
675 446449 : if let ReadableLayer::PersistentLayer(layer) = layer {
676 139128 : if layer.layer_desc().is_delta() {
677 125008 : self.delta_layers_visited += 1;
678 125008 : }
679 307321 : }
680 446449 : }
681 :
682 173 : pub(crate) fn get_delta_layers_visited(&self) -> u32 {
683 173 : self.delta_layers_visited
684 173 : }
685 :
686 312448 : pub(crate) fn get_layers_visited(&self) -> u32 {
687 312448 : self.layers_visited
688 312448 : }
689 :
690 : /// On hitting image layer, we can mark all keys in this range as done, because
691 : /// if the image layer does not contain a key, it is deleted/never added.
692 14126 : pub(crate) fn on_image_layer_visited(&mut self, key_range: &Range<Key>) {
693 14126 : let prev_val = self.keys_with_image_coverage.replace(key_range.clone());
694 14126 : assert_eq!(
695 : prev_val, None,
696 0 : "should consume the keyspace before the next iteration"
697 : );
698 14126 : }
699 :
700 : /// Update the state collected for a given key.
701 : /// Returns true if this was the last value needed for the key and false otherwise.
702 : ///
703 : /// If the key is done after the update, mark it as such.
704 : ///
705 : /// If the key is in the sparse keyspace (i.e., aux files), we do not track them in
706 : /// `key_done`.
707 : // TODO: rename this method & update description.
708 1797620 : pub(crate) fn update_key(&mut self, key: &Key, lsn: Lsn, completes: bool) -> OnDiskValueIo {
709 1797620 : let state = self.keys.entry(*key).or_default();
710 :
711 1797620 : let is_sparse_key = key.is_sparse();
712 :
713 1797620 : let required_io = match state.situation {
714 : ValueReconstructSituation::Complete => {
715 44487 : if is_sparse_key {
716 : // Sparse keyspace might be visited multiple times because
717 : // we don't track unmapped keyspaces.
718 44487 : return OnDiskValueIo::Unnecessary;
719 : } else {
720 0 : unreachable!()
721 : }
722 : }
723 : ValueReconstructSituation::Continue => {
724 1753133 : self.num_active_ios
725 1753133 : .fetch_add(1, std::sync::atomic::Ordering::Release);
726 1753133 : let (tx, rx) = tokio::sync::oneshot::channel();
727 1753133 : state.on_disk_values.push((lsn, OnDiskValueIoWaiter { rx }));
728 1753133 : OnDiskValueIo::Required {
729 1753133 : tx,
730 1753133 : num_active_ios: Arc::clone(&self.num_active_ios),
731 1753133 : }
732 : }
733 : };
734 :
735 1753133 : if completes && state.situation == ValueReconstructSituation::Continue {
736 363613 : state.situation = ValueReconstructSituation::Complete;
737 363613 : if !is_sparse_key {
738 331727 : self.keys_done.add_key(*key);
739 331727 : }
740 1389520 : }
741 :
742 1753133 : required_io
743 1797620 : }
744 :
745 : /// Returns the key space describing the keys that have
746 : /// been marked as completed since the last call to this function.
747 : /// Returns individual keys done, and the image layer coverage.
748 446449 : pub(crate) fn consume_done_keys(&mut self) -> (KeySpace, Option<Range<Key>>) {
749 446449 : (
750 446449 : self.keys_done.consume_keyspace(),
751 446449 : self.keys_with_image_coverage.take(),
752 446449 : )
753 446449 : }
754 : }
755 :
756 : /// A key that uniquely identifies a layer in a timeline
757 : #[derive(Debug, PartialEq, Eq, Clone, Hash)]
758 : pub(crate) enum LayerId {
759 : PersitentLayerId(PersistentLayerKey),
760 : InMemoryLayerId(InMemoryLayerFileId),
761 : }
762 :
763 : /// Uniquely identify a layer visit by the layer
764 : /// and LSN range of the reads. Note that the end of the range is exclusive.
765 : ///
766 : /// The layer itself is not enough since we may have different LSN lower
767 : /// bounds for delta layer reads. Scenarios where this can happen are:
768 : ///
769 : /// 1. Layer overlaps: imagine an image layer inside and in-memory layer
770 : /// and a query that only partially hits the image layer. Part of the query
771 : /// needs to read the whole in-memory layer and the other part needs to read
772 : /// only up to the image layer. Hence, they'll have different LSN floor values
773 : /// for the read.
774 : ///
775 : /// 2. Scattered reads: the read path supports starting at different LSNs. Imagine
776 : /// the start LSN for one range is inside a layer and the start LSN for another range
777 : /// is above the layer (includes all of it). Both ranges need to read the layer all the
778 : /// way to the end but starting at different points. Hence, they'll have different LSN
779 : /// ceil values.
780 : ///
781 : /// The implication is that we might visit the same layer multiple times
782 : /// in order to read different LSN ranges from it. In practice, this isn't very concerning
783 : /// because:
784 : /// 1. Layer overlaps are rare and generally not intended
785 : /// 2. Scattered reads will stabilise after the first few layers provided their starting LSNs
786 : /// are grouped tightly enough (likely the case).
787 : #[derive(Debug, PartialEq, Eq, Clone, Hash)]
788 : struct LayerToVisitId {
789 : layer_id: LayerId,
790 : lsn_floor: Lsn,
791 : lsn_ceil: Lsn,
792 : }
793 :
794 : #[derive(Debug, PartialEq, Eq, Hash)]
795 : pub enum ReadableLayerWeak {
796 : PersistentLayer(Arc<PersistentLayerDesc>),
797 : InMemoryLayer(InMemoryLayerDesc),
798 : }
799 :
800 : /// Layer wrapper for the read path. Note that it is valid
801 : /// to use these layers even after external operations have
802 : /// been performed on them (compaction, freeze, etc.).
803 : #[derive(Debug)]
804 : pub(crate) enum ReadableLayer {
805 : PersistentLayer(Layer),
806 : InMemoryLayer(Arc<InMemoryLayer>),
807 : }
808 :
809 : /// A partial description of a read to be done.
810 : #[derive(Debug, Clone)]
811 : struct LayerVisit {
812 : /// An id used to resolve the readable layer within the fringe
813 : layer_to_visit_id: LayerToVisitId,
814 : /// Lsn range for the read, used for selecting the next read
815 : lsn_range: Range<Lsn>,
816 : }
817 :
818 : /// Data structure which maintains a fringe of layers for the
819 : /// read path. The fringe is the set of layers which intersects
820 : /// the current keyspace that the search is descending on.
821 : /// Each layer tracks the keyspace that intersects it.
822 : ///
823 : /// The fringe must appear sorted by Lsn. Hence, it uses
824 : /// a two layer indexing scheme.
825 : #[derive(Debug)]
826 : pub(crate) struct LayerFringe {
827 : planned_visits_by_lsn: BinaryHeap<LayerVisit>,
828 : visit_reads: HashMap<LayerToVisitId, LayerVisitReads>,
829 : }
830 :
831 : #[derive(Debug)]
832 : struct LayerVisitReads {
833 : layer: ReadableLayer,
834 : target_keyspace: KeySpaceRandomAccum,
835 : }
836 :
837 : impl LayerFringe {
838 427092 : pub(crate) fn new() -> Self {
839 427092 : LayerFringe {
840 427092 : planned_visits_by_lsn: BinaryHeap::new(),
841 427092 : visit_reads: HashMap::new(),
842 427092 : }
843 427092 : }
844 :
845 873541 : pub(crate) fn next_layer(&mut self) -> Option<(ReadableLayer, KeySpace, Range<Lsn>)> {
846 873541 : let read_desc = self.planned_visits_by_lsn.pop()?;
847 :
848 446449 : let removed = self.visit_reads.remove_entry(&read_desc.layer_to_visit_id);
849 :
850 446449 : match removed {
851 : Some((
852 : _,
853 : LayerVisitReads {
854 446449 : layer,
855 446449 : mut target_keyspace,
856 : },
857 446449 : )) => Some((
858 446449 : layer,
859 446449 : target_keyspace.consume_keyspace(),
860 446449 : read_desc.lsn_range,
861 446449 : )),
862 0 : None => unreachable!("fringe internals are always consistent"),
863 : }
864 873541 : }
865 :
866 460844 : pub(crate) fn update(
867 460844 : &mut self,
868 460844 : layer: ReadableLayer,
869 460844 : keyspace: KeySpace,
870 460844 : lsn_range: Range<Lsn>,
871 460844 : ) {
872 460844 : let layer_to_visit_id = LayerToVisitId {
873 460844 : layer_id: layer.id(),
874 460844 : lsn_floor: lsn_range.start,
875 460844 : lsn_ceil: lsn_range.end,
876 460844 : };
877 :
878 460844 : let entry = self.visit_reads.entry(layer_to_visit_id.clone());
879 460844 : match entry {
880 14395 : Entry::Occupied(mut entry) => {
881 14395 : entry.get_mut().target_keyspace.add_keyspace(keyspace);
882 14395 : }
883 446449 : Entry::Vacant(entry) => {
884 446449 : self.planned_visits_by_lsn.push(LayerVisit {
885 446449 : lsn_range,
886 446449 : layer_to_visit_id: layer_to_visit_id.clone(),
887 446449 : });
888 446449 : let mut accum = KeySpaceRandomAccum::new();
889 446449 : accum.add_keyspace(keyspace);
890 446449 : entry.insert(LayerVisitReads {
891 446449 : layer,
892 446449 : target_keyspace: accum,
893 446449 : });
894 446449 : }
895 : }
896 460844 : }
897 : }
898 :
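// Usage sketch of the fringe (hypothetical `layer_a` / `layer_b` and keyspaces): visits are
// popped in descending end-LSN order, so the read path descends from the most recent layers.
//
//     let mut fringe = LayerFringe::new();
//     fringe.update(layer_a, keyspace_a, Lsn(0x20)..Lsn(0x40));
//     fringe.update(layer_b, keyspace_b, Lsn(0x10)..Lsn(0x20));
//     let (layer, keyspace, lsn_range) = fringe.next_layer().unwrap();
//     assert_eq!(lsn_range, Lsn(0x20)..Lsn(0x40)); // layer_a is visited first
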
899 : impl Default for LayerFringe {
900 0 : fn default() -> Self {
901 0 : Self::new()
902 0 : }
903 : }
904 :
905 : impl Ord for LayerVisit {
906 89247 : fn cmp(&self, other: &Self) -> Ordering {
907 89247 : let ord = self.lsn_range.end.cmp(&other.lsn_range.end);
908 89247 : if ord == std::cmp::Ordering::Equal {
909 7221 : self.lsn_range.start.cmp(&other.lsn_range.start).reverse()
910 : } else {
911 82026 : ord
912 : }
913 89247 : }
914 : }
915 :
916 : impl PartialOrd for LayerVisit {
917 89247 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
918 89247 : Some(self.cmp(other))
919 89247 : }
920 : }
921 :
922 : impl PartialEq for LayerVisit {
923 0 : fn eq(&self, other: &Self) -> bool {
924 0 : self.lsn_range == other.lsn_range
925 0 : }
926 : }
927 :
928 : impl Eq for LayerVisit {}
929 :
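// Tie-breaking note (values are illustrative): when two visits share the same end LSN, the
// reversed `start` comparison makes the visit with the lower floor (the wider read) compare
// as greater, so the max-heap in `LayerFringe` pops it first.
//
//     // wide.lsn_range = Lsn(0x10)..Lsn(0x40), narrow.lsn_range = Lsn(0x20)..Lsn(0x40)
//     assert_eq!(wide.cmp(&narrow), Ordering::Greater); // wide is popped first
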
930 : impl ReadableLayer {
931 460844 : pub(crate) fn id(&self) -> LayerId {
932 460844 : match self {
933 149679 : Self::PersistentLayer(layer) => LayerId::PersitentLayerId(layer.layer_desc().key()),
934 311165 : Self::InMemoryLayer(layer) => LayerId::InMemoryLayerId(layer.file_id()),
935 : }
936 460844 : }
937 :
938 446449 : pub(crate) async fn get_values_reconstruct_data(
939 446449 : &self,
940 446449 : keyspace: KeySpace,
941 446449 : lsn_range: Range<Lsn>,
942 446449 : reconstruct_state: &mut ValuesReconstructState,
943 446449 : ctx: &RequestContext,
944 446449 : ) -> Result<(), GetVectoredError> {
945 446449 : match self {
946 139128 : ReadableLayer::PersistentLayer(layer) => {
947 139128 : let ctx = RequestContextBuilder::from(ctx)
948 139128 : .perf_span(|crnt_perf_span| {
949 0 : info_span!(
950 : target: PERF_TRACE_TARGET,
951 0 : parent: crnt_perf_span,
952 : "PLAN_LAYER",
953 : layer = %layer
954 : )
955 0 : })
956 139128 : .attached_child();
957 :
958 139128 : layer
959 139128 : .get_values_reconstruct_data(keyspace, lsn_range, reconstruct_state, &ctx)
960 139128 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
961 139128 : .await
962 : }
963 307321 : ReadableLayer::InMemoryLayer(layer) => {
964 307321 : let ctx = RequestContextBuilder::from(ctx)
965 307321 : .perf_span(|crnt_perf_span| {
966 0 : info_span!(
967 : target: PERF_TRACE_TARGET,
968 0 : parent: crnt_perf_span,
969 : "PLAN_LAYER",
970 : layer = %layer
971 : )
972 0 : })
973 307321 : .attached_child();
974 :
975 307321 : layer
976 307321 : .get_values_reconstruct_data(keyspace, lsn_range, reconstruct_state, &ctx)
977 307321 : .maybe_perf_instrument(&ctx, |crnt_perf_span| crnt_perf_span.clone())
978 307321 : .await
979 : }
980 : }
981 446449 : }
982 : }
983 :
984 : /// Layers contain a hint indicating whether they are likely to be used for reads.
985 : ///
986 : /// This is a hint rather than an authoritative value, so that we do not have to update it synchronously
987 : /// when changing the visibility of layers (for example when creating a branch that makes some previously
988 : /// covered layers visible). It should be used for cache management but not for correctness-critical checks.
989 : #[derive(Debug, Clone, PartialEq, Eq)]
990 : pub enum LayerVisibilityHint {
991 : /// A Visible layer might be read while serving a read, because there is not an image layer between it
992 : /// and a readable LSN (the tip of the branch or a child's branch point)
993 : Visible,
994 : /// A Covered layer probably won't be read right now, but _can_ be read in future if someone creates
995 : /// a branch or ephemeral endpoint at an LSN below the layer that covers this.
996 : Covered,
997 : }
998 :
999 : pub(crate) struct LayerAccessStats(std::sync::atomic::AtomicU64);
1000 :
1001 : #[derive(Clone, Copy, strum_macros::EnumString)]
1002 : pub(crate) enum LayerAccessStatsReset {
1003 : NoReset,
1004 : AllStats,
1005 : }
1006 :
1007 : impl Default for LayerAccessStats {
1008 991 : fn default() -> Self {
1009 : // Default value is to assume resident since creation time, and visible.
1010 991 : let (_mask, mut value) = Self::to_low_res_timestamp(Self::RTIME_SHIFT, SystemTime::now());
1011 991 : value |= 0x1 << Self::VISIBILITY_SHIFT;
1012 :
1013 991 : Self(std::sync::atomic::AtomicU64::new(value))
1014 991 : }
1015 : }
1016 :
1017 : // Efficient store of two very-low-resolution timestamps and some bits. Used for storing last access time and
1018 : // last residence change time.
1019 : impl LayerAccessStats {
1020 : // How many high bits to drop from a u32 timestamp?
1021 : // - Only storing up to a u32 timestamp will work fine until 2038 (if this code is still in use
1022 : // after that, this software has been very successful!)
1023 : // - Dropping the top bit is implicitly safe because unix timestamps are meant to be
1024 : // stored in an i32, so they never used it.
1025 : // - Dropping the next two bits is safe because this code is only running on systems in
1026 : // years >= 2024, and these bits have been 1 since 2021
1027 : //
1028 : // Therefore we may store only 29 bits for a timestamp with one second resolution. We do
1029 : // this truncation to make space for some flags in the high bits of our u64.
1030 : const TS_DROP_HIGH_BITS: u32 = u32::count_ones(Self::TS_ONES) + 1;
1031 : const TS_MASK: u32 = 0x1f_ff_ff_ff;
1032 : const TS_ONES: u32 = 0x60_00_00_00;
1033 :
1034 : const ATIME_SHIFT: u32 = 0;
1035 : const RTIME_SHIFT: u32 = 32 - Self::TS_DROP_HIGH_BITS;
1036 : const VISIBILITY_SHIFT: u32 = 64 - 2 * Self::TS_DROP_HIGH_BITS;
1037 :
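    // Resulting bit layout (derived from the constants above; TS_DROP_HIGH_BITS = 3, so each
    // timestamp occupies 29 bits of the u64):
    //
    //   bits  0..=28   low-resolution access time     (ATIME_SHIFT = 0)
    //   bits 29..=57   low-resolution residence time  (RTIME_SHIFT = 29)
    //   bit  58        visibility flag                (VISIBILITY_SHIFT = 58)
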
1038 137962 : fn write_bits(&self, mask: u64, value: u64) -> u64 {
1039 137962 : self.0
1040 137962 : .fetch_update(
1041 : // TODO: decide what orderings are correct
1042 137962 : std::sync::atomic::Ordering::Relaxed,
1043 137962 : std::sync::atomic::Ordering::Relaxed,
1044 137962 : |v| Some((v & !mask) | (value & mask)),
1045 : )
1046 137962 : .expect("Inner function is infallible")
1047 137962 : }
1048 :
1049 138756 : fn to_low_res_timestamp(shift: u32, time: SystemTime) -> (u64, u64) {
1050 : // Keep only the low 29 bits of the seconds value; the dropped high bits are re-added on read via TS_ONES.
1051 138756 : let timestamp = time.duration_since(UNIX_EPOCH).unwrap().as_secs() & (Self::TS_MASK as u64);
1052 :
1053 138756 : ((Self::TS_MASK as u64) << shift, timestamp << shift)
1054 138756 : }
1055 :
1056 73 : fn read_low_res_timestamp(&self, shift: u32) -> Option<SystemTime> {
1057 73 : let read = self.0.load(std::sync::atomic::Ordering::Relaxed);
1058 :
1059 73 : let ts_bits = (read & ((Self::TS_MASK as u64) << shift)) >> shift;
1060 73 : if ts_bits == 0 {
1061 33 : None
1062 : } else {
1063 40 : Some(UNIX_EPOCH + Duration::from_secs(ts_bits | (Self::TS_ONES as u64)))
1064 : }
1065 73 : }
1066 :
1067 : /// Record a change in layer residency.
1068 : ///
1069 : /// Recording the event must happen while holding the layer map lock to
1070 : /// ensure that latest-activity-threshold-based layer eviction (eviction_task.rs)
1071 : /// can do an "imitate access" to this layer, before it observes `now-latest_activity() > threshold`.
1072 : ///
1073 : /// If we instead recorded the residence event with a timestamp from before grabbing the layer map lock,
1074 : /// the following race could happen:
1075 : ///
1076 : /// - Compact: Write out an L1 layer from several L0 layers. This records residence event LayerCreate with the current timestamp.
1077 : /// - Eviction: imitate access logical size calculation. This accesses the L0 layers because the L1 layer is not yet in the layer map.
1078 : /// - Compact: Grab layer map lock, add the new L1 to layer map and remove the L0s, release layer map lock.
1079 : /// - Eviction: observes the new L1 layer whose only activity timestamp is the LayerCreate event.
1080 25 : pub(crate) fn record_residence_event_at(&self, now: SystemTime) {
1081 25 : let (mask, value) = Self::to_low_res_timestamp(Self::RTIME_SHIFT, now);
1082 25 : self.write_bits(mask, value);
1083 25 : }
1084 :
1085 24 : pub(crate) fn record_residence_event(&self) {
1086 24 : self.record_residence_event_at(SystemTime::now())
1087 24 : }
1088 :
1089 137740 : fn record_access_at(&self, now: SystemTime) -> bool {
1090 137740 : let (mut mask, mut value) = Self::to_low_res_timestamp(Self::ATIME_SHIFT, now);
1091 :
1092 : // A layer which is accessed must be visible.
1093 137740 : mask |= 0x1 << Self::VISIBILITY_SHIFT;
1094 137740 : value |= 0x1 << Self::VISIBILITY_SHIFT;
1095 :
1096 137740 : let old_bits = self.write_bits(mask, value);
1097 1 : !matches!(
1098 137740 : self.decode_visibility(old_bits),
1099 : LayerVisibilityHint::Visible
1100 : )
1101 137740 : }
1102 :
1103 : /// Returns true if we modified the layer's visibility to set it to Visible implicitly
1104 : /// as a result of this access
1105 139134 : pub(crate) fn record_access(&self, ctx: &RequestContext) -> bool {
1106 139134 : if ctx.access_stats_behavior() == AccessStatsBehavior::Skip {
1107 1397 : return false;
1108 137737 : }
1109 :
1110 137737 : self.record_access_at(SystemTime::now())
1111 139134 : }
1112 :
1113 0 : fn as_api_model(
1114 0 : &self,
1115 0 : reset: LayerAccessStatsReset,
1116 0 : ) -> pageserver_api::models::LayerAccessStats {
1117 0 : let ret = pageserver_api::models::LayerAccessStats {
1118 0 : access_time: self
1119 0 : .read_low_res_timestamp(Self::ATIME_SHIFT)
1120 0 : .unwrap_or(UNIX_EPOCH),
1121 0 : residence_time: self
1122 0 : .read_low_res_timestamp(Self::RTIME_SHIFT)
1123 0 : .unwrap_or(UNIX_EPOCH),
1124 0 : visible: matches!(self.visibility(), LayerVisibilityHint::Visible),
1125 : };
1126 0 : match reset {
1127 0 : LayerAccessStatsReset::NoReset => {}
1128 0 : LayerAccessStatsReset::AllStats => {
1129 0 : self.write_bits((Self::TS_MASK as u64) << Self::ATIME_SHIFT, 0x0);
1130 0 : self.write_bits((Self::TS_MASK as u64) << Self::RTIME_SHIFT, 0x0);
1131 0 : }
1132 : }
1133 0 : ret
1134 0 : }
1135 :
1136 : /// Get the latest access timestamp, falling back to latest residence event. The latest residence event
1137 : /// will be this Layer's construction time, if its residence hasn't changed since then.
1138 21 : pub(crate) fn latest_activity(&self) -> SystemTime {
1139 21 : if let Some(t) = self.read_low_res_timestamp(Self::ATIME_SHIFT) {
1140 3 : t
1141 : } else {
1142 18 : self.read_low_res_timestamp(Self::RTIME_SHIFT)
1143 18 : .expect("Residence time is set on construction")
1144 : }
1145 21 : }
1146 :
1147 : /// Whether this layer has been accessed (excluding in [`AccessStatsBehavior::Skip`]).
1148 : ///
1149 : /// This indicates whether the layer has been used for some purpose that would motivate
1150 : /// us to keep it on disk, such as for serving a getpage request.
1151 17 : fn accessed(&self) -> bool {
1152 : // Consider it accessed if the most recent access is more recent than
1153 : // the most recent change in residence status.
1154 : match (
1155 17 : self.read_low_res_timestamp(Self::ATIME_SHIFT),
1156 17 : self.read_low_res_timestamp(Self::RTIME_SHIFT),
1157 : ) {
1158 15 : (None, _) => false,
1159 0 : (Some(_), None) => true,
1160 2 : (Some(a), Some(r)) => a >= r,
1161 : }
1162 17 : }
1163 :
1164 : /// Helper for extracting the visibility hint from the literal value of our inner u64
1165 138341 : fn decode_visibility(&self, bits: u64) -> LayerVisibilityHint {
1166 138341 : match (bits >> Self::VISIBILITY_SHIFT) & 0x1 {
1167 138334 : 1 => LayerVisibilityHint::Visible,
1168 7 : 0 => LayerVisibilityHint::Covered,
1169 0 : _ => unreachable!(),
1170 : }
1171 138341 : }
1172 :
1173 : /// Returns the old value which has been replaced
1174 197 : pub(crate) fn set_visibility(&self, visibility: LayerVisibilityHint) -> LayerVisibilityHint {
1175 197 : let value = match visibility {
1176 180 : LayerVisibilityHint::Visible => 0x1 << Self::VISIBILITY_SHIFT,
1177 17 : LayerVisibilityHint::Covered => 0x0,
1178 : };
1179 :
1180 197 : let old_bits = self.write_bits(0x1 << Self::VISIBILITY_SHIFT, value);
1181 197 : self.decode_visibility(old_bits)
1182 197 : }
1183 :
1184 404 : pub(crate) fn visibility(&self) -> LayerVisibilityHint {
1185 404 : let read = self.0.load(std::sync::atomic::Ordering::Relaxed);
1186 404 : self.decode_visibility(read)
1187 404 : }
1188 : }
1189 :
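// A minimal sketch of the visibility side effect of recording an access (an editorial
// example, not from the original source): an access on a Covered layer implicitly flips it
// back to Visible, and `record_access_at` reports that it did so.
#[cfg(test)]
#[test]
fn layer_access_stats_access_restores_visibility() {
    let stats = LayerAccessStats::default(); // visible (and resident) since construction
    assert!(!stats.record_access_at(SystemTime::now())); // already visible: no change
    stats.set_visibility(LayerVisibilityHint::Covered);
    assert!(stats.record_access_at(SystemTime::now())); // was covered: flipped to visible
    assert_eq!(stats.visibility(), LayerVisibilityHint::Visible);
}
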
1190 : /// Get a layer descriptor from a layer.
1191 : pub(crate) trait AsLayerDesc {
1192 : /// Get the layer descriptor.
1193 : fn layer_desc(&self) -> &PersistentLayerDesc;
1194 : }
1195 :
1196 : pub mod tests {
1197 : use pageserver_api::shard::TenantShardId;
1198 : use utils::id::TimelineId;
1199 :
1200 : use super::*;
1201 :
1202 : impl From<DeltaLayerName> for PersistentLayerDesc {
1203 11 : fn from(value: DeltaLayerName) -> Self {
1204 11 : PersistentLayerDesc::new_delta(
1205 11 : TenantShardId::from([0; 18]),
1206 11 : TimelineId::from_array([0; 16]),
1207 11 : value.key_range,
1208 11 : value.lsn_range,
1209 : 233,
1210 : )
1211 11 : }
1212 : }
1213 :
1214 : impl From<ImageLayerName> for PersistentLayerDesc {
1215 12 : fn from(value: ImageLayerName) -> Self {
1216 12 : PersistentLayerDesc::new_img(
1217 12 : TenantShardId::from([0; 18]),
1218 12 : TimelineId::from_array([0; 16]),
1219 12 : value.key_range,
1220 12 : value.lsn,
1221 : 233,
1222 : )
1223 12 : }
1224 : }
1225 :
1226 : impl From<LayerName> for PersistentLayerDesc {
1227 23 : fn from(value: LayerName) -> Self {
1228 23 : match value {
1229 11 : LayerName::Delta(d) => Self::from(d),
1230 12 : LayerName::Image(i) => Self::from(i),
1231 : }
1232 23 : }
1233 : }
1234 : }
1235 :
1236 : /// Range wrapping newtype, which uses display to render Debug.
1237 : ///
1238 : /// Useful with `Key`, which has too verbose `{:?}` for printing multiple layers.
1239 : struct RangeDisplayDebug<'a, T: std::fmt::Display>(&'a Range<T>);
1240 :
1241 : impl<T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'_, T> {
1242 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1243 0 : write!(f, "{}..{}", self.0.start, self.0.end)
1244 0 : }
1245 : }
1246 :
1247 : #[cfg(test)]
1248 : mod tests2 {
1249 : use pageserver_api::key::DBDIR_KEY;
1250 : use tracing::info;
1251 :
1252 : use super::*;
1253 : use crate::tenant::storage_layer::IoConcurrency;
1254 :
1255 : /// TODO: currently this test relies on manual visual inspection of the --no-capture output.
1256 : /// Should look like so:
1257 : /// ```text
1258 : /// RUST_LOG=trace cargo nextest run --features testing --no-capture test_io_concurrency_noise
1259 : /// running 1 test
1260 : /// 2025-01-21T17:42:01.335679Z INFO get_vectored_concurrent_io test selected=SidecarTask
1261 : /// 2025-01-21T17:42:01.335680Z TRACE spawning sidecar task task_id=0
1262 : /// 2025-01-21T17:42:01.335937Z TRACE IoConcurrency_sidecar{task_id=0}: start
1263 : /// 2025-01-21T17:42:01.335972Z TRACE IoConcurrency_sidecar{task_id=0}: received new io future
1264 : /// 2025-01-21T17:42:01.335999Z INFO IoConcurrency_sidecar{task_id=0}: waiting for signal to complete IO
1265 : /// 2025-01-21T17:42:01.336229Z WARN dropping ValuesReconstructState while some IOs have not been completed num_active_ios=1 sidecar_task_id=Some(0) backtrace= 0: <pageserver::tenant::storage_layer::ValuesReconstructState as core::ops::drop::Drop>::drop
1266 : /// at ./src/tenant/storage_layer.rs:553:24
1267 : /// 1: core::ptr::drop_in_place<pageserver::tenant::storage_layer::ValuesReconstructState>
1268 : /// at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/ptr/mod.rs:521:1
1269 : /// 2: core::mem::drop
1270 : /// at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/mem/mod.rs:942:24
1271 : /// 3: pageserver::tenant::storage_layer::tests2::test_io_concurrency_noise::{{closure}}
1272 : /// at ./src/tenant/storage_layer.rs:1159:9
1273 : /// ...
1274 : /// 49: <unknown>
1275 : /// 2025-01-21T17:42:01.452293Z INFO IoConcurrency_sidecar{task_id=0}: completing IO
1276 : /// 2025-01-21T17:42:01.452357Z TRACE IoConcurrency_sidecar{task_id=0}: io future completed
1277 : /// 2025-01-21T17:42:01.452473Z TRACE IoConcurrency_sidecar{task_id=0}: end
1278 : /// test tenant::storage_layer::tests2::test_io_concurrency_noise ... ok
1279 : ///
1280 : /// ```
1281 : #[tokio::test]
1282 1 : async fn test_io_concurrency_noise() {
1283 1 : crate::tenant::harness::setup_logging();
1284 :
1285 1 : let io_concurrency = IoConcurrency::spawn_for_test();
1286 1 : match *io_concurrency {
1287 : IoConcurrency::Sequential => {
1288 : // This test asserts behavior in sidecar mode, doesn't make sense in sequential mode.
1289 0 : return;
1290 : }
1291 1 : IoConcurrency::SidecarTask { .. } => {}
1292 : }
1293 1 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
1294 :
1295 1 : let (io_fut_is_waiting_tx, io_fut_is_waiting) = tokio::sync::oneshot::channel();
1296 1 : let (do_complete_io, should_complete_io) = tokio::sync::oneshot::channel();
1297 1 : let (io_fut_exiting_tx, io_fut_exiting) = tokio::sync::oneshot::channel();
1298 :
1299 1 : let io = reconstruct_state.update_key(&DBDIR_KEY, Lsn(8), true);
1300 1 : reconstruct_state
1301 1 : .spawn_io(async move {
1302 1 : info!("waiting for signal to complete IO");
1303 1 : io_fut_is_waiting_tx.send(()).unwrap();
1304 1 : should_complete_io.await.unwrap();
1305 1 : info!("completing IO");
1306 1 : io.complete(Ok(OnDiskValue::RawImage(Bytes::new())));
1307 1 : io_fut_exiting_tx.send(()).unwrap();
1308 1 : })
1309 1 : .await;
1310 :
1311 1 : io_fut_is_waiting.await.unwrap();
1312 :
1313 : // this is what makes the noise
1314 1 : drop(reconstruct_state);
1315 :
1316 1 : do_complete_io.send(()).unwrap();
1317 :
1318 1 : io_fut_exiting.await.unwrap();
1319 1 : }
1320 : }