Line data Source code
1 : //! Common traits and structs for layers
2 :
3 : pub mod batch_split_writer;
4 : pub mod delta_layer;
5 : pub mod filter_iterator;
6 : pub mod image_layer;
7 : pub mod inmemory_layer;
8 : pub(crate) mod layer;
9 : mod layer_desc;
10 : mod layer_name;
11 : pub mod merge_iterator;
12 :
13 : use crate::config::PageServerConf;
14 : use crate::context::{AccessStatsBehavior, RequestContext};
15 : use bytes::Bytes;
16 : use futures::stream::FuturesUnordered;
17 : use futures::StreamExt;
18 : use pageserver_api::key::Key;
19 : use pageserver_api::keyspace::{KeySpace, KeySpaceRandomAccum};
20 : use pageserver_api::record::NeonWalRecord;
21 : use pageserver_api::value::Value;
22 : use std::cmp::Ordering;
23 : use std::collections::hash_map::Entry;
24 : use std::collections::{BinaryHeap, HashMap};
25 : use std::future::Future;
26 : use std::ops::Range;
27 : use std::pin::Pin;
28 : use std::sync::atomic::AtomicUsize;
29 : use std::sync::Arc;
30 : use std::time::{Duration, SystemTime, UNIX_EPOCH};
31 : use tracing::{trace, Instrument};
32 : use utils::sync::gate::GateGuard;
33 :
34 : use utils::lsn::Lsn;
35 :
36 : pub use batch_split_writer::{BatchLayerWriter, SplitDeltaLayerWriter, SplitImageLayerWriter};
37 : pub use delta_layer::{DeltaLayer, DeltaLayerWriter, ValueRef};
38 : pub use image_layer::{ImageLayer, ImageLayerWriter};
39 : pub use inmemory_layer::InMemoryLayer;
40 : pub use layer_desc::{PersistentLayerDesc, PersistentLayerKey};
41 : pub use layer_name::{DeltaLayerName, ImageLayerName, LayerName};
42 :
43 : pub(crate) use layer::{EvictionError, Layer, ResidentLayer};
44 :
45 : use self::inmemory_layer::InMemoryLayerFileId;
46 :
47 : use super::timeline::GetVectoredError;
48 : use super::PageReconstructError;
49 :
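/// Returns whether two half-open ranges overlap, i.e., share at least one point.
///
/// A minimal illustration (hedged doc-example, not part of the upstream tests);
/// merely adjacent ranges share no point and thus do not overlap:
///
/// ```ignore
/// assert!(range_overlaps(&(1..3), &(2..5))); // both contain 2
/// assert!(!range_overlaps(&(1..3), &(3..5))); // adjacent at 3, no shared point
/// ```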
50 0 : pub fn range_overlaps<T>(a: &Range<T>, b: &Range<T>) -> bool
51 0 : where
52 0 : T: PartialOrd<T>,
53 0 : {
54 0 : if a.start < b.start {
55 0 : a.end > b.start
56 : } else {
57 0 : b.end > a.start
58 : }
59 0 : }
60 :
61 : /// Struct used to communicate across calls to 'get_value_reconstruct_data'.
62 : ///
63 : /// Before the first call, you can fill in 'img' if you have an older cached
64 : /// version of the page available. That can save work in
65 : /// 'get_value_reconstruct_data', as it can stop searching for page versions
66 : /// when all the WAL records going back to the cached image have been collected.
67 : ///
68 : /// When 'get_value_reconstruct_data' returns Complete, either 'img' is set to an
69 : /// image of the page, or the oldest WAL record in 'records' is a will_init-type
70 : /// record that initializes the page without requiring a previous image.
71 : ///
72 : /// If 'get_value_reconstruct_data' returns Continue, some 'records' may have
73 : /// been collected, but there are more records outside the current layer. Pass
74 : /// the same ValueReconstructState struct in the next 'get_value_reconstruct_data'
75 : /// call, to collect more records.
76 : ///
77 : #[derive(Debug, Default)]
78 : pub(crate) struct ValueReconstructState {
79 : pub(crate) records: Vec<(Lsn, NeonWalRecord)>,
80 : pub(crate) img: Option<(Lsn, Bytes)>,
81 : }
82 :
83 : impl ValueReconstructState {
84 : /// Returns the number of page deltas applied to the page image.
85 1335896 : pub fn num_deltas(&self) -> usize {
86 1335896 : match self.img {
87 1335808 : Some(_) => self.records.len(),
88 88 : None => self.records.len() - 1, // omit will_init record
89 : }
90 1335896 : }
91 : }
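
// Hedged sketch of how a completed `ValueReconstructState` is consumed
// downstream: start from `img` if present (otherwise the oldest record must
// be `will_init`) and replay the WAL records via the WAL-redo machinery,
// which lives outside this module. `walredo_apply` is a hypothetical
// stand-in for that call:
//
//     let ValueReconstructState { records, img } = state;
//     // let page: Bytes = walredo_apply(img, records)?; // hypothetical redo helper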
92 :
93 : #[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
94 : pub(crate) enum ValueReconstructSituation {
95 : Complete,
96 : #[default]
97 : Continue,
98 : }
99 :
100 : /// On disk representation of a value loaded in a buffer
101 : #[derive(Debug)]
102 : pub(crate) enum OnDiskValue {
103 : /// Unencoded [`Value::Image`]
104 : RawImage(Bytes),
105 : /// Encoded [`Value`]. Can deserialize into an image or a WAL record
106 : WalRecordOrImage(Bytes),
107 : }
108 :
109 : /// Reconstruct data accumulated for a single key during a vectored get
110 : #[derive(Debug, Default)]
111 : pub(crate) struct VectoredValueReconstructState {
112 : pub(crate) on_disk_values: Vec<(Lsn, OnDiskValueIoWaiter)>,
113 :
114 : pub(crate) situation: ValueReconstructSituation,
115 : }
116 :
117 : #[derive(Debug)]
118 : pub(crate) struct OnDiskValueIoWaiter {
119 : rx: tokio::sync::oneshot::Receiver<OnDiskValueIoResult>,
120 : }
121 :
122 : #[derive(Debug)]
123 : #[must_use]
124 : pub(crate) enum OnDiskValueIo {
125 : /// Traversal identified this IO as required to complete the vectored get.
126 : Required {
127 : num_active_ios: Arc<AtomicUsize>,
128 : tx: tokio::sync::oneshot::Sender<OnDiskValueIoResult>,
129 : },
130 : /// Sparse keyspace reads always read all the values for a given key,
131 : /// even though only the first value is needed.
132 : ///
133 : /// This variant represents the unnecessary IOs for those values at lower LSNs
134 : /// that aren't needed, but are currently still being done.
135 : ///
136 : /// The execution of unnecessary IOs was a pre-existing behavior before concurrent IO.
137 : /// We added this explicit representation here so that we can drop
138 : /// unnecessary IO results immediately, instead of buffering them in
139 : /// `oneshot` channels inside [`VectoredValueReconstructState`] until
140 : /// [`VectoredValueReconstructState::collect_pending_ios`] gets called.
141 : Unnecessary,
142 : }
143 :
144 : type OnDiskValueIoResult = Result<OnDiskValue, std::io::Error>;
145 :
146 : impl OnDiskValueIo {
147 1482389 : pub(crate) fn complete(self, res: OnDiskValueIoResult) {
148 1482389 : match self {
149 1337350 : OnDiskValueIo::Required { num_active_ios, tx } => {
150 1337350 : num_active_ios.fetch_sub(1, std::sync::atomic::Ordering::Release);
151 1337350 : let _ = tx.send(res);
152 1337350 : }
153 145039 : OnDiskValueIo::Unnecessary => {
154 145039 : // Nobody cared, see variant doc comment.
155 145039 : }
156 : }
157 1482389 : }
158 : }
159 :
160 : #[derive(Debug, thiserror::Error)]
161 : pub(crate) enum WaitCompletionError {
162 : #[error("OnDiskValueIo was dropped without completing, likely the sidecar task panicked")]
163 : IoDropped,
164 : }
165 :
166 : impl OnDiskValueIoWaiter {
167 1337348 : pub(crate) async fn wait_completion(self) -> Result<OnDiskValueIoResult, WaitCompletionError> {
168 1337348 : // NB: for Unnecessary IOs, this method never gets called because we don't add them to `on_disk_values`.
169 1337348 : self.rx.await.map_err(|_| WaitCompletionError::IoDropped)
170 1337348 : }
171 : }
172 :
173 : impl VectoredValueReconstructState {
174 : /// # Cancel-Safety
175 : ///
176 : /// Technically it is fine to stop polling this future, but the IOs will still
177 : /// be executed to completion by the sidecar task and hold on to / consume resources.
178 : /// Better not to do it, to make reasoning about the system easier.
179 1336036 : pub(crate) async fn collect_pending_ios(
180 1336036 : self,
181 1336036 : ) -> Result<ValueReconstructState, PageReconstructError> {
182 : use utils::bin_ser::BeSer;
183 :
184 1336036 : let mut res = Ok(ValueReconstructState::default());
185 :
186 : // We should try hard not to bail early, so that by the time we return from this
187 : // function, all IO for this value is done. It's not required -- we could simply
188 : // stop polling the IO futures in the sidecar task (they need to support that),
189 : // but merely ceasing to poll doesn't reduce the IO load on the disk. It's easier
190 : // to reason about the system if we just wait for all IO to complete, even if
191 : // we're no longer interested in the result.
192 : //
193 : // Revisit this when IO futures are replaced with a more sophisticated IO system
194 : // and an IO scheduler, where we know which IOs were submitted and which ones
195 : // just queued. Cf the comment on IoConcurrency::spawn_io.
196 2673384 : for (lsn, waiter) in self.on_disk_values {
197 1337348 : let value_recv_res = waiter
198 1337348 : .wait_completion()
199 1337348 : // we rely on the caller to poll us to completion, so this is not a bail point
200 1337348 : .await;
201 : // Force not bailing early by wrapping the code into a closure.
202 : #[allow(clippy::redundant_closure_call)]
203 1337348 : let _: () = (|| {
204 1337348 : match (&mut res, value_recv_res) {
205 0 : (Err(_), _) => {
206 0 : // We've already failed, no need to process more.
207 0 : }
208 0 : (Ok(_), Err(wait_err)) => {
209 0 : // This shouldn't happen - likely the sidecar task panicked.
210 0 : res = Err(PageReconstructError::Other(wait_err.into()));
211 0 : }
212 0 : (Ok(_), Ok(Err(err))) => {
213 0 : let err: std::io::Error = err;
214 0 : // TODO: returning IO error here will fail a compute query.
215 0 : // Probably not what we want, we're not doing `maybe_fatal_err`
216 0 : // in the IO futures.
217 0 : // But it's been like that for a long time, not changing it
218 0 : // as part of concurrent IO.
219 0 : // => https://github.com/neondatabase/neon/issues/10454
220 0 : res = Err(PageReconstructError::Other(err.into()));
221 0 : }
222 38345 : (Ok(ok), Ok(Ok(OnDiskValue::RawImage(img)))) => {
223 38345 : assert!(ok.img.is_none());
224 38345 : ok.img = Some((lsn, img));
225 : }
226 1299003 : (Ok(ok), Ok(Ok(OnDiskValue::WalRecordOrImage(buf)))) => {
227 1299003 : match Value::des(&buf) {
228 1400 : Ok(Value::WalRecord(rec)) => {
229 1400 : ok.records.push((lsn, rec));
230 1400 : }
231 1297603 : Ok(Value::Image(img)) => {
232 1297603 : assert!(ok.img.is_none());
233 1297603 : ok.img = Some((lsn, img));
234 : }
235 0 : Err(err) => {
236 0 : res = Err(PageReconstructError::Other(err.into()));
237 0 : }
238 : }
239 : }
240 : }
241 1337348 : })();
242 1337348 : }
243 :
244 1336036 : res
245 1336036 : }
246 : }
247 :
248 : /// Bag of data accumulated during a vectored get.
249 : pub(crate) struct ValuesReconstructState {
250 : /// The keys will be removed after `get_vectored` completes. The caller outside `Timeline`
251 : /// should not expect to get anything from this hashmap.
252 : pub(crate) keys: HashMap<Key, VectoredValueReconstructState>,
253 : /// The keys which are already retrieved
254 : keys_done: KeySpaceRandomAccum,
255 :
256 : /// The keys covered by the image layers
257 : keys_with_image_coverage: Option<Range<Key>>,
258 :
259 : // Statistics that remain accessible to callers of `get_vectored_impl`.
260 : layers_visited: u32,
261 : delta_layers_visited: u32,
262 :
263 : pub(crate) io_concurrency: IoConcurrency,
264 : num_active_ios: Arc<AtomicUsize>,
265 : }
266 :
267 : /// The level of IO concurrency to be used on the read path
268 : ///
269 : /// The desired end state is that we always do parallel IO.
270 : /// This struct and the dispatching in the impl will be removed once
271 : /// we've built enough confidence.
272 : pub(crate) enum IoConcurrency {
273 : Sequential,
274 : SidecarTask {
275 : task_id: usize,
276 : ios_tx: tokio::sync::mpsc::UnboundedSender<IoFuture>,
277 : },
278 : }
279 :
280 : type IoFuture = Pin<Box<dyn Send + Future<Output = ()>>>;
281 :
282 : pub(crate) enum SelectedIoConcurrency {
283 : Sequential,
284 : SidecarTask(GateGuard),
285 : }
286 :
287 : impl std::fmt::Debug for IoConcurrency {
288 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
289 0 : match self {
290 0 : IoConcurrency::Sequential => write!(f, "Sequential"),
291 0 : IoConcurrency::SidecarTask { .. } => write!(f, "SidecarTask"),
292 : }
293 0 : }
294 : }
295 :
296 : impl std::fmt::Debug for SelectedIoConcurrency {
297 64 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
298 64 : match self {
299 32 : SelectedIoConcurrency::Sequential => write!(f, "Sequential"),
300 32 : SelectedIoConcurrency::SidecarTask(_) => write!(f, "SidecarTask"),
301 : }
302 64 : }
303 : }
304 :
305 : impl IoConcurrency {
306 : /// Force sequential IO. This is a temporary workaround until we have
307 : /// finished plumbing `IoConcurrency`
308 : /// through the call stack into `RequestContext`.
309 : ///
310 : /// DO NOT USE for new code.
311 : ///
312 : /// Tracking issue: <https://github.com/neondatabase/neon/issues/10460>.
313 1215176 : pub(crate) fn sequential() -> Self {
314 1215176 : Self::spawn(SelectedIoConcurrency::Sequential)
315 1215176 : }
316 :
317 1012 : pub(crate) fn spawn_from_conf(
318 1012 : conf: &'static PageServerConf,
319 1012 : gate_guard: GateGuard,
320 1012 : ) -> IoConcurrency {
321 : use pageserver_api::config::GetVectoredConcurrentIo;
322 1012 : let selected = match conf.get_vectored_concurrent_io {
323 1012 : GetVectoredConcurrentIo::Sequential => SelectedIoConcurrency::Sequential,
324 0 : GetVectoredConcurrentIo::SidecarTask => SelectedIoConcurrency::SidecarTask(gate_guard),
325 : };
326 1012 : Self::spawn(selected)
327 1012 : }
328 :
329 1216252 : pub(crate) fn spawn(io_concurrency: SelectedIoConcurrency) -> Self {
330 1216252 : match io_concurrency {
331 1216220 : SelectedIoConcurrency::Sequential => IoConcurrency::Sequential,
332 32 : SelectedIoConcurrency::SidecarTask(gate_guard) => {
333 32 : let (ios_tx, ios_rx) = tokio::sync::mpsc::unbounded_channel();
334 : static TASK_ID: AtomicUsize = AtomicUsize::new(0);
335 32 : let task_id = TASK_ID.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
336 : // TODO: enrich the span with more context (tenant,shard,timeline) + (basebackup|pagestream|...)
337 32 : let span =
338 32 : tracing::info_span!(parent: None, "IoConcurrency_sidecar", task_id = task_id);
339 32 : trace!(task_id, "spawning sidecar task");
340 32 : tokio::spawn(async move {
341 32 : trace!("start");
342 32 : scopeguard::defer!{ trace!("end") };
343 : type IosRx = tokio::sync::mpsc::UnboundedReceiver<IoFuture>;
344 : enum State {
345 : Waiting {
346 : // invariant: is_empty(), but we recycle the allocation
347 : empty_futures: FuturesUnordered<IoFuture>,
348 : ios_rx: IosRx,
349 : },
350 : Executing {
351 : futures: FuturesUnordered<IoFuture>,
352 : ios_rx: IosRx,
353 : },
354 : ShuttingDown {
355 : futures: FuturesUnordered<IoFuture>,
356 : },
357 : }
358 32 : let mut state = State::Waiting {
359 32 : empty_futures: FuturesUnordered::new(),
360 32 : ios_rx,
361 32 : };
362 : loop {
363 39142 : match state {
364 : State::Waiting {
365 18543 : empty_futures,
366 18543 : mut ios_rx,
367 18543 : } => {
368 18543 : assert!(empty_futures.is_empty());
369 18543 : tokio::select! {
370 18543 : fut = ios_rx.recv() => {
371 18511 : if let Some(fut) = fut {
372 18511 : trace!("received new io future");
373 18511 : empty_futures.push(fut);
374 18511 : state = State::Executing { futures: empty_futures, ios_rx };
375 : } else {
376 0 : state = State::ShuttingDown { futures: empty_futures }
377 : }
378 : }
379 : }
380 : }
381 : State::Executing {
382 20599 : mut futures,
383 20599 : mut ios_rx,
384 20599 : } => {
385 20599 : tokio::select! {
386 20599 : res = futures.next() => {
387 19555 : trace!("io future completed");
388 19555 : assert!(res.is_some());
389 19555 : if futures.is_empty() {
390 18511 : state = State::Waiting { empty_futures: futures, ios_rx};
391 18511 : } else {
392 1044 : state = State::Executing { futures, ios_rx };
393 1044 : }
394 : }
395 20599 : fut = ios_rx.recv() => {
396 1044 : if let Some(fut) = fut {
397 1044 : trace!("received new io future");
398 1044 : futures.push(fut);
399 1044 : state = State::Executing { futures, ios_rx};
400 0 : } else {
401 0 : state = State::ShuttingDown { futures };
402 0 : }
403 : }
404 : }
405 : }
406 : State::ShuttingDown {
407 0 : mut futures,
408 0 : } => {
409 0 : trace!("shutting down");
410 0 : while let Some(()) = futures.next().await {
411 0 : trace!("io future completed (shutdown)");
412 : // drain
413 : }
414 0 : trace!("shutdown complete");
415 0 : break;
416 0 : }
417 0 : }
418 0 : }
419 0 : drop(gate_guard); // drop it right before we exit
420 32 : }.instrument(span));
421 32 : IoConcurrency::SidecarTask { task_id, ios_tx }
422 : }
423 : }
424 1216252 : }
425 :
426 76338 : pub(crate) fn clone(&self) -> Self {
427 76338 : match self {
428 39460 : IoConcurrency::Sequential => IoConcurrency::Sequential,
429 36878 : IoConcurrency::SidecarTask { task_id, ios_tx } => IoConcurrency::SidecarTask {
430 36878 : task_id: *task_id,
431 36878 : ios_tx: ios_tx.clone(),
432 36878 : },
433 : }
434 76338 : }
435 :
436 : /// Submit an IO to be executed in the background. DEADLOCK RISK, read the full doc string.
437 : ///
438 : /// The IO is represented as an opaque future.
439 : /// IO completion must be handled inside the future, e.g., through a oneshot channel.
440 : ///
441 : /// The API seems simple but there are multiple **pitfalls** involving
442 : /// DEADLOCK RISK.
443 : ///
444 : /// First, there are no guarantees about the execution of the IO.
445 : /// It may be `await`ed in-place before this function returns.
446 : /// It may be polled partially by this task and handed off to another task to be finished.
447 : /// It may be polled and then dropped before returning ready.
448 : ///
449 : /// This means that submitted IOs must not be interdependent.
450 : /// Interdependence may be through shared limited resources, e.g.,
451 : /// - VirtualFile file descriptor cache slot acquisition
452 : /// - tokio-epoll-uring slot
453 : ///
454 : /// # Why current usage is safe from deadlocks
455 : ///
456 : /// Textbook condition for a deadlock is that _all_ of the following be given
457 : /// - Mutual exclusion
458 : /// - Hold and wait
459 : /// - No preemption
460 : /// - Circular wait
461 : ///
462 : /// The current usage is safe because:
463 : /// - Mutual exclusion: IO futures definitely use mutexes, no way around that for now
464 : /// - Hold and wait: IO futures currently hold two kinds of locks/resources while waiting
465 : /// for acquisition of other resources:
466 : /// - VirtualFile file descriptor cache slot tokio mutex
467 : /// - tokio-epoll-uring slot (uses tokio notify => wait queue, much like mutex)
468 : /// - No preemption: there's no taking-away of acquired locks/resources => given
469 : /// - Circular wait: this is the part of the condition that isn't met: all IO futures
470 : /// first acquire VirtualFile mutex, then tokio-epoll-uring slot.
471 : /// There is no IO future that acquires slot before VirtualFile.
472 : /// Hence there can be no circular waiting.
473 : /// Hence there cannot be a deadlock.
474 : ///
475 : /// This is a very fragile situation and must be revisited whenever any code called from
476 : /// inside the IO futures is changed.
477 : ///
478 : /// We will move away from opaque IO futures towards well-defined IOs at some point in
479 : /// the future when we have shipped this first version of concurrent IO to production
480 : /// and are ready to retire the Sequential mode which runs the futures in place.
481 : /// Right now, while brittle, the opaque IO approach allows us to ship the feature
482 : /// with minimal changes to the code and minimal changes to existing behavior in Sequential mode.
483 : ///
484 : /// Also read the comment in `collect_pending_ios`.
485 1525513 : pub(crate) async fn spawn_io<F>(&mut self, fut: F)
486 1525513 : where
487 1525513 : F: std::future::Future<Output = ()> + Send + 'static,
488 1525513 : {
489 1525513 : match self {
490 1505958 : IoConcurrency::Sequential => fut.await,
491 19555 : IoConcurrency::SidecarTask { ios_tx, .. } => {
492 19555 : let fut = Box::pin(fut);
493 19555 : // NB: experiments showed that doing an opportunistic poll of `fut` here was bad for throughput
494 19555 : // while insignificant for latency.
495 19555 : // It would make sense to revisit the tokio-epoll-uring API in the future such that we can try
496 19555 : // a submission here, but never poll the future. That way, io_uring can make progress while
497 19555 : // the future sits in the ios_tx queue.
498 19555 : match ios_tx.send(fut) {
499 19555 : Ok(()) => {}
500 : Err(_) => {
501 0 : unreachable!("the io task must have exited, likely it panicked")
502 : }
503 : }
504 : }
505 : }
506 1525513 : }
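
// Usage sketch (hedged; mirrors `test_io_concurrency_noise` at the bottom of
// this file): the submitted future owns a oneshot sender and reports its
// result through it, so completion can be awaited independently of where and
// when the future gets polled.
//
//     let (tx, rx) = tokio::sync::oneshot::channel::<OnDiskValueIoResult>();
//     io_concurrency
//         .spawn_io(async move {
//             let res = read_value_from_disk().await; // hypothetical IO
//             let _ = tx.send(res); // receiver may be gone; ignore the error
//         })
//         .await;
//     // ... later: let _res = rx.await;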
507 :
508 : #[cfg(test)]
509 64 : pub(crate) fn spawn_for_test() -> impl std::ops::DerefMut<Target = Self> {
510 : use std::ops::{Deref, DerefMut};
511 : use tracing::info;
512 : use utils::sync::gate::Gate;
513 :
514 : // Spawn needs a Gate, give it one.
515 : struct Wrapper {
516 : inner: IoConcurrency,
517 : #[allow(dead_code)]
518 : gate: Box<Gate>,
519 : }
520 : impl Deref for Wrapper {
521 : type Target = IoConcurrency;
522 :
523 36974 : fn deref(&self) -> &Self::Target {
524 36974 : &self.inner
525 36974 : }
526 : }
527 : impl DerefMut for Wrapper {
528 0 : fn deref_mut(&mut self) -> &mut Self::Target {
529 0 : &mut self.inner
530 0 : }
531 : }
532 64 : let gate = Box::new(Gate::default());
533 :
534 : // The default behavior when running Rust unit tests without any further
535 : // flags is to use the new behavior.
536 : // The CI uses the following environment variable to unit test both old
537 : // and new behavior.
538 : // NB: the Python regression & perf tests take the `else` branch
539 : // below and have their own defaults management.
540 64 : let selected = {
541 : // The pageserver_api::config type is unsuitable because it's internally tagged.
542 64 : #[derive(serde::Deserialize)]
543 : #[serde(rename_all = "kebab-case")]
544 : enum TestOverride {
545 : Sequential,
546 : SidecarTask,
547 : }
548 : use once_cell::sync::Lazy;
549 64 : static TEST_OVERRIDE: Lazy<TestOverride> = Lazy::new(|| {
550 64 : utils::env::var_serde_json_string(
551 64 : "NEON_PAGESERVER_UNIT_TEST_GET_VECTORED_CONCURRENT_IO",
552 64 : )
553 64 : .unwrap_or(TestOverride::SidecarTask)
554 64 : });
555 :
556 64 : match *TEST_OVERRIDE {
557 32 : TestOverride::Sequential => SelectedIoConcurrency::Sequential,
558 : TestOverride::SidecarTask => {
559 32 : SelectedIoConcurrency::SidecarTask(gate.enter().expect("just created it"))
560 : }
561 : }
562 : };
563 :
564 64 : info!(?selected, "get_vectored_concurrent_io test");
565 :
566 64 : Wrapper {
567 64 : inner: Self::spawn(selected),
568 64 : gate,
569 64 : }
570 64 : }
571 : }
572 :
573 : /// Make noise in case the [`ValuesReconstructState`] gets dropped while
574 : /// there are still IOs in flight.
575 : /// Refer to `collect_pending_ios` for why we prefer not to do that.
576 : ///
577 : /// We log from here instead of from the sidecar task because the [`ValuesReconstructState`]
578 : /// gets dropped in a tracing span with more context.
579 : /// We repeat the sidecar task's `task_id` so we can correlate what we emit here with
580 : /// the logs / panic handler logs from the sidecar task, which also logs the `task_id`.
581 : impl Drop for ValuesReconstructState {
582 1255286 : fn drop(&mut self) {
583 1255286 : let num_active_ios = self
584 1255286 : .num_active_ios
585 1255286 : .load(std::sync::atomic::Ordering::Acquire);
586 1255286 : if num_active_ios == 0 {
587 1255284 : return;
588 2 : }
589 2 : let sidecar_task_id = match &self.io_concurrency {
590 0 : IoConcurrency::Sequential => None,
591 2 : IoConcurrency::SidecarTask { task_id, .. } => Some(*task_id),
592 : };
593 2 : tracing::warn!(
594 : num_active_ios,
595 : ?sidecar_task_id,
596 0 : backtrace=%std::backtrace::Backtrace::force_capture(),
597 0 : "dropping ValuesReconstructState while some IOs have not been completed",
598 : );
599 1255286 : }
600 : }
601 :
602 : impl ValuesReconstructState {
603 1255286 : pub(crate) fn new(io_concurrency: IoConcurrency) -> Self {
604 1255286 : Self {
605 1255286 : keys: HashMap::new(),
606 1255286 : keys_done: KeySpaceRandomAccum::new(),
607 1255286 : keys_with_image_coverage: None,
608 1255286 : layers_visited: 0,
609 1255286 : delta_layers_visited: 0,
610 1255286 : io_concurrency,
611 1255286 : num_active_ios: Arc::new(AtomicUsize::new(0)),
612 1255286 : }
613 1255286 : }
614 :
615 : /// Absolutely read [`IoConcurrency::spawn_io`] to learn about assumptions & pitfalls.
616 1525513 : pub(crate) async fn spawn_io<F>(&mut self, fut: F)
617 1525513 : where
618 1525513 : F: std::future::Future<Output = ()> + Send + 'static,
619 1525513 : {
620 1525513 : self.io_concurrency.spawn_io(fut).await;
621 1525513 : }
622 :
623 1692844 : pub(crate) fn on_layer_visited(&mut self, layer: &ReadableLayer) {
624 1692844 : self.layers_visited += 1;
625 1692844 : if let ReadableLayer::PersistentLayer(layer) = layer {
626 479508 : if layer.layer_desc().is_delta() {
627 434416 : self.delta_layers_visited += 1;
628 434416 : }
629 1213336 : }
630 1692844 : }
631 :
632 456 : pub(crate) fn get_delta_layers_visited(&self) -> u32 {
633 456 : self.delta_layers_visited
634 456 : }
635 :
636 1255228 : pub(crate) fn get_layers_visited(&self) -> u32 {
637 1255228 : self.layers_visited
638 1255228 : }
639 :
640 : /// On hitting image layer, we can mark all keys in this range as done, because
641 : /// if the image layer does not contain a key, it is deleted/never added.
642 45116 : pub(crate) fn on_image_layer_visited(&mut self, key_range: &Range<Key>) {
643 45116 : let prev_val = self.keys_with_image_coverage.replace(key_range.clone());
644 45116 : assert_eq!(
645 : prev_val, None,
646 0 : "should consume the keyspace before the next iteration"
647 : );
648 45116 : }
649 :
650 : /// Update the state collected for a given key.
651 : /// Returns true if this was the last value needed for the key and false otherwise.
652 : ///
653 : /// If the key is done after the update, mark it as such.
654 : ///
655 : /// If the key is in the sparse keyspace (i.e., aux files), we do not track it in
656 : /// `keys_done`.
657 : // TODO: rename this method & update description.
658 1482389 : pub(crate) fn update_key(&mut self, key: &Key, lsn: Lsn, completes: bool) -> OnDiskValueIo {
659 1482389 : let state = self.keys.entry(*key).or_default();
660 1482389 :
661 1482389 : let is_sparse_key = key.is_sparse();
662 :
663 1482389 : let required_io = match state.situation {
664 : ValueReconstructSituation::Complete => {
665 145039 : if is_sparse_key {
666 : // Sparse keyspace might be visited multiple times because
667 : // we don't track unmapped keyspaces.
668 145039 : return OnDiskValueIo::Unnecessary;
669 : } else {
670 0 : unreachable!()
671 : }
672 : }
673 : ValueReconstructSituation::Continue => {
674 1337350 : self.num_active_ios
675 1337350 : .fetch_add(1, std::sync::atomic::Ordering::Release);
676 1337350 : let (tx, rx) = tokio::sync::oneshot::channel();
677 1337350 : state.on_disk_values.push((lsn, OnDiskValueIoWaiter { rx }));
678 1337350 : OnDiskValueIo::Required {
679 1337350 : tx,
680 1337350 : num_active_ios: Arc::clone(&self.num_active_ios),
681 1337350 : }
682 1337350 : }
683 1337350 : };
684 1337350 :
685 1337350 : if completes && state.situation == ValueReconstructSituation::Continue {
686 1336038 : state.situation = ValueReconstructSituation::Complete;
687 1336038 : if !is_sparse_key {
688 1208522 : self.keys_done.add_key(*key);
689 1208522 : }
690 1312 : }
691 :
692 1337350 : required_io
693 1482389 : }
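
// Producer-side sketch (hedged; `read_from_layer` is a hypothetical stand-in
// for the actual layer read): a layer visit calls `update_key` to obtain an
// `OnDiskValueIo` handle and completes it from inside the spawned IO future.
//
//     let io = reconstruct_state.update_key(&key, lsn, completes);
//     reconstruct_state
//         .spawn_io(async move {
//             io.complete(read_from_layer().await); // hypothetical read
//         })
//         .await;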
694 :
695 : /// Returns the key space describing the keys that have
696 : /// been marked as completed since the last call to this function.
697 : /// Returns individual keys done, and the image layer coverage.
698 3399002 : pub(crate) fn consume_done_keys(&mut self) -> (KeySpace, Option<Range<Key>>) {
699 3399002 : (
700 3399002 : self.keys_done.consume_keyspace(),
701 3399002 : self.keys_with_image_coverage.take(),
702 3399002 : )
703 3399002 : }
704 : }
705 :
706 : /// A key that uniquely identifies a layer in a timeline
707 : #[derive(Debug, PartialEq, Eq, Clone, Hash)]
708 : pub(crate) enum LayerId {
709 : PersistentLayerId(PersistentLayerKey),
710 : InMemoryLayerId(InMemoryLayerFileId),
711 : }
712 :
713 : /// Uniquely identify a layer visit by the layer
714 : /// and LSN floor (or start LSN) of the reads.
715 : /// The layer itself is not enough since we may
716 : /// have different LSN lower bounds for delta layer reads.
717 : #[derive(Debug, PartialEq, Eq, Clone, Hash)]
718 : struct LayerToVisitId {
719 : layer_id: LayerId,
720 : lsn_floor: Lsn,
721 : }
722 :
723 : /// Layer wrapper for the read path. Note that it is valid
724 : /// to use these layers even after external operations have
725 : /// been performed on them (compaction, freeze, etc.).
726 : #[derive(Debug)]
727 : pub(crate) enum ReadableLayer {
728 : PersistentLayer(Layer),
729 : InMemoryLayer(Arc<InMemoryLayer>),
730 : }
731 :
732 : /// A partial description of a read to be done.
733 : #[derive(Debug, Clone)]
734 : struct LayerVisit {
735 : /// An id used to resolve the readable layer within the fringe
736 : layer_to_visit_id: LayerToVisitId,
737 : /// Lsn range for the read, used for selecting the next read
738 : lsn_range: Range<Lsn>,
739 : }
740 :
741 : /// Data structure which maintains a fringe of layers for the
742 : /// read path. The fringe is the set of layers which intersects
743 : /// the current keyspace that the search is descending on.
744 : /// Each layer tracks the keyspace that intersects it.
745 : ///
746 : /// The fringe must appear sorted by Lsn. Hence, it uses
747 : /// a two-layer indexing scheme.
748 : #[derive(Debug)]
749 : pub(crate) struct LayerFringe {
750 : planned_visits_by_lsn: BinaryHeap<LayerVisit>,
751 : visit_reads: HashMap<LayerToVisitId, LayerVisitReads>,
752 : }
753 :
754 : #[derive(Debug)]
755 : struct LayerVisitReads {
756 : layer: ReadableLayer,
757 : target_keyspace: KeySpaceRandomAccum,
758 : }
759 :
760 : impl LayerFringe {
761 1706158 : pub(crate) fn new() -> Self {
762 1706158 : LayerFringe {
763 1706158 : planned_visits_by_lsn: BinaryHeap::new(),
764 1706158 : visit_reads: HashMap::new(),
765 1706158 : }
766 1706158 : }
767 :
768 3399002 : pub(crate) fn next_layer(&mut self) -> Option<(ReadableLayer, KeySpace, Range<Lsn>)> {
769 3399002 : let read_desc = self.planned_visits_by_lsn.pop()?;
770 :
771 1692844 : let removed = self.visit_reads.remove_entry(&read_desc.layer_to_visit_id);
772 1692844 :
773 1692844 : match removed {
774 : Some((
775 : _,
776 : LayerVisitReads {
777 1692844 : layer,
778 1692844 : mut target_keyspace,
779 1692844 : },
780 1692844 : )) => Some((
781 1692844 : layer,
782 1692844 : target_keyspace.consume_keyspace(),
783 1692844 : read_desc.lsn_range,
784 1692844 : )),
785 0 : None => unreachable!("fringe internals are always consistent"),
786 : }
787 3399002 : }
788 :
789 1692872 : pub(crate) fn update(
790 1692872 : &mut self,
791 1692872 : layer: ReadableLayer,
792 1692872 : keyspace: KeySpace,
793 1692872 : lsn_range: Range<Lsn>,
794 1692872 : ) {
795 1692872 : let layer_to_visit_id = LayerToVisitId {
796 1692872 : layer_id: layer.id(),
797 1692872 : lsn_floor: lsn_range.start,
798 1692872 : };
799 1692872 :
800 1692872 : let entry = self.visit_reads.entry(layer_to_visit_id.clone());
801 1692872 : match entry {
802 28 : Entry::Occupied(mut entry) => {
803 28 : entry.get_mut().target_keyspace.add_keyspace(keyspace);
804 28 : }
805 1692844 : Entry::Vacant(entry) => {
806 1692844 : self.planned_visits_by_lsn.push(LayerVisit {
807 1692844 : lsn_range,
808 1692844 : layer_to_visit_id: layer_to_visit_id.clone(),
809 1692844 : });
810 1692844 : let mut accum = KeySpaceRandomAccum::new();
811 1692844 : accum.add_keyspace(keyspace);
812 1692844 : entry.insert(LayerVisitReads {
813 1692844 : layer,
814 1692844 : target_keyspace: accum,
815 1692844 : });
816 1692844 : }
817 : }
818 1692872 : }
819 : }
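
// Hedged sketch of the loop that drives the fringe on `Timeline`'s vectored
// read path (names of the surrounding variables are illustrative): layers are
// added via `update` as the search descends and popped in LSN order.
//
//     let mut fringe = LayerFringe::new();
//     // ... seed the fringe from the layer map for the requested keyspace ...
//     while let Some((layer, keyspace, lsn_range)) = fringe.next_layer() {
//         layer
//             .get_values_reconstruct_data(keyspace, lsn_range, &mut reconstruct_state, ctx)
//             .await?;
//         // a visit may uncover deeper layers, which get pushed back via `update`
//     }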
820 :
821 : impl Default for LayerFringe {
822 0 : fn default() -> Self {
823 0 : Self::new()
824 0 : }
825 : }
826 :
827 : impl Ord for LayerVisit {
828 60 : fn cmp(&self, other: &Self) -> Ordering {
829 60 : let ord = self.lsn_range.end.cmp(&other.lsn_range.end);
830 60 : if ord == std::cmp::Ordering::Equal {
831 44 : self.lsn_range.start.cmp(&other.lsn_range.start).reverse()
832 : } else {
833 16 : ord
834 : }
835 60 : }
836 : }
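
// Pop-order illustration: `BinaryHeap` is a max-heap, so the visit with the
// higher `lsn_range.end` pops first; for equal ends, the reversed start
// comparison makes the visit with the *smaller* `lsn_range.start` pop first.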
837 :
838 : impl PartialOrd for LayerVisit {
839 60 : fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
840 60 : Some(self.cmp(other))
841 60 : }
842 : }
843 :
844 : impl PartialEq for LayerVisit {
845 0 : fn eq(&self, other: &Self) -> bool {
846 0 : self.lsn_range == other.lsn_range
847 0 : }
848 : }
849 :
850 : impl Eq for LayerVisit {}
851 :
852 : impl ReadableLayer {
853 1692872 : pub(crate) fn id(&self) -> LayerId {
854 1692872 : match self {
855 479536 : Self::PersistentLayer(layer) => LayerId::PersitentLayerId(layer.layer_desc().key()),
856 1213336 : Self::InMemoryLayer(layer) => LayerId::InMemoryLayerId(layer.file_id()),
857 : }
858 1692872 : }
859 :
860 1692844 : pub(crate) async fn get_values_reconstruct_data(
861 1692844 : &self,
862 1692844 : keyspace: KeySpace,
863 1692844 : lsn_range: Range<Lsn>,
864 1692844 : reconstruct_state: &mut ValuesReconstructState,
865 1692844 : ctx: &RequestContext,
866 1692844 : ) -> Result<(), GetVectoredError> {
867 1692844 : match self {
868 479508 : ReadableLayer::PersistentLayer(layer) => {
869 479508 : layer
870 479508 : .get_values_reconstruct_data(keyspace, lsn_range, reconstruct_state, ctx)
871 479508 : .await
872 : }
873 1213336 : ReadableLayer::InMemoryLayer(layer) => {
874 1213336 : layer
875 1213336 : .get_values_reconstruct_data(keyspace, lsn_range.end, reconstruct_state, ctx)
876 1213336 : .await
877 : }
878 : }
879 1692844 : }
880 : }
881 :
882 : /// Layers contain a hint indicating whether they are likely to be used for reads.
883 : ///
884 : /// This is a hint rather than an authoritative value, so that we do not have to update it synchronously
885 : /// when changing the visibility of layers (for example when creating a branch that makes some previously
886 : /// covered layers visible). It should be used for cache management but not for correctness-critical checks.
887 : #[derive(Debug, Clone, PartialEq, Eq)]
888 : pub enum LayerVisibilityHint {
889 : /// A Visible layer might be read while serving a read, because there is not an image layer between it
890 : /// and a readable LSN (the tip of the branch or a child's branch point)
891 : Visible,
892 : /// A Covered layer probably won't be read right now, but _can_ be read in future if someone creates
893 : /// a branch or ephemeral endpoint at an LSN below the layer that covers this.
894 : Covered,
895 : }
896 :
897 : pub(crate) struct LayerAccessStats(std::sync::atomic::AtomicU64);
898 :
899 0 : #[derive(Clone, Copy, strum_macros::EnumString)]
900 : pub(crate) enum LayerAccessStatsReset {
901 : NoReset,
902 : AllStats,
903 : }
904 :
905 : impl Default for LayerAccessStats {
906 3836 : fn default() -> Self {
907 3836 : // Default value is to assume resident since creation time, and visible.
908 3836 : let (_mask, mut value) = Self::to_low_res_timestamp(Self::RTIME_SHIFT, SystemTime::now());
909 3836 : value |= 0x1 << Self::VISIBILITY_SHIFT;
910 3836 :
911 3836 : Self(std::sync::atomic::AtomicU64::new(value))
912 3836 : }
913 : }
914 :
915 : // Efficient store of two truncated timestamps and some flag bits. Used for storing the last
916 : // access time and the last residence change time.
917 : impl LayerAccessStats {
918 : // How many high bits to drop from a u32 timestamp?
919 : // - Only storing up to a u32 timestamp will work fine until 2038 (if this code is still in use
920 : // after that, this software has been very successful!)
921 : // - Dropping the top bit is implicitly safe because unix timestamps are meant to be
922 : // stored in an i32, so they never used it.
923 : // - Dropping the next two bits is safe because this code is only running on systems in
924 : // years >= 2024, and these bits have been 1 since 2021
925 : //
926 : // Therefore we may store only 29 bits for a timestamp with one second resolution. We do
927 : // this truncation to make space for some flags in the high bits of our u64.
928 : const TS_DROP_HIGH_BITS: u32 = u32::count_ones(Self::TS_ONES) + 1;
929 : const TS_MASK: u32 = 0x1f_ff_ff_ff;
930 : const TS_ONES: u32 = 0x60_00_00_00;
931 :
932 : const ATIME_SHIFT: u32 = 0;
933 : const RTIME_SHIFT: u32 = 32 - Self::TS_DROP_HIGH_BITS;
934 : const VISIBILITY_SHIFT: u32 = 64 - 2 * Self::TS_DROP_HIGH_BITS;
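
// Resulting u64 layout, derived from the constants above
// (TS_DROP_HIGH_BITS = 3, so each truncated timestamp occupies 29 bits):
//
//   bits  0..=28 : access time (ATIME_SHIFT = 0)
//   bits 29..=57 : residence change time (RTIME_SHIFT = 29)
//   bit  58      : visibility hint (VISIBILITY_SHIFT = 58; 1 = Visible)
//   bits 59..=63 : unused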
935 :
936 479748 : fn write_bits(&self, mask: u64, value: u64) -> u64 {
937 479748 : self.0
938 479748 : .fetch_update(
939 479748 : // TODO: decide what orderings are correct
940 479748 : std::sync::atomic::Ordering::Relaxed,
941 479748 : std::sync::atomic::Ordering::Relaxed,
942 479748 : |v| Some((v & !mask) | (value & mask)),
943 479748 : )
944 479748 : .expect("Inner function is infallible")
945 479748 : }
946 :
947 482864 : fn to_low_res_timestamp(shift: u32, time: SystemTime) -> (u64, u64) {
948 482864 : // Keep only the low 29 bits of the timestamp (one-second resolution); the dropped high bits are restored on read via TS_ONES
949 482864 : let timestamp = time.duration_since(UNIX_EPOCH).unwrap().as_secs() & (Self::TS_MASK as u64);
950 482864 :
951 482864 : ((Self::TS_MASK as u64) << shift, timestamp << shift)
952 482864 : }
953 :
954 124 : fn read_low_res_timestamp(&self, shift: u32) -> Option<SystemTime> {
955 124 : let read = self.0.load(std::sync::atomic::Ordering::Relaxed);
956 124 :
957 124 : let ts_bits = (read & ((Self::TS_MASK as u64) << shift)) >> shift;
958 124 : if ts_bits == 0 {
959 48 : None
960 : } else {
961 76 : Some(UNIX_EPOCH + Duration::from_secs(ts_bits | (Self::TS_ONES as u64)))
962 : }
963 124 : }
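
// Round-trip illustration (derived from the two functions above): a write
// keeps only the low 29 bits of the seconds-since-epoch value, and a read
// restores the two known-set high bits via `TS_ONES`, so any timestamp
// between 2021 and 2038 survives the round trip at one-second granularity.
//
//     let (mask, value) = Self::to_low_res_timestamp(Self::ATIME_SHIFT, SystemTime::now());
//     stats.write_bits(mask, value);
//     // stats.read_low_res_timestamp(Self::ATIME_SHIFT) == Some(now, truncated to seconds)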
964 :
965 : /// Record a change in layer residency.
966 : ///
967 : /// Recording the event must happen while holding the layer map lock to
968 : /// ensure that latest-activity-threshold-based layer eviction (eviction_task.rs)
969 : /// can do an "imitate access" to this layer, before it observes `now-latest_activity() > threshold`.
970 : ///
971 : /// If we instead recorded the residence event with a timestamp from before grabbing the layer map lock,
972 : /// the following race could happen:
973 : ///
974 : /// - Compact: Write out an L1 layer from several L0 layers. This records residence event LayerCreate with the current timestamp.
975 : /// - Eviction: imitate access logical size calculation. This accesses the L0 layers because the L1 layer is not yet in the layer map.
976 : /// - Compact: Grab layer map lock, add the new L1 to layer map and remove the L0s, release layer map lock.
977 : /// - Eviction: observes the new L1 layer whose only activity timestamp is the LayerCreate event.
978 52 : pub(crate) fn record_residence_event_at(&self, now: SystemTime) {
979 52 : let (mask, value) = Self::to_low_res_timestamp(Self::RTIME_SHIFT, now);
980 52 : self.write_bits(mask, value);
981 52 : }
982 :
983 48 : pub(crate) fn record_residence_event(&self) {
984 48 : self.record_residence_event_at(SystemTime::now())
985 48 : }
986 :
987 478976 : fn record_access_at(&self, now: SystemTime) -> bool {
988 478976 : let (mut mask, mut value) = Self::to_low_res_timestamp(Self::ATIME_SHIFT, now);
989 478976 :
990 478976 : // A layer which is accessed must be visible.
991 478976 : mask |= 0x1 << Self::VISIBILITY_SHIFT;
992 478976 : value |= 0x1 << Self::VISIBILITY_SHIFT;
993 478976 :
994 478976 : let old_bits = self.write_bits(mask, value);
995 4 : !matches!(
996 478976 : self.decode_visibility(old_bits),
997 : LayerVisibilityHint::Visible
998 : )
999 478976 : }
1000 :
1001 : /// Returns true if we modified the layer's visibility to set it to Visible implicitly
1002 : /// as a result of this access
1003 479532 : pub(crate) fn record_access(&self, ctx: &RequestContext) -> bool {
1004 479532 : if ctx.access_stats_behavior() == AccessStatsBehavior::Skip {
1005 568 : return false;
1006 478964 : }
1007 478964 :
1008 478964 : self.record_access_at(SystemTime::now())
1009 479532 : }
1010 :
1011 0 : fn as_api_model(
1012 0 : &self,
1013 0 : reset: LayerAccessStatsReset,
1014 0 : ) -> pageserver_api::models::LayerAccessStats {
1015 0 : let ret = pageserver_api::models::LayerAccessStats {
1016 0 : access_time: self
1017 0 : .read_low_res_timestamp(Self::ATIME_SHIFT)
1018 0 : .unwrap_or(UNIX_EPOCH),
1019 0 : residence_time: self
1020 0 : .read_low_res_timestamp(Self::RTIME_SHIFT)
1021 0 : .unwrap_or(UNIX_EPOCH),
1022 0 : visible: matches!(self.visibility(), LayerVisibilityHint::Visible),
1023 : };
1024 0 : match reset {
1025 0 : LayerAccessStatsReset::NoReset => {}
1026 0 : LayerAccessStatsReset::AllStats => {
1027 0 : self.write_bits((Self::TS_MASK as u64) << Self::ATIME_SHIFT, 0x0);
1028 0 : self.write_bits((Self::TS_MASK as u64) << Self::RTIME_SHIFT, 0x0);
1029 0 : }
1030 : }
1031 0 : ret
1032 0 : }
1033 :
1034 : /// Get the latest access timestamp, falling back to latest residence event. The latest residence event
1035 : /// will be this Layer's construction time, if its residence hasn't changed since then.
1036 32 : pub(crate) fn latest_activity(&self) -> SystemTime {
1037 32 : if let Some(t) = self.read_low_res_timestamp(Self::ATIME_SHIFT) {
1038 12 : t
1039 : } else {
1040 20 : self.read_low_res_timestamp(Self::RTIME_SHIFT)
1041 20 : .expect("Residence time is set on construction")
1042 : }
1043 32 : }
1044 :
1045 : /// Whether this layer has been accessed (excluding in [`AccessStatsBehavior::Skip`]).
1046 : ///
1047 : /// This indicates whether the layer has been used for some purpose that would motivate
1048 : /// us to keep it on disk, such as for serving a getpage request.
1049 36 : fn accessed(&self) -> bool {
1050 36 : // Consider it accessed if the most recent access is more recent than
1051 36 : // the most recent change in residence status.
1052 36 : match (
1053 36 : self.read_low_res_timestamp(Self::ATIME_SHIFT),
1054 36 : self.read_low_res_timestamp(Self::RTIME_SHIFT),
1055 : ) {
1056 28 : (None, _) => false,
1057 0 : (Some(_), None) => true,
1058 8 : (Some(a), Some(r)) => a >= r,
1059 : }
1060 36 : }
1061 :
1062 : /// Helper for extracting the visibility hint from the literal value of our inner u64
1063 481119 : fn decode_visibility(&self, bits: u64) -> LayerVisibilityHint {
1064 481119 : match (bits >> Self::VISIBILITY_SHIFT) & 0x1 {
1065 481075 : 1 => LayerVisibilityHint::Visible,
1066 44 : 0 => LayerVisibilityHint::Covered,
1067 0 : _ => unreachable!(),
1068 : }
1069 481119 : }
1070 :
1071 : /// Returns the old value which has been replaced
1072 720 : pub(crate) fn set_visibility(&self, visibility: LayerVisibilityHint) -> LayerVisibilityHint {
1073 720 : let value = match visibility {
1074 616 : LayerVisibilityHint::Visible => 0x1 << Self::VISIBILITY_SHIFT,
1075 104 : LayerVisibilityHint::Covered => 0x0,
1076 : };
1077 :
1078 720 : let old_bits = self.write_bits(0x1 << Self::VISIBILITY_SHIFT, value);
1079 720 : self.decode_visibility(old_bits)
1080 720 : }
1081 :
1082 1423 : pub(crate) fn visibility(&self) -> LayerVisibilityHint {
1083 1423 : let read = self.0.load(std::sync::atomic::Ordering::Relaxed);
1084 1423 : self.decode_visibility(read)
1085 1423 : }
1086 : }
1087 :
1088 : /// Get a layer descriptor from a layer.
1089 : pub(crate) trait AsLayerDesc {
1090 : /// Get the layer descriptor.
1091 : fn layer_desc(&self) -> &PersistentLayerDesc;
1092 : }
1093 :
1094 : pub mod tests {
1095 : use pageserver_api::shard::TenantShardId;
1096 : use utils::id::TimelineId;
1097 :
1098 : use super::*;
1099 :
1100 : impl From<DeltaLayerName> for PersistentLayerDesc {
1101 0 : fn from(value: DeltaLayerName) -> Self {
1102 0 : PersistentLayerDesc::new_delta(
1103 0 : TenantShardId::from([0; 18]),
1104 0 : TimelineId::from_array([0; 16]),
1105 0 : value.key_range,
1106 0 : value.lsn_range,
1107 0 : 233,
1108 0 : )
1109 0 : }
1110 : }
1111 :
1112 : impl From<ImageLayerName> for PersistentLayerDesc {
1113 0 : fn from(value: ImageLayerName) -> Self {
1114 0 : PersistentLayerDesc::new_img(
1115 0 : TenantShardId::from([0; 18]),
1116 0 : TimelineId::from_array([0; 16]),
1117 0 : value.key_range,
1118 0 : value.lsn,
1119 0 : 233,
1120 0 : )
1121 0 : }
1122 : }
1123 :
1124 : impl From<LayerName> for PersistentLayerDesc {
1125 0 : fn from(value: LayerName) -> Self {
1126 0 : match value {
1127 0 : LayerName::Delta(d) => Self::from(d),
1128 0 : LayerName::Image(i) => Self::from(i),
1129 : }
1130 0 : }
1131 : }
1132 : }
1133 :
1134 : /// Range-wrapping newtype, which uses `Display` to render `Debug`.
1135 : ///
1136 : /// Useful with `Key`, whose `{:?}` output is too verbose for printing multiple layers.
1137 : struct RangeDisplayDebug<'a, T: std::fmt::Display>(&'a Range<T>);
1138 :
1139 : impl<T: std::fmt::Display> std::fmt::Debug for RangeDisplayDebug<'_, T> {
1140 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
1141 0 : write!(f, "{}..{}", self.0.start, self.0.end)
1142 0 : }
1143 : }
1144 :
1145 : #[cfg(test)]
1146 : mod tests2 {
1147 : use pageserver_api::key::DBDIR_KEY;
1148 : use tracing::info;
1149 :
1150 : use super::*;
1151 : use crate::tenant::storage_layer::IoConcurrency;
1152 :
1153 : /// TODO: currently this test relies on manual visual inspection of the --no-capture output.
1154 : /// Should look like so:
1155 : /// ```text
1156 : /// RUST_LOG=trace cargo nextest run --features testing --no-capture test_io_concurrency_noise
1157 : /// running 1 test
1158 : /// 2025-01-21T17:42:01.335679Z INFO get_vectored_concurrent_io test selected=SidecarTask
1159 : /// 2025-01-21T17:42:01.335680Z TRACE spawning sidecar task task_id=0
1160 : /// 2025-01-21T17:42:01.335937Z TRACE IoConcurrency_sidecar{task_id=0}: start
1161 : /// 2025-01-21T17:42:01.335972Z TRACE IoConcurrency_sidecar{task_id=0}: received new io future
1162 : /// 2025-01-21T17:42:01.335999Z INFO IoConcurrency_sidecar{task_id=0}: waiting for signal to complete IO
1163 : /// 2025-01-21T17:42:01.336229Z WARN dropping ValuesReconstructState while some IOs have not been completed num_active_ios=1 sidecar_task_id=Some(0) backtrace= 0: <pageserver::tenant::storage_layer::ValuesReconstructState as core::ops::drop::Drop>::drop
1164 : /// at ./src/tenant/storage_layer.rs:553:24
1165 : /// 1: core::ptr::drop_in_place<pageserver::tenant::storage_layer::ValuesReconstructState>
1166 : /// at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/ptr/mod.rs:521:1
1167 : /// 2: core::mem::drop
1168 : /// at /home/christian/.rustup/toolchains/1.84.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/mem/mod.rs:942:24
1169 : /// 3: pageserver::tenant::storage_layer::tests2::test_io_concurrency_noise::{{closure}}
1170 : /// at ./src/tenant/storage_layer.rs:1159:9
1171 : /// ...
1172 : /// 49: <unknown>
1173 : /// 2025-01-21T17:42:01.452293Z INFO IoConcurrency_sidecar{task_id=0}: completing IO
1174 : /// 2025-01-21T17:42:01.452357Z TRACE IoConcurrency_sidecar{task_id=0}: io future completed
1175 : /// 2025-01-21T17:42:01.452473Z TRACE IoConcurrency_sidecar{task_id=0}: end
1176 : /// test tenant::storage_layer::tests2::test_io_concurrency_noise ... ok
1177 : ///
1178 : /// ```
1179 : #[tokio::test]
1180 4 : async fn test_io_concurrency_noise() {
1181 4 : crate::tenant::harness::setup_logging();
1182 4 :
1183 4 : let io_concurrency = IoConcurrency::spawn_for_test();
1184 4 : match *io_concurrency {
1185 4 : IoConcurrency::Sequential => {
1186 4 : // This test asserts behavior in sidecar mode, doesn't make sense in sequential mode.
1187 4 : return;
1188 4 : }
1189 4 : IoConcurrency::SidecarTask { .. } => {}
1190 2 : }
1191 2 : let mut reconstruct_state = ValuesReconstructState::new(io_concurrency.clone());
1192 2 :
1193 2 : let (io_fut_is_waiting_tx, io_fut_is_waiting) = tokio::sync::oneshot::channel();
1194 2 : let (do_complete_io, should_complete_io) = tokio::sync::oneshot::channel();
1195 2 : let (io_fut_exiting_tx, io_fut_exiting) = tokio::sync::oneshot::channel();
1196 2 :
1197 2 : let io = reconstruct_state.update_key(&DBDIR_KEY, Lsn(8), true);
1198 2 : reconstruct_state
1199 2 : .spawn_io(async move {
1200 2 : info!("waiting for signal to complete IO");
1201 4 : io_fut_is_waiting_tx.send(()).unwrap();
1202 2 : should_complete_io.await.unwrap();
1203 2 : info!("completing IO");
1204 4 : io.complete(Ok(OnDiskValue::RawImage(Bytes::new())));
1205 2 : io_fut_exiting_tx.send(()).unwrap();
1206 2 : })
1207 2 : .await;
1208 4 :
1209 4 : io_fut_is_waiting.await.unwrap();
1210 2 :
1211 2 : // this is what makes the noise
1212 2 : drop(reconstruct_state);
1213 2 :
1214 2 : do_complete_io.send(()).unwrap();
1215 2 :
1216 2 : io_fut_exiting.await.unwrap();
1217 4 : }
1218 : }