Line data Source code
1 : //! An in-memory layer stores recently received key-value pairs.
2 : //!
3 : //! The "in-memory" part of the name is a bit misleading: the actual page versions are
4 : //! held in an ephemeral file, not in memory. The metadata for each page version, i.e.
5 : //! its position in the file, is kept in memory, though.
6 : //!
7 : use std::cmp::Ordering;
8 : use std::collections::{BTreeMap, HashMap};
9 : use std::fmt::Write;
10 : use std::ops::Range;
11 : use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering as AtomicOrdering};
12 : use std::sync::{Arc, OnceLock};
13 : use std::time::Instant;
14 :
15 : use anyhow::Result;
16 : use camino::Utf8PathBuf;
17 : use pageserver_api::key::{CompactKey, Key};
18 : use pageserver_api::keyspace::KeySpace;
19 : use pageserver_api::models::InMemoryLayerInfo;
20 : use pageserver_api::shard::TenantShardId;
21 : use tokio::sync::RwLock;
22 : use tracing::*;
23 : use utils::id::TimelineId;
24 : use utils::lsn::Lsn;
25 : use utils::vec_map::VecMap;
26 : use wal_decoder::serialized_batch::{SerializedValueBatch, SerializedValueMeta, ValueMeta};
27 :
28 : use super::{DeltaLayerWriter, PersistentLayerDesc, ValuesReconstructState};
29 : use crate::assert_u64_eq_usize::{U64IsUsize, UsizeIsU64, u64_to_usize};
30 : use crate::config::PageServerConf;
31 : use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
32 : // avoid binding to Write (conflicts with std::io::Write)
33 : // while being able to use std::fmt::Write's methods
34 : use crate::metrics::TIMELINE_EPHEMERAL_BYTES;
35 : use crate::tenant::ephemeral_file::EphemeralFile;
36 : use crate::tenant::storage_layer::{OnDiskValue, OnDiskValueIo};
37 : use crate::tenant::timeline::GetVectoredError;
38 : use crate::virtual_file::owned_buffers_io::io_buf_ext::IoBufExt;
39 : use crate::{l0_flush, page_cache};
40 :
41 : pub(crate) mod vectored_dio_read;
42 :
43 : #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
44 : pub(crate) struct InMemoryLayerFileId(page_cache::FileId);
45 :
46 : pub struct InMemoryLayer {
47 : conf: &'static PageServerConf,
48 : tenant_shard_id: TenantShardId,
49 : timeline_id: TimelineId,
50 : file_id: InMemoryLayerFileId,
51 :
52 : /// This layer contains all the changes from 'start_lsn'. The
53 : /// start is inclusive.
54 : start_lsn: Lsn,
55 :
56 : /// Frozen layers have an exclusive end LSN.
57 : /// Writes are only allowed when this is `None`.
58 : pub(crate) end_lsn: OnceLock<Lsn>,
59 :
60 : /// Used for the traversal path. Cached string representation of the in-memory layer, set once it is frozen.
61 : frozen_local_path_str: OnceLock<Arc<str>>,
62 :
63 : opened_at: Instant,
64 :
65 : /// The above fields never change, except for `end_lsn` and `frozen_local_path_str`, which are each set only once.
66 : /// All other changing parts are in `inner`, protected by a read-write lock.
67 : inner: RwLock<InMemoryLayerInner>,
68 :
69 : estimated_in_mem_size: AtomicU64,
70 : }
71 :
72 : impl std::fmt::Debug for InMemoryLayer {
73 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
74 0 : f.debug_struct("InMemoryLayer")
75 0 : .field("start_lsn", &self.start_lsn)
76 0 : .field("end_lsn", &self.end_lsn)
77 0 : .field("inner", &self.inner)
78 0 : .finish()
79 0 : }
80 : }
81 :
82 : pub struct InMemoryLayerInner {
83 : /// All versions of all pages in the layer are kept here. Indexed
84 : /// by key and LSN. The [`IndexEntry`] is an offset into the
85 : /// ephemeral file where the page version is stored.
86 : index: BTreeMap<CompactKey, VecMap<Lsn, IndexEntry>>,
87 :
88 : /// The values are stored in a serialized format in this file.
89 : /// Each serialized Value is preceded by a 'u32' length field.
90 : /// The [`Self::index`] map stores offsets into this file.
91 : file: EphemeralFile,
92 :
93 : resource_units: GlobalResourceUnits,
94 : }
95 :
96 : /// Support the same max blob length as blob_io, because ultimately
97 : /// all the InMemoryLayer contents end up being written into a delta layer,
98 : /// using the [`crate::tenant::blob_io`] module.
99 : const MAX_SUPPORTED_BLOB_LEN: usize = crate::tenant::blob_io::MAX_SUPPORTED_BLOB_LEN;
100 : const MAX_SUPPORTED_BLOB_LEN_BITS: usize = {
101 : let trailing_ones = MAX_SUPPORTED_BLOB_LEN.trailing_ones() as usize;
102 : let leading_zeroes = MAX_SUPPORTED_BLOB_LEN.leading_zeros() as usize;
103 : assert!(trailing_ones + leading_zeroes == std::mem::size_of::<usize>() * 8);
104 : trailing_ones
105 : };
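// Editor's note: the assert above only holds because MAX_SUPPORTED_BLOB_LEN has the
// form 2^n - 1 (a contiguous run of low one-bits), in which case `trailing_ones` is
// exactly the number of bits needed to represent any length up to the maximum.
// A minimal illustration with a hypothetical 2^28 - 1 limit (not the actual constant):
//
//     const EXAMPLE_MAX_LEN: usize = (1 << 28) - 1;
//     const EXAMPLE_LEN_BITS: usize = EXAMPLE_MAX_LEN.trailing_ones() as usize; // 28
//     // On a 64-bit target: trailing_ones (28) + leading_zeros (36) == 64, so the assert passes.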
106 :
107 : /// See [`InMemoryLayerInner::index`].
108 : ///
109 : /// For memory efficiency, the data is packed into a u64.
110 : ///
111 : /// Layout:
112 : /// - 1 bit: `will_init`
113 : /// - [`MAX_SUPPORTED_BLOB_LEN_BITS`][]: `len`
114 : /// - [`MAX_SUPPORTED_POS_BITS`](IndexEntry::MAX_SUPPORTED_POS_BITS): `pos`
115 : #[derive(Debug, Clone, Copy, PartialEq, Eq)]
116 : pub struct IndexEntry(u64);
117 :
118 : impl IndexEntry {
119 : /// See [`Self::MAX_SUPPORTED_POS`].
120 : const MAX_SUPPORTED_POS_BITS: usize = {
121 : let remainder = 64 - 1 - MAX_SUPPORTED_BLOB_LEN_BITS;
122 : if remainder < 32 {
123 : panic!("pos can be u32 as per type system, support that");
124 : }
125 : remainder
126 : };
127 : /// The maximum supported blob offset that can be represented by [`Self`].
128 : /// See also [`Self::validate_checkpoint_distance`].
129 : const MAX_SUPPORTED_POS: usize = (1 << Self::MAX_SUPPORTED_POS_BITS) - 1;
130 :
131 : // Layout
132 : const WILL_INIT_RANGE: Range<usize> = 0..1;
133 : const LEN_RANGE: Range<usize> =
134 : Self::WILL_INIT_RANGE.end..Self::WILL_INIT_RANGE.end + MAX_SUPPORTED_BLOB_LEN_BITS;
135 : const POS_RANGE: Range<usize> =
136 : Self::LEN_RANGE.end..Self::LEN_RANGE.end + Self::MAX_SUPPORTED_POS_BITS;
137 : const _ASSERT: () = {
138 : if Self::POS_RANGE.end != 64 {
139 : panic!("we don't want undefined bits for our own sanity")
140 : }
141 : };
142 :
143 : /// Fails if and only if the offset or length encoded in `arg` is too large to be represented by [`Self`].
144 : ///
145 : /// The only reason why that can happen in the system is if the [`InMemoryLayer`] grows too large.
146 : /// The [`InMemoryLayer`] size is determined by the checkpoint distance, enforced by [`crate::tenant::Timeline::should_roll`].
147 : ///
148 : /// Thus, to avoid failure of this function, whenever we start up and/or change checkpoint distance,
149 : /// call [`Self::validate_checkpoint_distance`] with the new checkpoint distance value.
150 : ///
151 : /// TODO: ideally this check should happen at config parsing time (and in the request handler when a change to the checkpoint distance is requested).
152 : /// When cleaning this up, also look into the S3 max file size check that is performed in the delta layer writer.
153 : #[inline(always)]
154 10181596 : fn new(arg: IndexEntryNewArgs) -> anyhow::Result<Self> {
155 10181596 : let IndexEntryNewArgs {
156 10181596 : base_offset,
157 10181596 : batch_offset,
158 10181596 : len,
159 10181596 : will_init,
160 10181596 : } = arg;
161 :
162 10181596 : let pos = base_offset
163 10181596 : .checked_add(batch_offset)
164 10181596 : .ok_or_else(|| anyhow::anyhow!("base_offset + batch_offset overflows u64: base_offset={base_offset} batch_offset={batch_offset}"))?;
165 :
166 10181596 : if pos.into_usize() > Self::MAX_SUPPORTED_POS {
167 16 : anyhow::bail!(
168 16 : "base_offset+batch_offset exceeds the maximum supported value: base_offset={base_offset} batch_offset={batch_offset} (+)={pos} max={max}",
169 16 : max = Self::MAX_SUPPORTED_POS
170 16 : );
171 10181580 : }
172 10181580 :
173 10181580 : if len > MAX_SUPPORTED_BLOB_LEN {
174 4 : anyhow::bail!(
175 4 : "len exceeds the maximum supported length: len={len} max={MAX_SUPPORTED_BLOB_LEN}",
176 4 : );
177 10181576 : }
178 10181576 :
179 10181576 : let mut data: u64 = 0;
180 : use bit_field::BitField;
181 10181576 : data.set_bits(Self::WILL_INIT_RANGE, if will_init { 1 } else { 0 });
182 10181576 : data.set_bits(Self::LEN_RANGE, len.into_u64());
183 10181576 : data.set_bits(Self::POS_RANGE, pos);
184 10181576 :
185 10181576 : Ok(Self(data))
186 10181596 : }
187 :
188 : #[inline(always)]
189 9770106 : fn unpack(&self) -> IndexEntryUnpacked {
190 : use bit_field::BitField;
191 9770106 : IndexEntryUnpacked {
192 9770106 : will_init: self.0.get_bits(Self::WILL_INIT_RANGE) != 0,
193 9770106 : len: self.0.get_bits(Self::LEN_RANGE),
194 9770106 : pos: self.0.get_bits(Self::POS_RANGE),
195 9770106 : }
196 9770106 : }
197 :
198 : /// See [`Self::new`].
199 488 : pub(crate) const fn validate_checkpoint_distance(
200 488 : checkpoint_distance: u64,
201 488 : ) -> Result<(), &'static str> {
202 488 : if checkpoint_distance > Self::MAX_SUPPORTED_POS as u64 {
203 0 : return Err("exceeds the maximum supported value");
204 488 : }
205 488 : let res = u64_to_usize(checkpoint_distance).checked_add(MAX_SUPPORTED_BLOB_LEN);
206 488 : if res.is_none() {
207 0 : return Err(
208 0 : "checkpoint distance + max supported blob len overflows in-memory addition",
209 0 : );
210 488 : }
211 488 :
212 488 : // NB: it is ok for the result of the addition to be larger than MAX_SUPPORTED_POS
213 488 :
214 488 : Ok(())
215 488 : }
216 :
217 : const _ASSERT_DEFAULT_CHECKPOINT_DISTANCE_IS_VALID: () = {
218 : let res = Self::validate_checkpoint_distance(
219 : pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE,
220 : );
221 : if res.is_err() {
222 : panic!("default checkpoint distance is valid")
223 : }
224 : };
225 : }
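// Editor's note: a minimal sketch of how a caller could act on the TODO in
// [`IndexEntry::new`] and validate a proposed checkpoint distance up front.
// `check_new_checkpoint_distance` is a hypothetical helper, not part of this file.
//
//     fn check_new_checkpoint_distance(new_distance: u64) -> anyhow::Result<()> {
//         IndexEntry::validate_checkpoint_distance(new_distance)
//             .map_err(|msg| anyhow::anyhow!("invalid checkpoint_distance {new_distance}: {msg}"))
//     }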
226 :
227 : /// Args to [`IndexEntry::new`].
228 : #[derive(Clone, Copy)]
229 : struct IndexEntryNewArgs {
230 : base_offset: u64,
231 : batch_offset: u64,
232 : len: usize,
233 : will_init: bool,
234 : }
235 :
236 : /// Unpacked representation of the bitfielded [`IndexEntry`].
237 : #[derive(Clone, Copy, PartialEq, Eq, Debug)]
238 : struct IndexEntryUnpacked {
239 : will_init: bool,
240 : len: u64,
241 : pos: u64,
242 : }
243 :
244 : impl std::fmt::Debug for InMemoryLayerInner {
245 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
246 0 : f.debug_struct("InMemoryLayerInner").finish()
247 0 : }
248 : }
249 :
250 : /// State shared by all in-memory (ephemeral) layers. Updated infrequently during background ticks in Timeline,
251 : /// to minimize contention.
252 : ///
253 : /// This global state is used to implement behaviors that require a global view of the system, e.g.
254 : /// rolling layers proactively to limit the total amount of dirty data.
255 : pub(crate) struct GlobalResources {
256 : // Limit on how high dirty_bytes may grow before we start freezing layers to reduce it.
257 : // Zero means unlimited.
258 : pub(crate) max_dirty_bytes: AtomicU64,
259 : // How many bytes are in all EphemeralFile objects
260 : dirty_bytes: AtomicU64,
261 : // How many layers are contributing to dirty_bytes
262 : dirty_layers: AtomicUsize,
263 : }
264 :
265 : // Per-timeline RAII struct for its contribution to [`GlobalResources`]
266 : struct GlobalResourceUnits {
267 : // How many dirty bytes have I added to the global dirty_bytes: this guard object is responsible
268 : // for decrementing the global counter by this many bytes when dropped.
269 : dirty_bytes: u64,
270 : }
271 :
272 : impl GlobalResourceUnits {
273 : // Hint for the layer append path to update us when the layer size differs from the last
274 : // call to publish_size by this much. If we don't reach this threshold, we'll still get
275 : // updated when the Timeline "ticks" in the background.
276 : const MAX_SIZE_DRIFT: u64 = 10 * 1024 * 1024;
277 :
278 2616 : fn new() -> Self {
279 2616 : GLOBAL_RESOURCES
280 2616 : .dirty_layers
281 2616 : .fetch_add(1, AtomicOrdering::Relaxed);
282 2616 : Self { dirty_bytes: 0 }
283 2616 : }
284 :
285 : /// Do not call this frequently: all timelines will write to these same global atomics,
286 : /// so this is a relatively expensive operation. Wait at least a few seconds between calls.
287 : ///
288 : /// Returns the effective layer size limit that should be applied, if any, to keep
289 : /// the total number of dirty bytes below the configured maximum.
290 2380 : fn publish_size(&mut self, size: u64) -> Option<u64> {
291 2380 : let new_global_dirty_bytes = match size.cmp(&self.dirty_bytes) {
292 2360 : Ordering::Equal => GLOBAL_RESOURCES.dirty_bytes.load(AtomicOrdering::Relaxed),
293 : Ordering::Greater => {
294 16 : let delta = size - self.dirty_bytes;
295 16 : let old = GLOBAL_RESOURCES
296 16 : .dirty_bytes
297 16 : .fetch_add(delta, AtomicOrdering::Relaxed);
298 16 : old + delta
299 : }
300 : Ordering::Less => {
301 4 : let delta = self.dirty_bytes - size;
302 4 : let old = GLOBAL_RESOURCES
303 4 : .dirty_bytes
304 4 : .fetch_sub(delta, AtomicOrdering::Relaxed);
305 4 : old - delta
306 : }
307 : };
308 :
309 : // This is a sloppy update: concurrent updates to the counter will race, and the exact
310 : // value of the metric might not be the exact latest value of GLOBAL_RESOURCES::dirty_bytes.
311 : // That's okay: as long as the metric contains some recent value, it doesn't have to always
312 : // be literally the last update.
313 2380 : TIMELINE_EPHEMERAL_BYTES.set(new_global_dirty_bytes);
314 2380 :
315 2380 : self.dirty_bytes = size;
316 2380 :
317 2380 : let max_dirty_bytes = GLOBAL_RESOURCES
318 2380 : .max_dirty_bytes
319 2380 : .load(AtomicOrdering::Relaxed);
320 2380 : if max_dirty_bytes > 0 && new_global_dirty_bytes > max_dirty_bytes {
321 : // Set the layer file limit to the average layer size: this implies that all above-average
322 : // sized layers will be eligible for freezing. They will be frozen in the order they
323 : // next enter publish_size.
324 0 : Some(
325 0 : new_global_dirty_bytes
326 0 : / GLOBAL_RESOURCES.dirty_layers.load(AtomicOrdering::Relaxed) as u64,
327 0 : )
328 : } else {
329 2380 : None
330 : }
331 2380 : }
332 :
333 : // Call publish_size if the input size differs from last published size by more than
334 : // the drift limit
335 9608480 : fn maybe_publish_size(&mut self, size: u64) {
336 9608480 : let publish = match size.cmp(&self.dirty_bytes) {
337 0 : Ordering::Equal => false,
338 9608480 : Ordering::Greater => size - self.dirty_bytes > Self::MAX_SIZE_DRIFT,
339 0 : Ordering::Less => self.dirty_bytes - size > Self::MAX_SIZE_DRIFT,
340 : };
341 :
342 9608480 : if publish {
343 16 : self.publish_size(size);
344 9608464 : }
345 9608480 : }
346 : }
347 :
348 : impl Drop for GlobalResourceUnits {
349 2364 : fn drop(&mut self) {
350 2364 : GLOBAL_RESOURCES
351 2364 : .dirty_layers
352 2364 : .fetch_sub(1, AtomicOrdering::Relaxed);
353 2364 :
354 2364 : // Subtract our contribution to the global total dirty bytes
355 2364 : self.publish_size(0);
356 2364 : }
357 : }
358 :
359 : pub(crate) static GLOBAL_RESOURCES: GlobalResources = GlobalResources {
360 : max_dirty_bytes: AtomicU64::new(0),
361 : dirty_bytes: AtomicU64::new(0),
362 : dirty_layers: AtomicUsize::new(0),
363 : };
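// Editor's note: an illustrative sketch (assumed caller, not part of this file) of how the
// `Option<u64>` returned by `publish_size` / `tick` is meant to be consumed: treat it as a
// per-layer size cap and consider freezing any open layer whose ephemeral file exceeds it.
// The actual rolling decision lives in [`crate::tenant::Timeline::should_roll`]; this only
// shows the intended interpretation of the returned value.
//
//     async fn over_global_cap(layer: &InMemoryLayer) -> bool {
//         match layer.tick().await {
//             Some(cap) => layer.try_len().map_or(false, |len| len > cap),
//             None => false,
//         }
//     }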
364 :
365 : impl InMemoryLayer {
366 1214349 : pub(crate) fn file_id(&self) -> InMemoryLayerFileId {
367 1214349 : self.file_id
368 1214349 : }
369 :
370 2364 : pub(crate) fn get_timeline_id(&self) -> TimelineId {
371 2364 : self.timeline_id
372 2364 : }
373 :
374 4720 : pub(crate) fn info(&self) -> InMemoryLayerInfo {
375 4720 : let lsn_start = self.start_lsn;
376 :
377 4720 : if let Some(&lsn_end) = self.end_lsn.get() {
378 4716 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end }
379 : } else {
380 4 : InMemoryLayerInfo::Open { lsn_start }
381 : }
382 4720 : }
383 :
384 4712 : pub(crate) fn try_len(&self) -> Option<u64> {
385 4712 : self.inner.try_read().map(|i| i.file.len()).ok()
386 4712 : }
387 :
388 9608480 : pub(crate) fn assert_writable(&self) {
389 9608480 : assert!(self.end_lsn.get().is_none());
390 9608480 : }
391 :
392 4564767 : pub(crate) fn end_lsn_or_max(&self) -> Lsn {
393 4564767 : self.end_lsn.get().copied().unwrap_or(Lsn::MAX)
394 4564767 : }
395 :
396 4562411 : pub(crate) fn get_lsn_range(&self) -> Range<Lsn> {
397 4562411 : self.start_lsn..self.end_lsn_or_max()
398 4562411 : }
399 :
400 : /// debugging function to print out the contents of the layer
401 : ///
402 : /// this is likely completely unused
403 0 : pub async fn dump(&self, _verbose: bool, _ctx: &RequestContext) -> Result<()> {
404 0 : let end_str = self.end_lsn_or_max();
405 0 :
406 0 : println!(
407 0 : "----- in-memory layer for tli {} LSNs {}-{} ----",
408 0 : self.timeline_id, self.start_lsn, end_str,
409 0 : );
410 0 :
411 0 : Ok(())
412 0 : }
413 :
414 : // Look up the keys in the provided keyspace and update
415 : // the reconstruct state with whatever is found.
416 1213509 : pub(crate) async fn get_values_reconstruct_data(
417 1213509 : self: &Arc<InMemoryLayer>,
418 1213509 : keyspace: KeySpace,
419 1213509 : lsn_range: Range<Lsn>,
420 1213509 : reconstruct_state: &mut ValuesReconstructState,
421 1213509 : ctx: &RequestContext,
422 1213509 : ) -> Result<(), GetVectoredError> {
423 1213509 : let ctx = RequestContextBuilder::extend(ctx)
424 1213509 : .page_content_kind(PageContentKind::InMemoryLayer)
425 1213509 : .build();
426 :
427 1213509 : let inner = self.inner.read().await;
428 :
429 : struct ValueRead {
430 : entry_lsn: Lsn,
431 : read: vectored_dio_read::LogicalRead<Vec<u8>>,
432 : }
433 1213509 : let mut reads: HashMap<Key, Vec<ValueRead>> = HashMap::new();
434 1213509 : let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default();
435 :
436 1213929 : for range in keyspace.ranges.iter() {
437 1213929 : for (key, vec_map) in inner
438 1213929 : .index
439 1213929 : .range(range.start.to_compact()..range.end.to_compact())
440 : {
441 999002 : let key = Key::from_compact(*key);
442 999002 : let slice = vec_map.slice_range(lsn_range.clone());
443 :
444 999002 : for (entry_lsn, index_entry) in slice.iter().rev() {
445 : let IndexEntryUnpacked {
446 998994 : pos,
447 998994 : len,
448 998994 : will_init,
449 998994 : } = index_entry.unpack();
450 998994 :
451 998994 : reads.entry(key).or_default().push(ValueRead {
452 998994 : entry_lsn: *entry_lsn,
453 998994 : read: vectored_dio_read::LogicalRead::new(
454 998994 : pos,
455 998994 : Vec::with_capacity(len as usize),
456 998994 : ),
457 998994 : });
458 998994 :
459 998994 : let io = reconstruct_state.update_key(&key, *entry_lsn, will_init);
460 998994 : ios.insert((key, *entry_lsn), io);
461 998994 :
462 998994 : if will_init {
463 998922 : break;
464 72 : }
465 : }
466 : }
467 : }
468 1213509 : drop(inner); // release the lock before we spawn the IO; if it's serial-mode IO we will deadlock on the read().await below
469 1213509 : let read_from = Arc::clone(self);
470 1213509 : let read_ctx = ctx.attached_child();
471 1213509 : reconstruct_state
472 1213509 : .spawn_io(async move {
473 1213509 : let inner = read_from.inner.read().await;
474 1213509 : let f = vectored_dio_read::execute(
475 1213509 : &inner.file,
476 1213509 : reads
477 1213509 : .iter()
478 1213509 : .flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)),
479 1213509 : &read_ctx,
480 1213509 : );
481 1213509 : send_future::SendFuture::send(f) // https://github.com/rust-lang/rust/issues/96865
482 1213509 : .await;
483 :
484 2212503 : for (key, value_reads) in reads {
485 1997988 : for ValueRead { entry_lsn, read } in value_reads {
486 998994 : let io = ios.remove(&(key, entry_lsn)).expect("sender must exist");
487 998994 : match read.into_result().expect("we run execute() above") {
488 0 : Err(e) => {
489 0 : io.complete(Err(std::io::Error::new(
490 0 : e.kind(),
491 0 : "dio vec read failed",
492 0 : )));
493 0 : }
494 998994 : Ok(value_buf) => {
495 998994 : io.complete(Ok(OnDiskValue::WalRecordOrImage(value_buf.into())));
496 998994 : }
497 : }
498 : }
499 : }
500 :
501 1213509 : assert!(ios.is_empty());
502 :
503 : // Keep the layer alive until this IO is done;
504 : // This is kinda forced for InMemoryLayer because we need to inner.read() anyway,
505 : // but it's less obvious for DeltaLayer and ImageLayer. So, keep this explicit
506 : // drop for consistency among all three layer types.
507 1213509 : drop(inner);
508 1213509 : drop(read_from);
509 1213509 : })
510 1213509 : .await;
511 :
512 1213509 : Ok(())
513 1213509 : }
514 : }
515 :
516 4720 : fn inmem_layer_display(mut f: impl Write, start_lsn: Lsn, end_lsn: Lsn) -> std::fmt::Result {
517 4720 : write!(f, "inmem-{:016X}-{:016X}", start_lsn.0, end_lsn.0)
518 4720 : }
519 :
520 2364 : fn inmem_layer_log_display(
521 2364 : mut f: impl Write,
522 2364 : timeline: TimelineId,
523 2364 : start_lsn: Lsn,
524 2364 : end_lsn: Lsn,
525 2364 : ) -> std::fmt::Result {
526 2364 : write!(f, "timeline {} in-memory ", timeline)?;
527 2364 : inmem_layer_display(f, start_lsn, end_lsn)
528 2364 : }
529 :
530 : impl std::fmt::Display for InMemoryLayer {
531 2356 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
532 2356 : let end_lsn = self.end_lsn_or_max();
533 2356 : inmem_layer_display(f, self.start_lsn, end_lsn)
534 2356 : }
535 : }
536 :
537 : impl InMemoryLayer {
538 : /// Get layer size.
539 2604 : pub async fn size(&self) -> Result<u64> {
540 2604 : let inner = self.inner.read().await;
541 2604 : Ok(inner.file.len())
542 2604 : }
543 :
544 2424 : pub fn estimated_in_mem_size(&self) -> u64 {
545 2424 : self.estimated_in_mem_size.load(AtomicOrdering::Relaxed)
546 2424 : }
547 :
548 : /// Create a new, empty, in-memory layer
549 2616 : pub async fn create(
550 2616 : conf: &'static PageServerConf,
551 2616 : timeline_id: TimelineId,
552 2616 : tenant_shard_id: TenantShardId,
553 2616 : start_lsn: Lsn,
554 2616 : gate: &utils::sync::gate::Gate,
555 2616 : ctx: &RequestContext,
556 2616 : ) -> Result<InMemoryLayer> {
557 2616 : trace!(
558 0 : "initializing new empty InMemoryLayer for writing on timeline {timeline_id} at {start_lsn}"
559 : );
560 :
561 2616 : let file = EphemeralFile::create(conf, tenant_shard_id, timeline_id, gate, ctx).await?;
562 2616 : let key = InMemoryLayerFileId(file.page_cache_file_id());
563 2616 :
564 2616 : Ok(InMemoryLayer {
565 2616 : file_id: key,
566 2616 : frozen_local_path_str: OnceLock::new(),
567 2616 : conf,
568 2616 : timeline_id,
569 2616 : tenant_shard_id,
570 2616 : start_lsn,
571 2616 : end_lsn: OnceLock::new(),
572 2616 : opened_at: Instant::now(),
573 2616 : inner: RwLock::new(InMemoryLayerInner {
574 2616 : index: BTreeMap::new(),
575 2616 : file,
576 2616 : resource_units: GlobalResourceUnits::new(),
577 2616 : }),
578 2616 : estimated_in_mem_size: AtomicU64::new(0),
579 2616 : })
580 2616 : }
581 :
582 : /// Write path.
583 : ///
584 : /// Errors are not retryable, the [`InMemoryLayer`] must be discarded, and not be read from.
585 : /// The reason why it's not retryable is that the [`EphemeralFile`] writes are not retryable.
586 : /// TODO: it can be made retryable if we aborted the process on EphemeralFile write errors.
587 9608480 : pub async fn put_batch(
588 9608480 : &self,
589 9608480 : serialized_batch: SerializedValueBatch,
590 9608480 : ctx: &RequestContext,
591 9608480 : ) -> anyhow::Result<()> {
592 9608480 : let mut inner = self.inner.write().await;
593 9608480 : self.assert_writable();
594 9608480 :
595 9608480 : let base_offset = inner.file.len();
596 9608480 :
597 9608480 : let SerializedValueBatch {
598 9608480 : raw,
599 9608480 : metadata,
600 9608480 : max_lsn: _,
601 9608480 : len: _,
602 9608480 : } = serialized_batch;
603 9608480 :
604 9608480 : // Write the batch to the file
605 9608480 : inner.file.write_raw(&raw, ctx).await?;
606 9608480 : let new_size = inner.file.len();
607 9608480 :
608 9608480 : let expected_new_len = base_offset
609 9608480 : .checked_add(raw.len().into_u64())
610 9608480 : // write_raw would error if we were to overflow u64.
611 9608480 : // also IndexEntry and higher levels in
612 9608480 : //the code don't allow the file to grow that large
613 9608480 : .unwrap();
614 9608480 : assert_eq!(new_size, expected_new_len);
615 :
616 : // Update the index with the new entries
617 19789976 : for meta in metadata {
618 : let SerializedValueMeta {
619 10181496 : key,
620 10181496 : lsn,
621 10181496 : batch_offset,
622 10181496 : len,
623 10181496 : will_init,
624 10181496 : } = match meta {
625 10181496 : ValueMeta::Serialized(ser) => ser,
626 : ValueMeta::Observed(_) => {
627 0 : continue;
628 : }
629 : };
630 :
631 : // Add the base_offset to the batch's index entries which are relative to the batch start.
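// Editor's note (worked example with hypothetical numbers): if the ephemeral file held
// 4096 bytes before this batch (base_offset == 4096) and this value starts 100 bytes
// into the serialized batch (batch_offset == 100), the resulting IndexEntry records
// pos == 4196, the absolute position of the value in the file.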
632 10181496 : let index_entry = IndexEntry::new(IndexEntryNewArgs {
633 10181496 : base_offset,
634 10181496 : batch_offset,
635 10181496 : len,
636 10181496 : will_init,
637 10181496 : })?;
638 :
639 10181496 : let vec_map = inner.index.entry(key).or_default();
640 10181496 : let old = vec_map.append_or_update_last(lsn, index_entry).unwrap().0;
641 10181496 : if old.is_some() {
642 : // This should not break anything, but is unexpected: ingestion code aims to filter out
643 : // multiple writes to the same key at the same LSN. This happens in cases where our
644 : // ingenstion code generates some write like an empty page, and we see a write from postgres
645 : // to the same key in the same wal record. If one such write makes it through, we
646 : // index the most recent write, implicitly ignoring the earlier write. We log a warning
647 : // because this case is unexpected, and we would like tests to fail if this happens.
648 0 : warn!("Key {} at {} written twice at same LSN", key, lsn);
649 10181496 : }
650 10181496 : self.estimated_in_mem_size.fetch_add(
651 10181496 : (std::mem::size_of::<CompactKey>()
652 10181496 : + std::mem::size_of::<Lsn>()
653 10181496 : + std::mem::size_of::<IndexEntry>()) as u64,
654 10181496 : AtomicOrdering::Relaxed,
655 10181496 : );
656 : }
657 :
658 9608480 : inner.resource_units.maybe_publish_size(new_size);
659 9608480 :
660 9608480 : Ok(())
661 9608480 : }
662 :
663 9606020 : pub(crate) fn get_opened_at(&self) -> Instant {
664 9606020 : self.opened_at
665 9606020 : }
666 :
667 0 : pub(crate) async fn tick(&self) -> Option<u64> {
668 0 : let mut inner = self.inner.write().await;
669 0 : let size = inner.file.len();
670 0 : inner.resource_units.publish_size(size)
671 0 : }
672 :
673 4 : pub(crate) async fn put_tombstones(&self, _key_ranges: &[(Range<Key>, Lsn)]) -> Result<()> {
674 4 : // TODO: Currently, we just leak the storage for any deleted keys
675 4 : Ok(())
676 4 : }
677 :
678 : /// Records the end_lsn for non-dropped layers.
679 : /// `end_lsn` is exclusive
680 2364 : pub async fn freeze(&self, end_lsn: Lsn) {
681 2364 : assert!(
682 2364 : self.start_lsn < end_lsn,
683 0 : "{} >= {}",
684 : self.start_lsn,
685 : end_lsn
686 : );
687 2364 : self.end_lsn.set(end_lsn).expect("end_lsn set only once");
688 2364 :
689 2364 : self.frozen_local_path_str
690 2364 : .set({
691 2364 : let mut buf = String::new();
692 2364 : inmem_layer_log_display(&mut buf, self.get_timeline_id(), self.start_lsn, end_lsn)
693 2364 : .unwrap();
694 2364 : buf.into()
695 2364 : })
696 2364 : .expect("frozen_local_path_str set only once");
697 :
698 : #[cfg(debug_assertions)]
699 : {
700 2364 : let inner = self.inner.write().await;
701 8512503 : for vec_map in inner.index.values() {
702 8773976 : for (lsn, _) in vec_map.as_slice() {
703 8773976 : assert!(*lsn < end_lsn);
704 : }
705 : }
706 : }
707 2364 : }
708 :
709 : /// Write this frozen in-memory layer to disk. If `key_range` is set, the delta
710 : /// layer will only contain the key range the user specifies, and may return `None`
711 : /// if there are no matching keys.
712 : ///
713 : /// Returns a new delta layer with all the same data as this in-memory layer
714 1936 : pub async fn write_to_disk(
715 1936 : &self,
716 1936 : ctx: &RequestContext,
717 1936 : key_range: Option<Range<Key>>,
718 1936 : l0_flush_global_state: &l0_flush::Inner,
719 1936 : ) -> Result<Option<(PersistentLayerDesc, Utf8PathBuf)>> {
720 : // Grab the lock in read-mode. We hold it over the I/O, but because this
721 : // layer is not writeable anymore, no one should be trying to acquire the
722 : // write lock on it, so we shouldn't block anyone. There's one exception
723 : // though: another thread might have grabbed a reference to this layer
724 : // in `get_layer_for_write` just before the checkpointer called
725 : // `freeze`, and then `write_to_disk` on it. When the thread gets the
726 : // lock, it will see that it's not writeable anymore and retry, but it
727 : // would have to wait until we release it. That race condition is very
728 : // rare though, so we just accept the potential latency hit for now.
729 1936 : let inner = self.inner.read().await;
730 :
731 : use l0_flush::Inner;
732 1936 : let _concurrency_permit = match l0_flush_global_state {
733 1936 : Inner::Direct { semaphore, .. } => Some(semaphore.acquire().await),
734 : };
735 :
736 1936 : let end_lsn = *self.end_lsn.get().unwrap();
737 :
738 1936 : let key_count = if let Some(key_range) = key_range {
739 0 : let key_range = key_range.start.to_compact()..key_range.end.to_compact();
740 0 :
741 0 : inner
742 0 : .index
743 0 : .iter()
744 0 : .filter(|(k, _)| key_range.contains(k))
745 0 : .count()
746 : } else {
747 1936 : inner.index.len()
748 : };
749 1936 : if key_count == 0 {
750 0 : return Ok(None);
751 1936 : }
752 :
753 1936 : let mut delta_layer_writer = DeltaLayerWriter::new(
754 1936 : self.conf,
755 1936 : self.timeline_id,
756 1936 : self.tenant_shard_id,
757 1936 : Key::MIN,
758 1936 : self.start_lsn..end_lsn,
759 1936 : ctx,
760 1936 : )
761 1936 : .await?;
762 :
763 1936 : match l0_flush_global_state {
764 : l0_flush::Inner::Direct { .. } => {
765 1936 : let file_contents = inner.file.load_to_io_buf(ctx).await?;
766 1936 : let file_contents = file_contents.freeze();
767 :
768 8509559 : for (key, vec_map) in inner.index.iter() {
769 : // Write all page versions
770 8771032 : for (lsn, entry) in vec_map
771 8509559 : .as_slice()
772 8509559 : .iter()
773 8771032 : .map(|(lsn, entry)| (lsn, entry.unpack()))
774 : {
775 : let IndexEntryUnpacked {
776 8771032 : pos,
777 8771032 : len,
778 8771032 : will_init,
779 8771032 : } = entry;
780 8771032 : let buf = file_contents.slice(pos as usize..(pos + len) as usize);
781 8771032 : let (_buf, res) = delta_layer_writer
782 8771032 : .put_value_bytes(
783 8771032 : Key::from_compact(*key),
784 8771032 : *lsn,
785 8771032 : buf.slice_len(),
786 8771032 : will_init,
787 8771032 : ctx,
788 8771032 : )
789 8771032 : .await;
790 8771032 : res?;
791 : }
792 : }
793 : }
794 : }
795 :
796 : // MAX is used here because we identify L0 layers by full key range
797 1936 : let (desc, path) = delta_layer_writer.finish(Key::MAX, ctx).await?;
798 :
799 : // Hold the permit until all the IO is done, including the fsync in `delta_layer_writer.finish()`.
800 : //
801 : // If we didn't and our caller drops this future, tokio-epoll-uring would extend the lifetime of
802 : // the `file_contents: Vec<u8>` until the IO is done, but not the permit's lifetime.
803 : // Thus, we'd have more concurrent `Vec<u8>` in existence than the semaphore allows.
804 : //
805 : // We hold across the fsync so that on ext4 mounted with data=ordered, all the kernel page cache pages
806 : // we dirtied when writing to the filesystem have been flushed and marked !dirty.
807 1936 : drop(_concurrency_permit);
808 1936 :
809 1936 : Ok(Some((desc, path)))
810 1936 : }
811 : }
812 :
813 : #[cfg(test)]
814 : mod tests {
815 : use super::*;
816 :
817 : #[test]
818 4 : fn test_index_entry() {
819 : const MAX_SUPPORTED_POS: usize = IndexEntry::MAX_SUPPORTED_POS;
820 : use {IndexEntryNewArgs as Args, IndexEntryUnpacked as Unpacked};
821 :
822 80 : let roundtrip = |args, expect: Unpacked| {
823 80 : let res = IndexEntry::new(args).expect("this tests expects no errors");
824 80 : let IndexEntryUnpacked {
825 80 : will_init,
826 80 : len,
827 80 : pos,
828 80 : } = res.unpack();
829 80 : assert_eq!(will_init, expect.will_init);
830 80 : assert_eq!(len, expect.len);
831 80 : assert_eq!(pos, expect.pos);
832 80 : };
833 :
834 : // basic roundtrip
835 12 : for pos in [0, MAX_SUPPORTED_POS] {
836 24 : for len in [0, MAX_SUPPORTED_BLOB_LEN] {
837 48 : for will_init in [true, false] {
838 32 : let expect = Unpacked {
839 32 : will_init,
840 32 : len: len.into_u64(),
841 32 : pos: pos.into_u64(),
842 32 : };
843 32 : roundtrip(
844 32 : Args {
845 32 : will_init,
846 32 : base_offset: pos.into_u64(),
847 32 : batch_offset: 0,
848 32 : len,
849 32 : },
850 32 : expect,
851 32 : );
852 32 : roundtrip(
853 32 : Args {
854 32 : will_init,
855 32 : base_offset: 0,
856 32 : batch_offset: pos.into_u64(),
857 32 : len,
858 32 : },
859 32 : expect,
860 32 : );
861 32 : }
862 : }
863 : }
864 :
865 : // too-large len
866 4 : let too_large = Args {
867 4 : will_init: false,
868 4 : len: MAX_SUPPORTED_BLOB_LEN + 1,
869 4 : base_offset: 0,
870 4 : batch_offset: 0,
871 4 : };
872 4 : assert!(IndexEntry::new(too_large).is_err());
873 :
874 : // too-large pos
875 : {
876 4 : let too_large = Args {
877 4 : will_init: false,
878 4 : len: 0,
879 4 : base_offset: MAX_SUPPORTED_POS.into_u64() + 1,
880 4 : batch_offset: 0,
881 4 : };
882 4 : assert!(IndexEntry::new(too_large).is_err());
883 4 : let too_large = Args {
884 4 : will_init: false,
885 4 : len: 0,
886 4 : base_offset: 0,
887 4 : batch_offset: MAX_SUPPORTED_POS.into_u64() + 1,
888 4 : };
889 4 : assert!(IndexEntry::new(too_large).is_err());
890 : }
891 :
892 : // too large (base_offset + batch_offset)
893 : {
894 4 : let too_large = Args {
895 4 : will_init: false,
896 4 : len: 0,
897 4 : base_offset: MAX_SUPPORTED_POS.into_u64(),
898 4 : batch_offset: 1,
899 4 : };
900 4 : assert!(IndexEntry::new(too_large).is_err());
901 4 : let too_large = Args {
902 4 : will_init: false,
903 4 : len: 0,
904 4 : base_offset: MAX_SUPPORTED_POS.into_u64() - 1,
905 4 : batch_offset: MAX_SUPPORTED_POS.into_u64() - 1,
906 4 : };
907 4 : assert!(IndexEntry::new(too_large).is_err());
908 : }
909 :
910 : // valid special cases
911 : // - area past the max supported pos that is accessible by len
912 12 : for len in [1, MAX_SUPPORTED_BLOB_LEN] {
913 8 : roundtrip(
914 8 : Args {
915 8 : will_init: false,
916 8 : len,
917 8 : base_offset: MAX_SUPPORTED_POS.into_u64(),
918 8 : batch_offset: 0,
919 8 : },
920 8 : Unpacked {
921 8 : will_init: false,
922 8 : len: len as u64,
923 8 : pos: MAX_SUPPORTED_POS.into_u64(),
924 8 : },
925 8 : );
926 8 : roundtrip(
927 8 : Args {
928 8 : will_init: false,
929 8 : len,
930 8 : base_offset: 0,
931 8 : batch_offset: MAX_SUPPORTED_POS.into_u64(),
932 8 : },
933 8 : Unpacked {
934 8 : will_init: false,
935 8 : len: len as u64,
936 8 : pos: MAX_SUPPORTED_POS.into_u64(),
937 8 : },
938 8 : );
939 8 : }
940 4 : }
941 : }