Line data Source code
1 : //! An in-memory layer stores recently received key-value pairs.
2 : //!
3 : //! The "in-memory" part of the name is a bit misleading: the actual page versions are
4 : //! held in an ephemeral file, not in memory. The metadata for each page version, i.e.
5 : //! its position in the file, is kept in memory, though.
6 : //!
7 : use std::cmp::Ordering;
8 : use std::collections::{BTreeMap, HashMap};
9 : use std::fmt::Write;
10 : use std::ops::Range;
11 : use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering as AtomicOrdering};
12 : use std::sync::{Arc, OnceLock};
13 : use std::time::Instant;
14 :
15 : use anyhow::Result;
16 : use camino::Utf8PathBuf;
17 : use pageserver_api::key::{CompactKey, Key};
18 : use pageserver_api::keyspace::KeySpace;
19 : use pageserver_api::models::InMemoryLayerInfo;
20 : use pageserver_api::shard::TenantShardId;
21 : use tokio::sync::RwLock;
22 : use tokio_util::sync::CancellationToken;
23 : use tracing::*;
24 : use utils::id::TimelineId;
25 : use utils::lsn::Lsn;
26 : use utils::vec_map::VecMap;
27 : use wal_decoder::serialized_batch::{SerializedValueBatch, SerializedValueMeta, ValueMeta};
28 :
29 : use super::{DeltaLayerWriter, PersistentLayerDesc, ValuesReconstructState};
30 : use crate::assert_u64_eq_usize::{U64IsUsize, UsizeIsU64, u64_to_usize};
31 : use crate::config::PageServerConf;
32 : use crate::context::{PageContentKind, RequestContext, RequestContextBuilder};
33 : // avoid binding to Write (conflicts with std::io::Write)
34 : // while being able to use std::fmt::Write's methods
35 : use crate::metrics::TIMELINE_EPHEMERAL_BYTES;
36 : use crate::tenant::ephemeral_file::EphemeralFile;
37 : use crate::tenant::storage_layer::{OnDiskValue, OnDiskValueIo};
38 : use crate::tenant::timeline::GetVectoredError;
39 : use crate::virtual_file::owned_buffers_io::io_buf_ext::IoBufExt;
40 : use crate::{l0_flush, page_cache};
41 :
42 : pub(crate) mod vectored_dio_read;
43 :
44 : #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
45 : pub(crate) struct InMemoryLayerFileId(page_cache::FileId);
46 :
47 : pub struct InMemoryLayer {
48 : conf: &'static PageServerConf,
49 : tenant_shard_id: TenantShardId,
50 : timeline_id: TimelineId,
51 : file_id: InMemoryLayerFileId,
52 :
53 : /// This layer contains all the changes from 'start_lsn'. The
54 : /// start is inclusive.
55 : start_lsn: Lsn,
56 :
57 : /// Frozen layers have an exclusive end LSN.
58 : /// Writes are only allowed when this is `None`.
59 : pub(crate) end_lsn: OnceLock<Lsn>,
60 :
61 : /// Used for the traversal path. Cached representation of the in-memory layer once it has been frozen.
62 : frozen_local_path_str: OnceLock<Arc<str>>,
63 :
64 : opened_at: Instant,
65 :
66 : /// The above fields never change, except for `end_lsn`, which is only set once.
67 : /// All other changing parts are in `inner`, protected by an RwLock.
68 : inner: RwLock<InMemoryLayerInner>,
69 :
70 : estimated_in_mem_size: AtomicU64,
71 : }
72 :
73 : impl std::fmt::Debug for InMemoryLayer {
74 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
75 0 : f.debug_struct("InMemoryLayer")
76 0 : .field("start_lsn", &self.start_lsn)
77 0 : .field("end_lsn", &self.end_lsn)
78 0 : .field("inner", &self.inner)
79 0 : .finish()
80 0 : }
81 : }
82 :
83 : pub struct InMemoryLayerInner {
84 : /// All versions of all pages in the layer are kept here. Indexed
85 : /// by key and LSN. The [`IndexEntry`] encodes the position in the
86 : /// ephemeral file where the page version is stored.
87 : index: BTreeMap<CompactKey, VecMap<Lsn, IndexEntry>>,
88 :
89 : /// The values are stored in a serialized format in this file.
90 : /// Each serialized Value is preceded by a 'u32' length field.
91 : /// The 'index' map above stores offsets into this file.
92 : file: EphemeralFile,
93 :
94 : resource_units: GlobalResourceUnits,
95 : }
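// Sketch of how a point lookup resolves through this structure (simplified from
// `get_values_reconstruct_data` below; illustrative only, not part of the original source):
//
//     let vec_map = inner.index.get(&key.to_compact())?;
//     for (lsn, entry) in vec_map.slice_range(lsn_range).iter().rev() {
//         let IndexEntryUnpacked { pos, len, will_init } = entry.unpack();
//         // read `len` bytes at offset `pos` from `inner.file`
//         if will_init {
//             break; // a full image / init record ends the walk back through older LSNs
//         }
//     }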
96 :
97 : /// Support the same max blob length as blob_io, because ultimately
98 : /// all the InMemoryLayer contents end up being written into a delta layer,
99 : /// using [`crate::tenant::blob_io`].
100 : const MAX_SUPPORTED_BLOB_LEN: usize = crate::tenant::blob_io::MAX_SUPPORTED_BLOB_LEN;
101 : const MAX_SUPPORTED_BLOB_LEN_BITS: usize = {
102 : let trailing_ones = MAX_SUPPORTED_BLOB_LEN.trailing_ones() as usize;
103 : let leading_zeroes = MAX_SUPPORTED_BLOB_LEN.leading_zeros() as usize;
104 : assert!(trailing_ones + leading_zeroes == std::mem::size_of::<usize>() * 8);
105 : trailing_ones
106 : };
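// For example, if MAX_SUPPORTED_BLOB_LEN were 2^28 - 1, it would have 28 trailing ones
// and 36 leading zeros on a 64-bit usize, so MAX_SUPPORTED_BLOB_LEN_BITS would be 28.
// The assert above only holds for limits of the form 2^k - 1 (a contiguous run of low bits).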
107 :
108 : /// See [`InMemoryLayerInner::index`].
109 : ///
110 : /// For memory efficiency, the data is packed into a u64.
111 : ///
112 : /// Layout:
113 : /// - 1 bit: `will_init`
114 : /// - [`MAX_SUPPORTED_BLOB_LEN_BITS`][]: `len`
115 : /// - [`MAX_SUPPORTED_POS_BITS`](IndexEntry::MAX_SUPPORTED_POS_BITS): `pos`
116 : #[derive(Debug, Clone, Copy, PartialEq, Eq)]
117 : pub struct IndexEntry(u64);
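// Sketch of constructing and reading an entry (hypothetical values; mirrors
// `IndexEntry::new` and `IndexEntry::unpack` below, not part of the original source):
//
//     let entry = IndexEntry::new(IndexEntryNewArgs {
//         base_offset: 4096,  // where this batch starts in the ephemeral file
//         batch_offset: 512,  // offset of the value within the batch
//         len: 8192,
//         will_init: true,
//     })?;
//     let IndexEntryUnpacked { pos, len, will_init } = entry.unpack();
//     assert_eq!(pos, 4096 + 512); // `pos` is the absolute offset in the file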
118 :
119 : impl IndexEntry {
120 : /// See [`Self::MAX_SUPPORTED_POS`].
121 : const MAX_SUPPORTED_POS_BITS: usize = {
122 : let remainder = 64 - 1 - MAX_SUPPORTED_BLOB_LEN_BITS;
123 : if remainder < 32 {
124 : panic!("pos can be u32 as per type system, support that");
125 : }
126 : remainder
127 : };
128 : /// The maximum supported blob offset that can be represented by [`Self`].
129 : /// See also [`Self::validate_checkpoint_distance`].
130 : const MAX_SUPPORTED_POS: usize = (1 << Self::MAX_SUPPORTED_POS_BITS) - 1;
131 :
132 : // Layout
133 : const WILL_INIT_RANGE: Range<usize> = 0..1;
134 : const LEN_RANGE: Range<usize> =
135 : Self::WILL_INIT_RANGE.end..Self::WILL_INIT_RANGE.end + MAX_SUPPORTED_BLOB_LEN_BITS;
136 : const POS_RANGE: Range<usize> =
137 : Self::LEN_RANGE.end..Self::LEN_RANGE.end + Self::MAX_SUPPORTED_POS_BITS;
138 : const _ASSERT: () = {
139 : if Self::POS_RANGE.end != 64 {
140 : panic!("we don't want undefined bits for our own sanity")
141 : }
142 : };
143 :
144 : /// Fails if and only if the offset or length encoded in `arg` is too large to be represented by [`Self`].
145 : ///
146 : /// The only reason why that can happen in the system is if the [`InMemoryLayer`] grows too large.
147 : /// The [`InMemoryLayer`] size is determined by the checkpoint distance, enforced by [`crate::tenant::Timeline::should_roll`].
148 : ///
149 : /// Thus, to avoid failure of this function, whenever we start up and/or change checkpoint distance,
150 : /// call [`Self::validate_checkpoint_distance`] with the new checkpoint distance value.
151 : ///
152 : /// TODO: this check should happen ideally at config parsing time (and in the request handler when a change to checkpoint distance is requested)
153 : /// When cleaning this up, also look into the s3 max file size check that is performed in delta layer writer.
154 : #[inline(always)]
155 10181596 : fn new(arg: IndexEntryNewArgs) -> anyhow::Result<Self> {
156 10181596 : let IndexEntryNewArgs {
157 10181596 : base_offset,
158 10181596 : batch_offset,
159 10181596 : len,
160 10181596 : will_init,
161 10181596 : } = arg;
162 :
163 10181596 : let pos = base_offset
164 10181596 : .checked_add(batch_offset)
165 10181596 : .ok_or_else(|| anyhow::anyhow!("base_offset + batch_offset overflows u64: base_offset={base_offset} batch_offset={batch_offset}"))?;
166 :
167 10181596 : if pos.into_usize() > Self::MAX_SUPPORTED_POS {
168 16 : anyhow::bail!(
169 16 : "base_offset+batch_offset exceeds the maximum supported value: base_offset={base_offset} batch_offset={batch_offset} (+)={pos} max={max}",
170 16 : max = Self::MAX_SUPPORTED_POS
171 16 : );
172 10181580 : }
173 10181580 :
174 10181580 : if len > MAX_SUPPORTED_BLOB_LEN {
175 4 : anyhow::bail!(
176 4 : "len exceeds the maximum supported length: len={len} max={MAX_SUPPORTED_BLOB_LEN}",
177 4 : );
178 10181576 : }
179 10181576 :
180 10181576 : let mut data: u64 = 0;
181 : use bit_field::BitField;
182 10181576 : data.set_bits(Self::WILL_INIT_RANGE, if will_init { 1 } else { 0 });
183 10181576 : data.set_bits(Self::LEN_RANGE, len.into_u64());
184 10181576 : data.set_bits(Self::POS_RANGE, pos);
185 10181576 :
186 10181576 : Ok(Self(data))
187 10181596 : }
188 :
189 : #[inline(always)]
190 9770148 : fn unpack(&self) -> IndexEntryUnpacked {
191 : use bit_field::BitField;
192 9770148 : IndexEntryUnpacked {
193 9770148 : will_init: self.0.get_bits(Self::WILL_INIT_RANGE) != 0,
194 9770148 : len: self.0.get_bits(Self::LEN_RANGE),
195 9770148 : pos: self.0.get_bits(Self::POS_RANGE),
196 9770148 : }
197 9770148 : }
198 :
199 : /// See [`Self::new`].
200 488 : pub(crate) const fn validate_checkpoint_distance(
201 488 : checkpoint_distance: u64,
202 488 : ) -> Result<(), &'static str> {
203 488 : if checkpoint_distance > Self::MAX_SUPPORTED_POS as u64 {
204 0 : return Err("exceeds the maximum supported value");
205 488 : }
206 488 : let res = u64_to_usize(checkpoint_distance).checked_add(MAX_SUPPORTED_BLOB_LEN);
207 488 : if res.is_none() {
208 0 : return Err(
209 0 : "checkpoint distance + max supported blob len overflows in-memory addition",
210 0 : );
211 488 : }
212 488 :
213 488 : // NB: it is ok for the result of the addition to be larger than MAX_SUPPORTED_POS
214 488 :
215 488 : Ok(())
216 488 : }
217 :
218 : const _ASSERT_DEFAULT_CHECKPOINT_DISTANCE_IS_VALID: () = {
219 : let res = Self::validate_checkpoint_distance(
220 : pageserver_api::config::tenant_conf_defaults::DEFAULT_CHECKPOINT_DISTANCE,
221 : );
222 : if res.is_err() {
223 : panic!("default checkpoint distance is valid")
224 : }
225 : };
226 : }
227 :
228 : /// Args to [`IndexEntry::new`].
229 : #[derive(Clone, Copy)]
230 : struct IndexEntryNewArgs {
231 : base_offset: u64,
232 : batch_offset: u64,
233 : len: usize,
234 : will_init: bool,
235 : }
236 :
237 : /// Unpacked representation of the bitfielded [`IndexEntry`].
238 : #[derive(Clone, Copy, PartialEq, Eq, Debug)]
239 : struct IndexEntryUnpacked {
240 : will_init: bool,
241 : len: u64,
242 : pos: u64,
243 : }
244 :
245 : impl std::fmt::Debug for InMemoryLayerInner {
246 0 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
247 0 : f.debug_struct("InMemoryLayerInner").finish()
248 0 : }
249 : }
250 :
251 : /// State shared by all in-memory (ephemeral) layers. Updated infrequently during background ticks in Timeline,
252 : /// to minimize contention.
253 : ///
254 : /// This global state is used to implement behaviors that require a global view of the system, e.g.
255 : /// rolling layers proactively to limit the total amount of dirty data.
256 : pub(crate) struct GlobalResources {
257 : // Limit on how high dirty_bytes may grow before we start freezing layers to reduce it.
258 : // Zero means unlimited.
259 : pub(crate) max_dirty_bytes: AtomicU64,
260 : // How many bytes are in all EphemeralFile objects
261 : dirty_bytes: AtomicU64,
262 : // How many layers are contributing to dirty_bytes
263 : dirty_layers: AtomicUsize,
264 : }
265 :
266 : // Per-timeline RAII struct for its contribution to [`GlobalResources`]
267 : struct GlobalResourceUnits {
268 : // How many dirty bytes have I added to the global dirty_bytes: this guard object is responsible
269 : // for decrementing the global counter by this many bytes when dropped.
270 : dirty_bytes: u64,
271 : }
272 :
273 : impl GlobalResourceUnits {
274 : // Hint for the layer append path to update us when the layer size differs from the last
275 : // call to publish_size by this much. If we don't reach this threshold, we'll still get
276 : // updated when the Timeline "ticks" in the background.
277 : const MAX_SIZE_DRIFT: u64 = 10 * 1024 * 1024;
278 :
279 2616 : fn new() -> Self {
280 2616 : GLOBAL_RESOURCES
281 2616 : .dirty_layers
282 2616 : .fetch_add(1, AtomicOrdering::Relaxed);
283 2616 : Self { dirty_bytes: 0 }
284 2616 : }
285 :
286 : /// Do not call this frequently: all timelines will write to these same global atomics,
287 : /// so this is a relatively expensive operation. Wait at least a few seconds between calls.
288 : ///
289 : /// Returns the effective layer size limit that should be applied, if any, to keep
290 : /// the total number of dirty bytes below the configured maximum.
291 2380 : fn publish_size(&mut self, size: u64) -> Option<u64> {
292 2380 : let new_global_dirty_bytes = match size.cmp(&self.dirty_bytes) {
293 2360 : Ordering::Equal => GLOBAL_RESOURCES.dirty_bytes.load(AtomicOrdering::Relaxed),
294 : Ordering::Greater => {
295 16 : let delta = size - self.dirty_bytes;
296 16 : let old = GLOBAL_RESOURCES
297 16 : .dirty_bytes
298 16 : .fetch_add(delta, AtomicOrdering::Relaxed);
299 16 : old + delta
300 : }
301 : Ordering::Less => {
302 4 : let delta = self.dirty_bytes - size;
303 4 : let old = GLOBAL_RESOURCES
304 4 : .dirty_bytes
305 4 : .fetch_sub(delta, AtomicOrdering::Relaxed);
306 4 : old - delta
307 : }
308 : };
309 :
310 : // This is a sloppy update: concurrent updates to the counter will race, and the exact
311 : // value of the metric might not be the latest value of GLOBAL_RESOURCES.dirty_bytes.
312 : // That's okay: as long as the metric contains some recent value, it doesn't have to always
313 : // be literally the last update.
314 2380 : TIMELINE_EPHEMERAL_BYTES.set(new_global_dirty_bytes);
315 2380 :
316 2380 : self.dirty_bytes = size;
317 2380 :
318 2380 : let max_dirty_bytes = GLOBAL_RESOURCES
319 2380 : .max_dirty_bytes
320 2380 : .load(AtomicOrdering::Relaxed);
321 2380 : if max_dirty_bytes > 0 && new_global_dirty_bytes > max_dirty_bytes {
322 : // Set the layer file limit to the average layer size: this implies that all above-average
323 : // sized layers will be eligible for freezing. They will be frozen in the order they
324 : // next enter publish_size.
325 0 : Some(
326 0 : new_global_dirty_bytes
327 0 : / GLOBAL_RESOURCES.dirty_layers.load(AtomicOrdering::Relaxed) as u64,
328 0 : )
329 : } else {
330 2380 : None
331 : }
332 2380 : }
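// Worked example with made-up numbers: if max_dirty_bytes is 800 MiB and the global
// dirty total reaches 1000 MiB spread across 10 dirty layers, publish_size returns
// Some(100 MiB), i.e. the average layer size. Every layer larger than that average
// becomes a freeze candidate the next time it publishes its size, which pushes the
// global total back under the limit.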
333 :
334 : // Call publish_size if the input size differs from last published size by more than
335 : // the drift limit
336 9608480 : fn maybe_publish_size(&mut self, size: u64) {
337 9608480 : let publish = match size.cmp(&self.dirty_bytes) {
338 0 : Ordering::Equal => false,
339 9608480 : Ordering::Greater => size - self.dirty_bytes > Self::MAX_SIZE_DRIFT,
340 0 : Ordering::Less => self.dirty_bytes - size > Self::MAX_SIZE_DRIFT,
341 : };
342 :
343 9608480 : if publish {
344 16 : self.publish_size(size);
345 9608464 : }
346 9608480 : }
347 : }
348 :
349 : impl Drop for GlobalResourceUnits {
350 2364 : fn drop(&mut self) {
351 2364 : GLOBAL_RESOURCES
352 2364 : .dirty_layers
353 2364 : .fetch_sub(1, AtomicOrdering::Relaxed);
354 2364 :
355 2364 : // Subtract our contribution to the global total dirty bytes
356 2364 : self.publish_size(0);
357 2364 : }
358 : }
359 :
360 : pub(crate) static GLOBAL_RESOURCES: GlobalResources = GlobalResources {
361 : max_dirty_bytes: AtomicU64::new(0),
362 : dirty_bytes: AtomicU64::new(0),
363 : dirty_layers: AtomicUsize::new(0),
364 : };
365 :
366 : impl InMemoryLayer {
367 1214235 : pub(crate) fn file_id(&self) -> InMemoryLayerFileId {
368 1214235 : self.file_id
369 1214235 : }
370 :
371 2364 : pub(crate) fn get_timeline_id(&self) -> TimelineId {
372 2364 : self.timeline_id
373 2364 : }
374 :
375 4720 : pub(crate) fn info(&self) -> InMemoryLayerInfo {
376 4720 : let lsn_start = self.start_lsn;
377 :
378 4720 : if let Some(&lsn_end) = self.end_lsn.get() {
379 4716 : InMemoryLayerInfo::Frozen { lsn_start, lsn_end }
380 : } else {
381 4 : InMemoryLayerInfo::Open { lsn_start }
382 : }
383 4720 : }
384 :
385 4712 : pub(crate) fn try_len(&self) -> Option<u64> {
386 4712 : self.inner.try_read().map(|i| i.file.len()).ok()
387 4712 : }
388 :
389 9608480 : pub(crate) fn assert_writable(&self) {
390 9608480 : assert!(self.end_lsn.get().is_none());
391 9608480 : }
392 :
393 4563394 : pub(crate) fn end_lsn_or_max(&self) -> Lsn {
394 4563394 : self.end_lsn.get().copied().unwrap_or(Lsn::MAX)
395 4563394 : }
396 :
397 4561038 : pub(crate) fn get_lsn_range(&self) -> Range<Lsn> {
398 4561038 : self.start_lsn..self.end_lsn_or_max()
399 4561038 : }
400 :
401 : /// Debugging function to print out the contents of the layer.
402 : ///
403 : /// This is likely completely unused.
404 0 : pub async fn dump(&self, _verbose: bool, _ctx: &RequestContext) -> Result<()> {
405 0 : let end_str = self.end_lsn_or_max();
406 0 :
407 0 : println!(
408 0 : "----- in-memory layer for tli {} LSNs {}-{} ----",
409 0 : self.timeline_id, self.start_lsn, end_str,
410 0 : );
411 0 :
412 0 : Ok(())
413 0 : }
414 :
415 : // Look up the keys in the provided keyspace and update
416 : // the reconstruct state with whatever is found.
417 1213395 : pub(crate) async fn get_values_reconstruct_data(
418 1213395 : self: &Arc<InMemoryLayer>,
419 1213395 : keyspace: KeySpace,
420 1213395 : lsn_range: Range<Lsn>,
421 1213395 : reconstruct_state: &mut ValuesReconstructState,
422 1213395 : ctx: &RequestContext,
423 1213395 : ) -> Result<(), GetVectoredError> {
424 1213395 : let ctx = RequestContextBuilder::extend(ctx)
425 1213395 : .page_content_kind(PageContentKind::InMemoryLayer)
426 1213395 : .build();
427 :
428 1213395 : let inner = self.inner.read().await;
429 :
430 : struct ValueRead {
431 : entry_lsn: Lsn,
432 : read: vectored_dio_read::LogicalRead<Vec<u8>>,
433 : }
434 1213395 : let mut reads: HashMap<Key, Vec<ValueRead>> = HashMap::new();
435 1213395 : let mut ios: HashMap<(Key, Lsn), OnDiskValueIo> = Default::default();
436 :
437 1213815 : for range in keyspace.ranges.iter() {
438 1213815 : for (key, vec_map) in inner
439 1213815 : .index
440 1213815 : .range(range.start.to_compact()..range.end.to_compact())
441 : {
442 999044 : let key = Key::from_compact(*key);
443 999044 : let slice = vec_map.slice_range(lsn_range.clone());
444 :
445 999044 : for (entry_lsn, index_entry) in slice.iter().rev() {
446 : let IndexEntryUnpacked {
447 999036 : pos,
448 999036 : len,
449 999036 : will_init,
450 999036 : } = index_entry.unpack();
451 999036 :
452 999036 : reads.entry(key).or_default().push(ValueRead {
453 999036 : entry_lsn: *entry_lsn,
454 999036 : read: vectored_dio_read::LogicalRead::new(
455 999036 : pos,
456 999036 : Vec::with_capacity(len as usize),
457 999036 : ),
458 999036 : });
459 999036 :
460 999036 : let io = reconstruct_state.update_key(&key, *entry_lsn, will_init);
461 999036 : ios.insert((key, *entry_lsn), io);
462 999036 :
463 999036 : if will_init {
464 998964 : break;
465 72 : }
466 : }
467 : }
468 : }
469 1213395 : drop(inner); // release the lock before we spawn the IO; if it's serial-mode IO we will deadlock on the read().await below
470 1213395 : let read_from = Arc::clone(self);
471 1213395 : let read_ctx = ctx.attached_child();
472 1213395 : reconstruct_state
473 1213395 : .spawn_io(async move {
474 1213395 : let inner = read_from.inner.read().await;
475 1213395 : let f = vectored_dio_read::execute(
476 1213395 : &inner.file,
477 1213395 : reads
478 1213395 : .iter()
479 1213395 : .flat_map(|(_, value_reads)| value_reads.iter().map(|v| &v.read)),
480 1213395 : &read_ctx,
481 1213395 : );
482 1213395 : send_future::SendFuture::send(f) // https://github.com/rust-lang/rust/issues/96865
483 1213395 : .await;
484 :
485 2212431 : for (key, value_reads) in reads {
486 1998072 : for ValueRead { entry_lsn, read } in value_reads {
487 999036 : let io = ios.remove(&(key, entry_lsn)).expect("sender must exist");
488 999036 : match read.into_result().expect("we run execute() above") {
489 0 : Err(e) => {
490 0 : io.complete(Err(std::io::Error::new(
491 0 : e.kind(),
492 0 : "dio vec read failed",
493 0 : )));
494 0 : }
495 999036 : Ok(value_buf) => {
496 999036 : io.complete(Ok(OnDiskValue::WalRecordOrImage(value_buf.into())));
497 999036 : }
498 : }
499 : }
500 : }
501 :
502 1213395 : assert!(ios.is_empty());
503 :
504 : // Keep layer existent until this IO is done;
505 : // This is kinda forced for InMemoryLayer because we need to inner.read() anyway,
506 : // but it's less obvious for DeltaLayer and ImageLayer. So, keep this explicit
507 : // drop for consistency among all three layer types.
508 1213395 : drop(inner);
509 1213395 : drop(read_from);
510 1213395 : })
511 1213395 : .await;
512 :
513 1213395 : Ok(())
514 1213395 : }
515 : }
516 :
517 4720 : fn inmem_layer_display(mut f: impl Write, start_lsn: Lsn, end_lsn: Lsn) -> std::fmt::Result {
518 4720 : write!(f, "inmem-{:016X}-{:016X}", start_lsn.0, end_lsn.0)
519 4720 : }
520 :
521 2364 : fn inmem_layer_log_display(
522 2364 : mut f: impl Write,
523 2364 : timeline: TimelineId,
524 2364 : start_lsn: Lsn,
525 2364 : end_lsn: Lsn,
526 2364 : ) -> std::fmt::Result {
527 2364 : write!(f, "timeline {} in-memory ", timeline)?;
528 2364 : inmem_layer_display(f, start_lsn, end_lsn)
529 2364 : }
530 :
531 : impl std::fmt::Display for InMemoryLayer {
532 2356 : fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
533 2356 : let end_lsn = self.end_lsn_or_max();
534 2356 : inmem_layer_display(f, self.start_lsn, end_lsn)
535 2356 : }
536 : }
537 :
538 : impl InMemoryLayer {
539 : /// Get layer size.
540 2604 : pub async fn size(&self) -> Result<u64> {
541 2604 : let inner = self.inner.read().await;
542 2604 : Ok(inner.file.len())
543 2604 : }
544 :
545 2401 : pub fn estimated_in_mem_size(&self) -> u64 {
546 2401 : self.estimated_in_mem_size.load(AtomicOrdering::Relaxed)
547 2401 : }
548 :
549 : /// Create a new, empty, in-memory layer
550 2616 : pub async fn create(
551 2616 : conf: &'static PageServerConf,
552 2616 : timeline_id: TimelineId,
553 2616 : tenant_shard_id: TenantShardId,
554 2616 : start_lsn: Lsn,
555 2616 : gate: &utils::sync::gate::Gate,
556 2616 : cancel: &CancellationToken,
557 2616 : ctx: &RequestContext,
558 2616 : ) -> Result<InMemoryLayer> {
559 2616 : trace!(
560 0 : "initializing new empty InMemoryLayer for writing on timeline {timeline_id} at {start_lsn}"
561 : );
562 :
563 2616 : let file =
564 2616 : EphemeralFile::create(conf, tenant_shard_id, timeline_id, gate, cancel, ctx).await?;
565 2616 : let key = InMemoryLayerFileId(file.page_cache_file_id());
566 2616 :
567 2616 : Ok(InMemoryLayer {
568 2616 : file_id: key,
569 2616 : frozen_local_path_str: OnceLock::new(),
570 2616 : conf,
571 2616 : timeline_id,
572 2616 : tenant_shard_id,
573 2616 : start_lsn,
574 2616 : end_lsn: OnceLock::new(),
575 2616 : opened_at: Instant::now(),
576 2616 : inner: RwLock::new(InMemoryLayerInner {
577 2616 : index: BTreeMap::new(),
578 2616 : file,
579 2616 : resource_units: GlobalResourceUnits::new(),
580 2616 : }),
581 2616 : estimated_in_mem_size: AtomicU64::new(0),
582 2616 : })
583 2616 : }
584 :
585 : /// Write path.
586 : ///
587 : /// Errors are not retryable, the [`InMemoryLayer`] must be discarded, and not be read from.
588 : /// The reason why it's not retryable is that the [`EphemeralFile`] writes are not retryable.
589 : /// TODO: it can be made retryable if we aborted the process on EphemeralFile write errors.
590 9608480 : pub async fn put_batch(
591 9608480 : &self,
592 9608480 : serialized_batch: SerializedValueBatch,
593 9608480 : ctx: &RequestContext,
594 9608480 : ) -> anyhow::Result<()> {
595 9608480 : let mut inner = self.inner.write().await;
596 9608480 : self.assert_writable();
597 9608480 :
598 9608480 : let base_offset = inner.file.len();
599 9608480 :
600 9608480 : let SerializedValueBatch {
601 9608480 : raw,
602 9608480 : metadata,
603 9608480 : max_lsn: _,
604 9608480 : len: _,
605 9608480 : } = serialized_batch;
606 9608480 :
607 9608480 : // Write the batch to the file
608 9608480 : inner.file.write_raw(&raw, ctx).await?;
609 9608480 : let new_size = inner.file.len();
610 9608480 :
611 9608480 : let expected_new_len = base_offset
612 9608480 : .checked_add(raw.len().into_u64())
613 9608480 : // write_raw would error if we were to overflow u64.
614 9608480 : // Also, IndexEntry and higher levels in the code
615 9608480 : // don't allow the file to grow that large.
616 9608480 : .unwrap();
617 9608480 : assert_eq!(new_size, expected_new_len);
618 :
619 : // Update the index with the new entries
620 19789976 : for meta in metadata {
621 : let SerializedValueMeta {
622 10181496 : key,
623 10181496 : lsn,
624 10181496 : batch_offset,
625 10181496 : len,
626 10181496 : will_init,
627 10181496 : } = match meta {
628 10181496 : ValueMeta::Serialized(ser) => ser,
629 : ValueMeta::Observed(_) => {
630 0 : continue;
631 : }
632 : };
633 :
634 : // Add the base_offset to the batch's index entries which are relative to the batch start.
635 10181496 : let index_entry = IndexEntry::new(IndexEntryNewArgs {
636 10181496 : base_offset,
637 10181496 : batch_offset,
638 10181496 : len,
639 10181496 : will_init,
640 10181496 : })?;
641 :
642 10181496 : let vec_map = inner.index.entry(key).or_default();
643 10181496 : let old = vec_map.append_or_update_last(lsn, index_entry).unwrap().0;
644 10181496 : if old.is_some() {
645 : // This should not break anything, but is unexpected: ingestion code aims to filter out
646 : // multiple writes to the same key at the same LSN. This happens in cases where our
647 : // ingestion code generates some write like an empty page, and we see a write from postgres
648 : // to the same key in the same wal record. If one such write makes it through, we
649 : // index the most recent write, implicitly ignoring the earlier write. We log a warning
650 : // because this case is unexpected, and we would like tests to fail if this happens.
651 0 : warn!("Key {} at {} written twice at same LSN", key, lsn);
652 10181496 : }
653 10181496 : self.estimated_in_mem_size.fetch_add(
654 10181496 : (std::mem::size_of::<CompactKey>()
655 10181496 : + std::mem::size_of::<Lsn>()
656 10181496 : + std::mem::size_of::<IndexEntry>()) as u64,
657 10181496 : AtomicOrdering::Relaxed,
658 10181496 : );
659 : }
660 :
661 9608480 : inner.resource_units.maybe_publish_size(new_size);
662 9608480 :
663 9608480 : Ok(())
664 9608480 : }
665 :
666 9606020 : pub(crate) fn get_opened_at(&self) -> Instant {
667 9606020 : self.opened_at
668 9606020 : }
669 :
670 0 : pub(crate) async fn tick(&self) -> Option<u64> {
671 0 : let mut inner = self.inner.write().await;
672 0 : let size = inner.file.len();
673 0 : inner.resource_units.publish_size(size)
674 0 : }
675 :
676 4 : pub(crate) async fn put_tombstones(&self, _key_ranges: &[(Range<Key>, Lsn)]) -> Result<()> {
677 4 : // TODO: Currently, we just leak the storage for any deleted keys
678 4 : Ok(())
679 4 : }
680 :
681 : /// Records the end_lsn for non-dropped layers.
682 : /// `end_lsn` is exclusive
683 2364 : pub async fn freeze(&self, end_lsn: Lsn) {
684 2364 : assert!(
685 2364 : self.start_lsn < end_lsn,
686 0 : "{} >= {}",
687 : self.start_lsn,
688 : end_lsn
689 : );
690 2364 : self.end_lsn.set(end_lsn).expect("end_lsn set only once");
691 2364 :
692 2364 : self.frozen_local_path_str
693 2364 : .set({
694 2364 : let mut buf = String::new();
695 2364 : inmem_layer_log_display(&mut buf, self.get_timeline_id(), self.start_lsn, end_lsn)
696 2364 : .unwrap();
697 2364 : buf.into()
698 2364 : })
699 2364 : .expect("frozen_local_path_str set only once");
700 :
701 : #[cfg(debug_assertions)]
702 : {
703 2364 : let inner = self.inner.write().await;
704 8512569 : for vec_map in inner.index.values() {
705 8773976 : for (lsn, _) in vec_map.as_slice() {
706 8773976 : assert!(*lsn < end_lsn);
707 : }
708 : }
709 : }
710 2364 : }
711 :
712 : /// Write this frozen in-memory layer to disk. If `key_range` is set, the resulting
713 : /// delta layer will only contain keys in that range; `None` is returned
714 : /// if there are no matching keys.
715 : ///
716 : /// Returns a new delta layer with all the same data as this in-memory layer
717 1936 : pub async fn write_to_disk(
718 1936 : &self,
719 1936 : ctx: &RequestContext,
720 1936 : key_range: Option<Range<Key>>,
721 1936 : l0_flush_global_state: &l0_flush::Inner,
722 1936 : ) -> Result<Option<(PersistentLayerDesc, Utf8PathBuf)>> {
723 : // Grab the lock in read-mode. We hold it over the I/O, but because this
724 : // layer is not writeable anymore, no one should be trying to acquire the
725 : // write lock on it, so we shouldn't block anyone. There's one exception
726 : // though: another thread might have grabbed a reference to this layer
727 : // in `get_layer_for_write` just before the checkpointer called
728 : // `freeze`, and then `write_to_disk` on it. When the thread gets the
729 : // lock, it will see that it's not writeable anymore and retry, but it
730 : // would have to wait until we release it. That race condition is very
731 : // rare though, so we just accept the potential latency hit for now.
732 1936 : let inner = self.inner.read().await;
733 :
734 : use l0_flush::Inner;
735 1936 : let _concurrency_permit = match l0_flush_global_state {
736 1936 : Inner::Direct { semaphore, .. } => Some(semaphore.acquire().await),
737 : };
738 :
739 1936 : let end_lsn = *self.end_lsn.get().unwrap();
740 :
741 1936 : let key_count = if let Some(key_range) = key_range {
742 0 : let key_range = key_range.start.to_compact()..key_range.end.to_compact();
743 0 :
744 0 : inner
745 0 : .index
746 0 : .iter()
747 0 : .filter(|(k, _)| key_range.contains(k))
748 0 : .count()
749 : } else {
750 1936 : inner.index.len()
751 : };
752 1936 : if key_count == 0 {
753 0 : return Ok(None);
754 1936 : }
755 :
756 1936 : let mut delta_layer_writer = DeltaLayerWriter::new(
757 1936 : self.conf,
758 1936 : self.timeline_id,
759 1936 : self.tenant_shard_id,
760 1936 : Key::MIN,
761 1936 : self.start_lsn..end_lsn,
762 1936 : ctx,
763 1936 : )
764 1936 : .await?;
765 :
766 1936 : match l0_flush_global_state {
767 : l0_flush::Inner::Direct { .. } => {
768 1936 : let file_contents = inner.file.load_to_io_buf(ctx).await?;
769 1936 : let file_contents = file_contents.freeze();
770 :
771 8509625 : for (key, vec_map) in inner.index.iter() {
772 : // Write all page versions
773 8771032 : for (lsn, entry) in vec_map
774 8509625 : .as_slice()
775 8509625 : .iter()
776 8771032 : .map(|(lsn, entry)| (lsn, entry.unpack()))
777 : {
778 : let IndexEntryUnpacked {
779 8771032 : pos,
780 8771032 : len,
781 8771032 : will_init,
782 8771032 : } = entry;
783 8771032 : let buf = file_contents.slice(pos as usize..(pos + len) as usize);
784 8771032 : let (_buf, res) = delta_layer_writer
785 8771032 : .put_value_bytes(
786 8771032 : Key::from_compact(*key),
787 8771032 : *lsn,
788 8771032 : buf.slice_len(),
789 8771032 : will_init,
790 8771032 : ctx,
791 8771032 : )
792 8771032 : .await;
793 8771032 : res?;
794 : }
795 : }
796 : }
797 : }
798 :
799 : // MAX is used here because we identify L0 layers by full key range
800 1936 : let (desc, path) = delta_layer_writer.finish(Key::MAX, ctx).await?;
801 :
802 : // Hold the permit until all the IO is done, including the fsync in `delta_layer_writer.finish()``.
803 : //
804 : // If we didn't and our caller drops this future, tokio-epoll-uring would extend the lifetime of
805 : // the `file_contents: Vec<u8>` until the IO is done, but not the permit's lifetime.
806 : // Thus, we'd have more concurrent `Vec<u8>` in existence than the semaphore allows.
807 : //
808 : // We hold across the fsync so that on ext4 mounted with data=ordered, all the kernel page cache pages
809 : // we dirtied when writing to the filesystem have been flushed and marked !dirty.
810 1936 : drop(_concurrency_permit);
811 1936 :
812 1936 : Ok(Some((desc, path)))
813 1936 : }
814 : }
815 :
816 : #[cfg(test)]
817 : mod tests {
818 : use super::*;
819 :
820 : #[test]
821 4 : fn test_index_entry() {
822 : const MAX_SUPPORTED_POS: usize = IndexEntry::MAX_SUPPORTED_POS;
823 : use {IndexEntryNewArgs as Args, IndexEntryUnpacked as Unpacked};
824 :
825 80 : let roundtrip = |args, expect: Unpacked| {
826 80 : let res = IndexEntry::new(args).expect("this tests expects no errors");
827 80 : let IndexEntryUnpacked {
828 80 : will_init,
829 80 : len,
830 80 : pos,
831 80 : } = res.unpack();
832 80 : assert_eq!(will_init, expect.will_init);
833 80 : assert_eq!(len, expect.len);
834 80 : assert_eq!(pos, expect.pos);
835 80 : };
836 :
837 : // basic roundtrip
838 12 : for pos in [0, MAX_SUPPORTED_POS] {
839 24 : for len in [0, MAX_SUPPORTED_BLOB_LEN] {
840 48 : for will_init in [true, false] {
841 32 : let expect = Unpacked {
842 32 : will_init,
843 32 : len: len.into_u64(),
844 32 : pos: pos.into_u64(),
845 32 : };
846 32 : roundtrip(
847 32 : Args {
848 32 : will_init,
849 32 : base_offset: pos.into_u64(),
850 32 : batch_offset: 0,
851 32 : len,
852 32 : },
853 32 : expect,
854 32 : );
855 32 : roundtrip(
856 32 : Args {
857 32 : will_init,
858 32 : base_offset: 0,
859 32 : batch_offset: pos.into_u64(),
860 32 : len,
861 32 : },
862 32 : expect,
863 32 : );
864 32 : }
865 : }
866 : }
867 :
868 : // too-large len
869 4 : let too_large = Args {
870 4 : will_init: false,
871 4 : len: MAX_SUPPORTED_BLOB_LEN + 1,
872 4 : base_offset: 0,
873 4 : batch_offset: 0,
874 4 : };
875 4 : assert!(IndexEntry::new(too_large).is_err());
876 :
877 : // too-large pos
878 : {
879 4 : let too_large = Args {
880 4 : will_init: false,
881 4 : len: 0,
882 4 : base_offset: MAX_SUPPORTED_POS.into_u64() + 1,
883 4 : batch_offset: 0,
884 4 : };
885 4 : assert!(IndexEntry::new(too_large).is_err());
886 4 : let too_large = Args {
887 4 : will_init: false,
888 4 : len: 0,
889 4 : base_offset: 0,
890 4 : batch_offset: MAX_SUPPORTED_POS.into_u64() + 1,
891 4 : };
892 4 : assert!(IndexEntry::new(too_large).is_err());
893 : }
894 :
895 : // too large (base_offset + batch_offset)
896 : {
897 4 : let too_large = Args {
898 4 : will_init: false,
899 4 : len: 0,
900 4 : base_offset: MAX_SUPPORTED_POS.into_u64(),
901 4 : batch_offset: 1,
902 4 : };
903 4 : assert!(IndexEntry::new(too_large).is_err());
904 4 : let too_large = Args {
905 4 : will_init: false,
906 4 : len: 0,
907 4 : base_offset: MAX_SUPPORTED_POS.into_u64() - 1,
908 4 : batch_offset: MAX_SUPPORTED_POS.into_u64() - 1,
909 4 : };
910 4 : assert!(IndexEntry::new(too_large).is_err());
911 : }
912 :
913 : // valid special cases
914 : // - area past the max supported pos that is accessible by len
915 12 : for len in [1, MAX_SUPPORTED_BLOB_LEN] {
916 8 : roundtrip(
917 8 : Args {
918 8 : will_init: false,
919 8 : len,
920 8 : base_offset: MAX_SUPPORTED_POS.into_u64(),
921 8 : batch_offset: 0,
922 8 : },
923 8 : Unpacked {
924 8 : will_init: false,
925 8 : len: len as u64,
926 8 : pos: MAX_SUPPORTED_POS.into_u64(),
927 8 : },
928 8 : );
929 8 : roundtrip(
930 8 : Args {
931 8 : will_init: false,
932 8 : len,
933 8 : base_offset: 0,
934 8 : batch_offset: MAX_SUPPORTED_POS.into_u64(),
935 8 : },
936 8 : Unpacked {
937 8 : will_init: false,
938 8 : len: len as u64,
939 8 : pos: MAX_SUPPORTED_POS.into_u64(),
940 8 : },
941 8 : );
942 8 : }
943 4 : }
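
    // Additional test sketch (not in the original source): exercise the checkpoint-distance
    // validation, which mirrors the `pos` bound enforced by `IndexEntry::new`.
    #[test]
    fn test_validate_checkpoint_distance() {
        assert!(IndexEntry::validate_checkpoint_distance(0).is_ok());
        assert!(
            IndexEntry::validate_checkpoint_distance(IndexEntry::MAX_SUPPORTED_POS as u64).is_ok()
        );
        assert!(
            IndexEntry::validate_checkpoint_distance(IndexEntry::MAX_SUPPORTED_POS as u64 + 1)
                .is_err()
        );
    }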
944 : }
|