Line data Source code
1 : //!
2 : //! This provides an abstraction to store PostgreSQL relations and other files
3 : //! in the key-value store that implements the Repository interface.
4 : //!
5 : //! (TODO: The line between PUT-functions here and walingest.rs is a bit blurry, as
6 : //! walingest.rs handles a few things like implicit relation creation and extension.
7 : //! Clarify that)
8 : //!
9 : use super::tenant::{PageReconstructError, Timeline};
10 : use crate::aux_file;
11 : use crate::context::RequestContext;
12 : use crate::keyspace::{KeySpace, KeySpaceAccum};
13 : use crate::metrics::{
14 : RELSIZE_CACHE_ENTRIES, RELSIZE_CACHE_HITS, RELSIZE_CACHE_MISSES, RELSIZE_CACHE_MISSES_OLD,
15 : };
16 : use crate::span::{
17 : debug_assert_current_span_has_tenant_and_timeline_id,
18 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id,
19 : };
20 : use crate::tenant::storage_layer::IoConcurrency;
21 : use crate::tenant::timeline::GetVectoredError;
22 : use anyhow::{ensure, Context};
23 : use bytes::{Buf, Bytes, BytesMut};
24 : use enum_map::Enum;
25 : use itertools::Itertools;
26 : use pageserver_api::key::Key;
27 : use pageserver_api::key::{
28 : dbdir_key_range, rel_block_to_key, rel_dir_to_key, rel_key_range, rel_size_to_key,
29 : relmap_file_key, repl_origin_key, repl_origin_key_range, slru_block_to_key, slru_dir_to_key,
30 : slru_segment_key_range, slru_segment_size_to_key, twophase_file_key, twophase_key_range,
31 : CompactKey, AUX_FILES_KEY, CHECKPOINT_KEY, CONTROLFILE_KEY, DBDIR_KEY, TWOPHASEDIR_KEY,
32 : };
33 : use pageserver_api::keyspace::SparseKeySpace;
34 : use pageserver_api::record::NeonWalRecord;
35 : use pageserver_api::reltag::{BlockNumber, RelTag, SlruKind};
36 : use pageserver_api::shard::ShardIdentity;
37 : use pageserver_api::value::Value;
38 : use postgres_ffi::relfile_utils::{FSM_FORKNUM, VISIBILITYMAP_FORKNUM};
39 : use postgres_ffi::BLCKSZ;
40 : use postgres_ffi::{Oid, RepOriginId, TimestampTz, TransactionId};
41 : use serde::{Deserialize, Serialize};
42 : use std::collections::{hash_map, BTreeMap, HashMap, HashSet};
43 : use std::ops::ControlFlow;
44 : use std::ops::Range;
45 : use strum::IntoEnumIterator;
46 : use tokio_util::sync::CancellationToken;
47 : use tracing::{debug, trace, warn};
48 : use utils::bin_ser::DeserializeError;
49 : use utils::pausable_failpoint;
50 : use utils::{bin_ser::BeSer, lsn::Lsn};
51 : use wal_decoder::serialized_batch::SerializedValueBatch;
52 :
53 : /// Max delta records appended to the AUX_FILES_KEY (for aux v1). The write path will write a full image once this threshold is reached.
54 : pub const MAX_AUX_FILE_DELTAS: usize = 1024;
55 :
56 : /// Max number of aux-file-related delta layers. The compaction will create a new image layer once this threshold is reached.
57 : pub const MAX_AUX_FILE_V2_DELTAS: usize = 16;
58 :
59 : #[derive(Debug)]
60 : pub enum LsnForTimestamp {
61 : /// Found commits both before and after the given timestamp
62 : Present(Lsn),
63 :
64 : /// Found no commits after the given timestamp; this means
65 : /// that the newest data in the branch is older than the given
66 : /// timestamp.
67 : ///
68 : /// All commits <= LSN happened before the given timestamp
69 : Future(Lsn),
70 :
71 : /// The queried timestamp is older than the PITR horizon we can look back to
72 : ///
73 : /// All commits > LSN happened after the given timestamp,
74 : /// but any commits < LSN might have happened before or after
75 : /// the given timestamp. We don't know because no data before
76 : /// the given lsn is available.
77 : Past(Lsn),
78 :
79 : /// We have found no commit with a timestamp,
80 : /// so we can't return anything meaningful.
81 : ///
82 : /// The associated LSN is the lower bound value we can safely
83 : /// create branches on, but no statement is made about whether it is
84 : /// older or newer than the timestamp.
85 : ///
86 : /// This variant can e.g. be returned right after a
87 : /// cluster import.
88 : NoData(Lsn),
89 : }
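// Illustrative sketch (not part of this file's API): how a caller might act on these
// variants when resolving a user-supplied timestamp to a branch point. `use_lsn` and the
// error handling are hypothetical stand-ins; the real handling lives outside this file.
//
//     match timeline.find_lsn_for_timestamp(ts, &cancel, &ctx).await? {
//         LsnForTimestamp::Present(lsn) => use_lsn(lsn),
//         LsnForTimestamp::Future(lsn) => use_lsn(lsn), // newest data is older than ts
//         LsnForTimestamp::Past(_) => anyhow::bail!("timestamp is older than the PITR horizon"),
//         LsnForTimestamp::NoData(lsn) => use_lsn(lsn), // lower bound only, see above
//     }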
90 :
91 : #[derive(Debug, thiserror::Error)]
92 : pub(crate) enum CalculateLogicalSizeError {
93 : #[error("cancelled")]
94 : Cancelled,
95 :
96 : /// Something went wrong while reading the metadata we use to calculate logical size
97 : /// Note that cancellation variants of `PageReconstructError` are transformed to [`Self::Cancelled`]
98 : /// in the `From` implementation for this variant.
99 : #[error(transparent)]
100 : PageRead(PageReconstructError),
101 :
102 : /// Something went wrong deserializing metadata that we read to calculate logical size
103 : #[error("decode error: {0}")]
104 : Decode(#[from] DeserializeError),
105 : }
106 :
107 : #[derive(Debug, thiserror::Error)]
108 : pub(crate) enum CollectKeySpaceError {
109 : #[error(transparent)]
110 : Decode(#[from] DeserializeError),
111 : #[error(transparent)]
112 : PageRead(PageReconstructError),
113 : #[error("cancelled")]
114 : Cancelled,
115 : }
116 :
117 : impl From<PageReconstructError> for CollectKeySpaceError {
118 0 : fn from(err: PageReconstructError) -> Self {
119 0 : match err {
120 0 : PageReconstructError::Cancelled => Self::Cancelled,
121 0 : err => Self::PageRead(err),
122 : }
123 0 : }
124 : }
125 :
126 : impl From<PageReconstructError> for CalculateLogicalSizeError {
127 0 : fn from(pre: PageReconstructError) -> Self {
128 0 : match pre {
129 0 : PageReconstructError::Cancelled => Self::Cancelled,
130 0 : _ => Self::PageRead(pre),
131 : }
132 0 : }
133 : }
134 :
135 : #[derive(Debug, thiserror::Error)]
136 : pub enum RelationError {
137 : #[error("Relation Already Exists")]
138 : AlreadyExists,
139 : #[error("invalid relnode")]
140 : InvalidRelnode,
141 : #[error(transparent)]
142 : Other(#[from] anyhow::Error),
143 : }
144 :
145 : ///
146 : /// This impl provides all the functionality to store PostgreSQL relations, SLRUs,
147 : /// and other special kinds of files, in a versioned key-value store. The
148 : /// Timeline struct provides the key-value store.
149 : ///
150 : /// This is a separate impl so that we can easily include all these functions in the Timeline
151 : /// implementation; it might be moved into a separate struct later.
152 : impl Timeline {
153 : /// Start ingesting a WAL record, or other atomic modification of
154 : /// the timeline.
155 : ///
156 : /// This provides a transaction-like interface to perform a bunch
157 : /// of modifications atomically.
158 : ///
159 : /// To ingest a WAL record, call begin_modification(lsn) to get a
160 : /// DatadirModification object. Use the functions in the object to
161 : /// modify the repository state, updating all the pages and metadata
162 : /// that the WAL record affects. When you're done, call commit() to
163 : /// commit the changes.
164 : ///
165 : /// The LSN stored in the modification is advanced by `ingest_record` and
166 : /// is used by `commit()` to update `last_record_lsn`.
167 : ///
168 : /// Calling commit() will flush all the changes and reset the state,
169 : /// so the `DatadirModification` struct can be reused to perform the next modification.
170 : ///
171 : /// Note that any pending modifications you make through the
172 : /// modification object won't be visible to calls to the 'get' and list
173 : /// functions of the timeline until you finish! And if you update the
174 : /// same page twice, the last update wins.
175 : ///
176 536816 : pub fn begin_modification(&self, lsn: Lsn) -> DatadirModification
177 536816 : where
178 536816 : Self: Sized,
179 536816 : {
180 536816 : DatadirModification {
181 536816 : tline: self,
182 536816 : pending_lsns: Vec::new(),
183 536816 : pending_metadata_pages: HashMap::new(),
184 536816 : pending_data_batch: None,
185 536816 : pending_deletions: Vec::new(),
186 536816 : pending_nblocks: 0,
187 536816 : pending_directory_entries: Vec::new(),
188 536816 : pending_metadata_bytes: 0,
189 536816 : lsn,
190 536816 : }
191 536816 : }
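    // Illustrative usage sketch, not a prescribed call sequence: ingesting one WAL record
    // means opening a modification at the record's LSN, applying the relevant put-functions,
    // and committing. `commit(ctx)` is assumed to be the flush entry point referred to in the
    // doc comment above; its definition lives elsewhere in this file.
    //
    //     let mut m = timeline.begin_modification(lsn);
    //     m.put_rel_page_image(rel, blknum, page_image)?;
    //     m.commit(ctx).await?;
    //
    // Until commit() completes, the timeline's get/list functions do not see these writes.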
192 :
193 : //------------------------------------------------------------------------------
194 : // Public GET functions
195 : //------------------------------------------------------------------------------
196 :
197 : /// Look up given page version.
198 36768 : pub(crate) async fn get_rel_page_at_lsn(
199 36768 : &self,
200 36768 : tag: RelTag,
201 36768 : blknum: BlockNumber,
202 36768 : version: Version<'_>,
203 36768 : ctx: &RequestContext,
204 36768 : io_concurrency: IoConcurrency,
205 36768 : ) -> Result<Bytes, PageReconstructError> {
206 36768 : match version {
207 36768 : Version::Lsn(effective_lsn) => {
208 36768 : let pages: smallvec::SmallVec<[_; 1]> = smallvec::smallvec![(tag, blknum)];
209 36768 : let res = self
210 36768 : .get_rel_page_at_lsn_batched(
211 36768 : pages.iter().map(|(tag, blknum)| (tag, blknum)),
212 36768 : effective_lsn,
213 36768 : io_concurrency.clone(),
214 36768 : ctx,
215 36768 : )
216 36768 : .await;
217 36768 : assert_eq!(res.len(), 1);
218 36768 : res.into_iter().next().unwrap()
219 : }
220 0 : Version::Modified(modification) => {
221 0 : if tag.relnode == 0 {
222 0 : return Err(PageReconstructError::Other(
223 0 : RelationError::InvalidRelnode.into(),
224 0 : ));
225 0 : }
226 :
227 0 : let nblocks = self.get_rel_size(tag, version, ctx).await?;
228 0 : if blknum >= nblocks {
229 0 : debug!(
230 0 : "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
231 0 : tag,
232 0 : blknum,
233 0 : version.get_lsn(),
234 : nblocks
235 : );
236 0 : return Ok(ZERO_PAGE.clone());
237 0 : }
238 0 :
239 0 : let key = rel_block_to_key(tag, blknum);
240 0 : modification.get(key, ctx).await
241 : }
242 : }
243 36768 : }
244 :
245 : /// Like [`Self::get_rel_page_at_lsn`], but returns a batch of pages.
246 : ///
247 : /// The ordering of the returned vec corresponds to the ordering of `pages`.
248 36768 : pub(crate) async fn get_rel_page_at_lsn_batched(
249 36768 : &self,
250 36768 : pages: impl ExactSizeIterator<Item = (&RelTag, &BlockNumber)>,
251 36768 : effective_lsn: Lsn,
252 36768 : io_concurrency: IoConcurrency,
253 36768 : ctx: &RequestContext,
254 36768 : ) -> Vec<Result<Bytes, PageReconstructError>> {
255 36768 : debug_assert_current_span_has_tenant_and_timeline_id();
256 36768 :
257 36768 : let mut slots_filled = 0;
258 36768 : let page_count = pages.len();
259 36768 :
260 36768 : // Would be nice to use smallvec here but it doesn't provide the spare_capacity_mut() API.
261 36768 : let mut result = Vec::with_capacity(pages.len());
262 36768 : let result_slots = result.spare_capacity_mut();
263 36768 :
264 36768 : let mut keys_slots: BTreeMap<Key, smallvec::SmallVec<[usize; 1]>> = BTreeMap::default();
265 36768 : for (response_slot_idx, (tag, blknum)) in pages.enumerate() {
266 36768 : if tag.relnode == 0 {
267 0 : result_slots[response_slot_idx].write(Err(PageReconstructError::Other(
268 0 : RelationError::InvalidRelnode.into(),
269 0 : )));
270 0 :
271 0 : slots_filled += 1;
272 0 : continue;
273 36768 : }
274 :
275 36768 : let nblocks = match self
276 36768 : .get_rel_size(*tag, Version::Lsn(effective_lsn), ctx)
277 36768 : .await
278 : {
279 36768 : Ok(nblocks) => nblocks,
280 0 : Err(err) => {
281 0 : result_slots[response_slot_idx].write(Err(err));
282 0 : slots_filled += 1;
283 0 : continue;
284 : }
285 : };
286 :
287 36768 : if *blknum >= nblocks {
288 0 : debug!(
289 0 : "read beyond EOF at {} blk {} at {}, size is {}: returning all-zeros page",
290 : tag, blknum, effective_lsn, nblocks
291 : );
292 0 : result_slots[response_slot_idx].write(Ok(ZERO_PAGE.clone()));
293 0 : slots_filled += 1;
294 0 : continue;
295 36768 : }
296 36768 :
297 36768 : let key = rel_block_to_key(*tag, *blknum);
298 36768 :
299 36768 : let key_slots = keys_slots.entry(key).or_default();
300 36768 : key_slots.push(response_slot_idx);
301 : }
302 :
303 36768 : let keyspace = {
304 : // add_key requires monotonicity
305 36768 : let mut acc = KeySpaceAccum::new();
306 36768 : for key in keys_slots
307 36768 : .keys()
308 36768 : // in fact it requires strong monotonicity
309 36768 : .dedup()
310 36768 : {
311 36768 : acc.add_key(*key);
312 36768 : }
313 36768 : acc.to_keyspace()
314 36768 : };
315 36768 :
316 36768 : match self
317 36768 : .get_vectored(keyspace, effective_lsn, io_concurrency, ctx)
318 36768 : .await
319 : {
320 36768 : Ok(results) => {
321 73536 : for (key, res) in results {
322 36768 : let mut key_slots = keys_slots.remove(&key).unwrap().into_iter();
323 36768 : let first_slot = key_slots.next().unwrap();
324 :
325 36768 : for slot in key_slots {
326 0 : let clone = match &res {
327 0 : Ok(buf) => Ok(buf.clone()),
328 0 : Err(err) => Err(match err {
329 : PageReconstructError::Cancelled => {
330 0 : PageReconstructError::Cancelled
331 : }
332 :
333 0 : x @ PageReconstructError::Other(_) |
334 0 : x @ PageReconstructError::AncestorLsnTimeout(_) |
335 0 : x @ PageReconstructError::WalRedo(_) |
336 0 : x @ PageReconstructError::MissingKey(_) => {
337 0 : PageReconstructError::Other(anyhow::anyhow!("there was more than one request for this key in the batch, error logged once: {x:?}"))
338 : },
339 : }),
340 : };
341 :
342 0 : result_slots[slot].write(clone);
343 0 : slots_filled += 1;
344 : }
345 :
346 36768 : result_slots[first_slot].write(res);
347 36768 : slots_filled += 1;
348 : }
349 : }
350 0 : Err(err) => {
351 : // this cannot really happen because get_vectored only errors globally on invalid LSN or too large batch size
352 : // (We enforce the max batch size outside of this function, in the code that constructs the batch request.)
353 0 : for slot in keys_slots.values().flatten() {
354 : // this whole `match` is a lot like `From<GetVectoredError> for PageReconstructError`
355 : // but without taking ownership of the GetVectoredError
356 0 : let err = match &err {
357 : GetVectoredError::Cancelled => {
358 0 : Err(PageReconstructError::Cancelled)
359 : }
360 : // TODO: restructure get_vectored API to make this error per-key
361 0 : GetVectoredError::MissingKey(err) => {
362 0 : Err(PageReconstructError::Other(anyhow::anyhow!("whole vectored get request failed because one or more of the requested keys were missing: {err:?}")))
363 : }
364 : // TODO: restructure get_vectored API to make this error per-key
365 0 : GetVectoredError::GetReadyAncestorError(err) => {
366 0 : Err(PageReconstructError::Other(anyhow::anyhow!("whole vectored get request failed because one or more key required ancestor that wasn't ready: {err:?}")))
367 : }
368 : // TODO: restructure get_vectored API to make this error per-key
369 0 : GetVectoredError::Other(err) => {
370 0 : Err(PageReconstructError::Other(
371 0 : anyhow::anyhow!("whole vectored get request failed: {err:?}"),
372 0 : ))
373 : }
374 : // TODO: we can prevent this error class by moving this check into the type system
375 0 : GetVectoredError::InvalidLsn(e) => {
376 0 : Err(anyhow::anyhow!("invalid LSN: {e:?}").into())
377 : }
378 : // NB: this should never happen in practice because we limit MAX_GET_VECTORED_KEYS
379 : // TODO: we can prevent this error class by moving this check into the type system
380 0 : GetVectoredError::Oversized(err) => {
381 0 : Err(anyhow::anyhow!(
382 0 : "batching oversized: {err:?}"
383 0 : )
384 0 : .into())
385 : }
386 : };
387 :
388 0 : result_slots[*slot].write(err);
389 : }
390 :
391 0 : slots_filled += keys_slots.values().map(|slots| slots.len()).sum::<usize>();
392 0 : }
393 : };
394 :
395 36768 : assert_eq!(slots_filled, page_count);
396 : // SAFETY:
397 : // 1. `result` and any of its uninit members are not read from until this point
398 : // 2. The length below is tracked at run-time and matches the number of requested pages.
399 36768 : unsafe {
400 36768 : result.set_len(page_count);
401 36768 : }
402 36768 :
403 36768 : result
404 36768 : }
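    // The slot-filling above relies on writing into a pre-sized Vec via `spare_capacity_mut()`
    // and only calling `set_len()` once every requested slot has been written (checked by the
    // `assert_eq!(slots_filled, page_count)` above). A minimal standalone sketch of the same
    // pattern, with plain integers standing in for page results:
    //
    //     let mut out: Vec<u32> = Vec::with_capacity(3);
    //     for (i, slot) in out.spare_capacity_mut().iter_mut().enumerate().take(3) {
    //         slot.write(i as u32 * 10);
    //     }
    //     // SAFETY: all 3 slots were initialized above.
    //     unsafe { out.set_len(3) };
    //     assert_eq!(out, vec![0, 10, 20]);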
405 :
406 : /// Get size of a database in blocks. This is only accurate on shard 0. It will undercount on
407 : /// other shards, by only accounting for relations the shard has pages for, and only accounting
408 : /// for pages up to the highest page number it has stored.
409 0 : pub(crate) async fn get_db_size(
410 0 : &self,
411 0 : spcnode: Oid,
412 0 : dbnode: Oid,
413 0 : version: Version<'_>,
414 0 : ctx: &RequestContext,
415 0 : ) -> Result<usize, PageReconstructError> {
416 0 : let mut total_blocks = 0;
417 :
418 0 : let rels = self.list_rels(spcnode, dbnode, version, ctx).await?;
419 :
420 0 : for rel in rels {
421 0 : let n_blocks = self.get_rel_size(rel, version, ctx).await?;
422 0 : total_blocks += n_blocks as usize;
423 : }
424 0 : Ok(total_blocks)
425 0 : }
426 :
427 : /// Get size of a relation file. The relation must exist, otherwise an error is returned.
428 : ///
429 : /// This is only accurate on shard 0. On other shards, it will return the size up to the highest
430 : /// page number stored in the shard.
431 48868 : pub(crate) async fn get_rel_size(
432 48868 : &self,
433 48868 : tag: RelTag,
434 48868 : version: Version<'_>,
435 48868 : ctx: &RequestContext,
436 48868 : ) -> Result<BlockNumber, PageReconstructError> {
437 48868 : if tag.relnode == 0 {
438 0 : return Err(PageReconstructError::Other(
439 0 : RelationError::InvalidRelnode.into(),
440 0 : ));
441 48868 : }
442 :
443 48868 : if let Some(nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
444 38588 : return Ok(nblocks);
445 10280 : }
446 10280 :
447 10280 : if (tag.forknum == FSM_FORKNUM || tag.forknum == VISIBILITYMAP_FORKNUM)
448 0 : && !self.get_rel_exists(tag, version, ctx).await?
449 : {
450 : // FIXME: Postgres sometimes calls smgrcreate() to create
451 : // FSM, and smgrnblocks() on it immediately afterwards,
452 : // without extending it. Tolerate that by claiming that
453 : // any non-existent FSM fork has size 0.
454 0 : return Ok(0);
455 10280 : }
456 10280 :
457 10280 : let key = rel_size_to_key(tag);
458 10280 : let mut buf = version.get(self, key, ctx).await?;
459 10272 : let nblocks = buf.get_u32_le();
460 10272 :
461 10272 : self.update_cached_rel_size(tag, version.get_lsn(), nblocks);
462 10272 :
463 10272 : Ok(nblocks)
464 48868 : }
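    // The relation size is materialized under its own key: `rel_size_to_key(tag)` maps to a
    // 4-byte little-endian block count, which is why the read above is a single `get_u32_le()`.
    // Encoding sketch of what the corresponding put-functions store:
    //
    //     let nblocks: u32 = 42;
    //     let stored = nblocks.to_le_bytes();              // value written for rel_size_to_key(tag)
    //     assert_eq!(u32::from_le_bytes(stored), 42);      // what get_rel_size decodes back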
465 :
466 : /// Does the relation exist?
467 : ///
468 : /// Only shard 0 has a full view of the relations. Other shards only know about relations that
469 : /// the shard stores pages for.
470 12100 : pub(crate) async fn get_rel_exists(
471 12100 : &self,
472 12100 : tag: RelTag,
473 12100 : version: Version<'_>,
474 12100 : ctx: &RequestContext,
475 12100 : ) -> Result<bool, PageReconstructError> {
476 12100 : if tag.relnode == 0 {
477 0 : return Err(PageReconstructError::Other(
478 0 : RelationError::InvalidRelnode.into(),
479 0 : ));
480 12100 : }
481 :
482 : // first try to lookup relation in cache
483 12100 : if let Some(_nblocks) = self.get_cached_rel_size(&tag, version.get_lsn()) {
484 12064 : return Ok(true);
485 36 : }
486 : // then check if the database was already initialized.
487 : // get_rel_exists can be called before dbdir is created.
488 36 : let buf = version.get(self, DBDIR_KEY, ctx).await?;
489 36 : let dbdirs = DbDirectory::des(&buf)?.dbdirs;
490 36 : if !dbdirs.contains_key(&(tag.spcnode, tag.dbnode)) {
491 0 : return Ok(false);
492 36 : }
493 36 : // fetch directory listing
494 36 : let key = rel_dir_to_key(tag.spcnode, tag.dbnode);
495 36 : let buf = version.get(self, key, ctx).await?;
496 :
497 36 : let dir = RelDirectory::des(&buf)?;
498 36 : Ok(dir.rels.contains(&(tag.relnode, tag.forknum)))
499 12100 : }
500 :
501 : /// Get a list of all existing relations in given tablespace and database.
502 : ///
503 : /// Only shard 0 has a full view of the relations. Other shards only know about relations that
504 : /// the shard stores pages for.
505 : ///
506 : /// # Cancel-Safety
507 : ///
508 : /// This method is cancellation-safe.
509 0 : pub(crate) async fn list_rels(
510 0 : &self,
511 0 : spcnode: Oid,
512 0 : dbnode: Oid,
513 0 : version: Version<'_>,
514 0 : ctx: &RequestContext,
515 0 : ) -> Result<HashSet<RelTag>, PageReconstructError> {
516 0 : // fetch directory listing
517 0 : let key = rel_dir_to_key(spcnode, dbnode);
518 0 : let buf = version.get(self, key, ctx).await?;
519 :
520 0 : let dir = RelDirectory::des(&buf)?;
521 0 : let rels: HashSet<RelTag> =
522 0 : HashSet::from_iter(dir.rels.iter().map(|(relnode, forknum)| RelTag {
523 0 : spcnode,
524 0 : dbnode,
525 0 : relnode: *relnode,
526 0 : forknum: *forknum,
527 0 : }));
528 0 :
529 0 : Ok(rels)
530 0 : }
531 :
532 : /// Get the whole SLRU segment
533 0 : pub(crate) async fn get_slru_segment(
534 0 : &self,
535 0 : kind: SlruKind,
536 0 : segno: u32,
537 0 : lsn: Lsn,
538 0 : ctx: &RequestContext,
539 0 : ) -> Result<Bytes, PageReconstructError> {
540 0 : assert!(self.tenant_shard_id.is_shard_zero());
541 0 : let n_blocks = self
542 0 : .get_slru_segment_size(kind, segno, Version::Lsn(lsn), ctx)
543 0 : .await?;
544 0 : let mut segment = BytesMut::with_capacity(n_blocks as usize * BLCKSZ as usize);
545 0 : for blkno in 0..n_blocks {
546 0 : let block = self
547 0 : .get_slru_page_at_lsn(kind, segno, blkno, lsn, ctx)
548 0 : .await?;
549 0 : segment.extend_from_slice(&block[..BLCKSZ as usize]);
550 : }
551 0 : Ok(segment.freeze())
552 0 : }
553 :
554 : /// Look up given SLRU page version.
555 0 : pub(crate) async fn get_slru_page_at_lsn(
556 0 : &self,
557 0 : kind: SlruKind,
558 0 : segno: u32,
559 0 : blknum: BlockNumber,
560 0 : lsn: Lsn,
561 0 : ctx: &RequestContext,
562 0 : ) -> Result<Bytes, PageReconstructError> {
563 0 : assert!(self.tenant_shard_id.is_shard_zero());
564 0 : let key = slru_block_to_key(kind, segno, blknum);
565 0 : self.get(key, lsn, ctx).await
566 0 : }
567 :
568 : /// Get size of an SLRU segment
569 0 : pub(crate) async fn get_slru_segment_size(
570 0 : &self,
571 0 : kind: SlruKind,
572 0 : segno: u32,
573 0 : version: Version<'_>,
574 0 : ctx: &RequestContext,
575 0 : ) -> Result<BlockNumber, PageReconstructError> {
576 0 : assert!(self.tenant_shard_id.is_shard_zero());
577 0 : let key = slru_segment_size_to_key(kind, segno);
578 0 : let mut buf = version.get(self, key, ctx).await?;
579 0 : Ok(buf.get_u32_le())
580 0 : }
581 :
582 : /// Does the SLRU segment exist?
583 0 : pub(crate) async fn get_slru_segment_exists(
584 0 : &self,
585 0 : kind: SlruKind,
586 0 : segno: u32,
587 0 : version: Version<'_>,
588 0 : ctx: &RequestContext,
589 0 : ) -> Result<bool, PageReconstructError> {
590 0 : assert!(self.tenant_shard_id.is_shard_zero());
591 : // fetch directory listing
592 0 : let key = slru_dir_to_key(kind);
593 0 : let buf = version.get(self, key, ctx).await?;
594 :
595 0 : let dir = SlruSegmentDirectory::des(&buf)?;
596 0 : Ok(dir.segments.contains(&segno))
597 0 : }
598 :
599 : /// Locate the LSN such that all transactions that committed before
600 : /// 'search_timestamp' are visible, but nothing newer is.
601 : ///
602 : /// This is not exact. Commit timestamps are not guaranteed to be ordered,
603 : /// so it's not well defined which LSN you get if there were multiple commits
604 : /// "in flight" at that point in time.
605 : ///
606 0 : pub(crate) async fn find_lsn_for_timestamp(
607 0 : &self,
608 0 : search_timestamp: TimestampTz,
609 0 : cancel: &CancellationToken,
610 0 : ctx: &RequestContext,
611 0 : ) -> Result<LsnForTimestamp, PageReconstructError> {
612 0 : pausable_failpoint!("find-lsn-for-timestamp-pausable");
613 :
614 0 : let gc_cutoff_lsn_guard = self.get_latest_gc_cutoff_lsn();
615 0 : // We use this method to figure out the branching LSN for the new branch, but the
616 0 : // GC cutoff could be before the branching point and we cannot create a new branch
617 0 : // with LSN < `ancestor_lsn`. Thus, pick the maximum of these two to be
618 0 : // on the safe side.
619 0 : let min_lsn = std::cmp::max(*gc_cutoff_lsn_guard, self.get_ancestor_lsn());
620 0 : let max_lsn = self.get_last_record_lsn();
621 0 :
622 0 : // LSNs are always 8-byte aligned. low/mid/high represent the
623 0 : // LSN divided by 8.
624 0 : let mut low = min_lsn.0 / 8;
625 0 : let mut high = max_lsn.0 / 8 + 1;
626 0 :
627 0 : let mut found_smaller = false;
628 0 : let mut found_larger = false;
629 :
630 0 : while low < high {
631 0 : if cancel.is_cancelled() {
632 0 : return Err(PageReconstructError::Cancelled);
633 0 : }
634 0 : // cannot overflow, high and low are both smaller than u64::MAX / 2
635 0 : let mid = (high + low) / 2;
636 :
637 0 : let cmp = match self
638 0 : .is_latest_commit_timestamp_ge_than(
639 0 : search_timestamp,
640 0 : Lsn(mid * 8),
641 0 : &mut found_smaller,
642 0 : &mut found_larger,
643 0 : ctx,
644 0 : )
645 0 : .await
646 : {
647 0 : Ok(res) => res,
648 0 : Err(PageReconstructError::MissingKey(e)) => {
649 0 : warn!("Missing key while find_lsn_for_timestamp. Either we might have already garbage-collected that data or the key is really missing. Last error: {:#}", e);
650 : // Return as if we found no commits before the timestamp; the error was logged above.
651 0 : return Ok(LsnForTimestamp::Past(min_lsn));
652 : }
653 0 : Err(e) => return Err(e),
654 : };
655 :
656 0 : if cmp {
657 0 : high = mid;
658 0 : } else {
659 0 : low = mid + 1;
660 0 : }
661 : }
662 :
663 : // If `found_smaller == true`, `low = t + 1` where `t` is the target LSN,
664 : // so the LSN of the last commit record before or at `search_timestamp`.
665 : // Remove one from `low` to get `t`.
666 : //
667 : // FIXME: it would be better to get the LSN of the previous commit.
668 : // Otherwise, if you restore to the returned LSN, the database will
669 : // include physical changes from later commits that will be marked
670 : // as aborted, and will need to be vacuumed away.
671 0 : let commit_lsn = Lsn((low - 1) * 8);
672 0 : match (found_smaller, found_larger) {
673 : (false, false) => {
674 : // This can happen if no commit records have been processed yet, e.g.
675 : // just after importing a cluster.
676 0 : Ok(LsnForTimestamp::NoData(min_lsn))
677 : }
678 : (false, true) => {
679 : // Didn't find any commit timestamps smaller than the request
680 0 : Ok(LsnForTimestamp::Past(min_lsn))
681 : }
682 0 : (true, _) if commit_lsn < min_lsn => {
683 0 : // The search above set found_smaller to true, but `low` was never advanced.
684 0 : // Then `low` still corresponds to min_lsn, and the subtraction above produced a value
685 0 : // below min_lsn, which we must never return.
686 0 : Ok(LsnForTimestamp::Past(min_lsn))
687 : }
688 : (true, false) => {
689 : // Only found commits with timestamps smaller than the request.
690 : // It's still a valid case for branch creation, return it.
691 : // And `update_gc_info()` ignores the LSN in the `LsnForTimestamp::Future`
692 : // case anyway.
693 0 : Ok(LsnForTimestamp::Future(commit_lsn))
694 : }
695 0 : (true, true) => Ok(LsnForTimestamp::Present(commit_lsn)),
696 : }
697 0 : }
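    // The loop above is a lower-bound binary search over LSN/8 positions with the predicate
    // "the newest commit visible at this LSN is at or after search_timestamp". A self-contained
    // sketch of the same invariant, with a plain closure standing in for
    // is_latest_commit_timestamp_ge_than:
    //
    //     fn lower_bound(mut low: u64, mut high: u64, pred: impl Fn(u64) -> bool) -> u64 {
    //         while low < high {
    //             let mid = (high + low) / 2;
    //             if pred(mid) { high = mid } else { low = mid + 1 }
    //         }
    //         low // first position where pred holds; (low - 1) * 8 is the returned commit_lsn
    //     }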
698 :
699 : /// Subroutine of find_lsn_for_timestamp(). Returns true if there are any
700 : /// commits that committed after 'search_timestamp', at LSN 'probe_lsn'.
701 : ///
702 : /// Additionally, sets 'found_smaller'/'found_larger' if it encounters any commits
703 : /// with a smaller/larger timestamp.
704 : ///
705 0 : pub(crate) async fn is_latest_commit_timestamp_ge_than(
706 0 : &self,
707 0 : search_timestamp: TimestampTz,
708 0 : probe_lsn: Lsn,
709 0 : found_smaller: &mut bool,
710 0 : found_larger: &mut bool,
711 0 : ctx: &RequestContext,
712 0 : ) -> Result<bool, PageReconstructError> {
713 0 : self.map_all_timestamps(probe_lsn, ctx, |timestamp| {
714 0 : if timestamp >= search_timestamp {
715 0 : *found_larger = true;
716 0 : return ControlFlow::Break(true);
717 0 : } else {
718 0 : *found_smaller = true;
719 0 : }
720 0 : ControlFlow::Continue(())
721 0 : })
722 0 : .await
723 0 : }
724 :
725 : /// Obtain the possible timestamp range for the given lsn.
726 : ///
727 : /// If the lsn has no timestamps, returns None. returns `(min, max, median)` if it has timestamps.
728 0 : pub(crate) async fn get_timestamp_for_lsn(
729 0 : &self,
730 0 : probe_lsn: Lsn,
731 0 : ctx: &RequestContext,
732 0 : ) -> Result<Option<TimestampTz>, PageReconstructError> {
733 0 : let mut max: Option<TimestampTz> = None;
734 0 : self.map_all_timestamps::<()>(probe_lsn, ctx, |timestamp| {
735 0 : if let Some(max_prev) = max {
736 0 : max = Some(max_prev.max(timestamp));
737 0 : } else {
738 0 : max = Some(timestamp);
739 0 : }
740 0 : ControlFlow::Continue(())
741 0 : })
742 0 : .await?;
743 :
744 0 : Ok(max)
745 0 : }
746 :
747 : /// Runs the given function on all the timestamps for a given lsn
748 : ///
749 : /// The return value is either given by the closure, or set to the `Default`
750 : /// impl's output.
751 0 : async fn map_all_timestamps<T: Default>(
752 0 : &self,
753 0 : probe_lsn: Lsn,
754 0 : ctx: &RequestContext,
755 0 : mut f: impl FnMut(TimestampTz) -> ControlFlow<T>,
756 0 : ) -> Result<T, PageReconstructError> {
757 0 : for segno in self
758 0 : .list_slru_segments(SlruKind::Clog, Version::Lsn(probe_lsn), ctx)
759 0 : .await?
760 : {
761 0 : let nblocks = self
762 0 : .get_slru_segment_size(SlruKind::Clog, segno, Version::Lsn(probe_lsn), ctx)
763 0 : .await?;
764 0 : for blknum in (0..nblocks).rev() {
765 0 : let clog_page = self
766 0 : .get_slru_page_at_lsn(SlruKind::Clog, segno, blknum, probe_lsn, ctx)
767 0 : .await?;
768 :
769 0 : if clog_page.len() == BLCKSZ as usize + 8 {
770 0 : let mut timestamp_bytes = [0u8; 8];
771 0 : timestamp_bytes.copy_from_slice(&clog_page[BLCKSZ as usize..]);
772 0 : let timestamp = TimestampTz::from_be_bytes(timestamp_bytes);
773 0 :
774 0 : match f(timestamp) {
775 0 : ControlFlow::Break(b) => return Ok(b),
776 0 : ControlFlow::Continue(()) => (),
777 : }
778 0 : }
779 : }
780 : }
781 0 : Ok(Default::default())
782 0 : }
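    // Clog pages stored by the pageserver may carry an 8-byte big-endian commit timestamp
    // appended after the BLCKSZ page image; that trailer is what the slicing above decodes.
    // Standalone decode sketch, treating the timestamp as a raw 64-bit value as
    // TimestampTz::from_be_bytes does above:
    //
    //     fn trailing_timestamp(clog_page: &[u8], blcksz: usize) -> Option<i64> {
    //         if clog_page.len() == blcksz + 8 {
    //             let mut ts = [0u8; 8];
    //             ts.copy_from_slice(&clog_page[blcksz..]);
    //             Some(i64::from_be_bytes(ts))
    //         } else {
    //             None // no timestamp trailer on this page
    //         }
    //     }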
783 :
784 0 : pub(crate) async fn get_slru_keyspace(
785 0 : &self,
786 0 : version: Version<'_>,
787 0 : ctx: &RequestContext,
788 0 : ) -> Result<KeySpace, PageReconstructError> {
789 0 : let mut accum = KeySpaceAccum::new();
790 :
791 0 : for kind in SlruKind::iter() {
792 0 : let mut segments: Vec<u32> = self
793 0 : .list_slru_segments(kind, version, ctx)
794 0 : .await?
795 0 : .into_iter()
796 0 : .collect();
797 0 : segments.sort_unstable();
798 :
799 0 : for seg in segments {
800 0 : let block_count = self.get_slru_segment_size(kind, seg, version, ctx).await?;
801 :
802 0 : accum.add_range(
803 0 : slru_block_to_key(kind, seg, 0)..slru_block_to_key(kind, seg, block_count),
804 0 : );
805 : }
806 : }
807 :
808 0 : Ok(accum.to_keyspace())
809 0 : }
810 :
811 : /// Get a list of SLRU segments
812 0 : pub(crate) async fn list_slru_segments(
813 0 : &self,
814 0 : kind: SlruKind,
815 0 : version: Version<'_>,
816 0 : ctx: &RequestContext,
817 0 : ) -> Result<HashSet<u32>, PageReconstructError> {
818 0 : // fetch directory entry
819 0 : let key = slru_dir_to_key(kind);
820 :
821 0 : let buf = version.get(self, key, ctx).await?;
822 0 : Ok(SlruSegmentDirectory::des(&buf)?.segments)
823 0 : }
824 :
825 0 : pub(crate) async fn get_relmap_file(
826 0 : &self,
827 0 : spcnode: Oid,
828 0 : dbnode: Oid,
829 0 : version: Version<'_>,
830 0 : ctx: &RequestContext,
831 0 : ) -> Result<Bytes, PageReconstructError> {
832 0 : let key = relmap_file_key(spcnode, dbnode);
833 :
834 0 : let buf = version.get(self, key, ctx).await?;
835 0 : Ok(buf)
836 0 : }
837 :
838 640 : pub(crate) async fn list_dbdirs(
839 640 : &self,
840 640 : lsn: Lsn,
841 640 : ctx: &RequestContext,
842 640 : ) -> Result<HashMap<(Oid, Oid), bool>, PageReconstructError> {
843 : // fetch directory entry
844 640 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
845 :
846 640 : Ok(DbDirectory::des(&buf)?.dbdirs)
847 640 : }
848 :
849 0 : pub(crate) async fn get_twophase_file(
850 0 : &self,
851 0 : xid: u64,
852 0 : lsn: Lsn,
853 0 : ctx: &RequestContext,
854 0 : ) -> Result<Bytes, PageReconstructError> {
855 0 : let key = twophase_file_key(xid);
856 0 : let buf = self.get(key, lsn, ctx).await?;
857 0 : Ok(buf)
858 0 : }
859 :
860 644 : pub(crate) async fn list_twophase_files(
861 644 : &self,
862 644 : lsn: Lsn,
863 644 : ctx: &RequestContext,
864 644 : ) -> Result<HashSet<u64>, PageReconstructError> {
865 : // fetch directory entry
866 644 : let buf = self.get(TWOPHASEDIR_KEY, lsn, ctx).await?;
867 :
868 644 : if self.pg_version >= 17 {
869 0 : Ok(TwoPhaseDirectoryV17::des(&buf)?.xids)
870 : } else {
871 644 : Ok(TwoPhaseDirectory::des(&buf)?
872 : .xids
873 644 : .iter()
874 644 : .map(|x| u64::from(*x))
875 644 : .collect())
876 : }
877 644 : }
878 :
879 0 : pub(crate) async fn get_control_file(
880 0 : &self,
881 0 : lsn: Lsn,
882 0 : ctx: &RequestContext,
883 0 : ) -> Result<Bytes, PageReconstructError> {
884 0 : self.get(CONTROLFILE_KEY, lsn, ctx).await
885 0 : }
886 :
887 24 : pub(crate) async fn get_checkpoint(
888 24 : &self,
889 24 : lsn: Lsn,
890 24 : ctx: &RequestContext,
891 24 : ) -> Result<Bytes, PageReconstructError> {
892 24 : self.get(CHECKPOINT_KEY, lsn, ctx).await
893 24 : }
894 :
895 24 : async fn list_aux_files_v2(
896 24 : &self,
897 24 : lsn: Lsn,
898 24 : ctx: &RequestContext,
899 24 : io_concurrency: IoConcurrency,
900 24 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
901 24 : let kv = self
902 24 : .scan(
903 24 : KeySpace::single(Key::metadata_aux_key_range()),
904 24 : lsn,
905 24 : ctx,
906 24 : io_concurrency,
907 24 : )
908 24 : .await?;
909 24 : let mut result = HashMap::new();
910 24 : let mut sz = 0;
911 60 : for (_, v) in kv {
912 36 : let v = v?;
913 36 : let v = aux_file::decode_file_value_bytes(&v)
914 36 : .context("value decode")
915 36 : .map_err(PageReconstructError::Other)?;
916 68 : for (fname, content) in v {
917 32 : sz += fname.len();
918 32 : sz += content.len();
919 32 : result.insert(fname, content);
920 32 : }
921 : }
922 24 : self.aux_file_size_estimator.on_initial(sz);
923 24 : Ok(result)
924 24 : }
925 :
926 0 : pub(crate) async fn trigger_aux_file_size_computation(
927 0 : &self,
928 0 : lsn: Lsn,
929 0 : ctx: &RequestContext,
930 0 : io_concurrency: IoConcurrency,
931 0 : ) -> Result<(), PageReconstructError> {
932 0 : self.list_aux_files_v2(lsn, ctx, io_concurrency).await?;
933 0 : Ok(())
934 0 : }
935 :
936 24 : pub(crate) async fn list_aux_files(
937 24 : &self,
938 24 : lsn: Lsn,
939 24 : ctx: &RequestContext,
940 24 : io_concurrency: IoConcurrency,
941 24 : ) -> Result<HashMap<String, Bytes>, PageReconstructError> {
942 24 : self.list_aux_files_v2(lsn, ctx, io_concurrency).await
943 24 : }
944 :
945 0 : pub(crate) async fn get_replorigins(
946 0 : &self,
947 0 : lsn: Lsn,
948 0 : ctx: &RequestContext,
949 0 : io_concurrency: IoConcurrency,
950 0 : ) -> Result<HashMap<RepOriginId, Lsn>, PageReconstructError> {
951 0 : let kv = self
952 0 : .scan(
953 0 : KeySpace::single(repl_origin_key_range()),
954 0 : lsn,
955 0 : ctx,
956 0 : io_concurrency,
957 0 : )
958 0 : .await?;
959 0 : let mut result = HashMap::new();
960 0 : for (k, v) in kv {
961 0 : let v = v?;
962 0 : let origin_id = k.field6 as RepOriginId;
963 0 : let origin_lsn = Lsn::des(&v).unwrap();
964 0 : if origin_lsn != Lsn::INVALID {
965 0 : result.insert(origin_id, origin_lsn);
966 0 : }
967 : }
968 0 : Ok(result)
969 0 : }
970 :
971 : /// Does the same as get_current_logical_size but counted on demand.
972 : /// Used to initialize the logical size tracking on startup.
973 : ///
974 : /// Only relation blocks are counted currently. That excludes metadata,
975 : /// SLRUs, twophase files etc.
976 : ///
977 : /// # Cancel-Safety
978 : ///
979 : /// This method is cancellation-safe.
980 0 : pub(crate) async fn get_current_logical_size_non_incremental(
981 0 : &self,
982 0 : lsn: Lsn,
983 0 : ctx: &RequestContext,
984 0 : ) -> Result<u64, CalculateLogicalSizeError> {
985 0 : debug_assert_current_span_has_tenant_and_timeline_id_no_shard_id();
986 :
987 : // Fetch list of database dirs and iterate them
988 0 : let buf = self.get(DBDIR_KEY, lsn, ctx).await?;
989 0 : let dbdir = DbDirectory::des(&buf)?;
990 :
991 0 : let mut total_size: u64 = 0;
992 0 : for (spcnode, dbnode) in dbdir.dbdirs.keys() {
993 0 : for rel in self
994 0 : .list_rels(*spcnode, *dbnode, Version::Lsn(lsn), ctx)
995 0 : .await?
996 : {
997 0 : if self.cancel.is_cancelled() {
998 0 : return Err(CalculateLogicalSizeError::Cancelled);
999 0 : }
1000 0 : let relsize_key = rel_size_to_key(rel);
1001 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
1002 0 : let relsize = buf.get_u32_le();
1003 0 :
1004 0 : total_size += relsize as u64;
1005 : }
1006 : }
1007 0 : Ok(total_size * BLCKSZ as u64)
1008 0 : }
1009 :
1010 : /// Get a KeySpace that covers all the Keys that are in use at AND below the given LSN. This is only used
1011 : /// for gc-compaction.
1012 : ///
1013 : /// gc-compaction cannot use the same `collect_keyspace` function as the legacy compaction because it
1014 : /// processes data at multiple LSNs and needs to be aware of the fact that some key ranges might need to
1015 : /// be kept only for a specific range of LSN.
1016 : ///
1017 : /// Consider the case that the user created branches at LSN 10 and 20, where the user created a table A at
1018 : /// LSN 10 and dropped that table at LSN 20. `collect_keyspace` at LSN 10 will return the key range
1019 : /// corresponding to that table, while LSN 20 won't. The keyspace info at a single LSN is not enough to
1020 : /// determine which keys to retain/drop for gc-compaction.
1021 : ///
1022 : /// For now, it only drops AUX-v1 keys. But in the future, the function will be extended to return the keyspace
1023 : /// to be retained for each of the branch LSN.
1024 : ///
1025 : /// The return value is (dense keyspace, sparse keyspace).
1026 104 : pub(crate) async fn collect_gc_compaction_keyspace(
1027 104 : &self,
1028 104 : ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
1029 104 : let metadata_key_begin = Key::metadata_key_range().start;
1030 104 : let aux_v1_key = AUX_FILES_KEY;
1031 104 : let dense_keyspace = KeySpace {
1032 104 : ranges: vec![Key::MIN..aux_v1_key, aux_v1_key.next()..metadata_key_begin],
1033 104 : };
1034 104 : Ok((
1035 104 : dense_keyspace,
1036 104 : SparseKeySpace(KeySpace::single(Key::metadata_key_range())),
1037 104 : ))
1038 104 : }
1039 :
1040 : ///
1041 : /// Get a KeySpace that covers all the Keys that are in use at the given LSN.
1042 : /// Anything that's not listed may be removed from the underlying storage (from
1043 : /// that LSN forwards).
1044 : ///
1045 : /// The return value is (dense keyspace, sparse keyspace).
1046 640 : pub(crate) async fn collect_keyspace(
1047 640 : &self,
1048 640 : lsn: Lsn,
1049 640 : ctx: &RequestContext,
1050 640 : ) -> Result<(KeySpace, SparseKeySpace), CollectKeySpaceError> {
1051 640 : // Iterate through key ranges, greedily packing them into partitions
1052 640 : let mut result = KeySpaceAccum::new();
1053 640 :
1054 640 : // The dbdir metadata always exists
1055 640 : result.add_key(DBDIR_KEY);
1056 :
1057 : // Fetch list of database dirs and iterate them
1058 640 : let dbdir = self.list_dbdirs(lsn, ctx).await?;
1059 640 : let mut dbs: Vec<((Oid, Oid), bool)> = dbdir.into_iter().collect();
1060 640 :
1061 640 : dbs.sort_unstable_by(|(k_a, _), (k_b, _)| k_a.cmp(k_b));
1062 640 : for ((spcnode, dbnode), has_relmap_file) in dbs {
1063 0 : if has_relmap_file {
1064 0 : result.add_key(relmap_file_key(spcnode, dbnode));
1065 0 : }
1066 0 : result.add_key(rel_dir_to_key(spcnode, dbnode));
1067 :
1068 0 : let mut rels: Vec<RelTag> = self
1069 0 : .list_rels(spcnode, dbnode, Version::Lsn(lsn), ctx)
1070 0 : .await?
1071 0 : .into_iter()
1072 0 : .collect();
1073 0 : rels.sort_unstable();
1074 0 : for rel in rels {
1075 0 : let relsize_key = rel_size_to_key(rel);
1076 0 : let mut buf = self.get(relsize_key, lsn, ctx).await?;
1077 0 : let relsize = buf.get_u32_le();
1078 0 :
1079 0 : result.add_range(rel_block_to_key(rel, 0)..rel_block_to_key(rel, relsize));
1080 0 : result.add_key(relsize_key);
1081 : }
1082 : }
1083 :
1084 : // Iterate SLRUs next
1085 640 : if self.tenant_shard_id.is_shard_zero() {
1086 1884 : for kind in [
1087 628 : SlruKind::Clog,
1088 628 : SlruKind::MultiXactMembers,
1089 628 : SlruKind::MultiXactOffsets,
1090 : ] {
1091 1884 : let slrudir_key = slru_dir_to_key(kind);
1092 1884 : result.add_key(slrudir_key);
1093 1884 : let buf = self.get(slrudir_key, lsn, ctx).await?;
1094 1884 : let dir = SlruSegmentDirectory::des(&buf)?;
1095 1884 : let mut segments: Vec<u32> = dir.segments.iter().cloned().collect();
1096 1884 : segments.sort_unstable();
1097 1884 : for segno in segments {
1098 0 : let segsize_key = slru_segment_size_to_key(kind, segno);
1099 0 : let mut buf = self.get(segsize_key, lsn, ctx).await?;
1100 0 : let segsize = buf.get_u32_le();
1101 0 :
1102 0 : result.add_range(
1103 0 : slru_block_to_key(kind, segno, 0)..slru_block_to_key(kind, segno, segsize),
1104 0 : );
1105 0 : result.add_key(segsize_key);
1106 : }
1107 : }
1108 12 : }
1109 :
1110 : // Then pg_twophase
1111 640 : result.add_key(TWOPHASEDIR_KEY);
1112 :
1113 640 : let mut xids: Vec<u64> = self
1114 640 : .list_twophase_files(lsn, ctx)
1115 640 : .await?
1116 640 : .iter()
1117 640 : .cloned()
1118 640 : .collect();
1119 640 : xids.sort_unstable();
1120 640 : for xid in xids {
1121 0 : result.add_key(twophase_file_key(xid));
1122 0 : }
1123 :
1124 640 : result.add_key(CONTROLFILE_KEY);
1125 640 : result.add_key(CHECKPOINT_KEY);
1126 640 :
1127 640 : // Add extra keyspaces in the test cases. Some test cases write keys into the storage without
1128 640 : // creating directory keys. These test cases will add such keyspaces into `extra_test_dense_keyspace`
1129 640 : // and the keys will not be garbage-collected.
1130 640 : #[cfg(test)]
1131 640 : {
1132 640 : let guard = self.extra_test_dense_keyspace.load();
1133 640 : for kr in &guard.ranges {
1134 0 : result.add_range(kr.clone());
1135 0 : }
1136 0 : }
1137 0 :
1138 640 : let dense_keyspace = result.to_keyspace();
1139 640 : let sparse_keyspace = SparseKeySpace(KeySpace {
1140 640 : ranges: vec![Key::metadata_aux_key_range(), repl_origin_key_range()],
1141 640 : });
1142 640 :
1143 640 : if cfg!(debug_assertions) {
1144 : // Verify if the sparse keyspaces are ordered and non-overlapping.
1145 :
1146 : // We do not use KeySpaceAccum for sparse_keyspace because we want to ensure each
1147 : // category of sparse keys are split into their own image/delta files. If there
1148 : // are overlapping keyspaces, they will be automatically merged by keyspace accum,
1149 : // and we want the developer to keep the keyspaces separated.
1150 :
1151 640 : let ranges = &sparse_keyspace.0.ranges;
1152 :
1153 : // TODO: use a single overlaps_with across the codebase
1154 640 : fn overlaps_with<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
1155 640 : !(a.end <= b.start || b.end <= a.start)
1156 640 : }
1157 1280 : for i in 0..ranges.len() {
1158 1280 : for j in 0..i {
1159 640 : if overlaps_with(&ranges[i], &ranges[j]) {
1160 0 : panic!(
1161 0 : "overlapping sparse keyspace: {}..{} and {}..{}",
1162 0 : ranges[i].start, ranges[i].end, ranges[j].start, ranges[j].end
1163 0 : );
1164 640 : }
1165 : }
1166 : }
1167 640 : for i in 1..ranges.len() {
1168 640 : assert!(
1169 640 : ranges[i - 1].end <= ranges[i].start,
1170 0 : "unordered sparse keyspace: {}..{} and {}..{}",
1171 0 : ranges[i - 1].start,
1172 0 : ranges[i - 1].end,
1173 0 : ranges[i].start,
1174 0 : ranges[i].end
1175 : );
1176 : }
1177 0 : }
1178 :
1179 640 : Ok((dense_keyspace, sparse_keyspace))
1180 640 : }
1181 :
1182 : /// Get the cached size of a relation, if it was not updated after the specified LSN
1183 897080 : pub fn get_cached_rel_size(&self, tag: &RelTag, lsn: Lsn) -> Option<BlockNumber> {
1184 897080 : let rel_size_cache = self.rel_size_cache.read().unwrap();
1185 897080 : if let Some((cached_lsn, nblocks)) = rel_size_cache.map.get(tag) {
1186 897036 : if lsn >= *cached_lsn {
1187 886744 : RELSIZE_CACHE_HITS.inc();
1188 886744 : return Some(*nblocks);
1189 10292 : }
1190 10292 : RELSIZE_CACHE_MISSES_OLD.inc();
1191 44 : }
1192 10336 : RELSIZE_CACHE_MISSES.inc();
1193 10336 : None
1194 897080 : }
1195 :
1196 : /// Update cached relation size if there is no more recent update
1197 10272 : pub fn update_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
1198 10272 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1199 10272 :
1200 10272 : if lsn < rel_size_cache.complete_as_of {
1201 : // Do not cache old values. It's safe to cache the size on read, as long as
1202 : // the read was at an LSN since we started the WAL ingestion. Reasoning: we
1203 : // never evict values from the cache, so if the relation size changed after
1204 : // 'lsn', the new value is already in the cache.
1205 0 : return;
1206 10272 : }
1207 10272 :
1208 10272 : match rel_size_cache.map.entry(tag) {
1209 10272 : hash_map::Entry::Occupied(mut entry) => {
1210 10272 : let cached_lsn = entry.get_mut();
1211 10272 : if lsn >= cached_lsn.0 {
1212 0 : *cached_lsn = (lsn, nblocks);
1213 10272 : }
1214 : }
1215 0 : hash_map::Entry::Vacant(entry) => {
1216 0 : entry.insert((lsn, nblocks));
1217 0 : RELSIZE_CACHE_ENTRIES.inc();
1218 0 : }
1219 : }
1220 10272 : }
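    // Worked example of the cache rules above, with hypothetical LSNs:
    //
    //     // cache holds: rel -> (Lsn(0x2000), 10 blocks)
    //     get_cached_rel_size(&rel, Lsn(0x3000)) == Some(10)  // hit: no newer update exists
    //     get_cached_rel_size(&rel, Lsn(0x1000)) == None      // miss (RELSIZE_CACHE_MISSES_OLD):
    //                                                         // the size at that older LSN is unknown
    //
    // update_cached_rel_size additionally drops updates below `complete_as_of`, the LSN at
    // which WAL ingestion (and hence cache maintenance) started.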
1221 :
1222 : /// Store cached relation size
1223 565440 : pub fn set_cached_rel_size(&self, tag: RelTag, lsn: Lsn, nblocks: BlockNumber) {
1224 565440 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1225 565440 : if rel_size_cache.map.insert(tag, (lsn, nblocks)).is_none() {
1226 3840 : RELSIZE_CACHE_ENTRIES.inc();
1227 561600 : }
1228 565440 : }
1229 :
1230 : /// Remove cached relation size
1231 4 : pub fn remove_cached_rel_size(&self, tag: &RelTag) {
1232 4 : let mut rel_size_cache = self.rel_size_cache.write().unwrap();
1233 4 : if rel_size_cache.map.remove(tag).is_some() {
1234 4 : RELSIZE_CACHE_ENTRIES.dec();
1235 4 : }
1236 4 : }
1237 : }
1238 :
1239 : /// DatadirModification represents an operation to ingest an atomic set of
1240 : /// updates to the repository.
1241 : ///
1242 : /// It is created by the 'begin_modification' function. A new one is created for each WAL
1243 : /// record, so that all the modifications made by one WAL record appear atomic.
1244 : pub struct DatadirModification<'a> {
1245 : /// The timeline this modification applies to. You can access this to
1246 : /// read the state, but note that any pending updates are *not* reflected
1247 : /// in the state in 'tline' yet.
1248 : pub tline: &'a Timeline,
1249 :
1250 : /// Current LSN of the modification
1251 : lsn: Lsn,
1252 :
1253 : // The modifications are not applied directly to the underlying key-value store.
1254 : // The put-functions add the modifications here, and they are flushed to the
1255 : // underlying key-value store by the 'commit' function.
1256 : pending_lsns: Vec<Lsn>,
1257 : pending_deletions: Vec<(Range<Key>, Lsn)>,
1258 : pending_nblocks: i64,
1259 :
1260 : /// Metadata writes, indexed by key so that they can be read from not-yet-committed modifications
1261 : /// while ingesting subsequent records. See [`Self::is_data_key`] for the definition of 'metadata'.
1262 : pending_metadata_pages: HashMap<CompactKey, Vec<(Lsn, usize, Value)>>,
1263 :
1264 : /// Data writes, ready to be flushed into an ephemeral layer. See [`Self::is_data_key`] for
1265 : /// which keys are stored here.
1266 : pending_data_batch: Option<SerializedValueBatch>,
1267 :
1268 : /// For special "directory" keys that store key-value maps, track the size of the map
1269 : /// if it was updated in this modification.
1270 : pending_directory_entries: Vec<(DirectoryKind, usize)>,
1271 :
1272 : /// An **approximation** of how many metadata bytes will be written to the EphemeralFile.
1273 : pending_metadata_bytes: usize,
1274 : }
1275 :
1276 : impl DatadirModification<'_> {
1277 : // When a DatadirModification is committed, we do a monolithic serialization of all its contents. WAL records can
1278 : // contain multiple pages, so the pageserver's record-based batch size isn't sufficient to bound this allocation: we
1279 : // additionally specify a limit on how much payload a DatadirModification may contain before it should be committed.
1280 : pub(crate) const MAX_PENDING_BYTES: usize = 8 * 1024 * 1024;
1281 :
1282 : /// Get the current lsn
1283 836116 : pub(crate) fn get_lsn(&self) -> Lsn {
1284 836116 : self.lsn
1285 836116 : }
1286 :
1287 0 : pub(crate) fn approx_pending_bytes(&self) -> usize {
1288 0 : self.pending_data_batch
1289 0 : .as_ref()
1290 0 : .map_or(0, |b| b.buffer_size())
1291 0 : + self.pending_metadata_bytes
1292 0 : }
1293 :
1294 0 : pub(crate) fn has_dirty_data(&self) -> bool {
1295 0 : self.pending_data_batch
1296 0 : .as_ref()
1297 0 : .is_some_and(|b| b.has_data())
1298 0 : }
1299 :
1300 : /// Set the current lsn
1301 291716 : pub(crate) fn set_lsn(&mut self, lsn: Lsn) -> anyhow::Result<()> {
1302 291716 : ensure!(
1303 291716 : lsn >= self.lsn,
1304 0 : "setting an older lsn {} than {} is not allowed",
1305 : lsn,
1306 : self.lsn
1307 : );
1308 :
1309 291716 : if lsn > self.lsn {
1310 291716 : self.pending_lsns.push(self.lsn);
1311 291716 : self.lsn = lsn;
1312 291716 : }
1313 291716 : Ok(())
1314 291716 : }
1315 :
1316 : /// In this context, 'metadata' means keys that are only read by the pageserver internally, and 'data' means
1317 : /// keys that represent literal blocks that postgres can read. So data includes relation blocks and
1318 : /// SLRU blocks, which are read directly by postgres, and everything else is considered metadata.
1319 : ///
1320 : /// The distinction is important because data keys are handled on a fast path where dirty writes are
1321 : /// not readable until this modification is committed, whereas metadata keys are visible for read
1322 : /// via [`Self::get`] as soon as their record has been ingested.
1323 1701220 : fn is_data_key(key: &Key) -> bool {
1324 1701220 : key.is_rel_block_key() || key.is_slru_block_key()
1325 1701220 : }
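    // Examples of the split above: rel_block_to_key(rel, blkno) and slru_block_to_key(..) are
    // data keys (fast path; not readable back until commit), while rel_size_to_key(rel),
    // rel_dir_to_key(..), DBDIR_KEY, TWOPHASEDIR_KEY and CHECKPOINT_KEY are metadata keys
    // (readable through Self::get as soon as they are put into this modification).
    //
    //     debug_assert!(Self::is_data_key(&rel_block_to_key(rel, 0)));
    //     debug_assert!(!Self::is_data_key(&rel_size_to_key(rel)));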
1326 :
1327 : /// Initialize a completely new repository.
1328 : ///
1329 : /// This inserts the directory metadata entries that are assumed to
1330 : /// always exist.
1331 412 : pub fn init_empty(&mut self) -> anyhow::Result<()> {
1332 412 : let buf = DbDirectory::ser(&DbDirectory {
1333 412 : dbdirs: HashMap::new(),
1334 412 : })?;
1335 412 : self.pending_directory_entries.push((DirectoryKind::Db, 0));
1336 412 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1337 :
1338 412 : let buf = if self.tline.pg_version >= 17 {
1339 0 : TwoPhaseDirectoryV17::ser(&TwoPhaseDirectoryV17 {
1340 0 : xids: HashSet::new(),
1341 0 : })
1342 : } else {
1343 412 : TwoPhaseDirectory::ser(&TwoPhaseDirectory {
1344 412 : xids: HashSet::new(),
1345 412 : })
1346 0 : }?;
1347 412 : self.pending_directory_entries
1348 412 : .push((DirectoryKind::TwoPhase, 0));
1349 412 : self.put(TWOPHASEDIR_KEY, Value::Image(buf.into()));
1350 :
1351 412 : let buf: Bytes = SlruSegmentDirectory::ser(&SlruSegmentDirectory::default())?.into();
1352 412 : let empty_dir = Value::Image(buf);
1353 412 :
1354 412 : // Initialize SLRUs on shard 0 only: creating these on other shards would be
1355 412 : // harmless but they'd just be dropped on later compaction.
1356 412 : if self.tline.tenant_shard_id.is_shard_zero() {
1357 400 : self.put(slru_dir_to_key(SlruKind::Clog), empty_dir.clone());
1358 400 : self.pending_directory_entries
1359 400 : .push((DirectoryKind::SlruSegment(SlruKind::Clog), 0));
1360 400 : self.put(
1361 400 : slru_dir_to_key(SlruKind::MultiXactMembers),
1362 400 : empty_dir.clone(),
1363 400 : );
1364 400 : self.pending_directory_entries
1365 400 : .push((DirectoryKind::SlruSegment(SlruKind::MultiXactMembers), 0));
1366 400 : self.put(slru_dir_to_key(SlruKind::MultiXactOffsets), empty_dir);
1367 400 : self.pending_directory_entries
1368 400 : .push((DirectoryKind::SlruSegment(SlruKind::MultiXactOffsets), 0));
1369 400 : }
1370 :
1371 412 : Ok(())
1372 412 : }
1373 :
1374 : #[cfg(test)]
1375 408 : pub fn init_empty_test_timeline(&mut self) -> anyhow::Result<()> {
1376 408 : self.init_empty()?;
1377 408 : self.put_control_file(bytes::Bytes::from_static(
1378 408 : b"control_file contents do not matter",
1379 408 : ))
1380 408 : .context("put_control_file")?;
1381 408 : self.put_checkpoint(bytes::Bytes::from_static(
1382 408 : b"checkpoint_file contents do not matter",
1383 408 : ))
1384 408 : .context("put_checkpoint_file")?;
1385 408 : Ok(())
1386 408 : }
1387 :
1388 : /// Creates a relation if it is not already present.
1389 : /// Returns the current size of the relation
1390 836112 : pub(crate) async fn create_relation_if_required(
1391 836112 : &mut self,
1392 836112 : rel: RelTag,
1393 836112 : ctx: &RequestContext,
1394 836112 : ) -> Result<u32, PageReconstructError> {
1395 : // Get current size and put rel creation if rel doesn't exist
1396 : //
1397 : // NOTE: we check the cache first even though get_rel_exists and get_rel_size would
1398 : // check the cache too. This is because eagerly checking the cache results in
1399 : // less work overall and 10% better performance. It's more work on cache miss
1400 : // but cache miss is rare.
1401 836112 : if let Some(nblocks) = self.tline.get_cached_rel_size(&rel, self.get_lsn()) {
1402 836092 : Ok(nblocks)
1403 20 : } else if !self
1404 20 : .tline
1405 20 : .get_rel_exists(rel, Version::Modified(self), ctx)
1406 20 : .await?
1407 : {
1408 : // create it with 0 size initially, the logic below will extend it
1409 20 : self.put_rel_creation(rel, 0, ctx)
1410 20 : .await
1411 20 : .context("Relation Error")?;
1412 20 : Ok(0)
1413 : } else {
1414 0 : self.tline
1415 0 : .get_rel_size(rel, Version::Modified(self), ctx)
1416 0 : .await
1417 : }
1418 836112 : }
1419 :
1420 : /// Given a block number for a relation (which represents a newly written block),
1421 : /// the previous block count of the relation, and the shard info, find the gaps
1422 : /// that were created by the newly written block if any.
1423 291340 : fn find_gaps(
1424 291340 : rel: RelTag,
1425 291340 : blkno: u32,
1426 291340 : previous_nblocks: u32,
1427 291340 : shard: &ShardIdentity,
1428 291340 : ) -> Option<KeySpace> {
1429 291340 : let mut key = rel_block_to_key(rel, blkno);
1430 291340 : let mut gap_accum = None;
1431 :
1432 291340 : for gap_blkno in previous_nblocks..blkno {
1433 64 : key.field6 = gap_blkno;
1434 64 :
1435 64 : if shard.get_shard_number(&key) != shard.number {
1436 16 : continue;
1437 48 : }
1438 48 :
1439 48 : gap_accum
1440 48 : .get_or_insert_with(KeySpaceAccum::new)
1441 48 : .add_key(key);
1442 : }
1443 :
1444 291340 : gap_accum.map(|accum| accum.to_keyspace())
1445 291340 : }
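    // Worked example: if this shard last knew the relation to be 3 blocks long and now ingests
    // a write to block 5, `previous_nblocks..blkno` is 3..5, so blocks 3 and 4 are gap
    // candidates; only the ones owned by this shard (per get_shard_number) are returned, and
    // ingest_batch below zero-fills them so later reads under the new size don't hit missing keys.
    //
    //     Self::find_gaps(rel, 5, 3, &shard) // -> Some(keyspace covering this shard's subset of blocks {3, 4})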
1446 :
1447 291704 : pub async fn ingest_batch(
1448 291704 : &mut self,
1449 291704 : mut batch: SerializedValueBatch,
1450 291704 : // TODO(vlad): remove this argument and replace the shard check with is_key_local
1451 291704 : shard: &ShardIdentity,
1452 291704 : ctx: &RequestContext,
1453 291704 : ) -> anyhow::Result<()> {
1454 291704 : let mut gaps_at_lsns = Vec::default();
1455 :
1456 291704 : for meta in batch.metadata.iter() {
1457 291284 : let (rel, blkno) = Key::from_compact(meta.key()).to_rel_block()?;
1458 291284 : let new_nblocks = blkno + 1;
1459 :
1460 291284 : let old_nblocks = self.create_relation_if_required(rel, ctx).await?;
1461 291284 : if new_nblocks > old_nblocks {
1462 4780 : self.put_rel_extend(rel, new_nblocks, ctx).await?;
1463 286504 : }
1464 :
1465 291284 : if let Some(gaps) = Self::find_gaps(rel, blkno, old_nblocks, shard) {
1466 0 : gaps_at_lsns.push((gaps, meta.lsn()));
1467 291284 : }
1468 : }
1469 :
1470 291704 : if !gaps_at_lsns.is_empty() {
1471 0 : batch.zero_gaps(gaps_at_lsns);
1472 291704 : }
1473 :
1474 291704 : match self.pending_data_batch.as_mut() {
1475 40 : Some(pending_batch) => {
1476 40 : pending_batch.extend(batch);
1477 40 : }
1478 291664 : None if batch.has_data() => {
1479 291260 : self.pending_data_batch = Some(batch);
1480 291260 : }
1481 404 : None => {
1482 404 : // Nothing to initialize the batch with
1483 404 : }
1484 : }
1485 :
1486 291704 : Ok(())
1487 291704 : }
1488 :
1489 : /// Put a new page version that can be constructed from a WAL record
1490 : ///
1491 : /// NOTE: this will *not* implicitly extend the relation, if the page is beyond the
1492 : /// current end-of-file. It's up to the caller to check that the relation size
1493 : /// matches the blocks inserted!
1494 24 : pub fn put_rel_wal_record(
1495 24 : &mut self,
1496 24 : rel: RelTag,
1497 24 : blknum: BlockNumber,
1498 24 : rec: NeonWalRecord,
1499 24 : ) -> anyhow::Result<()> {
1500 24 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1501 24 : self.put(rel_block_to_key(rel, blknum), Value::WalRecord(rec));
1502 24 : Ok(())
1503 24 : }
1504 :
1505 : // Same, but for an SLRU.
1506 16 : pub fn put_slru_wal_record(
1507 16 : &mut self,
1508 16 : kind: SlruKind,
1509 16 : segno: u32,
1510 16 : blknum: BlockNumber,
1511 16 : rec: NeonWalRecord,
1512 16 : ) -> anyhow::Result<()> {
1513 16 : if !self.tline.tenant_shard_id.is_shard_zero() {
1514 0 : return Ok(());
1515 16 : }
1516 16 :
1517 16 : self.put(
1518 16 : slru_block_to_key(kind, segno, blknum),
1519 16 : Value::WalRecord(rec),
1520 16 : );
1521 16 : Ok(())
1522 16 : }
1523 :
1524 : /// Like put_rel_wal_record, but with a ready-made image of the page.
1525 555684 : pub fn put_rel_page_image(
1526 555684 : &mut self,
1527 555684 : rel: RelTag,
1528 555684 : blknum: BlockNumber,
1529 555684 : img: Bytes,
1530 555684 : ) -> anyhow::Result<()> {
1531 555684 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1532 555684 : let key = rel_block_to_key(rel, blknum);
1533 555684 : if !key.is_valid_key_on_write_path() {
1534 0 : anyhow::bail!(
1535 0 : "the request contains data not supported by pageserver at {}",
1536 0 : key
1537 0 : );
1538 555684 : }
1539 555684 : self.put(rel_block_to_key(rel, blknum), Value::Image(img));
1540 555684 : Ok(())
1541 555684 : }
1542 :
1543 12 : pub fn put_slru_page_image(
1544 12 : &mut self,
1545 12 : kind: SlruKind,
1546 12 : segno: u32,
1547 12 : blknum: BlockNumber,
1548 12 : img: Bytes,
1549 12 : ) -> anyhow::Result<()> {
1550 12 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1551 :
1552 12 : let key = slru_block_to_key(kind, segno, blknum);
1553 12 : if !key.is_valid_key_on_write_path() {
1554 0 : anyhow::bail!(
1555 0 : "the request contains data not supported by pageserver at {}",
1556 0 : key
1557 0 : );
1558 12 : }
1559 12 : self.put(key, Value::Image(img));
1560 12 : Ok(())
1561 12 : }
1562 :
1563 5996 : pub(crate) fn put_rel_page_image_zero(
1564 5996 : &mut self,
1565 5996 : rel: RelTag,
1566 5996 : blknum: BlockNumber,
1567 5996 : ) -> anyhow::Result<()> {
1568 5996 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1569 5996 : let key = rel_block_to_key(rel, blknum);
1570 5996 : if !key.is_valid_key_on_write_path() {
1571 0 : anyhow::bail!(
1572 0 : "the request contains data not supported by pageserver: {} @ {}",
1573 0 : key,
1574 0 : self.lsn
1575 0 : );
1576 5996 : }
1577 5996 :
1578 5996 : let batch = self
1579 5996 : .pending_data_batch
1580 5996 : .get_or_insert_with(SerializedValueBatch::default);
1581 5996 :
1582 5996 : batch.put(key.to_compact(), Value::Image(ZERO_PAGE.clone()), self.lsn);
1583 5996 :
1584 5996 : Ok(())
1585 5996 : }
1586 :
1587 0 : pub(crate) fn put_slru_page_image_zero(
1588 0 : &mut self,
1589 0 : kind: SlruKind,
1590 0 : segno: u32,
1591 0 : blknum: BlockNumber,
1592 0 : ) -> anyhow::Result<()> {
1593 0 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1594 0 : let key = slru_block_to_key(kind, segno, blknum);
1595 0 : if !key.is_valid_key_on_write_path() {
1596 0 : anyhow::bail!(
1597 0 : "the request contains data not supported by pageserver: {} @ {}",
1598 0 : key,
1599 0 : self.lsn
1600 0 : );
1601 0 : }
1602 0 :
1603 0 : let batch = self
1604 0 : .pending_data_batch
1605 0 : .get_or_insert_with(SerializedValueBatch::default);
1606 0 :
1607 0 : batch.put(key.to_compact(), Value::Image(ZERO_PAGE.clone()), self.lsn);
1608 0 :
1609 0 : Ok(())
1610 0 : }
1611 :
1612 : /// Store a relmapper file (pg_filenode.map) in the repository
1613 32 : pub async fn put_relmap_file(
1614 32 : &mut self,
1615 32 : spcnode: Oid,
1616 32 : dbnode: Oid,
1617 32 : img: Bytes,
1618 32 : ctx: &RequestContext,
1619 32 : ) -> anyhow::Result<()> {
1620 : // Add it to the directory (if it doesn't exist already)
1621 32 : let buf = self.get(DBDIR_KEY, ctx).await?;
1622 32 : let mut dbdir = DbDirectory::des(&buf)?;
1623 :
1624 32 : let r = dbdir.dbdirs.insert((spcnode, dbnode), true);
1625 32 : if r.is_none() || r == Some(false) {
1626 : // The dbdir entry didn't exist, or it contained a
1627 : // 'false'. The 'insert' call already updated it with
1628 : // 'true', now write the updated 'dbdirs' map back.
1629 32 : let buf = DbDirectory::ser(&dbdir)?;
1630 32 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1631 0 : }
1632 32 : if r.is_none() {
1633 : // Create RelDirectory
1634 16 : let buf = RelDirectory::ser(&RelDirectory {
1635 16 : rels: HashSet::new(),
1636 16 : })?;
1637 16 : self.pending_directory_entries.push((DirectoryKind::Rel, 0));
1638 16 : self.put(
1639 16 : rel_dir_to_key(spcnode, dbnode),
1640 16 : Value::Image(Bytes::from(buf)),
1641 16 : );
1642 16 : }
1643 :
1644 32 : self.put(relmap_file_key(spcnode, dbnode), Value::Image(img));
1645 32 : Ok(())
1646 32 : }
1647 :
1648 0 : pub async fn put_twophase_file(
1649 0 : &mut self,
1650 0 : xid: u64,
1651 0 : img: Bytes,
1652 0 : ctx: &RequestContext,
1653 0 : ) -> anyhow::Result<()> {
1654 : // Add it to the directory entry
1655 0 : let dirbuf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1656 0 : let newdirbuf = if self.tline.pg_version >= 17 {
1657 0 : let mut dir = TwoPhaseDirectoryV17::des(&dirbuf)?;
1658 0 : if !dir.xids.insert(xid) {
1659 0 : anyhow::bail!("twophase file for xid {} already exists", xid);
1660 0 : }
1661 0 : self.pending_directory_entries
1662 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1663 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
1664 : } else {
1665 0 : let xid = xid as u32;
1666 0 : let mut dir = TwoPhaseDirectory::des(&dirbuf)?;
1667 0 : if !dir.xids.insert(xid) {
1668 0 : anyhow::bail!("twophase file for xid {} already exists", xid);
1669 0 : }
1670 0 : self.pending_directory_entries
1671 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1672 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
1673 : };
1674 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
1675 0 :
1676 0 : self.put(twophase_file_key(xid), Value::Image(img));
1677 0 : Ok(())
1678 0 : }
1679 :
1680 0 : pub async fn set_replorigin(
1681 0 : &mut self,
1682 0 : origin_id: RepOriginId,
1683 0 : origin_lsn: Lsn,
1684 0 : ) -> anyhow::Result<()> {
1685 0 : let key = repl_origin_key(origin_id);
1686 0 : self.put(key, Value::Image(origin_lsn.ser().unwrap().into()));
1687 0 : Ok(())
1688 0 : }
1689 :
1690 0 : pub async fn drop_replorigin(&mut self, origin_id: RepOriginId) -> anyhow::Result<()> {
1691 0 : self.set_replorigin(origin_id, Lsn::INVALID).await
1692 0 : }
1693 :
1694 412 : pub fn put_control_file(&mut self, img: Bytes) -> anyhow::Result<()> {
1695 412 : self.put(CONTROLFILE_KEY, Value::Image(img));
1696 412 : Ok(())
1697 412 : }
1698 :
1699 440 : pub fn put_checkpoint(&mut self, img: Bytes) -> anyhow::Result<()> {
1700 440 : self.put(CHECKPOINT_KEY, Value::Image(img));
1701 440 : Ok(())
1702 440 : }
1703 :
1704 0 : pub async fn drop_dbdir(
1705 0 : &mut self,
1706 0 : spcnode: Oid,
1707 0 : dbnode: Oid,
1708 0 : ctx: &RequestContext,
1709 0 : ) -> anyhow::Result<()> {
1710 0 : let total_blocks = self
1711 0 : .tline
1712 0 : .get_db_size(spcnode, dbnode, Version::Modified(self), ctx)
1713 0 : .await?;
1714 :
1715 : // Remove entry from dbdir
1716 0 : let buf = self.get(DBDIR_KEY, ctx).await?;
1717 0 : let mut dir = DbDirectory::des(&buf)?;
1718 0 : if dir.dbdirs.remove(&(spcnode, dbnode)).is_some() {
1719 0 : let buf = DbDirectory::ser(&dir)?;
1720 0 : self.pending_directory_entries
1721 0 : .push((DirectoryKind::Db, dir.dbdirs.len()));
1722 0 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1723 : } else {
1724 0 : warn!(
1725 0 : "dropped dbdir for spcnode {} dbnode {} did not exist in db directory",
1726 : spcnode, dbnode
1727 : );
1728 : }
1729 :
1730 : // Update logical database size.
1731 0 : self.pending_nblocks -= total_blocks as i64;
1732 0 :
1733 0 : // Delete all relations and metadata files for the spcnode/dbnode
1734 0 : self.delete(dbdir_key_range(spcnode, dbnode));
1735 0 : Ok(())
1736 0 : }
1737 :
1738 : /// Create a relation fork.
1739 : ///
1740 : /// 'nblocks' is the initial size.
1741 3840 : pub async fn put_rel_creation(
1742 3840 : &mut self,
1743 3840 : rel: RelTag,
1744 3840 : nblocks: BlockNumber,
1745 3840 : ctx: &RequestContext,
1746 3840 : ) -> Result<(), RelationError> {
1747 3840 : if rel.relnode == 0 {
1748 0 : return Err(RelationError::InvalidRelnode);
1749 3840 : }
1750 : // It's possible that this is the first rel for this db in this
1751 : // tablespace. Create the reldir entry for it if so.
1752 3840 : let mut dbdir = DbDirectory::des(&self.get(DBDIR_KEY, ctx).await.context("read db")?)
1753 3840 : .context("deserialize db")?;
1754 3840 : let rel_dir_key = rel_dir_to_key(rel.spcnode, rel.dbnode);
1755 3840 : let mut rel_dir =
1756 3840 : if let hash_map::Entry::Vacant(e) = dbdir.dbdirs.entry((rel.spcnode, rel.dbnode)) {
1757 : // Didn't exist. Update dbdir
1758 16 : e.insert(false);
1759 16 : let buf = DbDirectory::ser(&dbdir).context("serialize db")?;
1760 16 : self.pending_directory_entries
1761 16 : .push((DirectoryKind::Db, dbdir.dbdirs.len()));
1762 16 : self.put(DBDIR_KEY, Value::Image(buf.into()));
1763 16 :
1764 16 : // and create the RelDirectory
1765 16 : RelDirectory::default()
1766 : } else {
1767 : // reldir already exists, fetch it
1768 3824 : RelDirectory::des(&self.get(rel_dir_key, ctx).await.context("read db")?)
1769 3824 : .context("deserialize db")?
1770 : };
1771 :
1772 : // Add the new relation to the rel directory entry, and write it back
1773 3840 : if !rel_dir.rels.insert((rel.relnode, rel.forknum)) {
1774 0 : return Err(RelationError::AlreadyExists);
1775 3840 : }
1776 3840 :
1777 3840 : self.pending_directory_entries
1778 3840 : .push((DirectoryKind::Rel, rel_dir.rels.len()));
1779 3840 :
1780 3840 : self.put(
1781 3840 : rel_dir_key,
1782 3840 : Value::Image(Bytes::from(
1783 3840 : RelDirectory::ser(&rel_dir).context("serialize")?,
1784 : )),
1785 : );
1786 :
1787 : // Put size
1788 3840 : let size_key = rel_size_to_key(rel);
1789 3840 : let buf = nblocks.to_le_bytes();
1790 3840 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1791 3840 :
1792 3840 : self.pending_nblocks += nblocks as i64;
1793 3840 :
1794 3840 : // Update relation size cache
1795 3840 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1796 3840 :
1797 3840 : // Even if nblocks > 0, we don't insert any actual blocks here. That's up to the
1798 3840 : // caller.
1799 3840 : Ok(())
1800 3840 : }
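 : // Illustrative caller sketch (hypothetical values; `image_for` is a made-up
 : // helper, not part of this file): creation only records the size, so the
 : // caller supplies the actual block contents afterwards:
 : //
 : //     modification.put_rel_creation(rel, nblocks, ctx).await?;
 : //     for blknum in 0..nblocks {
 : //         modification.put_rel_page_image(rel, blknum, image_for(blknum))?;
 : //     }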
1801 :
1802 : /// Truncate relation
1803 12024 : pub async fn put_rel_truncation(
1804 12024 : &mut self,
1805 12024 : rel: RelTag,
1806 12024 : nblocks: BlockNumber,
1807 12024 : ctx: &RequestContext,
1808 12024 : ) -> anyhow::Result<()> {
1809 12024 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1810 12024 : if self
1811 12024 : .tline
1812 12024 : .get_rel_exists(rel, Version::Modified(self), ctx)
1813 12024 : .await?
1814 : {
1815 12024 : let size_key = rel_size_to_key(rel);
1816 : // Fetch the old size first
1817 12024 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1818 12024 :
1819 12024 : // Update the entry with the new size.
1820 12024 : let buf = nblocks.to_le_bytes();
1821 12024 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1822 12024 :
1823 12024 : // Update relation size cache
1824 12024 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1825 12024 :
1826 12024 : // Update logical database size.
1827 12024 : self.pending_nblocks -= old_size as i64 - nblocks as i64;
1828 0 : }
1829 12024 : Ok(())
1830 12024 : }
1831 :
1832 : /// Extend relation
1833 : /// If the new size is smaller, do nothing.
1834 553360 : pub async fn put_rel_extend(
1835 553360 : &mut self,
1836 553360 : rel: RelTag,
1837 553360 : nblocks: BlockNumber,
1838 553360 : ctx: &RequestContext,
1839 553360 : ) -> anyhow::Result<()> {
1840 553360 : anyhow::ensure!(rel.relnode != 0, RelationError::InvalidRelnode);
1841 :
1842 : // Put size
1843 553360 : let size_key = rel_size_to_key(rel);
1844 553360 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1845 553360 :
1846 553360 : // only extend relation here. never decrease the size
1847 553360 : if nblocks > old_size {
1848 549576 : let buf = nblocks.to_le_bytes();
1849 549576 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1850 549576 :
1851 549576 : // Update relation size cache
1852 549576 : self.tline.set_cached_rel_size(rel, self.lsn, nblocks);
1853 549576 :
1854 549576 : self.pending_nblocks += nblocks as i64 - old_size as i64;
1855 549576 : }
1856 553360 : Ok(())
1857 553360 : }
1858 :
1859 : /// Drop some relations
1860 20 : pub(crate) async fn put_rel_drops(
1861 20 : &mut self,
1862 20 : drop_relations: HashMap<(u32, u32), Vec<RelTag>>,
1863 20 : ctx: &RequestContext,
1864 20 : ) -> anyhow::Result<()> {
1865 24 : for ((spc_node, db_node), rel_tags) in drop_relations {
1866 4 : let dir_key = rel_dir_to_key(spc_node, db_node);
1867 4 : let buf = self.get(dir_key, ctx).await?;
1868 4 : let mut dir = RelDirectory::des(&buf)?;
1869 :
1870 4 : let mut dirty = false;
1871 8 : for rel_tag in rel_tags {
1872 4 : if dir.rels.remove(&(rel_tag.relnode, rel_tag.forknum)) {
1873 4 : dirty = true;
1874 4 :
1875 4 : // update logical size
1876 4 : let size_key = rel_size_to_key(rel_tag);
1877 4 : let old_size = self.get(size_key, ctx).await?.get_u32_le();
1878 4 : self.pending_nblocks -= old_size as i64;
1879 4 :
1880 4 : // Remove entry from relation size cache
1881 4 : self.tline.remove_cached_rel_size(&rel_tag);
1882 4 :
1883 4 : // Delete size entry, as well as all blocks
1884 4 : self.delete(rel_key_range(rel_tag));
1885 0 : }
1886 : }
1887 :
1888 4 : if dirty {
1889 4 : self.put(dir_key, Value::Image(Bytes::from(RelDirectory::ser(&dir)?)));
1890 4 : self.pending_directory_entries
1891 4 : .push((DirectoryKind::Rel, dir.rels.len()));
1892 0 : }
1893 : }
1894 :
1895 20 : Ok(())
1896 20 : }
1897 :
1898 12 : pub async fn put_slru_segment_creation(
1899 12 : &mut self,
1900 12 : kind: SlruKind,
1901 12 : segno: u32,
1902 12 : nblocks: BlockNumber,
1903 12 : ctx: &RequestContext,
1904 12 : ) -> anyhow::Result<()> {
1905 12 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1906 :
1907 : // Add it to the directory entry
1908 12 : let dir_key = slru_dir_to_key(kind);
1909 12 : let buf = self.get(dir_key, ctx).await?;
1910 12 : let mut dir = SlruSegmentDirectory::des(&buf)?;
1911 :
1912 12 : if !dir.segments.insert(segno) {
1913 0 : anyhow::bail!("slru segment {kind:?}/{segno} already exists");
1914 12 : }
1915 12 : self.pending_directory_entries
1916 12 : .push((DirectoryKind::SlruSegment(kind), dir.segments.len()));
1917 12 : self.put(
1918 12 : dir_key,
1919 12 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
1920 : );
1921 :
1922 : // Put size
1923 12 : let size_key = slru_segment_size_to_key(kind, segno);
1924 12 : let buf = nblocks.to_le_bytes();
1925 12 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1926 12 :
1927 12 : // even if nblocks > 0, we don't insert any actual blocks here
1928 12 :
1929 12 : Ok(())
1930 12 : }
1931 :
1932 : /// Extend SLRU segment
1933 0 : pub fn put_slru_extend(
1934 0 : &mut self,
1935 0 : kind: SlruKind,
1936 0 : segno: u32,
1937 0 : nblocks: BlockNumber,
1938 0 : ) -> anyhow::Result<()> {
1939 0 : assert!(self.tline.tenant_shard_id.is_shard_zero());
1940 :
1941 : // Put size
1942 0 : let size_key = slru_segment_size_to_key(kind, segno);
1943 0 : let buf = nblocks.to_le_bytes();
1944 0 : self.put(size_key, Value::Image(Bytes::from(buf.to_vec())));
1945 0 : Ok(())
1946 0 : }
1947 :
1948 : /// This method is used for marking truncated SLRU files
1949 0 : pub async fn drop_slru_segment(
1950 0 : &mut self,
1951 0 : kind: SlruKind,
1952 0 : segno: u32,
1953 0 : ctx: &RequestContext,
1954 0 : ) -> anyhow::Result<()> {
1955 0 : // Remove it from the directory entry
1956 0 : let dir_key = slru_dir_to_key(kind);
1957 0 : let buf = self.get(dir_key, ctx).await?;
1958 0 : let mut dir = SlruSegmentDirectory::des(&buf)?;
1959 :
1960 0 : if !dir.segments.remove(&segno) {
1961 0 : warn!("slru segment {:?}/{} does not exist", kind, segno);
1962 0 : }
1963 0 : self.pending_directory_entries
1964 0 : .push((DirectoryKind::SlruSegment(kind), dir.segments.len()));
1965 0 : self.put(
1966 0 : dir_key,
1967 0 : Value::Image(Bytes::from(SlruSegmentDirectory::ser(&dir)?)),
1968 : );
1969 :
1970 : // Delete size entry, as well as all blocks
1971 0 : self.delete(slru_segment_key_range(kind, segno));
1972 0 :
1973 0 : Ok(())
1974 0 : }
1975 :
1976 : /// Drop a relmapper file (pg_filenode.map)
1977 0 : pub fn drop_relmap_file(&mut self, _spcnode: Oid, _dbnode: Oid) -> anyhow::Result<()> {
1978 0 : // TODO
1979 0 : Ok(())
1980 0 : }
1981 :
1982 : /// Drop a two-phase state file, removing it from the twophase directory and deleting its data
1983 0 : pub async fn drop_twophase_file(
1984 0 : &mut self,
1985 0 : xid: u64,
1986 0 : ctx: &RequestContext,
1987 0 : ) -> anyhow::Result<()> {
1988 : // Remove it from the directory entry
1989 0 : let buf = self.get(TWOPHASEDIR_KEY, ctx).await?;
1990 0 : let newdirbuf = if self.tline.pg_version >= 17 {
1991 0 : let mut dir = TwoPhaseDirectoryV17::des(&buf)?;
1992 :
1993 0 : if !dir.xids.remove(&xid) {
1994 0 : warn!("twophase file for xid {} does not exist", xid);
1995 0 : }
1996 0 : self.pending_directory_entries
1997 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
1998 0 : Bytes::from(TwoPhaseDirectoryV17::ser(&dir)?)
1999 : } else {
2000 0 : let xid: u32 = u32::try_from(xid)?;
2001 0 : let mut dir = TwoPhaseDirectory::des(&buf)?;
2002 :
2003 0 : if !dir.xids.remove(&xid) {
2004 0 : warn!("twophase file for xid {} does not exist", xid);
2005 0 : }
2006 0 : self.pending_directory_entries
2007 0 : .push((DirectoryKind::TwoPhase, dir.xids.len()));
2008 0 : Bytes::from(TwoPhaseDirectory::ser(&dir)?)
2009 : };
2010 0 : self.put(TWOPHASEDIR_KEY, Value::Image(newdirbuf));
2011 0 :
2012 0 : // Delete it
2013 0 : self.delete(twophase_key_range(xid));
2014 0 :
2015 0 : Ok(())
2016 0 : }
2017 :
2018 32 : pub async fn put_file(
2019 32 : &mut self,
2020 32 : path: &str,
2021 32 : content: &[u8],
2022 32 : ctx: &RequestContext,
2023 32 : ) -> anyhow::Result<()> {
2024 32 : let key = aux_file::encode_aux_file_key(path);
2025 : // retrieve the key from the engine
2026 32 : let old_val = match self.get(key, ctx).await {
2027 8 : Ok(val) => Some(val),
2028 24 : Err(PageReconstructError::MissingKey(_)) => None,
2029 0 : Err(e) => return Err(e.into()),
2030 : };
2031 32 : let files: Vec<(&str, &[u8])> = if let Some(ref old_val) = old_val {
2032 8 : aux_file::decode_file_value(old_val)?
2033 : } else {
2034 24 : Vec::new()
2035 : };
2036 32 : let mut other_files = Vec::with_capacity(files.len());
2037 32 : let mut modifying_file = None;
2038 40 : for file @ (p, content) in files {
2039 8 : if path == p {
2040 8 : assert!(
2041 8 : modifying_file.is_none(),
2042 0 : "duplicated entries found for {}",
2043 : path
2044 : );
2045 8 : modifying_file = Some(content);
2046 0 : } else {
2047 0 : other_files.push(file);
2048 0 : }
2049 : }
2050 32 : let mut new_files = other_files;
2051 32 : match (modifying_file, content.is_empty()) {
2052 4 : (Some(old_content), false) => {
2053 4 : self.tline
2054 4 : .aux_file_size_estimator
2055 4 : .on_update(old_content.len(), content.len());
2056 4 : new_files.push((path, content));
2057 4 : }
2058 4 : (Some(old_content), true) => {
2059 4 : self.tline
2060 4 : .aux_file_size_estimator
2061 4 : .on_remove(old_content.len());
2062 4 : // not adding the file key to the final `new_files` vec.
2063 4 : }
2064 24 : (None, false) => {
2065 24 : self.tline.aux_file_size_estimator.on_add(content.len());
2066 24 : new_files.push((path, content));
2067 24 : }
2068 0 : (None, true) => warn!("removing non-existing aux file: {}", path),
2069 : }
2070 32 : let new_val = aux_file::encode_file_value(&new_files)?;
2071 32 : self.put(key, Value::Image(new_val.into()));
2072 32 :
2073 32 : Ok(())
2074 32 : }
2075 :
2076 : ///
2077 : /// Flush changes accumulated so far to the underlying repository.
2078 : ///
2079 : /// Usually, changes made in DatadirModification are atomic, but this allows
2080 : /// you to flush them to the underlying repository before the final `commit`.
2081 : /// That frees up the memory used to hold the pending changes.
2082 : ///
2083 : /// Currently only used during bulk import of a data directory. In that
2084 : /// context, breaking the atomicity is OK. If the import is interrupted, the
2085 : /// whole import fails and the timeline will be deleted anyway.
2086 : /// (Or to be precise, it will be left behind for debugging purposes and
2087 : /// ignored, see <https://github.com/neondatabase/neon/pull/1809>)
2088 : ///
2089 : /// Note: A consequence of flushing the pending operations is that they
2090 : /// won't be visible to subsequent operations until `commit`. The function
2091 : /// retains all the metadata, but data pages are flushed. That's again OK
2092 : /// for bulk import, where you are just loading data pages and won't try to
2093 : /// modify the same pages twice.
2094 3860 : pub(crate) async fn flush(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
2095 3860 : // Unless we have accumulated a decent amount of changes, it's not worth it
2096 3860 : // to scan through the pending_updates list.
2097 3860 : let pending_nblocks = self.pending_nblocks;
2098 3860 : if pending_nblocks < 10000 {
2099 3860 : return Ok(());
2100 0 : }
2101 :
2102 0 : let mut writer = self.tline.writer().await;
2103 :
2104 : // Flush relation and SLRU data blocks, keep metadata.
2105 0 : if let Some(batch) = self.pending_data_batch.take() {
2106 0 : tracing::debug!(
2107 0 : "Flushing batch with max_lsn={}. Last record LSN is {}",
2108 0 : batch.max_lsn,
2109 0 : self.tline.get_last_record_lsn()
2110 : );
2111 :
2112 : // This bails out on first error without modifying pending_updates.
2113 : // That's Ok, cf this function's doc comment.
2114 0 : writer.put_batch(batch, ctx).await?;
2115 0 : }
2116 :
2117 0 : if pending_nblocks != 0 {
2118 0 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
2119 0 : self.pending_nblocks = 0;
2120 0 : }
2121 :
2122 0 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
2123 0 : writer.update_directory_entries_count(kind, count as u64);
2124 0 : }
2125 :
2126 0 : Ok(())
2127 3860 : }
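 : // Hedged sketch of the bulk-import pattern described above (the surrounding
 : // loop and variable names are assumptions, not from the source):
 : //
 : //     let mut modification = tline.begin_modification(lsn);
 : //     for chunk in import_chunks {
 : //         // ... put_rel_page_image() calls for the chunk ...
 : //         modification.flush(ctx).await?;  // free data pages, keep metadata
 : //     }
 : //     modification.commit(ctx).await?;     // make everything visible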
2128 :
2129 : ///
2130 : /// Finish this atomic update, writing all the updated keys to the
2131 : /// underlying timeline.
2132 : /// All the modifications in this atomic update are stamped by the specified LSN.
2133 : ///
2134 1486184 : pub async fn commit(&mut self, ctx: &RequestContext) -> anyhow::Result<()> {
2135 1486184 : let mut writer = self.tline.writer().await;
2136 :
2137 1486184 : let pending_nblocks = self.pending_nblocks;
2138 1486184 : self.pending_nblocks = 0;
2139 :
2140 : // Ordering: the items in this batch do not need to be in any global order, but values for
2141 : // a particular Key must be in Lsn order relative to one another. InMemoryLayer relies on
2142 : // this to do efficient updates to its index. See [`wal_decoder::serialized_batch`] for
2143 : // more details.
2144 :
2145 1486184 : let metadata_batch = {
2146 1486184 : let pending_meta = self
2147 1486184 : .pending_metadata_pages
2148 1486184 : .drain()
2149 1486184 : .flat_map(|(key, values)| {
2150 548008 : values
2151 548008 : .into_iter()
2152 548008 : .map(move |(lsn, value_size, value)| (key, lsn, value_size, value))
2153 1486184 : })
2154 1486184 : .collect::<Vec<_>>();
2155 1486184 :
2156 1486184 : if pending_meta.is_empty() {
2157 944556 : None
2158 : } else {
2159 541628 : Some(SerializedValueBatch::from_values(pending_meta))
2160 : }
2161 : };
2162 :
2163 1486184 : let data_batch = self.pending_data_batch.take();
2164 :
2165 1486184 : let maybe_batch = match (data_batch, metadata_batch) {
2166 529112 : (Some(mut data), Some(metadata)) => {
2167 529112 : data.extend(metadata);
2168 529112 : Some(data)
2169 : }
2170 286524 : (Some(data), None) => Some(data),
2171 12516 : (None, Some(metadata)) => Some(metadata),
2172 658032 : (None, None) => None,
2173 : };
2174 :
2175 1486184 : if let Some(batch) = maybe_batch {
2176 828152 : tracing::debug!(
2177 0 : "Flushing batch with max_lsn={}. Last record LSN is {}",
2178 0 : batch.max_lsn,
2179 0 : self.tline.get_last_record_lsn()
2180 : );
2181 :
2182 : // This bails out on first error without modifying pending_updates.
2183 : // That's Ok, cf this function's doc comment.
2184 828152 : writer.put_batch(batch, ctx).await?;
2185 658032 : }
2186 :
2187 1486184 : if !self.pending_deletions.is_empty() {
2188 4 : writer.delete_batch(&self.pending_deletions, ctx).await?;
2189 4 : self.pending_deletions.clear();
2190 1486180 : }
2191 :
2192 1486184 : self.pending_lsns.push(self.lsn);
2193 1777900 : for pending_lsn in self.pending_lsns.drain(..) {
2194 1777900 : // TODO(vlad): pretty sure the comment below is not valid anymore
2195 1777900 : // and we can call finish write with the latest LSN
2196 1777900 : //
2197 1777900 : // Ideally, we should be able to call writer.finish_write() only once
2198 1777900 : // with the highest LSN. However, the last_record_lsn variable in the
2199 1777900 : // timeline keeps track of the latest LSN and the immediate previous LSN
2200 1777900 : // so we need to record every LSN to not leave a gap between them.
2201 1777900 : writer.finish_write(pending_lsn);
2202 1777900 : }
2203 :
2204 1486184 : if pending_nblocks != 0 {
2205 541140 : writer.update_current_logical_size(pending_nblocks * i64::from(BLCKSZ));
2206 945044 : }
2207 :
2208 1486184 : for (kind, count) in std::mem::take(&mut self.pending_directory_entries) {
2209 5912 : writer.update_directory_entries_count(kind, count as u64);
2210 5912 : }
2211 :
2212 1486184 : self.pending_metadata_bytes = 0;
2213 1486184 :
2214 1486184 : Ok(())
2215 1486184 : }
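 : // Minimal usage sketch (condensed from the aux-file round-trip test at the
 : // bottom of this file): each value is stamped with the modification's
 : // current LSN, which can be advanced with `set_lsn` before the final commit:
 : //
 : //     let mut modification = tline.begin_modification(Lsn(0x1000));
 : //     modification.put_file("foo/bar1", b"content1", &ctx).await?;
 : //     modification.set_lsn(Lsn(0x1008))?;
 : //     modification.put_file("foo/bar2", b"content2", &ctx).await?;
 : //     modification.commit(&ctx).await?;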
2216 :
2217 583408 : pub(crate) fn len(&self) -> usize {
2218 583408 : self.pending_metadata_pages.len()
2219 583408 : + self.pending_data_batch.as_ref().map_or(0, |b| b.len())
2220 583408 : + self.pending_deletions.len()
2221 583408 : }
2222 :
2223 : /// Read a page from the Timeline we are writing to. For metadata pages, this passes through
2224 : /// a cache in Self, which makes writes earlier in this modification visible to WAL records later
2225 : /// in the modification.
2226 : ///
2227 : /// For data pages, reads pass directly to the owning Timeline: any ingest code which reads a data
2228 : /// page must ensure that the pages they read are already committed in Timeline, for example
2229 : /// DB create operations are always preceded by a call to commit(). This is special-cased because
2230 : /// it's rare: all the 'normal' WAL operations will only read metadata pages such as relation sizes,
2231 : /// and not data pages.
2232 573172 : async fn get(&self, key: Key, ctx: &RequestContext) -> Result<Bytes, PageReconstructError> {
2233 573172 : if !Self::is_data_key(&key) {
2234 : // Have we already updated the same key? Read the latest pending updated
2235 : // version in that case.
2236 : //
2237 : // Note: we don't check pending_deletions. It is an error to request a
2238 : // value that has been removed; deletion only avoids leaking storage.
2239 573172 : if let Some(values) = self.pending_metadata_pages.get(&key.to_compact()) {
2240 31856 : if let Some((_, _, value)) = values.last() {
2241 31856 : return if let Value::Image(img) = value {
2242 31856 : Ok(img.clone())
2243 : } else {
2244 : // Currently, we never need to read back a WAL record that we
2245 : // inserted in the same "transaction". All the metadata updates
2246 : // work directly with Images, and we never need to read actual
2247 : // data pages. We could handle this if we had to, by calling
2248 : // the walredo manager, but let's keep it simple for now.
2249 0 : Err(PageReconstructError::Other(anyhow::anyhow!(
2250 0 : "unexpected pending WAL record"
2251 0 : )))
2252 : };
2253 0 : }
2254 541316 : }
2255 : } else {
2256 : // This is an expensive check, so we only do it in debug mode. If reading a data key,
2257 : // this key should never be present in the pending data batch. We ensure this by committing
2258 : // modifications before ingesting DB create operations, which are the only kind that reads
2259 : // data pages during ingest.
2260 0 : if cfg!(debug_assertions) {
2261 0 : assert!(!self
2262 0 : .pending_data_batch
2263 0 : .as_ref()
2264 0 : .is_some_and(|b| b.updates_key(&key)));
2265 0 : }
2266 : }
2267 :
2268 : // Metadata page cache miss, or we're reading a data page.
2269 541316 : let lsn = Lsn::max(self.tline.get_last_record_lsn(), self.lsn);
2270 541316 : self.tline.get(key, lsn, ctx).await
2271 573172 : }
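 : // Illustrative consequence of the read path above (values hypothetical):
 : // a metadata key written earlier in the same modification is served from
 : // `pending_metadata_pages`, while data keys always go to the timeline:
 : //
 : //     modification.put_relmap_file(spc, db, img.clone(), ctx).await?;
 : //     // later in the same modification, before commit():
 : //     let buf = modification.get(relmap_file_key(spc, db), ctx).await?;
 : //     assert_eq!(buf, img); // read back from the pending metadata cache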
2272 :
2273 1128048 : fn put(&mut self, key: Key, val: Value) {
2274 1128048 : if Self::is_data_key(&key) {
2275 555736 : self.put_data(key.to_compact(), val)
2276 : } else {
2277 572312 : self.put_metadata(key.to_compact(), val)
2278 : }
2279 1128048 : }
2280 :
2281 555736 : fn put_data(&mut self, key: CompactKey, val: Value) {
2282 555736 : let batch = self
2283 555736 : .pending_data_batch
2284 555736 : .get_or_insert_with(SerializedValueBatch::default);
2285 555736 : batch.put(key, val, self.lsn);
2286 555736 : }
2287 :
2288 572312 : fn put_metadata(&mut self, key: CompactKey, val: Value) {
2289 572312 : let values = self.pending_metadata_pages.entry(key).or_default();
2290 : // Replace the previous value if it exists at the same lsn
2291 572312 : if let Some((last_lsn, last_value_ser_size, last_value)) = values.last_mut() {
2292 24304 : if *last_lsn == self.lsn {
2293 : // Update the pending_metadata_bytes contribution from this entry, and update the serialized size in place
2294 24304 : self.pending_metadata_bytes -= *last_value_ser_size;
2295 24304 : *last_value_ser_size = val.serialized_size().unwrap() as usize;
2296 24304 : self.pending_metadata_bytes += *last_value_ser_size;
2297 24304 :
2298 24304 : // Use the latest value; this replaces any earlier write to the same (key, lsn), such as may
2299 24304 : // have been generated by synthesized zero-page writes prior to the first real write to a page.
2300 24304 : *last_value = val;
2301 24304 : return;
2302 0 : }
2303 548008 : }
2304 :
2305 548008 : let val_serialized_size = val.serialized_size().unwrap() as usize;
2306 548008 : self.pending_metadata_bytes += val_serialized_size;
2307 548008 : values.push((self.lsn, val_serialized_size, val));
2308 548008 :
2309 548008 : if key == CHECKPOINT_KEY.to_compact() {
2310 440 : tracing::debug!("Checkpoint key added to pending with size {val_serialized_size}");
2311 547568 : }
2312 572312 : }
2313 :
2314 4 : fn delete(&mut self, key_range: Range<Key>) {
2315 4 : trace!("DELETE {}-{}", key_range.start, key_range.end);
2316 4 : self.pending_deletions.push((key_range, self.lsn));
2317 4 : }
2318 : }
2319 :
2320 : /// This struct facilitates accessing either a committed key from the timeline at a
2321 : /// specific LSN, or the latest uncommitted key from a pending modification.
2322 : ///
2323 : /// During WAL ingestion, the records from multiple LSNs may be batched in the same
2324 : /// modification before being flushed to the timeline. Hence, the routines in WalIngest
2325 : /// need to look up the keys in the modification first before looking them up in the
2326 : /// timeline to not miss the latest updates.
2327 : #[derive(Clone, Copy)]
2328 : pub enum Version<'a> {
2329 : Lsn(Lsn),
2330 : Modified(&'a DatadirModification<'a>),
2331 : }
2332 :
2333 : impl Version<'_> {
2334 10352 : async fn get(
2335 10352 : &self,
2336 10352 : timeline: &Timeline,
2337 10352 : key: Key,
2338 10352 : ctx: &RequestContext,
2339 10352 : ) -> Result<Bytes, PageReconstructError> {
2340 10352 : match self {
2341 10312 : Version::Lsn(lsn) => timeline.get(key, *lsn, ctx).await,
2342 40 : Version::Modified(modification) => modification.get(key, ctx).await,
2343 : }
2344 10352 : }
2345 :
2346 71240 : fn get_lsn(&self) -> Lsn {
2347 71240 : match self {
2348 59148 : Version::Lsn(lsn) => *lsn,
2349 12092 : Version::Modified(modification) => modification.lsn,
2350 : }
2351 71240 : }
2352 : }
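 : // Hedged sketch of how the two variants are used (condensed from callers in
 : // this file): WAL ingestion passes the in-progress modification so that its
 : // own pending writes are visible, while plain reads pin a committed LSN:
 : //
 : //     // during ingest, read through the pending modification:
 : //     let n = tline.get_rel_size(rel, Version::Modified(&modification), ctx).await?;
 : //     // for a historical read, pin an LSN instead:
 : //     let n = tline.get_rel_size(rel, Version::Lsn(lsn), ctx).await?;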
2353 :
2354 : //--- Metadata structs stored in key-value pairs in the repository.
2355 :
2356 0 : #[derive(Debug, Serialize, Deserialize)]
2357 : pub(crate) struct DbDirectory {
2358 : // (spcnode, dbnode) -> (do relmapper and PG_VERSION files exist)
2359 : pub(crate) dbdirs: HashMap<(Oid, Oid), bool>,
2360 : }
2361 :
2362 : // The format of TwoPhaseDirectory changed in PostgreSQL v17, because the filenames of
2363 : // pg_twophase files were expanded from 32-bit XIDs to 64-bit XIDs. Previously, the files
2364 : // were named like "pg_twophase/000002E5", now they're like
2365 : // "pg_twophase/0000000A000002E4".
2366 :
2367 0 : #[derive(Debug, Serialize, Deserialize)]
2368 : pub(crate) struct TwoPhaseDirectory {
2369 : pub(crate) xids: HashSet<TransactionId>,
2370 : }
2371 :
2372 0 : #[derive(Debug, Serialize, Deserialize)]
2373 : struct TwoPhaseDirectoryV17 {
2374 : xids: HashSet<u64>,
2375 : }
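 : // Illustrative sketch of the filename change noted above (the `format!`
 : // calls are an assumption matching the examples in the comment, not code
 : // from this file):
 : //
 : //     let pre_v17  = format!("pg_twophase/{:08X}", 0x000002E5u32);           // "pg_twophase/000002E5"
 : //     let v17_plus = format!("pg_twophase/{:016X}", 0x0000000A000002E4u64);  // "pg_twophase/0000000A000002E4"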
2376 :
2377 0 : #[derive(Debug, Serialize, Deserialize, Default)]
2378 : pub(crate) struct RelDirectory {
2379 : // Set of relations that exist. (relfilenode, forknum)
2380 : //
2381 : // TODO: Store it as a btree or radix tree or something else that spans multiple
2382 : // key-value pairs, if you have a lot of relations
2383 : pub(crate) rels: HashSet<(Oid, u8)>,
2384 : }
2385 :
2386 0 : #[derive(Debug, Serialize, Deserialize)]
2387 : struct RelSizeEntry {
2388 : nblocks: u32,
2389 : }
2390 :
2391 0 : #[derive(Debug, Serialize, Deserialize, Default)]
2392 : pub(crate) struct SlruSegmentDirectory {
2393 : // Set of SLRU segments that exist.
2394 : pub(crate) segments: HashSet<u32>,
2395 : }
2396 :
2397 : #[derive(Copy, Clone, PartialEq, Eq, Debug, enum_map::Enum)]
2398 : #[repr(u8)]
2399 : pub(crate) enum DirectoryKind {
2400 : Db,
2401 : TwoPhase,
2402 : Rel,
2403 : AuxFiles,
2404 : SlruSegment(SlruKind),
2405 : }
2406 :
2407 : impl DirectoryKind {
2408 : pub(crate) const KINDS_NUM: usize = <DirectoryKind as Enum>::LENGTH;
2409 11824 : pub(crate) fn offset(&self) -> usize {
2410 11824 : self.into_usize()
2411 11824 : }
2412 : }
2413 :
2414 : static ZERO_PAGE: Bytes = Bytes::from_static(&[0u8; BLCKSZ as usize]);
2415 :
2416 : #[allow(clippy::bool_assert_comparison)]
2417 : #[cfg(test)]
2418 : mod tests {
2419 : use hex_literal::hex;
2420 : use pageserver_api::{models::ShardParameters, shard::ShardStripeSize};
2421 : use utils::{
2422 : id::TimelineId,
2423 : shard::{ShardCount, ShardNumber},
2424 : };
2425 :
2426 : use super::*;
2427 :
2428 : use crate::{tenant::harness::TenantHarness, DEFAULT_PG_VERSION};
2429 :
2430 : /// Test a round trip of aux file updates, from DatadirModification to reading back from the Timeline
2431 : #[tokio::test]
2432 4 : async fn aux_files_round_trip() -> anyhow::Result<()> {
2433 4 : let name = "aux_files_round_trip";
2434 4 : let harness = TenantHarness::create(name).await?;
2435 4 :
2436 4 : pub const TIMELINE_ID: TimelineId =
2437 4 : TimelineId::from_array(hex!("11223344556677881122334455667788"));
2438 4 :
2439 4 : let (tenant, ctx) = harness.load().await;
2440 4 : let tline = tenant
2441 4 : .create_empty_timeline(TIMELINE_ID, Lsn(0x10), DEFAULT_PG_VERSION, &ctx)
2442 4 : .await?;
2443 4 : let tline = tline.raw_timeline().unwrap();
2444 4 :
2445 4 : // First modification: insert two keys
2446 4 : let mut modification = tline.begin_modification(Lsn(0x1000));
2447 4 : modification.put_file("foo/bar1", b"content1", &ctx).await?;
2448 4 : modification.set_lsn(Lsn(0x1008))?;
2449 4 : modification.put_file("foo/bar2", b"content2", &ctx).await?;
2450 4 : modification.commit(&ctx).await?;
2451 4 : let expect_1008 = HashMap::from([
2452 4 : ("foo/bar1".to_string(), Bytes::from_static(b"content1")),
2453 4 : ("foo/bar2".to_string(), Bytes::from_static(b"content2")),
2454 4 : ]);
2455 4 :
2456 4 : let io_concurrency = IoConcurrency::spawn_for_test();
2457 4 :
2458 4 : let readback = tline
2459 4 : .list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
2460 4 : .await?;
2461 4 : assert_eq!(readback, expect_1008);
2462 4 :
2463 4 : // Second modification: update one key, remove the other
2464 4 : let mut modification = tline.begin_modification(Lsn(0x2000));
2465 4 : modification.put_file("foo/bar1", b"content3", &ctx).await?;
2466 4 : modification.set_lsn(Lsn(0x2008))?;
2467 4 : modification.put_file("foo/bar2", b"", &ctx).await?;
2468 4 : modification.commit(&ctx).await?;
2469 4 : let expect_2008 =
2470 4 : HashMap::from([("foo/bar1".to_string(), Bytes::from_static(b"content3"))]);
2471 4 :
2472 4 : let readback = tline
2473 4 : .list_aux_files(Lsn(0x2008), &ctx, io_concurrency.clone())
2474 4 : .await?;
2475 4 : assert_eq!(readback, expect_2008);
2476 4 :
2477 4 : // Reading back in time works
2478 4 : let readback = tline
2479 4 : .list_aux_files(Lsn(0x1008), &ctx, io_concurrency.clone())
2480 4 : .await?;
2481 4 : assert_eq!(readback, expect_1008);
2482 4 :
2483 4 : Ok(())
2484 4 : }
2485 :
2486 : #[test]
2487 4 : fn gap_finding() {
2488 4 : let rel = RelTag {
2489 4 : spcnode: 1663,
2490 4 : dbnode: 208101,
2491 4 : relnode: 2620,
2492 4 : forknum: 0,
2493 4 : };
2494 4 : let base_blkno = 1;
2495 4 :
2496 4 : let base_key = rel_block_to_key(rel, base_blkno);
2497 4 : let before_base_key = rel_block_to_key(rel, base_blkno - 1);
2498 4 :
2499 4 : let shard = ShardIdentity::unsharded();
2500 4 :
2501 4 : let mut previous_nblocks = 0;
2502 44 : for i in 0..10 {
2503 40 : let crnt_blkno = base_blkno + i;
2504 40 : let gaps = DatadirModification::find_gaps(rel, crnt_blkno, previous_nblocks, &shard);
2505 40 :
2506 40 : previous_nblocks = crnt_blkno + 1;
2507 40 :
2508 40 : if i == 0 {
2509 : // The first block we write is 1, so we should find the gap.
2510 4 : assert_eq!(gaps.unwrap(), KeySpace::single(before_base_key..base_key));
2511 : } else {
2512 36 : assert!(gaps.is_none());
2513 : }
2514 : }
2515 :
2516 : // This is an update to an already existing block. No gaps here.
2517 4 : let update_blkno = 5;
2518 4 : let gaps = DatadirModification::find_gaps(rel, update_blkno, previous_nblocks, &shard);
2519 4 : assert!(gaps.is_none());
2520 :
2521 : // This is an update past the current end block.
2522 4 : let after_gap_blkno = 20;
2523 4 : let gaps = DatadirModification::find_gaps(rel, after_gap_blkno, previous_nblocks, &shard);
2524 4 :
2525 4 : let gap_start_key = rel_block_to_key(rel, previous_nblocks);
2526 4 : let after_gap_key = rel_block_to_key(rel, after_gap_blkno);
2527 4 : assert_eq!(
2528 4 : gaps.unwrap(),
2529 4 : KeySpace::single(gap_start_key..after_gap_key)
2530 4 : );
2531 4 : }
2532 :
2533 : #[test]
2534 4 : fn sharded_gap_finding() {
2535 4 : let rel = RelTag {
2536 4 : spcnode: 1663,
2537 4 : dbnode: 208101,
2538 4 : relnode: 2620,
2539 4 : forknum: 0,
2540 4 : };
2541 4 :
2542 4 : let first_blkno = 6;
2543 4 :
2544 4 : // This shard will get the even blocks
2545 4 : let shard = ShardIdentity::from_params(
2546 4 : ShardNumber(0),
2547 4 : &ShardParameters {
2548 4 : count: ShardCount(2),
2549 4 : stripe_size: ShardStripeSize(1),
2550 4 : },
2551 4 : );
2552 4 :
2553 4 : // Only keys belonging to this shard are considered as gaps.
2554 4 : let mut previous_nblocks = 0;
2555 4 : let gaps =
2556 4 : DatadirModification::find_gaps(rel, first_blkno, previous_nblocks, &shard).unwrap();
2557 4 : assert!(!gaps.ranges.is_empty());
2558 12 : for gap_range in gaps.ranges {
2559 8 : let mut k = gap_range.start;
2560 16 : while k != gap_range.end {
2561 8 : assert_eq!(shard.get_shard_number(&k), shard.number);
2562 8 : k = k.next();
2563 : }
2564 : }
2565 :
2566 4 : previous_nblocks = first_blkno;
2567 4 :
2568 4 : let update_blkno = 2;
2569 4 : let gaps = DatadirModification::find_gaps(rel, update_blkno, previous_nblocks, &shard);
2570 4 : assert!(gaps.is_none());
2571 4 : }
2572 :
2573 : /*
2574 : fn assert_current_logical_size<R: Repository>(timeline: &DatadirTimeline<R>, lsn: Lsn) {
2575 : let incremental = timeline.get_current_logical_size();
2576 : let non_incremental = timeline
2577 : .get_current_logical_size_non_incremental(lsn)
2578 : .unwrap();
2579 : assert_eq!(incremental, non_incremental);
2580 : }
2581 : */
2582 :
2583 : /*
2584 : ///
2585 : /// Test list_rels() function, with branches and dropped relations
2586 : ///
2587 : #[test]
2588 : fn test_list_rels_drop() -> Result<()> {
2589 : let repo = RepoHarness::create("test_list_rels_drop")?.load();
2590 : let tline = create_empty_timeline(repo, TIMELINE_ID)?;
2591 : const TESTDB: u32 = 111;
2592 :
2593 : // Import initial dummy checkpoint record, otherwise the get_timeline() call
2594 : // after branching fails below
2595 : let mut writer = tline.begin_record(Lsn(0x10));
2596 : writer.put_checkpoint(ZERO_CHECKPOINT.clone())?;
2597 : writer.finish()?;
2598 :
2599 : // Create a relation on the timeline
2600 : let mut writer = tline.begin_record(Lsn(0x20));
2601 : writer.put_rel_page_image(TESTREL_A, 0, TEST_IMG("foo blk 0 at 2"))?;
2602 : writer.finish()?;
2603 :
2604 : let writer = tline.begin_record(Lsn(0x00));
2605 : writer.finish()?;
2606 :
2607 : // Check that list_rels() lists it after LSN 2, but no before it
2608 : assert!(!tline.list_rels(0, TESTDB, Lsn(0x10))?.contains(&TESTREL_A));
2609 : assert!(tline.list_rels(0, TESTDB, Lsn(0x20))?.contains(&TESTREL_A));
2610 : assert!(tline.list_rels(0, TESTDB, Lsn(0x30))?.contains(&TESTREL_A));
2611 :
2612 : // Create a branch, check that the relation is visible there
2613 : repo.branch_timeline(&tline, NEW_TIMELINE_ID, Lsn(0x30))?;
2614 : let newtline = match repo.get_timeline(NEW_TIMELINE_ID)?.local_timeline() {
2615 : Some(timeline) => timeline,
2616 : None => panic!("Should have a local timeline"),
2617 : };
2618 : let newtline = DatadirTimelineImpl::new(newtline);
2619 : assert!(newtline
2620 : .list_rels(0, TESTDB, Lsn(0x30))?
2621 : .contains(&TESTREL_A));
2622 :
2623 : // Drop it on the branch
2624 : let mut new_writer = newtline.begin_record(Lsn(0x40));
2625 : new_writer.drop_relation(TESTREL_A)?;
2626 : new_writer.finish()?;
2627 :
2628 : // Check that it's no longer listed on the branch after the point where it was dropped
2629 : assert!(newtline
2630 : .list_rels(0, TESTDB, Lsn(0x30))?
2631 : .contains(&TESTREL_A));
2632 : assert!(!newtline
2633 : .list_rels(0, TESTDB, Lsn(0x40))?
2634 : .contains(&TESTREL_A));
2635 :
2636 : // Run checkpoint and garbage collection and check that it's still not visible
2637 : newtline.checkpoint(CheckpointConfig::Forced)?;
2638 : repo.gc_iteration(Some(NEW_TIMELINE_ID), 0, true)?;
2639 :
2640 : assert!(!newtline
2641 : .list_rels(0, TESTDB, Lsn(0x40))?
2642 : .contains(&TESTREL_A));
2643 :
2644 : Ok(())
2645 : }
2646 : */
2647 :
2648 : /*
2649 : #[test]
2650 : fn test_read_beyond_eof() -> Result<()> {
2651 : let repo = RepoHarness::create("test_read_beyond_eof")?.load();
2652 : let tline = create_test_timeline(repo, TIMELINE_ID)?;
2653 :
2654 : make_some_layers(&tline, Lsn(0x20))?;
2655 : let mut writer = tline.begin_record(Lsn(0x60));
2656 : walingest.put_rel_page_image(
2657 : &mut writer,
2658 : TESTREL_A,
2659 : 0,
2660 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x60))),
2661 : )?;
2662 : writer.finish()?;
2663 :
2664 : // Test read before rel creation. Should error out.
2665 : assert!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x10), false).is_err());
2666 :
2667 : // Read block beyond end of relation at different points in time.
2668 : // These reads should fall into different delta, image, and in-memory layers.
2669 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x20), false)?, ZERO_PAGE);
2670 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x25), false)?, ZERO_PAGE);
2671 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x30), false)?, ZERO_PAGE);
2672 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x35), false)?, ZERO_PAGE);
2673 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x40), false)?, ZERO_PAGE);
2674 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x45), false)?, ZERO_PAGE);
2675 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x50), false)?, ZERO_PAGE);
2676 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x55), false)?, ZERO_PAGE);
2677 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_A, 1, Lsn(0x60), false)?, ZERO_PAGE);
2678 :
2679 : // Test on an in-memory layer with no preceding layer
2680 : let mut writer = tline.begin_record(Lsn(0x70));
2681 : walingest.put_rel_page_image(
2682 : &mut writer,
2683 : TESTREL_B,
2684 : 0,
2685 : TEST_IMG(&format!("foo blk 0 at {}", Lsn(0x70))),
2686 : )?;
2687 : writer.finish()?;
2688 :
2689 : assert_eq!(tline.get_rel_page_at_lsn(TESTREL_B, 1, Lsn(0x70), false)?, ZERO_PAGE);
2690 :
2691 : Ok(())
2692 : }
2693 : */
2694 : }